├── VERSION
├── .gitignore
├── waf
├── libimxdmabuffer.pc.in
├── imxdmabuffer
│   ├── imxdmabuffer_ipu_priv.h
│   ├── imxdmabuffer_physaddr.h
│   ├── imxdmabuffer_g2d_allocator.h
│   ├── imxdmabuffer_dwl_allocator.h
│   ├── imxdmabuffer_priv.h
│   ├── imxdmabuffer_ipu_priv.c
│   ├── imxdmabuffer_ipu_allocator.h
│   ├── imxdmabuffer_pxp_allocator.h
│   ├── imxdmabuffer_dma_heap_allocator.h
│   ├── imxdmabuffer_ion_allocator.h
│   ├── imxdmabuffer.c
│   ├── imxdmabuffer_g2d_allocator.c
│   ├── imxdmabuffer_ipu_allocator.c
│   ├── imxdmabuffer_pxp_allocator.c
│   ├── imxdmabuffer_dwl_allocator.c
│   ├── imxdmabuffer.h
│   ├── imxdmabuffer_ion_allocator.c
│   └── imxdmabuffer_dma_heap_allocator.c
├── ChangeLog
├── test
│   └── test-alloc.c
├── README.md
├── wscript
└── LICENSE
/VERSION: -------------------------------------------------------------------------------- 1 | 1.1.3 2 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /.lock-* 2 | /build 3 | /.waf* -------------------------------------------------------------------------------- /waf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Freescale/libimxdmabuffer/HEAD/waf -------------------------------------------------------------------------------- /libimxdmabuffer.pc.in: -------------------------------------------------------------------------------- 1 | prefix=@prefix@ 2 | exec_prefix=@exec_prefix@ 3 | libdir=@libdir@ 4 | includedir=@includedir@ 5 | 6 | libimxdmabuffer_ion=@WITH_ION_ALLOCATOR@ 7 | 8 | Name: libimxdmabuffer 9 | Description: library for allocating and managing physically contiguous memory ("DMA memory" or "DMA buffers") on i.MX devices 10 | Version: @IMXDMABUFFER_VERSION@ 11 | Libs: -L${libdir} -limxdmabuffer 12 | Cflags: -I${includedir} 13 | -------------------------------------------------------------------------------- /imxdmabuffer/imxdmabuffer_ipu_priv.h: -------------------------------------------------------------------------------- 1 | #ifndef IMXDMABUFFER_IPU_PRIV_H 2 | #define IMXDMABUFFER_IPU_PRIV_H 3 | 4 | #include 5 | #include "imxdmabuffer_physaddr.h" 6 | 7 | 8 | #ifdef __cplusplus 9 | extern "C" { 10 | #endif 11 | 12 | 13 | imx_physical_address_t imx_dma_buffer_ipu_allocate(int ipu_fd, size_t size, int *error); 14 | void imx_dma_buffer_ipu_deallocate(int ipu_fd, imx_physical_address_t physical_address); 15 | 16 | 17 | #ifdef __cplusplus 18 | } 19 | #endif 20 | 21 | 22 | #endif /* IMXDMABUFFER_IPU_PRIV_H */ 23 | -------------------------------------------------------------------------------- /imxdmabuffer/imxdmabuffer_physaddr.h: -------------------------------------------------------------------------------- 1 | #ifndef IMXDMABUFFER_PHYSADDR_H 2 | #define IMXDMABUFFER_PHYSADDR_H 3 | 4 | 5 | #ifdef __cplusplus 6 | extern "C" { 7 | #endif 8 | 9 | 10 | /* Format string for printf-compatible format-strings.
11 | * Example use: printf("physical address: %" IMX_PHYSICAL_ADDRESS_FORMAT, phys_addr); */ 12 | #define IMX_PHYSICAL_ADDRESS_FORMAT "#lx" 13 | 14 | 15 | /* Typedef for physical addresses */ 16 | typedef unsigned long imx_physical_address_t; 17 | 18 | 19 | #ifdef __cplusplus 20 | } 21 | #endif 22 | 23 | 24 | #endif // IMXDMABUFFER_PHYSADDR_H 25 | -------------------------------------------------------------------------------- /imxdmabuffer/imxdmabuffer_g2d_allocator.h: -------------------------------------------------------------------------------- 1 | #ifndef IMXDMABUFFER_G2D_ALLOCATOR_H 2 | #define IMXDMABUFFER_G2D_ALLOCATOR_H 3 | 4 | #include "imxdmabuffer.h" 5 | 6 | 7 | #ifdef __cplusplus 8 | extern "C" { 9 | #endif 10 | 11 | 12 | #define IMX_DMA_BUFFER_IPU_ALLOCATOR_DEFAULT_IPU_FD (-1) 13 | 14 | 15 | /* Creates a new DMA buffer allocator that uses the Vivante G2D allocator. 16 | * 17 | * This allocator does not support file descriptors. imx_dma_buffer_get_fd() 18 | * function calls return -1. */ 19 | ImxDmaBufferAllocator* imx_dma_buffer_g2d_allocator_new(void); 20 | 21 | 22 | #ifdef __cplusplus 23 | } 24 | #endif 25 | 26 | 27 | #endif /* IMXDMABUFFER_G2D_ALLOCATOR_H */ 28 | -------------------------------------------------------------------------------- /imxdmabuffer/imxdmabuffer_dwl_allocator.h: -------------------------------------------------------------------------------- 1 | #ifndef IMXDMABUFFER_DWL_ALLOCATOR_H 2 | #define IMXDMABUFFER_DWL_ALLOCATOR_H 3 | 4 | #include "imxdmabuffer.h" 5 | 6 | 7 | #ifdef __cplusplus 8 | extern "C" { 9 | #endif 10 | 11 | 12 | /* Creates a new DMA buffer allocator that uses Hantro's DWL API. 13 | * 14 | * The allocator needs the Hantro decoder type (G1 or G2) to be specified in the 15 | * libimxdmabuffer build configuration. 16 | * 17 | * This allocator supports file descriptors. 18 | * 19 | * @param error If this pointer is non-NULL, and if an error occurs, then the integer 20 | * the pointer refers to is set to an error code from errno.h. If creating 21 | * the allocator succeeds, the integer is not modified. 22 | * @return Pointer to the newly created DWL DMA allocator, or NULL in case of an error. 23 | */ 24 | ImxDmaBufferAllocator* imx_dma_buffer_dwl_allocator_new(int *error); 25 | 26 | 27 | #ifdef __cplusplus 28 | } 29 | #endif 30 | 31 | 32 | #endif // IMXDMABUFFER_DWL_ALLOCATOR_H 33 | -------------------------------------------------------------------------------- /imxdmabuffer/imxdmabuffer_priv.h: -------------------------------------------------------------------------------- 1 | #ifndef IMXDMABUFFER_PRIV_H 2 | #define IMXDMABUFFER_PRIV_H 3 | 4 | #include 5 | #include 6 | 7 | #include "imxdmabuffer.h" 8 | 9 | 10 | #ifdef __cplusplus 11 | extern "C" { 12 | #endif 13 | 14 | 15 | #define IMX_DMA_BUFFER_UNUSED_PARAM(x) ((void)(x)) 16 | #define IMX_DMA_BUFFER_ALIGN_VAL_TO(LENGTH, ALIGN_SIZE) ( ((uintptr_t)(((uint8_t*)(LENGTH)) + (ALIGN_SIZE) - 1) / (ALIGN_SIZE)) * (ALIGN_SIZE) ) 17 | 18 | 19 | /* These two functions exist since most allocators do not allocate 20 | * cached DMA memory and thus do not need any syncing. 
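 * Allocators that do allocate cached DMA memory install their own sync session functions instead of these no-ops; per the ChangeLog, only the dma-heap allocator does this at the moment.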
*/ 21 | 22 | static inline void imx_dma_buffer_noop_start_sync_session_func(ImxDmaBufferAllocator *allocator, ImxDmaBuffer *buffer) 23 | { 24 | IMX_DMA_BUFFER_UNUSED_PARAM(allocator); 25 | IMX_DMA_BUFFER_UNUSED_PARAM(buffer); 26 | } 27 | 28 | static inline void imx_dma_buffer_noop_stop_sync_session_func(ImxDmaBufferAllocator *allocator, ImxDmaBuffer *buffer) 29 | { 30 | IMX_DMA_BUFFER_UNUSED_PARAM(allocator); 31 | IMX_DMA_BUFFER_UNUSED_PARAM(buffer); 32 | } 33 | 34 | 35 | 36 | #ifdef __cplusplus 37 | } 38 | #endif 39 | 40 | 41 | #endif /* IMXDMABUFFER_PRIV_H */ 42 | -------------------------------------------------------------------------------- /imxdmabuffer/imxdmabuffer_ipu_priv.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | /* This is necessary to turn off these warnings that originate in ipu.h: 5 | * "ISO C99 doesn’t support unnamed structs/unions" */ 6 | #ifdef __GNUC__ 7 | #pragma GCC diagnostic push 8 | #pragma GCC diagnostic ignored "-Wpedantic" 9 | #endif 10 | 11 | #include 12 | 13 | #ifdef __GNUC__ 14 | #pragma GCC diagnostic pop 15 | #endif 16 | 17 | #include "imxdmabuffer_ipu_priv.h" 18 | 19 | 20 | /* These functions are isolated in this separate .c file to avoid 21 | * conflicts between uint*_t definitions from linux/ipu.h and definitions 22 | * from stdint.h. */ 23 | 24 | 25 | imx_physical_address_t imx_dma_buffer_ipu_allocate(int ipu_fd, size_t size, int *error) 26 | { 27 | dma_addr_t m = (dma_addr_t)size; 28 | if (ioctl(ipu_fd, IPU_ALLOC, &m) < 0) 29 | { 30 | if (error != NULL) 31 | *error = errno; 32 | return 0; 33 | } 34 | else 35 | return (imx_physical_address_t)m; 36 | } 37 | 38 | 39 | void imx_dma_buffer_ipu_deallocate(int ipu_fd, imx_physical_address_t physical_address) 40 | { 41 | dma_addr_t m = physical_address; 42 | ioctl(ipu_fd, IPU_FREE, &m); 43 | } 44 | -------------------------------------------------------------------------------- /imxdmabuffer/imxdmabuffer_ipu_allocator.h: -------------------------------------------------------------------------------- 1 | #ifndef IMXDMABUFFER_IPU_ALLOCATOR_H 2 | #define IMXDMABUFFER_IPU_ALLOCATOR_H 3 | 4 | #include "imxdmabuffer.h" 5 | 6 | 7 | #ifdef __cplusplus 8 | extern "C" { 9 | #endif 10 | 11 | 12 | #define IMX_DMA_BUFFER_IPU_ALLOCATOR_DEFAULT_IPU_FD (-1) 13 | 14 | 15 | /* Creates a new DMA buffer allocator that uses the IPU allocator. 16 | * 17 | * This allocator does not support file descriptors. imx_dma_buffer_get_fd() 18 | * function calls return -1. 19 | * 20 | * @param ipu_fd /dev/mxc_ipu file descriptor to use, or a negative value if the allocator 21 | * shall open and use its own file descriptor. The preprocessor macro 22 | * IMX_DMA_BUFFER_IPU_ALLOCATOR_DEFAULT_IPU_FD can be used for the latter case. 23 | * @param error If this pointer is non-NULL, and if an error occurs, then the integer 24 | * the pointer refers to is set to an error code from errno.h. If creating 25 | * the allocator succeeds, the integer is not modified. 26 | * @return Pointer to the newly created IPU DMA allocator, or NULL in case of an error.
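 *
 * A minimal usage sketch (not part of the original header; the error handling
 * shown is an illustrative assumption):
 *
 *   int err;
 *   ImxDmaBufferAllocator *allocator = imx_dma_buffer_ipu_allocator_new(IMX_DMA_BUFFER_IPU_ALLOCATOR_DEFAULT_IPU_FD, &err);
 *   if (allocator == NULL)
 *       fprintf(stderr, "could not create IPU allocator: %s (%d)\n", strerror(err), err);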
27 | */ 28 | ImxDmaBufferAllocator* imx_dma_buffer_ipu_allocator_new(int ipu_fd, int *error); 29 | 30 | 31 | #ifdef __cplusplus 32 | } 33 | #endif 34 | 35 | 36 | #endif /* IMXDMABUFFER_IPU_ALLOCATOR_H */ 37 | -------------------------------------------------------------------------------- /imxdmabuffer/imxdmabuffer_pxp_allocator.h: -------------------------------------------------------------------------------- 1 | #ifndef IMXDMABUFFER_PXP_ALLOCATOR_H 2 | #define IMXDMABUFFER_PXP_ALLOCATOR_H 3 | 4 | #include "imxdmabuffer.h" 5 | 6 | 7 | #ifdef __cplusplus 8 | extern "C" { 9 | #endif 10 | 11 | 12 | #define IMX_DMA_BUFFER_PXP_ALLOCATOR_DEFAULT_PXP_FD (-1) 13 | 14 | 15 | /* Creates a new DMA buffer allocator that uses the PxP allocator. 16 | * 17 | * This allocator does not support file descriptors. imx_dma_buffer_get_fd() 18 | * function calls return -1. 19 | * 20 | * @param pxp_fd /dev/pxp_device file descriptor to use, or a negative value if the 21 | * allocator shall open and use its own file descriptor. The preprocessor macro 22 | * IMX_DMA_BUFFER_PXP_ALLOCATOR_DEFAULT_PXP_FD can be used for the latter case. 23 | * @param error If this pointer is non-NULL, and if an error occurs, then the integer 24 | * the pointer refers to is set to an error code from errno.h. If creating 25 | * the allocator succeeds, the integer is not modified. 26 | * @return Pointer to the newly created PxP DMA allocator, or NULL in case of an error. 27 | */ 28 | ImxDmaBufferAllocator* imx_dma_buffer_pxp_allocator_new(int pxp_fd, int *error); 29 | 30 | 31 | #ifdef __cplusplus 32 | } 33 | #endif 34 | 35 | 36 | #endif /* IMXDMABUFFER_PXP_ALLOCATOR_H */ 37 | -------------------------------------------------------------------------------- /ChangeLog: -------------------------------------------------------------------------------- 1 | ==== version 1.1.3 (2023-06-29) ==== 2 | 3 | * waf: update to 2.0.25 4 | * g2d: Fix typo in G2D allocator that caused build errors 5 | * Don't check for mxcfb.h in build script 6 | mxcfb.h is no longer present in some sysroots, 7 | so it is not a reliable way to verify the imx linux 8 | headers path. Just use the path directly; if it is wrong, 9 | the build will fail anyway. 10 | 11 | ==== version 1.1.2 (2022-05-06) ==== 12 | 13 | * waf: update to 2.0.23 14 | * dma-heap: Add support for uncached dma-heap memory 15 | imx_dma_buffer_dma_heap_allocator_new_from_fd() is 16 | a new function that partially deprecates the dma_heap_fd 17 | argument of imx_dma_buffer_dma_heap_allocator_new().
18 | 19 | ==== version 1.1.1 (2022-04-30) ==== 20 | 21 | * Add sync access functions to ensure cache 22 | coherency when allocating cached DMA memory; 23 | only done by dma-heap allocator at this time 24 | * Relax buffer mapping flags checks 25 | * Add API functions to retrieve dma-heap / ION FDs 26 | 27 | ==== version 1.1.0 (2021-10-29) ==== 28 | 29 | * Add dma-heap allocator 30 | * Documentation and typo fixes 31 | * Use RW flags in ION allocator if no mapping 32 | flags are specified 33 | * Add checks for attempts to map already mapped 34 | buffers with different flags than in the 35 | original mapping 36 | 37 | ==== version 1.0.1 (2020-04-09) ==== 38 | 39 | * Changes to migrate build system to Python 3: 40 | * waf: update to 2.0.12 41 | * waf: use python3 42 | 43 | ==== version 1.0.0 (2019-07-07) ==== 44 | 45 | Initial release 46 | -------------------------------------------------------------------------------- /test/test-alloc.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | #include "imxdmabuffer_config.h" 5 | #include "imxdmabuffer/imxdmabuffer.h" 6 | #include "imxdmabuffer/imxdmabuffer_priv.h" 7 | 8 | #ifdef IMXDMABUFFER_DMA_HEAP_ALLOCATOR_ENABLED 9 | #include "imxdmabuffer/imxdmabuffer_dma_heap_allocator.h" 10 | #endif 11 | 12 | #ifdef IMXDMABUFFER_ION_ALLOCATOR_ENABLED 13 | #include "imxdmabuffer/imxdmabuffer_ion_allocator.h" 14 | #endif 15 | 16 | #ifdef IMXDMABUFFER_DWL_ALLOCATOR_ENABLED 17 | #include "imxdmabuffer/imxdmabuffer_dwl_allocator.h" 18 | #endif 19 | 20 | #ifdef IMXDMABUFFER_IPU_ALLOCATOR_ENABLED 21 | #include "imxdmabuffer/imxdmabuffer_ipu_allocator.h" 22 | #endif 23 | 24 | #ifdef IMXDMABUFFER_G2D_ALLOCATOR_ENABLED 25 | #include "imxdmabuffer/imxdmabuffer_g2d_allocator.h" 26 | #endif 27 | 28 | #ifdef IMXDMABUFFER_PXP_ALLOCATOR_ENABLED 29 | #include "imxdmabuffer/imxdmabuffer_pxp_allocator.h" 30 | #endif 31 | 32 | 33 | int check_allocation(ImxDmaBufferAllocator *allocator, char const *name) 34 | { 35 | static size_t const expected_buffer_size = 4096; 36 | static size_t const expected_alignment = 16; 37 | size_t actual_buffer_size; 38 | int retval = 0; 39 | int err; 40 | void *mapped_virtual_address = NULL; 41 | void *second_mapped_virtual_address = NULL; 42 | imx_physical_address_t physical_address; 43 | ImxDmaBuffer *dma_buffer = NULL; 44 | 45 | if (allocator == NULL) 46 | { 47 | fprintf(stderr, "Could not create %s allocator\n", name); 48 | goto finish; 49 | } 50 | 51 | dma_buffer = imx_dma_buffer_allocate(allocator, expected_buffer_size, expected_alignment, &err); 52 | if (dma_buffer == NULL) 53 | { 54 | fprintf(stderr, "Could not allocate DMA buffer with %s allocator: %s (%d)\n", name, strerror(err), err); 55 | goto finish; 56 | } 57 | 58 | actual_buffer_size = imx_dma_buffer_get_size(dma_buffer); 59 | if (actual_buffer_size != expected_buffer_size) 60 | { 61 | fprintf(stderr, "DMA buffer allocated with %s allocator has incorrect size: expected %zu got %zu\n", name, expected_buffer_size, actual_buffer_size); 62 | goto finish; 63 | } 64 | 65 | mapped_virtual_address = imx_dma_buffer_map(dma_buffer, 0, &err); 66 | if (mapped_virtual_address == NULL) 67 | { 68 | fprintf(stderr, "Could not map DMA buffer allocated with %s allocator: %s (%d)\n", name, strerror(err), err); 69 | goto finish; 70 | } 71 | 72 | second_mapped_virtual_address = imx_dma_buffer_map(dma_buffer, 0, &err); 73 | if (second_mapped_virtual_address == NULL) 74 | { 75 | fprintf(stderr, "Could not map DMA buffer allocated with %s 
allocator: %s (%d)\n", name, strerror(err), err); 76 | goto finish; 77 | } 78 | 79 | if (mapped_virtual_address != second_mapped_virtual_address) 80 | { 81 | fprintf(stderr, "Redundant mapping attempts must always return the same virtual pointer as the previous mapping\n"); 82 | goto finish; 83 | } 84 | 85 | physical_address = imx_dma_buffer_get_physical_address(dma_buffer); 86 | if (physical_address == 0) 87 | { 88 | fprintf(stderr, "Could not get physical address for DMA buffer allocated with %s allocator\n", name); 89 | goto finish; 90 | } 91 | if ((physical_address & (expected_alignment - 1)) != 0) /* mask check is valid since expected_alignment is a power of two */ 92 | { 93 | fprintf(stderr, "Physical address %" IMX_PHYSICAL_ADDRESS_FORMAT " for DMA buffer allocated with %s allocator is not aligned to %zu-byte boundaries\n", physical_address, name, expected_alignment); 94 | goto finish; 95 | } 96 | 97 | fprintf(stderr, "%s allocator works correctly\n", name); 98 | retval = 1; 99 | 100 | finish: 101 | if (second_mapped_virtual_address != NULL) 102 | imx_dma_buffer_unmap(dma_buffer); 103 | if (mapped_virtual_address != NULL) 104 | imx_dma_buffer_unmap(dma_buffer); 105 | if (dma_buffer != NULL) 106 | imx_dma_buffer_deallocate(dma_buffer); 107 | if (allocator != NULL) 108 | imx_dma_buffer_allocator_destroy(allocator); 109 | 110 | return retval; 111 | } 112 | 113 | 114 | int main(void) 115 | { 116 | int err; 117 | ImxDmaBufferAllocator *allocator; 118 | int retval = 0; 119 | 120 | #ifdef IMXDMABUFFER_DMA_HEAP_ALLOCATOR_ENABLED 121 | allocator = imx_dma_buffer_dma_heap_allocator_new(-1, IMX_DMA_BUFFER_DMA_HEAP_ALLOCATOR_DEFAULT_HEAP_FLAGS, IMX_DMA_BUFFER_DMA_HEAP_ALLOCATOR_DEFAULT_FD_FLAGS, &err); 122 | if (allocator == NULL) 123 | { 124 | fprintf(stderr, "Could not create dma-heap allocator: %s (%d)\n", strerror(err), err); 125 | retval = -1; 126 | } 127 | else if (check_allocation(allocator, "dma-heap") == 0) 128 | retval = -1; 129 | #endif 130 | 131 | #ifdef IMXDMABUFFER_ION_ALLOCATOR_ENABLED 132 | allocator = imx_dma_buffer_ion_allocator_new(-1, IMX_DMA_BUFFER_ION_ALLOCATOR_DEFAULT_HEAP_ID_MASK, IMX_DMA_BUFFER_ION_ALLOCATOR_DEFAULT_HEAP_FLAGS, &err); 133 | if (allocator == NULL) 134 | { 135 | fprintf(stderr, "Could not create ION allocator: %s (%d)\n", strerror(err), err); 136 | retval = -1; 137 | } 138 | else if (check_allocation(allocator, "ION") == 0) 139 | retval = -1; 140 | #endif 141 | 142 | #ifdef IMXDMABUFFER_DWL_ALLOCATOR_ENABLED 143 | allocator = imx_dma_buffer_dwl_allocator_new(&err); 144 | if (allocator == NULL) 145 | { 146 | fprintf(stderr, "Could not create DWL allocator: %s (%d)\n", strerror(err), err); 147 | retval = -1; 148 | } 149 | else if (check_allocation(allocator, "DWL") == 0) 150 | retval = -1; 151 | #endif 152 | 153 | #ifdef IMXDMABUFFER_IPU_ALLOCATOR_ENABLED 154 | allocator = imx_dma_buffer_ipu_allocator_new(-1, &err); 155 | if (allocator == NULL) 156 | { 157 | fprintf(stderr, "Could not create IPU allocator: %s (%d)\n", strerror(err), err); 158 | retval = -1; 159 | } 160 | else if (check_allocation(allocator, "IPU") == 0) 161 | retval = -1; 162 | #endif 163 | 164 | #ifdef IMXDMABUFFER_G2D_ALLOCATOR_ENABLED 165 | allocator = imx_dma_buffer_g2d_allocator_new(); 166 | if (check_allocation(allocator, "G2D") == 0) 167 | retval = -1; 168 | #endif 169 | 170 | #ifdef IMXDMABUFFER_PXP_ALLOCATOR_ENABLED 171 | allocator = imx_dma_buffer_pxp_allocator_new(-1, &err); 172 | if (allocator == NULL) 173 | { 174 | fprintf(stderr, "Could not create PxP allocator: %s (%d)\n", strerror(err), err); 175 | retval = -1; 176 | } 177 | else if (check_allocation(allocator,
"PxP") == 0) 178 | retval = -1; 179 | #endif 180 | 181 | return retval; 182 | } 183 | -------------------------------------------------------------------------------- /imxdmabuffer/imxdmabuffer_dma_heap_allocator.h: -------------------------------------------------------------------------------- 1 | #ifndef IMXDMABUFFER_DMA_HEAP_ALLOCATOR_H 2 | #define IMXDMABUFFER_DMA_HEAP_ALLOCATOR_H 3 | 4 | #include "imxdmabuffer.h" 5 | 6 | 7 | #ifdef __cplusplus 8 | extern "C" { 9 | #endif 10 | 11 | 12 | extern unsigned int const IMX_DMA_BUFFER_DMA_HEAP_ALLOCATOR_DEFAULT_HEAP_FLAGS; 13 | extern unsigned int const IMX_DMA_BUFFER_DMA_HEAP_ALLOCATOR_DEFAULT_FD_FLAGS; 14 | 15 | 16 | /* Creates a new DMA buffer allocator that uses a modified dma-heap allocator. 17 | * 18 | * The i.MX kernel contains a modified version of the dma-heap allocator which 19 | * was introduced in Linux 5.6 and is intended to replace ION. The modified 20 | * i.MX kernel variant has an extra ioctl for fetching a physical address for 21 | * a DMA-BUF FD. This allocator produces ImxDmaBuffer instances that are 22 | * DMA-BUF backed. imx_dma_buffer_get_fd() returns the DMA-BUF FD. 23 | * 24 | * If dma_heap_fd is <0, an internal dma-heap FD (not to be confused with 25 | * DMA-BUF FDs) is used. The device node path to use is configured at the time 26 | * when libimxdmabuffer is built. Typically, the default device node path 27 | * is set to "/dev/dma_heap/linux,cma". Whether that dma-heap allocates 28 | * cached or uncached memory is also defined at build time. 29 | * 30 | * NOTE: Using this with dma_heap_fd set to a valid FD is deprecated, because 31 | * this function does not allow for specifying whether this dma-heap allocates 32 | * cached memory. Instead, use imx_dma_buffer_dma_heap_allocator_new_from_fd() 33 | * to reuse an already openeded dma-heap device node. 34 | * 35 | * @param dma_heap_fd File descriptor of an open instance of the dma-heap 36 | * device node, or <0 to let the allocator open an internal FD. 37 | * @param heap_flags dma-heap flags. To use default flags, set this to 38 | * IMX_DMA_BUFFER_DMA_HEAP_ALLOCATOR_DEFAULT_HEAP_FLAGS. 39 | * @param fd_flags Flags for the DMA-BUF FD of newly allocated buffers. 40 | * Set this to IMX_DMA_BUFFER_DMA_HEAP_ALLOCATOR_DEFAULT_FD_FLAGS 41 | * to use default flags. 42 | * @param error If this pointer is non-NULL, and if an error occurs, then the 43 | * integer the pointer refers to is set to an error code from errno.h. 44 | * If creating the allocator succeeds, the integer is not modified. 45 | */ 46 | ImxDmaBufferAllocator* imx_dma_buffer_dma_heap_allocator_new( 47 | int dma_heap_fd, 48 | unsigned int heap_flags, 49 | unsigned int fd_flags, 50 | int *error 51 | ); 52 | 53 | /* Creates a new DMA buffer allocator that uses an already opened dma-heap FD. 54 | * 55 | * This is similar to imx_dma_buffer_dma_heap_allocator_new(), except that it 56 | * does not open its own dma-heap FD. Instead, it reuses an existing one. 57 | * 58 | * This also allows for specifying whether that dma-heap allocates cached 59 | * memory or not, which is important for performance reasons: If the dma-heap 60 | * allocates uncached memory, then the imx_dma_buffer_start_sync_session() 61 | * and imx_dma_buffer_stop_sync_session() functions do nothing. 62 | * 63 | * @param dma_heap_fd File descriptor of an open instance of the dma-heap 64 | * device node. Must be a valid FD. 65 | * @param heap_flags dma-heap flags. 
To use default flags, set this to 66 | * IMX_DMA_BUFFER_DMA_HEAP_ALLOCATOR_DEFAULT_HEAP_FLAGS. 67 | * @param fd_flags Flags for the DMA-BUF FD of newly allocated buffers. 68 | * Set this to IMX_DMA_BUFFER_DMA_HEAP_ALLOCATOR_DEFAULT_FD_FLAGS 69 | * to use default flags. 70 | * @param is_cached_memory_heap If nonzero, then this dma-heap is considered 71 | * as being one that allocates cached memory. If zero, it is considered 72 | * an uncached DMA memory allocator. 73 | */ 74 | ImxDmaBufferAllocator* imx_dma_buffer_dma_heap_allocator_new_from_fd( 75 | int dma_heap_fd, 76 | unsigned int heap_flags, 77 | unsigned int fd_flags, 78 | int is_cached_memory_heap 79 | ); 80 | 81 | /* Returns the file descriptor of the opened dma-heap device node this allocator uses. */ 82 | int imx_dma_buffer_dma_heap_allocator_get_dma_heap_fd(ImxDmaBufferAllocator *allocator); 83 | 84 | 85 | /* Allocates a DMA buffer with dma-heap and returns the file descriptor representing the buffer. 86 | * 87 | * This function is useful for assembling a custom allocator that uses dma-heap. 88 | * This may be necessary in frameworks that have their own memory allocation 89 | * infrastructure and already have code in place for mapping/unmapping file 90 | * descriptors for example. Usually it is better to just use the predefined 91 | * dma-heap imxdmabuffer allocator instead. To create an instance of that 92 | * allocator, use imx_dma_buffer_dma_heap_allocator_new(). 93 | * 94 | * @param dma_heap_fd dma-heap file descriptor to use. Must not be negative. 95 | * @param size Size of the DMA buffer to allocate, in bytes. 96 | * Must be greater than 0. 97 | * @param heap_flags dma-heap flags. To use default flags, set this to 98 | * IMX_DMA_BUFFER_DMA_HEAP_ALLOCATOR_DEFAULT_HEAP_FLAGS. 99 | * @param fd_flags Flags for the DMA-BUF FD of newly allocated buffers. 100 | * Set this to IMX_DMA_BUFFER_DMA_HEAP_ALLOCATOR_DEFAULT_FD_FLAGS 101 | * to use default flags. 102 | * @param error If this pointer is non-NULL, and if an error occurs, then the 103 | * integer the pointer refers to is set to an error code from errno.h. 104 | * If allocation succeeds, the integer is not modified. 105 | * @return DMA-BUF FD for the allocated DMA buffer, or a negative value 106 | * if allocation failed. 107 | */ 108 | int imx_dma_buffer_dma_heap_allocate_dmabuf( 109 | int dma_heap_fd, 110 | size_t size, 111 | unsigned int heap_flags, 112 | unsigned int fd_flags, 113 | int *error 114 | ); 115 | 116 | /* Retrieves a physical address for the DMA buffer with the given DMA-BUF FD. 117 | * 118 | * @param dmabuf_fd DMA-BUF file descriptor to retrieve a physical address for. 119 | * @param error If this pointer is non-NULL, and if an error occurs, then the 120 | * integer the pointer refers to is set to an error code from errno.h. If 121 | * retrieving the physical address succeeds, the integer is not modified. 122 | * @return Physical address to the DMA buffer represented by the DMA-BUF FD, or 123 | * 0 if retrieving the address failed. 
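 *
 * A hedged sketch of how the two helpers above combine ("heap_fd" and "err"
 * are illustrative assumptions, not names from this header):
 *
 *   int dmabuf_fd = imx_dma_buffer_dma_heap_allocate_dmabuf(heap_fd, 4096, IMX_DMA_BUFFER_DMA_HEAP_ALLOCATOR_DEFAULT_HEAP_FLAGS, IMX_DMA_BUFFER_DMA_HEAP_ALLOCATOR_DEFAULT_FD_FLAGS, &err);
 *   imx_physical_address_t phys_addr = imx_dma_buffer_dma_heap_get_physical_address_from_dmabuf_fd(dmabuf_fd, &err);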
124 | */ 125 | imx_physical_address_t imx_dma_buffer_dma_heap_get_physical_address_from_dmabuf_fd(int dmabuf_fd, int *error); 126 | 127 | 128 | #ifdef __cplusplus 129 | } 130 | #endif 131 | 132 | 133 | #endif /* IMXDMABUFFER_DMA_HEAP_ALLOCATOR_H */ 134 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | libimxdmabuffer - library for allocating and managing physically contiguous memory ("DMA memory" or "DMA buffers") on i.MX devices 2 | ================================================================================================================================== 3 | 4 | The purpose of this library is to provide an API for allocating memory blocks 5 | that are physically contiguous. Typically, a process allocates *virtual* 6 | memory blocks, which are contiguous, but only in the virtual address space 7 | the process got assigned by the operating system. Underneath the virtual 8 | address space layer, the *actual* memory might be fragmented. The machine's 9 | MMU is what typically takes care of mapping parts of the physical memory 10 | into one contiguous virtual address space. 11 | 12 | However, DMA (Direct Memory Access) channels usually cannot work with virtually 13 | contiguous but physically fragmented memory blocks, because they are not 14 | linked to the MMU. Therefore, for such DMA transfers, it is necessary to make 15 | sure that the memory blocks that are to be transferred are *physically* 16 | contiguous. This library provides APIs for allocators that can produce memory 17 | blocks which are physically contiguous. These memory blocks are referred to as 18 | "contiguous memory", "physical memory", "DMA memory", or "DMA buffers". 19 | 20 | This library is designed specifically for DMA buffer allocations on i.MX 21 | machines. One can use a specific allocator, or use the default one. Which 22 | allocator is the default is decided by the libimxdmabuffer build configuration. 23 | On different i.MX variants, different allocators are available, which is an 24 | important reason why this library was written: To provide one consistent 25 | interface for DMA buffer allocation across various i.MX variants, even though the 26 | underlying allocator might differ from i.MX variant to i.MX variant. 27 | 28 | 29 | License 30 | ------- 31 | 32 | This library is licensed under the LGPL v2.1. 33 | 34 | 35 | Available allocators 36 | -------------------- 37 | 38 | * DWL: Uses the Hantro DWL API for allocation. Only works on machines 39 | with a Hantro VPU decoder. 40 | * G2D: Uses the Vivante G2D API for allocation. Only works on machines 41 | with the G2D API enabled. i.MX6 machines with the Vivante GPU drivers 42 | have this API. 43 | * ION: Uses the ION allocator that was originally ported from 44 | Android to Linux. 45 | * dma-heap: Uses the new dma-heap userspace DMA-BUF allocation API 46 | that was introduced in Linux 5.6. 47 | * IPU: Uses IPU ioctls for allocation. Available on machines with an 48 | IPU, which includes most i.MX6 variants, but no i.MX7 or i.MX8 ones. 49 | * PxP: Uses PxP ioctls for allocation. Available on machines with a 50 | PxP, which includes the i.MX7, and some i.MX6 variants. 51 | 52 | The ION and dma-heap allocators allocate DMA-BUF buffers, so it is possible 53 | to use `imx_dma_buffer_get_fd()` on `ImxDmaBuffer` instances produced by 54 | those allocators.
The other allocators don't; that function will always 55 | return `-1` when used with those allocators. For this reason, these days, 56 | it is generally recommended to use dma-heap or ION. dma-heap is preferred 57 | when using kernel 5.6 or newer. 58 | 59 | Also, linux-imx contains additions to both ION and dma-heap to be able 60 | to fetch a physical address that is associated with an allocated buffer. 61 | This is necessary because some other NXP-specific APIs expect physical 62 | addresses, not DMA-BUF FDs. 63 | 64 | NOTE: ION is not available out-of-the-box on i.MX6 machines. However, it can 65 | be used on these by adding the following lines to the kernel configuration: 66 | 67 | CONFIG_ION=y 68 | CONFIG_ION_CMA_HEAP=y 69 | 70 | 71 | Building and installing 72 | ----------------------- 73 | 74 | This project uses the [waf meta build system](https://code.google.com/p/waf/). 75 | To configure, first set the following environment variables to whatever is 76 | necessary for cross compilation for your platform: 77 | 78 | * `CC` 79 | * `CFLAGS` 80 | * `LDFLAGS` 81 | * `PKG_CONFIG_PATH` 82 | * `PKG_CONFIG_SYSROOT_DIR` 83 | 84 | Then, run: 85 | 86 | ./waf configure --prefix=PREFIX 87 | 88 | (The aforementioned environment variables are only necessary for this 89 | configure call.) 90 | 91 | PREFIX defines the installation prefix, that is, where the built binaries 92 | will be installed. 93 | 94 | Once configuration is complete, run: 95 | 96 | ./waf 97 | 98 | This builds the library. 99 | Finally, to install, run: 100 | 101 | ./waf install 102 | 103 | This will install the headers in `$PREFIX/include/imxdmabuffer/`, the 104 | libraries in `$PREFIX/lib/`, and generate a pkg-config .pc file, which is 105 | placed in `$PREFIX/lib/pkgconfig/`. 106 | 107 | 108 | Notes about dma-heap allocator and imx kernel version 109 | ----------------------------------------------------- 110 | 111 | The dma-heap allocator is currently the only one that allocates cached 112 | memory. However, the way cache coherence is maintained involves an imx-kernel 113 | specific workaround which just flushes / repopulates the entire 114 | buffer. This is slow, and typically not what users want. 115 | 116 | Starting with kernel 5.15.5, the imx-kernel contains an additional, 117 | uncached dma-heap. This one does not have this problem, since nothing 118 | needs to be flushed / repopulated. 119 | 120 | For this reason, the recommended configuration is: 121 | 122 | * imx-kernel older than 5.15.5: Prefer the ION allocator. It allocates 123 | uncached memory and provides DMA-BUFs. 124 | * imx-kernel 5.15.5 and newer: Prefer the dma-heap allocator and 125 | configure it to use the device node path to the uncached dma-heap. 126 | 127 | If an uncached dma-heap is to be used, use the `--dma-heap-uncached-memory` 128 | configuration switch. The device node path to the uncached dma-heap is given 129 | using the `--dma-heap-device-node-path` configuration switch. 130 | 131 | 132 | Configuring the default allocator 133 | --------------------------------- 134 | 135 | By default, this is the order in which allocators are tried: 136 | 137 | dma-heap -> ION -> DWL -> IPU -> G2D -> PxP 138 | 139 | The first one that is available will be used. Individual allocators can be 140 | enabled or disabled by using the `--with-<allocator>-allocator=<mode>` 141 | configuration switches, where `<allocator>` is the lowercase name of the 142 | allocator, and `<mode>` is either `yes`, `no`, or `auto`. `yes` means that 143 | the build configuration fails if the allocator is not available.
`no` disables 144 | the allocator. `auto` is similar to `yes`, except that the build configuration 145 | won't fail if the allocator is not available; that allocator 146 | will simply be turned off in the build. 147 | 148 | 149 | API documentation 150 | ----------------- 151 | 152 | The API is documented in this header: 153 | 154 | * `imxdmabuffer/imxdmabuffer.h`: main allocation API 155 | -------------------------------------------------------------------------------- /imxdmabuffer/imxdmabuffer_ion_allocator.h: -------------------------------------------------------------------------------- 1 | #ifndef IMXDMABUFFER_ION_ALLOCATOR_H 2 | #define IMXDMABUFFER_ION_ALLOCATOR_H 3 | 4 | #include "imxdmabuffer.h" 5 | 6 | 7 | #ifdef __cplusplus 8 | extern "C" { 9 | #endif 10 | 11 | 12 | #define IMX_DMA_BUFFER_ION_ALLOCATOR_DEFAULT_ION_FD (-1) 13 | #define IMX_DMA_BUFFER_ION_ALLOCATOR_DEFAULT_HEAP_ID_MASK (1 << 0) 14 | #define IMX_DMA_BUFFER_ION_ALLOCATOR_DEFAULT_HEAP_FLAGS (0) 15 | 16 | 17 | /* Creates a new DMA buffer allocator that uses the modified i.MX Android ION allocator. 18 | * 19 | * The i.MX kernel contains a modified version of the ION allocator, which 20 | * got extra ioctls added for handling physical addresses. Buffers are shared 21 | * via DMA-BUF file descriptors. 22 | * 23 | * One restriction of ION is that there cannot be more than one client per 24 | * user process. A client is represented by a file descriptor that corresponds to 25 | * the device node /dev/ion. If the process has already opened that device node, then 26 | * calling this function would fail, because the allocator would try to open the 27 | * device node again, which in turn would mean an attempt to get a 28 | * second /dev/ion file descriptor in the same process, and as mentioned before, 29 | * this is not permitted. 30 | * 31 | * The solution to this is the ion_fd argument. If set to a negative value, then 32 | * the allocator will open its own internal file descriptor to /dev/ion (and close 33 | * it when it gets destroyed). If however ion_fd is set to a valid file descriptor, 34 | * then the allocator uses it instead and does not try to create its own /dev/ion 35 | * file descriptor (and this external /dev/ion file descriptor is not closed when 36 | * the allocator is destroyed). 37 | * 38 | * @param ion_fd /dev/ion file descriptor to use, or a negative value if the allocator 39 | * shall open and use its own file descriptor. The preprocessor macro 40 | * IMX_DMA_BUFFER_ION_ALLOCATOR_DEFAULT_ION_FD can be used for the latter case. 41 | * @param ion_heap_id_mask Bitmask for selecting ION heaps during allocations. This is 42 | * a bitwise OR combination of heap mask IDs. The IDs are combined by using their 43 | * values as powers of 2. Example: mask = (1 << ID_1) | (1 << ID_2). 44 | * The IMX_DMA_BUFFER_ION_ALLOCATOR_DEFAULT_HEAP_ID_MASK macro selects heap ID #0. 45 | * Note however that starting with kernel 4.14.34, this argument is ignored, 46 | * since the heap ID mask is autodetected (all heaps with type ION_HEAP_TYPE_DMA 47 | * are selected). 48 | * @param ion_heap_flags Flags to pass to the ION heap during allocations. The 49 | * preprocessor macro IMX_DMA_BUFFER_ION_ALLOCATOR_DEFAULT_HEAP_FLAGS can be 50 | * used as a default value (= no flags selected). 51 | * @param error If this pointer is non-NULL, and if an error occurs, then the integer 52 | * the pointer refers to is set to an error code from errno.h.
If creating 53 | * the allocator succeeds, the integer is not modified. 54 | * @return Pointer to the newly created ION DMA allocator, or NULL in case of an error. 55 | */ 56 | ImxDmaBufferAllocator* imx_dma_buffer_ion_allocator_new(int ion_fd, unsigned int ion_heap_id_mask, unsigned int ion_heap_flags, int *error); 57 | 58 | /* Returns the file descriptor of the opened ION device node this allocator uses. */ 59 | int imx_dma_buffer_ion_allocator_get_ion_fd(ImxDmaBufferAllocator *allocator); 60 | 61 | 62 | /* Allocates a DMA buffer via ION and returns the file descriptor representing the buffer. 63 | * 64 | * This function is useful for assembling a custom allocator that uses ION. This may 65 | * be necessary in frameworks that have their own memory allocation infrastructure 66 | * and already have code in place for mapping/unmapping file descriptors, for example. 67 | * Usually it is better to just use the predefined ION imxdmabuffer allocator instead. 68 | * Use imx_dma_buffer_ion_allocator_new() to create an instance. 69 | * 70 | * NOTE: Currently, the alignment argument does not actually do anything. This is 71 | * because there is no clear way to enforce a minimum physical address alignment over 72 | * ION. In the ION ImxDmaBufferAllocator implementation, it would be possible to use 73 | * this value by allocating a bit more memory than requested and aligning the physical 74 | * and mapped virtual addresses manually. But this only works if the caller only ever 75 | * accesses the memory block over the imx_dma_buffer_* functions. With ION allocation 76 | * though it is _also_ possible to access it by memory-mapping its DMA-BUF fd, and 77 | * any virtual address that gets memory-mapped that way will _not_ be aligned in the 78 | * same way. This means that there are no options left for enforcing specific memory 79 | * alignment with ION at this stage. Fortunately, allocated pages typically are aligned 80 | * at a page level, meaning an alignment to 4096 bytes. This alignment typically 81 | * fulfills the requirements of all practical use cases (since the requirements usually 82 | * are just something like "align to 8 bytes", "align to 16 bytes", etc.). Still, the 83 | * argument is left in place in case future ION revisions allow for specifying alignment. 84 | * 85 | * @param ion_fd /dev/ion file descriptor to use. Must not be negative. 86 | * @param size Size of the DMA buffer to allocate, in bytes. Must be greater than 0. 87 | * @param alignment Memory alignment for the newly allocated DMA buffer. 88 | * @param ion_heap_id_mask Bitmask for ION heaps during allocations. For more details, 89 | * see the imx_dma_buffer_ion_allocator_new() ion_heap_id_mask reference. 90 | * @param ion_heap_flags Flags to pass to the ION heap during allocations. 91 | * @param error If this pointer is non-NULL, and if an error occurs, then the 92 | * integer the pointer refers to is set to an error code from errno.h. If 93 | * allocation succeeds, the integer is not modified. 94 | * @return DMA-BUF file descriptor for the allocated DMA buffer, or a negative value 95 | * if allocation failed. 96 | */ 97 | int imx_dma_buffer_ion_allocate_dmabuf(int ion_fd, size_t size, size_t alignment, unsigned int ion_heap_id_mask, unsigned int ion_heap_flags, int *error); 98 | 99 | /* Retrieves a physical address for the DMA buffer with the given DMA-BUF FD. 100 | * 101 | * @param ion_fd /dev/ion file descriptor to use. Must not be negative. 102 | * @param dmabuf_fd DMA-BUF file descriptor to retrieve a physical address for.
103 | * @param error If this pointer is non-NULL, and if an error occurs, then the 104 | * integer the pointer refers to is set to an error code from errno.h. If 105 | * retrieving the physical address succeeds, the integer is not modified. 106 | * @return Physical address to the DMA buffer represented by the DMA-BUF FD, or 107 | * 0 if retrieving the address failed. 108 | */ 109 | imx_physical_address_t imx_dma_buffer_ion_get_physical_address_from_dmabuf_fd(int ion_fd, int dmabuf_fd, int *error); 110 | 111 | 112 | #ifdef __cplusplus 113 | } 114 | #endif 115 | 116 | 117 | #endif /* IMXDMABUFFER_ION_ALLOCATOR_H */ 118 | -------------------------------------------------------------------------------- /imxdmabuffer/imxdmabuffer.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include "imxdmabuffer.h" 5 | #include "imxdmabuffer_priv.h" 6 | 7 | #ifdef IMXDMABUFFER_DMA_HEAP_ALLOCATOR_ENABLED 8 | #include "imxdmabuffer_dma_heap_allocator.h" 9 | #endif 10 | 11 | #ifdef IMXDMABUFFER_ION_ALLOCATOR_ENABLED 12 | #include "imxdmabuffer_ion_allocator.h" 13 | #endif 14 | 15 | #ifdef IMXDMABUFFER_DWL_ALLOCATOR_ENABLED 16 | #include "imxdmabuffer_dwl_allocator.h" 17 | #endif 18 | 19 | #ifdef IMXDMABUFFER_IPU_ALLOCATOR_ENABLED 20 | #include "imxdmabuffer_ipu_allocator.h" 21 | #endif 22 | 23 | #ifdef IMXDMABUFFER_G2D_ALLOCATOR_ENABLED 24 | #include "imxdmabuffer_g2d_allocator.h" 25 | #endif 26 | 27 | #ifdef IMXDMABUFFER_PXP_ALLOCATOR_ENABLED 28 | #include "imxdmabuffer_pxp_allocator.h" 29 | #endif 30 | 31 | 32 | ImxDmaBufferAllocator* imx_dma_buffer_allocator_new(int *error) 33 | { 34 | #ifdef IMXDMABUFFER_DMA_HEAP_ALLOCATOR_ENABLED 35 | return imx_dma_buffer_dma_heap_allocator_new( 36 | -1, 37 | IMX_DMA_BUFFER_DMA_HEAP_ALLOCATOR_DEFAULT_HEAP_FLAGS, 38 | IMX_DMA_BUFFER_DMA_HEAP_ALLOCATOR_DEFAULT_FD_FLAGS, 39 | error 40 | ); 41 | #endif 42 | #ifdef IMXDMABUFFER_ION_ALLOCATOR_ENABLED 43 | return imx_dma_buffer_ion_allocator_new( 44 | IMX_DMA_BUFFER_ION_ALLOCATOR_DEFAULT_ION_FD, 45 | IMX_DMA_BUFFER_ION_ALLOCATOR_DEFAULT_HEAP_ID_MASK, 46 | IMX_DMA_BUFFER_ION_ALLOCATOR_DEFAULT_HEAP_FLAGS, 47 | error 48 | ); 49 | #endif 50 | #ifdef IMXDMABUFFER_DWL_ALLOCATOR_ENABLED 51 | return imx_dma_buffer_dwl_allocator_new(error); 52 | #endif 53 | #ifdef IMXDMABUFFER_IPU_ALLOCATOR_ENABLED 54 | return imx_dma_buffer_ipu_allocator_new(IMX_DMA_BUFFER_IPU_ALLOCATOR_DEFAULT_IPU_FD, error); 55 | #endif 56 | #ifdef IMXDMABUFFER_G2D_ALLOCATOR_ENABLED 57 | return imx_dma_buffer_g2d_allocator_new(); 58 | #endif 59 | #ifdef IMXDMABUFFER_PXP_ALLOCATOR_ENABLED 60 | return imx_dma_buffer_pxp_allocator_new(IMX_DMA_BUFFER_PXP_ALLOCATOR_DEFAULT_PXP_FD, error); 61 | #endif 62 | } 63 | 64 | 65 | void imx_dma_buffer_allocator_destroy(ImxDmaBufferAllocator *allocator) 66 | { 67 | assert(allocator != NULL); 68 | assert(allocator->destroy != NULL); 69 | allocator->destroy(allocator); 70 | } 71 | 72 | 73 | ImxDmaBuffer* imx_dma_buffer_allocate(ImxDmaBufferAllocator *allocator, size_t size, size_t alignment, int *error) 74 | { 75 | assert(allocator != NULL); 76 | assert(allocator->allocate != NULL); 77 | assert(size >= 1); 78 | return allocator->allocate(allocator, size, alignment, error); 79 | } 80 | 81 | 82 | void imx_dma_buffer_deallocate(ImxDmaBuffer *buffer) 83 | { 84 | assert(buffer != NULL); 85 | assert(buffer->allocator != NULL); 86 | assert(buffer->allocator->deallocate != NULL); 87 | buffer->allocator->deallocate(buffer->allocator, buffer); 88 | } 89 | 90 | 91 | 
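/* Typical buffer lifecycle with any allocator, as exercised by test/test-alloc.c
 * (a sketch; the size, alignment, and zero mapping flags are illustrative values):
 *
 *   int err;
 *   ImxDmaBuffer *buf = imx_dma_buffer_allocate(allocator, 4096, 16, &err);
 *   uint8_t *virt = imx_dma_buffer_map(buf, 0, &err);
 *   imx_dma_buffer_start_sync_session(buf);
 *   // ... CPU reads/writes through virt ...
 *   imx_dma_buffer_stop_sync_session(buf);
 *   imx_dma_buffer_unmap(buf);
 *   imx_dma_buffer_deallocate(buf);
 */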
uint8_t* imx_dma_buffer_map(ImxDmaBuffer *buffer, unsigned int flags, int *error) 92 | { 93 | assert(buffer != NULL); 94 | assert(buffer->allocator != NULL); 95 | assert(buffer->allocator->map != NULL); 96 | return buffer->allocator->map(buffer->allocator, buffer, flags, error); 97 | } 98 | 99 | 100 | void imx_dma_buffer_unmap(ImxDmaBuffer *buffer) 101 | { 102 | assert(buffer != NULL); 103 | assert(buffer->allocator != NULL); 104 | assert(buffer->allocator->unmap != NULL); 105 | buffer->allocator->unmap(buffer->allocator, buffer); 106 | } 107 | 108 | 109 | void imx_dma_buffer_start_sync_session(ImxDmaBuffer *buffer) 110 | { 111 | assert(buffer != NULL); 112 | assert(buffer->allocator != NULL); 113 | assert(buffer->allocator->start_sync_session != NULL); 114 | buffer->allocator->start_sync_session(buffer->allocator, buffer); 115 | } 116 | 117 | 118 | void imx_dma_buffer_stop_sync_session(ImxDmaBuffer *buffer) 119 | { 120 | assert(buffer != NULL); 121 | assert(buffer->allocator != NULL); 122 | assert(buffer->allocator->stop_sync_session != NULL); 123 | buffer->allocator->stop_sync_session(buffer->allocator, buffer); 124 | } 125 | 126 | 127 | imx_physical_address_t imx_dma_buffer_get_physical_address(ImxDmaBuffer *buffer) 128 | { 129 | assert(buffer != NULL); 130 | assert(buffer->allocator != NULL); 131 | assert(buffer->allocator->get_physical_address != NULL); 132 | return buffer->allocator->get_physical_address(buffer->allocator, buffer); 133 | } 134 | 135 | 136 | int imx_dma_buffer_get_fd(ImxDmaBuffer *buffer) 137 | { 138 | assert(buffer != NULL); 139 | assert(buffer->allocator != NULL); 140 | return (buffer->allocator->get_fd != NULL) ? buffer->allocator->get_fd(buffer->allocator, buffer) : -1; 141 | } 142 | 143 | 144 | size_t imx_dma_buffer_get_size(ImxDmaBuffer *buffer) 145 | { 146 | assert(buffer != NULL); 147 | assert(buffer->allocator != NULL); 148 | return buffer->allocator->get_size(buffer->allocator, buffer); 149 | } 150 | 151 | 152 | 153 | 154 | 155 | static ImxDmaBuffer* wrapped_dma_buffer_allocator_allocate(ImxDmaBufferAllocator *allocator, size_t size, size_t alignment, int *error) 156 | { 157 | /* This allocator is used for wrapping existing DMA memory. Therefore, 158 | * it doesn't actually allocate anything. This also means that the 159 | * NULL return value does not actually indicate an error. This 160 | * inconsistency is okay, since the allocator will never be accessible 161 | * from the outside. */ 162 | IMX_DMA_BUFFER_UNUSED_PARAM(allocator); 163 | IMX_DMA_BUFFER_UNUSED_PARAM(size); 164 | IMX_DMA_BUFFER_UNUSED_PARAM(alignment); 165 | IMX_DMA_BUFFER_UNUSED_PARAM(error); 166 | return NULL; 167 | } 168 | 169 | 170 | static void wrapped_dma_buffer_allocator_deallocate(ImxDmaBufferAllocator *allocator, ImxDmaBuffer *buffer) 171 | { 172 | IMX_DMA_BUFFER_UNUSED_PARAM(allocator); 173 | IMX_DMA_BUFFER_UNUSED_PARAM(buffer); 174 | } 175 | 176 | 177 | static uint8_t* wrapped_dma_buffer_allocator_map(ImxDmaBufferAllocator *allocator, ImxDmaBuffer *buffer, unsigned int flags, int *error) 178 | { 179 | IMX_DMA_BUFFER_UNUSED_PARAM(allocator); 180 | ImxWrappedDmaBuffer *wrapped_buf = (ImxWrappedDmaBuffer *)(buffer); 181 | return (wrapped_buf->map != NULL) ? 
wrapped_buf->map(wrapped_buf, flags, error) : NULL; 182 | } 183 | 184 | 185 | static void wrapped_dma_buffer_allocator_unmap(ImxDmaBufferAllocator *allocator, ImxDmaBuffer *buffer) 186 | { 187 | IMX_DMA_BUFFER_UNUSED_PARAM(allocator); 188 | ImxWrappedDmaBuffer *wrapped_buf = (ImxWrappedDmaBuffer *)(buffer); 189 | if (wrapped_buf->unmap != NULL) 190 | wrapped_buf->unmap(wrapped_buf); 191 | } 192 | 193 | 194 | static imx_physical_address_t wrapped_dma_buffer_allocator_get_physical_address(ImxDmaBufferAllocator *allocator, ImxDmaBuffer *buffer) 195 | { 196 | IMX_DMA_BUFFER_UNUSED_PARAM(allocator); 197 | return ((ImxWrappedDmaBuffer *)(buffer))->physical_address; 198 | } 199 | 200 | 201 | static int wrapped_dma_buffer_allocator_get_fd(ImxDmaBufferAllocator *allocator, ImxDmaBuffer *buffer) 202 | { 203 | IMX_DMA_BUFFER_UNUSED_PARAM(allocator); 204 | return ((ImxWrappedDmaBuffer *)(buffer))->fd; 205 | } 206 | 207 | 208 | static size_t wrapped_dma_buffer_allocator_get_size(ImxDmaBufferAllocator *allocator, ImxDmaBuffer *buffer) 209 | { 210 | IMX_DMA_BUFFER_UNUSED_PARAM(allocator); 211 | return ((ImxWrappedDmaBuffer *)(buffer))->size; 212 | } 213 | 214 | 215 | static ImxDmaBufferAllocator wrapped_dma_buffer_allocator = 216 | { 217 | NULL, /* the wrapped allocator is static and internal, so a destroy() function makes no sense */ 218 | wrapped_dma_buffer_allocator_allocate, 219 | wrapped_dma_buffer_allocator_deallocate, 220 | wrapped_dma_buffer_allocator_map, 221 | wrapped_dma_buffer_allocator_unmap, 222 | imx_dma_buffer_noop_start_sync_session_func, 223 | imx_dma_buffer_noop_stop_sync_session_func, 224 | wrapped_dma_buffer_allocator_get_physical_address, 225 | wrapped_dma_buffer_allocator_get_fd, 226 | wrapped_dma_buffer_allocator_get_size, 227 | { 0, } 228 | }; 229 | 230 | 231 | void imx_dma_buffer_init_wrapped_buffer(ImxWrappedDmaBuffer *buffer) 232 | { 233 | memset(buffer, 0, sizeof(ImxWrappedDmaBuffer)); 234 | buffer->parent.allocator = &wrapped_dma_buffer_allocator; 235 | } 236 | -------------------------------------------------------------------------------- /imxdmabuffer/imxdmabuffer_g2d_allocator.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | 5 | #include 6 | 7 | #include 8 | #include "imxdmabuffer.h" 9 | #include "imxdmabuffer_priv.h" 10 | #include "imxdmabuffer_g2d_allocator.h" 11 | 12 | 13 | typedef struct 14 | { 15 | ImxDmaBuffer parent; 16 | 17 | size_t actual_size; 18 | size_t size; 19 | uint8_t* aligned_virtual_address; 20 | imx_physical_address_t aligned_physical_address; 21 | 22 | /* These are kept around to catch invalid redundant mapping attempts. 23 | * It is good practice to check for those even if the underlying 24 | * allocator (G2D in this case) does not actually need any mapping 25 | * or mapping flags. 
*/ 26 | unsigned int map_flags; 27 | int mapping_refcount; 28 | 29 | struct g2d_buf *buf; 30 | } 31 | ImxDmaBufferG2dBuffer; 32 | 33 | 34 | typedef struct 35 | { 36 | ImxDmaBufferAllocator parent; 37 | } 38 | ImxDmaBufferG2dAllocator; 39 | 40 | 41 | static void imx_dma_buffer_g2d_allocator_destroy(ImxDmaBufferAllocator *allocator); 42 | static ImxDmaBuffer* imx_dma_buffer_g2d_allocator_allocate(ImxDmaBufferAllocator *allocator, size_t size, size_t alignment, int *error); 43 | static void imx_dma_buffer_g2d_allocator_deallocate(ImxDmaBufferAllocator *allocator, ImxDmaBuffer *buffer); 44 | static uint8_t* imx_dma_buffer_g2d_allocator_map(ImxDmaBufferAllocator *allocator, ImxDmaBuffer *buffer, unsigned int flags, int *error); 45 | static void imx_dma_buffer_g2d_allocator_unmap(ImxDmaBufferAllocator *allocator, ImxDmaBuffer *buffer); 46 | static imx_physical_address_t imx_dma_buffer_g2d_allocator_get_physical_address(ImxDmaBufferAllocator *allocator, ImxDmaBuffer *buffer); 47 | static int imx_dma_buffer_g2d_allocator_get_fd(ImxDmaBufferAllocator *allocator, ImxDmaBuffer *buffer); 48 | static size_t imx_dma_buffer_g2d_allocator_get_size(ImxDmaBufferAllocator *allocator, ImxDmaBuffer *buffer); 49 | 50 | 51 | static void imx_dma_buffer_g2d_allocator_destroy(ImxDmaBufferAllocator *allocator) 52 | { 53 | ImxDmaBufferG2dAllocator *imx_g2d_allocator = (ImxDmaBufferG2dAllocator *)allocator; 54 | free(imx_g2d_allocator); 55 | } 56 | 57 | 58 | static ImxDmaBuffer* imx_dma_buffer_g2d_allocator_allocate(ImxDmaBufferAllocator *allocator, size_t size, size_t alignment, int *error) 59 | { 60 | size_t actual_size; 61 | ImxDmaBufferG2dBuffer *imx_g2d_buffer; 62 | ImxDmaBufferG2dAllocator *imx_g2d_allocator = (ImxDmaBufferG2dAllocator *)allocator; 63 | 64 | assert(imx_g2d_allocator != NULL); 65 | 66 | /* The G2D allocator does not have a parameter for alignment, so we resort to a trick. 67 | * We allocate some extra bytes. Then, once allocated, we take the returned physical 68 | * address, and add an offset to it to make sure the address is aligned as requested. 69 | * This modified physical address is stored in aligned_physical_address . The maximum 70 | * offset equals the alignment size, which is why we increase the allocation size by 71 | * the alignment amount. Alignment of 0 or 1 however means "no alignment", so we don't 72 | * actually do this trick in that case. */ 73 | actual_size = size; 74 | if (alignment == 0) 75 | alignment = 1; 76 | if (alignment > 1) 77 | actual_size += alignment; 78 | 79 | /* Allocate system memory for the DMA buffer structure, and initialize its fields. */ 80 | imx_g2d_buffer = (ImxDmaBufferG2dBuffer *)malloc(sizeof(ImxDmaBufferG2dBuffer)); 81 | imx_g2d_buffer->parent.allocator = allocator; 82 | imx_g2d_buffer->actual_size = actual_size; 83 | imx_g2d_buffer->size = size; 84 | imx_g2d_buffer->mapping_refcount = 0; 85 | 86 | /* Perform the actual allocation. */ 87 | if ((imx_g2d_buffer->buf = g2d_alloc(actual_size, 0)) == NULL) 88 | { 89 | if (error != NULL) 90 | *error = ENOMEM; 91 | goto cleanup; 92 | } 93 | 94 | /* Align the returned address. We also align the virtual address here, which isn't 95 | * strictly necessary (alignment is only required for the physical address), but 96 | * we do it regardless for sake of consistency. 
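	 * For example (illustrative numbers, not taken from the source): with alignment = 16
	 * and g2d_alloc() returning physical address 0x10000006, the aligned physical address
	 * becomes 0x10000010, i.e. a 10-byte offset into the buffer; the extra 16 bytes
	 * allocated above guarantee that such an offset always fits.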
*/ 97 | imx_g2d_buffer->aligned_virtual_address = (uint8_t *)IMX_DMA_BUFFER_ALIGN_VAL_TO((uint8_t *)(imx_g2d_buffer->buf->buf_vaddr), alignment); 98 | imx_g2d_buffer->aligned_physical_address = (imx_physical_address_t)IMX_DMA_BUFFER_ALIGN_VAL_TO((imx_physical_address_t)(imx_g2d_buffer->buf->buf_paddr), alignment); 99 | 100 | finish: 101 | return (ImxDmaBuffer *)imx_g2d_buffer; 102 | 103 | cleanup: 104 | free(imx_g2d_buffer); 105 | imx_g2d_buffer = NULL; 106 | goto finish; 107 | } 108 | 109 | 110 | static void imx_dma_buffer_g2d_allocator_deallocate(ImxDmaBufferAllocator *allocator, ImxDmaBuffer *buffer) 111 | { 112 | ImxDmaBufferG2dBuffer *imx_g2d_buffer = (ImxDmaBufferG2dBuffer *)buffer; 113 | ImxDmaBufferG2dAllocator *imx_g2d_allocator = (ImxDmaBufferG2dAllocator *)allocator; 114 | 115 | assert(imx_g2d_allocator != NULL); 116 | assert(imx_g2d_buffer != NULL); 117 | assert(imx_g2d_buffer->buf != NULL); 118 | 119 | g2d_free(imx_g2d_buffer->buf); 120 | 121 | free(imx_g2d_buffer); 122 | } 123 | 124 | 125 | static uint8_t* imx_dma_buffer_g2d_allocator_map(ImxDmaBufferAllocator *allocator, ImxDmaBuffer *buffer, unsigned int flags, int *error) 126 | { 127 | ImxDmaBufferG2dBuffer *imx_g2d_buffer = (ImxDmaBufferG2dBuffer *)buffer; 128 | 129 | IMX_DMA_BUFFER_UNUSED_PARAM(allocator); 130 | IMX_DMA_BUFFER_UNUSED_PARAM(error); 131 | 132 | assert(imx_g2d_buffer != NULL); 133 | 134 | if (flags == 0) 135 | flags = IMX_DMA_BUFFER_MAPPING_FLAG_READ | IMX_DMA_BUFFER_MAPPING_FLAG_WRITE; 136 | 137 | /* As mentioned above, we keep the refcount and flags around 138 | * just to check correct API usage. Do this check here. 139 | * (Other allocators perform more steps than this.) */ 140 | if (imx_g2d_buffer->mapping_refcount > 0) 141 | { 142 | assert((imx_g2d_buffer->map_flags & flags & IMX_DMA_BUFFER_MAPPING_READWRITE_FLAG_MASK) == (flags & IMX_DMA_BUFFER_MAPPING_READWRITE_FLAG_MASK)); 143 | imx_g2d_buffer->mapping_refcount++; 144 | } 145 | else 146 | { 147 | imx_g2d_buffer->map_flags = flags; 148 | imx_g2d_buffer->mapping_refcount = 1; 149 | } 150 | 151 | /* G2D allocated memory is always mapped, so we just return the aligned virtual 152 | * address we stored in imx_dma_buffer_g2d_allocator_allocate(). */ 153 | 154 | return imx_g2d_buffer->aligned_virtual_address; 155 | } 156 | 157 | 158 | static void imx_dma_buffer_g2d_allocator_unmap(ImxDmaBufferAllocator *allocator, ImxDmaBuffer *buffer) 159 | { 160 | ImxDmaBufferG2dBuffer *imx_g2d_buffer = (ImxDmaBufferG2dBuffer *)buffer; 161 | 162 | IMX_DMA_BUFFER_UNUSED_PARAM(allocator); 163 | 164 | if (imx_g2d_buffer->mapping_refcount > 0) 165 | imx_g2d_buffer->mapping_refcount--; 166 | 167 | /* G2D allocated memory is always mapped, so we don't do anything here.
*/ 168 | } 169 | 170 | 171 | static imx_physical_address_t imx_dma_buffer_g2d_allocator_get_physical_address(ImxDmaBufferAllocator *allocator, ImxDmaBuffer *buffer) 172 | { 173 | ImxDmaBufferG2dBuffer *imx_g2d_buffer = (ImxDmaBufferG2dBuffer *)buffer; 174 | IMX_DMA_BUFFER_UNUSED_PARAM(allocator); 175 | assert(imx_g2d_buffer != NULL); 176 | return imx_g2d_buffer->aligned_physical_address; 177 | } 178 | 179 | 180 | static int imx_dma_buffer_g2d_allocator_get_fd(ImxDmaBufferAllocator *allocator, ImxDmaBuffer *buffer) 181 | { 182 | IMX_DMA_BUFFER_UNUSED_PARAM(allocator); 183 | IMX_DMA_BUFFER_UNUSED_PARAM(buffer); 184 | return -1; 185 | } 186 | 187 | 188 | static size_t imx_dma_buffer_g2d_allocator_get_size(ImxDmaBufferAllocator *allocator, ImxDmaBuffer *buffer) 189 | { 190 | ImxDmaBufferG2dBuffer *imx_g2d_buffer = (ImxDmaBufferG2dBuffer *)buffer; 191 | IMX_DMA_BUFFER_UNUSED_PARAM(allocator); 192 | assert(imx_g2d_buffer != NULL); 193 | return imx_g2d_buffer->size; 194 | } 195 | 196 | 197 | ImxDmaBufferAllocator* imx_dma_buffer_g2d_allocator_new(void) 198 | { 199 | ImxDmaBufferG2dAllocator *imx_g2d_allocator = (ImxDmaBufferG2dAllocator *)malloc(sizeof(ImxDmaBufferG2dAllocator)); 200 | imx_g2d_allocator->parent.destroy = imx_dma_buffer_g2d_allocator_destroy; 201 | imx_g2d_allocator->parent.allocate = imx_dma_buffer_g2d_allocator_allocate; 202 | imx_g2d_allocator->parent.deallocate = imx_dma_buffer_g2d_allocator_deallocate; 203 | imx_g2d_allocator->parent.map = imx_dma_buffer_g2d_allocator_map; 204 | imx_g2d_allocator->parent.unmap = imx_dma_buffer_g2d_allocator_unmap; 205 | imx_g2d_allocator->parent.start_sync_session = imx_dma_buffer_noop_start_sync_session_func; 206 | imx_g2d_allocator->parent.stop_sync_session = imx_dma_buffer_noop_stop_sync_session_func; 207 | imx_g2d_allocator->parent.get_physical_address = imx_dma_buffer_g2d_allocator_get_physical_address; 208 | imx_g2d_allocator->parent.get_fd = imx_dma_buffer_g2d_allocator_get_fd; 209 | imx_g2d_allocator->parent.get_size = imx_dma_buffer_g2d_allocator_get_size; 210 | 211 | return (ImxDmaBufferAllocator*)imx_g2d_allocator; 212 | } 213 | -------------------------------------------------------------------------------- /imxdmabuffer/imxdmabuffer_ipu_allocator.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | 9 | #include 10 | #include "imxdmabuffer.h" 11 | #include "imxdmabuffer_priv.h" 12 | #include "imxdmabuffer_ipu_allocator.h" 13 | #include "imxdmabuffer_ipu_priv.h" 14 | 15 | 16 | typedef struct 17 | { 18 | ImxDmaBuffer parent; 19 | 20 | imx_physical_address_t physical_address; 21 | 22 | size_t actual_size; 23 | size_t size; 24 | uint8_t* mapped_virtual_address; 25 | imx_physical_address_t aligned_physical_address; 26 | unsigned int map_flags; 27 | 28 | int mapping_refcount; 29 | } 30 | ImxDmaBufferIpuBuffer; 31 | 32 | 33 | typedef struct 34 | { 35 | ImxDmaBufferAllocator parent; 36 | int ipu_fd; 37 | int ipu_fd_is_internal; 38 | } 39 | ImxDmaBufferIpuAllocator; 40 | 41 | 42 | static void imx_dma_buffer_ipu_allocator_destroy(ImxDmaBufferAllocator *allocator); 43 | static ImxDmaBuffer* imx_dma_buffer_ipu_allocator_allocate(ImxDmaBufferAllocator *allocator, size_t size, size_t alignment, int *error); 44 | static void imx_dma_buffer_ipu_allocator_deallocate(ImxDmaBufferAllocator *allocator, ImxDmaBuffer *buffer); 45 | static uint8_t* imx_dma_buffer_ipu_allocator_map(ImxDmaBufferAllocator *allocator, ImxDmaBuffer 
*buffer, unsigned int flags, int *error); 46 | static void imx_dma_buffer_ipu_allocator_unmap(ImxDmaBufferAllocator *allocator, ImxDmaBuffer *buffer); 47 | static imx_physical_address_t imx_dma_buffer_ipu_allocator_get_physical_address(ImxDmaBufferAllocator *allocator, ImxDmaBuffer *buffer); 48 | static int imx_dma_buffer_ipu_allocator_get_fd(ImxDmaBufferAllocator *allocator, ImxDmaBuffer *buffer); 49 | static size_t imx_dma_buffer_ipu_allocator_get_size(ImxDmaBufferAllocator *allocator, ImxDmaBuffer *buffer); 50 | 51 | 52 | static void imx_dma_buffer_ipu_allocator_destroy(ImxDmaBufferAllocator *allocator) 53 | { 54 | ImxDmaBufferIpuAllocator *imx_ipu_allocator = (ImxDmaBufferIpuAllocator *)allocator; 55 | 56 | assert(imx_ipu_allocator != NULL); 57 | 58 | if ((imx_ipu_allocator->ipu_fd >= 0) && imx_ipu_allocator->ipu_fd_is_internal) 59 | { 60 | close(imx_ipu_allocator->ipu_fd); 61 | imx_ipu_allocator->ipu_fd = -1; 62 | } 63 | 64 | free(imx_ipu_allocator); 65 | } 66 | 67 | 68 | static ImxDmaBuffer* imx_dma_buffer_ipu_allocator_allocate(ImxDmaBufferAllocator *allocator, size_t size, size_t alignment, int *error) 69 | { 70 | size_t actual_size; 71 | imx_physical_address_t physical_address; 72 | ImxDmaBufferIpuBuffer *imx_ipu_buffer; 73 | ImxDmaBufferIpuAllocator *imx_ipu_allocator = (ImxDmaBufferIpuAllocator *)allocator; 74 | 75 | assert(imx_ipu_allocator != NULL); 76 | assert(imx_ipu_allocator->ipu_fd >= 0); 77 | 78 | /* The IPU allocator does not have a parameter for alignment, so we resort to a trick. 79 | * We allocate some extra bytes. Then, once allocated, we take the returned physical 80 | * address, and add an offset to it to make sure the address is aligned as requested. 81 | * This modified physical address is stored in aligned_physical_address . The maximum 82 | * offset equals the alignment size, which is why we increase the allocation size by 83 | * the alignment amount. Alignment of 0 or 1 however means "no alignment", so we don't 84 | * actually do this trick in that case. */ 85 | actual_size = size; 86 | if (alignment == 0) 87 | alignment = 1; 88 | if (alignment > 1) 89 | actual_size += alignment; 90 | 91 | /* Allocate system memory for the DMA buffer structure, and initialize its fields. */ 92 | imx_ipu_buffer = (ImxDmaBufferIpuBuffer *)malloc(sizeof(ImxDmaBufferIpuBuffer)); 93 | imx_ipu_buffer->parent.allocator = allocator; 94 | imx_ipu_buffer->actual_size = actual_size; 95 | imx_ipu_buffer->size = size; 96 | imx_ipu_buffer->mapped_virtual_address = NULL; 97 | imx_ipu_buffer->mapping_refcount = 0; 98 | 99 | /* Perform the actual allocation. */ 100 | if ((physical_address = imx_dma_buffer_ipu_allocate(imx_ipu_allocator->ipu_fd, actual_size, error)) == 0) 101 | goto cleanup; 102 | imx_ipu_buffer->physical_address = physical_address; 103 | 104 | /* Align the physical address. 
*/ 105 | imx_ipu_buffer->aligned_physical_address = (imx_physical_address_t)IMX_DMA_BUFFER_ALIGN_VAL_TO(physical_address, alignment); 106 | 107 | finish: 108 | return (ImxDmaBuffer *)imx_ipu_buffer; 109 | 110 | cleanup: 111 | free(imx_ipu_buffer); 112 | imx_ipu_buffer = NULL; 113 | goto finish; 114 | } 115 | 116 | 117 | static void imx_dma_buffer_ipu_allocator_deallocate(ImxDmaBufferAllocator *allocator, ImxDmaBuffer *buffer) 118 | { 119 | ImxDmaBufferIpuBuffer *imx_ipu_buffer = (ImxDmaBufferIpuBuffer *)buffer; 120 | ImxDmaBufferIpuAllocator *imx_ipu_allocator = (ImxDmaBufferIpuAllocator *)allocator; 121 | 122 | assert(imx_ipu_allocator != NULL); 123 | assert(imx_ipu_allocator->ipu_fd >= 0); 124 | assert(imx_ipu_buffer != NULL); 125 | assert(imx_ipu_buffer->physical_address != 0); 126 | 127 | if (imx_ipu_buffer->mapped_virtual_address != NULL) 128 | { 129 | /* Set mapping_refcount to 1 to force an 130 | * imx_dma_buffer_ipu_allocator_unmap() to actually unmap the buffer. */ 131 | imx_ipu_buffer->mapping_refcount = 1; 132 | imx_dma_buffer_ipu_allocator_unmap(allocator, buffer); 133 | } 134 | 135 | imx_dma_buffer_ipu_deallocate(imx_ipu_allocator->ipu_fd, imx_ipu_buffer->physical_address); 136 | 137 | free(imx_ipu_buffer); 138 | } 139 | 140 | 141 | static uint8_t* imx_dma_buffer_ipu_allocator_map(ImxDmaBufferAllocator *allocator, ImxDmaBuffer *buffer, unsigned int flags, int *error) 142 | { 143 | ImxDmaBufferIpuBuffer *imx_ipu_buffer = (ImxDmaBufferIpuBuffer *)buffer; 144 | ImxDmaBufferIpuAllocator *imx_ipu_allocator = (ImxDmaBufferIpuAllocator *)allocator; 145 | 146 | assert(imx_ipu_allocator != NULL); 147 | assert(imx_ipu_allocator->ipu_fd >= 0); 148 | assert(imx_ipu_buffer != NULL); 149 | assert(imx_ipu_buffer->physical_address != 0); 150 | 151 | if (imx_ipu_buffer->mapped_virtual_address != NULL) 152 | { 153 | assert((imx_ipu_buffer->map_flags & flags & IMX_DMA_BUFFER_MAPPING_READWRITE_FLAG_MASK) == (flags & IMX_DMA_BUFFER_MAPPING_READWRITE_FLAG_MASK)); 154 | 155 | /* Buffer is already mapped. Just increment the 156 | * refcount and otherwise do nothing. */ 157 | imx_ipu_buffer->mapping_refcount++; 158 | } 159 | else 160 | { 161 | /* Buffer is not mapped yet. Call mmap() to perform 162 | * the memory mapping. */ 163 | 164 | int mmap_prot = 0; 165 | int mmap_flags = MAP_SHARED; 166 | void *virtual_address; 167 | 168 | mmap_prot |= (flags & IMX_DMA_BUFFER_MAPPING_FLAG_READ) ? PROT_READ : 0; 169 | mmap_prot |= (flags & IMX_DMA_BUFFER_MAPPING_FLAG_WRITE) ? 
PROT_WRITE : 0; 170 | 171 | imx_ipu_buffer->map_flags = flags; 172 | 173 | virtual_address = mmap(0, imx_ipu_buffer->size, mmap_prot, mmap_flags, imx_ipu_allocator->ipu_fd, imx_ipu_buffer->physical_address); 174 | if (virtual_address == MAP_FAILED) 175 | { 176 | if (error != NULL) 177 | *error = errno; 178 | } 179 | else 180 | { 181 | imx_ipu_buffer->mapping_refcount = 1; 182 | imx_ipu_buffer->mapped_virtual_address = virtual_address; 183 | } 184 | } 185 | 186 | return imx_ipu_buffer->mapped_virtual_address; 187 | } 188 | 189 | 190 | static void imx_dma_buffer_ipu_allocator_unmap(ImxDmaBufferAllocator *allocator, ImxDmaBuffer *buffer) 191 | { 192 | ImxDmaBufferIpuBuffer *imx_ipu_buffer = (ImxDmaBufferIpuBuffer *)buffer; 193 | 194 | IMX_DMA_BUFFER_UNUSED_PARAM(allocator); 195 | 196 | assert(imx_ipu_buffer != NULL); 197 | assert(imx_ipu_buffer->physical_address != 0); 198 | 199 | if (imx_ipu_buffer->mapped_virtual_address == NULL) 200 | return; 201 | 202 | imx_ipu_buffer->mapping_refcount--; 203 | if (imx_ipu_buffer->mapping_refcount != 0) 204 | return; 205 | 206 | munmap((void *)(imx_ipu_buffer->mapped_virtual_address), imx_ipu_buffer->size); 207 | imx_ipu_buffer->mapped_virtual_address = NULL; 208 | } 209 | 210 | 211 | static imx_physical_address_t imx_dma_buffer_ipu_allocator_get_physical_address(ImxDmaBufferAllocator *allocator, ImxDmaBuffer *buffer) 212 | { 213 | ImxDmaBufferIpuBuffer *imx_ipu_buffer = (ImxDmaBufferIpuBuffer *)buffer; 214 | IMX_DMA_BUFFER_UNUSED_PARAM(allocator); 215 | assert(imx_ipu_buffer != NULL); 216 | return imx_ipu_buffer->aligned_physical_address; 217 | } 218 | 219 | 220 | static int imx_dma_buffer_ipu_allocator_get_fd(ImxDmaBufferAllocator *allocator, ImxDmaBuffer *buffer) 221 | { 222 | IMX_DMA_BUFFER_UNUSED_PARAM(allocator); 223 | IMX_DMA_BUFFER_UNUSED_PARAM(buffer); 224 | return -1; 225 | } 226 | 227 | 228 | static size_t imx_dma_buffer_ipu_allocator_get_size(ImxDmaBufferAllocator *allocator, ImxDmaBuffer *buffer) 229 | { 230 | ImxDmaBufferIpuBuffer *imx_ipu_buffer = (ImxDmaBufferIpuBuffer *)buffer; 231 | IMX_DMA_BUFFER_UNUSED_PARAM(allocator); 232 | assert(imx_ipu_buffer != NULL); 233 | return imx_ipu_buffer->size; 234 | } 235 | 236 | 237 | ImxDmaBufferAllocator* imx_dma_buffer_ipu_allocator_new(int ipu_fd, int *error) 238 | { 239 | ImxDmaBufferIpuAllocator *imx_ipu_allocator = (ImxDmaBufferIpuAllocator *)malloc(sizeof(ImxDmaBufferIpuAllocator)); 240 | imx_ipu_allocator->parent.destroy = imx_dma_buffer_ipu_allocator_destroy; 241 | imx_ipu_allocator->parent.allocate = imx_dma_buffer_ipu_allocator_allocate; 242 | imx_ipu_allocator->parent.deallocate = imx_dma_buffer_ipu_allocator_deallocate; 243 | imx_ipu_allocator->parent.map = imx_dma_buffer_ipu_allocator_map; 244 | imx_ipu_allocator->parent.unmap = imx_dma_buffer_ipu_allocator_unmap; 245 | imx_ipu_allocator->parent.start_sync_session = imx_dma_buffer_noop_start_sync_session_func; 246 | imx_ipu_allocator->parent.stop_sync_session = imx_dma_buffer_noop_stop_sync_session_func; 247 | imx_ipu_allocator->parent.get_physical_address = imx_dma_buffer_ipu_allocator_get_physical_address; 248 | imx_ipu_allocator->parent.get_fd = imx_dma_buffer_ipu_allocator_get_fd; 249 | imx_ipu_allocator->parent.get_size = imx_dma_buffer_ipu_allocator_get_size; 250 | imx_ipu_allocator->ipu_fd = ipu_fd; 251 | imx_ipu_allocator->ipu_fd_is_internal = (ipu_fd < 0); 252 | 253 | if (ipu_fd < 0) 254 | { 255 | imx_ipu_allocator->ipu_fd = open("/dev/mxc_ipu", O_RDWR, 0); 256 | if (imx_ipu_allocator->ipu_fd < 0) 257 | { 258 | if (error != NULL) 
259 | *error = errno; 260 | free(imx_ipu_allocator); 261 | return NULL; 262 | } 263 | } 264 | 265 | return (ImxDmaBufferAllocator*)imx_ipu_allocator; 266 | } 267 | -------------------------------------------------------------------------------- /imxdmabuffer/imxdmabuffer_pxp_allocator.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | 11 | #include 12 | #include "imxdmabuffer.h" 13 | #include "imxdmabuffer_priv.h" 14 | #include "imxdmabuffer_pxp_allocator.h" 15 | 16 | 17 | typedef struct 18 | { 19 | ImxDmaBuffer parent; 20 | 21 | imx_physical_address_t physical_address; 22 | 23 | size_t actual_size; 24 | size_t size; 25 | uint8_t* mapped_virtual_address; 26 | imx_physical_address_t aligned_physical_address; 27 | unsigned int map_flags; 28 | 29 | int mapping_refcount; 30 | 31 | struct pxp_mem_desc mem_desc; 32 | } 33 | ImxDmaBufferPxpBuffer; 34 | 35 | 36 | typedef struct 37 | { 38 | ImxDmaBufferAllocator parent; 39 | int pxp_fd; 40 | int pxp_fd_is_internal; 41 | } 42 | ImxDmaBufferPxpAllocator; 43 | 44 | 45 | static void imx_dma_buffer_pxp_allocator_destroy(ImxDmaBufferAllocator *allocator); 46 | static ImxDmaBuffer* imx_dma_buffer_pxp_allocator_allocate(ImxDmaBufferAllocator *allocator, size_t size, size_t alignment, int *error); 47 | static void imx_dma_buffer_pxp_allocator_deallocate(ImxDmaBufferAllocator *allocator, ImxDmaBuffer *buffer); 48 | static uint8_t* imx_dma_buffer_pxp_allocator_map(ImxDmaBufferAllocator *allocator, ImxDmaBuffer *buffer, unsigned int flags, int *error); 49 | static void imx_dma_buffer_pxp_allocator_unmap(ImxDmaBufferAllocator *allocator, ImxDmaBuffer *buffer); 50 | static imx_physical_address_t imx_dma_buffer_pxp_allocator_get_physical_address(ImxDmaBufferAllocator *allocator, ImxDmaBuffer *buffer); 51 | static int imx_dma_buffer_pxp_allocator_get_fd(ImxDmaBufferAllocator *allocator, ImxDmaBuffer *buffer); 52 | static size_t imx_dma_buffer_pxp_allocator_get_size(ImxDmaBufferAllocator *allocator, ImxDmaBuffer *buffer); 53 | 54 | 55 | static void imx_dma_buffer_pxp_allocator_destroy(ImxDmaBufferAllocator *allocator) 56 | { 57 | ImxDmaBufferPxpAllocator *imx_pxp_allocator = (ImxDmaBufferPxpAllocator *)allocator; 58 | 59 | assert(imx_pxp_allocator != NULL); 60 | 61 | if ((imx_pxp_allocator->pxp_fd >= 0) && imx_pxp_allocator->pxp_fd_is_internal) 62 | { 63 | close(imx_pxp_allocator->pxp_fd); 64 | imx_pxp_allocator->pxp_fd = -1; 65 | } 66 | 67 | free(imx_pxp_allocator); 68 | } 69 | 70 | 71 | static ImxDmaBuffer* imx_dma_buffer_pxp_allocator_allocate(ImxDmaBufferAllocator *allocator, size_t size, size_t alignment, int *error) 72 | { 73 | size_t actual_size; 74 | ImxDmaBufferPxpBuffer *imx_pxp_buffer; 75 | ImxDmaBufferPxpAllocator *imx_pxp_allocator = (ImxDmaBufferPxpAllocator *)allocator; 76 | 77 | assert(imx_pxp_allocator != NULL); 78 | assert(imx_pxp_allocator->pxp_fd >= 0); 79 | 80 | /* The PXP allocator does not have a parameter for alignment, so we resort to a trick. 81 | * We allocate some extra bytes. Then, once allocated, we take the returned physical 82 | * address, and add an offset to it to make sure the address is aligned as requested. 83 | * This modified physical address is stored in aligned_physical_address . The maximum 84 | * offset equals the alignment size, which is why we increase the allocation size by 85 | * the alignment amount. 
Alignment of 0 or 1 however means "no alignment", so we don't 86 | * actually do this trick in that case. (With a 256-byte alignment, for example, a returned physical address of 0x4E001004 would be rounded up to 0x4E001100.) */ 87 | actual_size = size; 88 | if (alignment == 0) 89 | alignment = 1; 90 | if (alignment > 1) 91 | actual_size += alignment; 92 | 93 | /* Allocate system memory for the DMA buffer structure, and initialize its fields. */ 94 | imx_pxp_buffer = (ImxDmaBufferPxpBuffer *)malloc(sizeof(ImxDmaBufferPxpBuffer)); 95 | imx_pxp_buffer->parent.allocator = allocator; 96 | imx_pxp_buffer->actual_size = actual_size; 97 | imx_pxp_buffer->size = size; 98 | imx_pxp_buffer->mapped_virtual_address = NULL; 99 | imx_pxp_buffer->mapping_refcount = 0; 100 | 101 | /* Perform the actual allocation. Use actual_size here so the extra bytes for the alignment trick are included. */ 102 | imx_pxp_buffer->mem_desc.size = actual_size; 103 | imx_pxp_buffer->mem_desc.mtype = MEMORY_TYPE_WC; /* TODO: Use MEMORY_TYPE_UNCACHED instead? */ 104 | if (ioctl(imx_pxp_allocator->pxp_fd, PXP_IOC_GET_PHYMEM, &(imx_pxp_buffer->mem_desc)) != 0) 105 | { 106 | if (error != NULL) 107 | *error = errno; 108 | goto cleanup; 109 | } 110 | imx_pxp_buffer->physical_address = (imx_physical_address_t)((imx_pxp_buffer->mem_desc.phys_addr)); 111 | 112 | /* Align the physical address. */ 113 | imx_pxp_buffer->aligned_physical_address = (imx_physical_address_t)IMX_DMA_BUFFER_ALIGN_VAL_TO(imx_pxp_buffer->physical_address, alignment); 114 | 115 | finish: 116 | return (ImxDmaBuffer *)imx_pxp_buffer; 117 | 118 | cleanup: 119 | free(imx_pxp_buffer); 120 | imx_pxp_buffer = NULL; 121 | goto finish; 122 | } 123 | 124 | 125 | static void imx_dma_buffer_pxp_allocator_deallocate(ImxDmaBufferAllocator *allocator, ImxDmaBuffer *buffer) 126 | { 127 | ImxDmaBufferPxpBuffer *imx_pxp_buffer = (ImxDmaBufferPxpBuffer *)buffer; 128 | ImxDmaBufferPxpAllocator *imx_pxp_allocator = (ImxDmaBufferPxpAllocator *)allocator; 129 | 130 | assert(imx_pxp_allocator != NULL); 131 | assert(imx_pxp_allocator->pxp_fd >= 0); 132 | assert(imx_pxp_buffer != NULL); 133 | assert(imx_pxp_buffer->physical_address != 0); 134 | 135 | if (imx_pxp_buffer->mapped_virtual_address != NULL) 136 | { 137 | /* Set mapping_refcount to 1 to force an 138 | * imx_dma_buffer_pxp_allocator_unmap() to actually unmap the buffer. */ 139 | imx_pxp_buffer->mapping_refcount = 1; 140 | imx_dma_buffer_pxp_allocator_unmap(allocator, buffer); 141 | } 142 | 143 | ioctl(imx_pxp_allocator->pxp_fd, PXP_IOC_PUT_PHYMEM, &(imx_pxp_buffer->mem_desc)); 144 | 145 | free(imx_pxp_buffer); 146 | } 147 | 148 | 149 | static uint8_t* imx_dma_buffer_pxp_allocator_map(ImxDmaBufferAllocator *allocator, ImxDmaBuffer *buffer, unsigned int flags, int *error) 150 | { 151 | ImxDmaBufferPxpBuffer *imx_pxp_buffer = (ImxDmaBufferPxpBuffer *)buffer; 152 | ImxDmaBufferPxpAllocator *imx_pxp_allocator = (ImxDmaBufferPxpAllocator *)allocator; 153 | 154 | assert(imx_pxp_allocator != NULL); 155 | assert(imx_pxp_allocator->pxp_fd >= 0); 156 | assert(imx_pxp_buffer != NULL); 157 | assert(imx_pxp_buffer->physical_address != 0); 158 | 159 | if (imx_pxp_buffer->mapped_virtual_address != NULL) 160 | { 161 | assert((imx_pxp_buffer->map_flags & flags & IMX_DMA_BUFFER_MAPPING_READWRITE_FLAG_MASK) == (flags & IMX_DMA_BUFFER_MAPPING_READWRITE_FLAG_MASK)); 162 | 163 | /* Buffer is already mapped. Just increment the 164 | * refcount and otherwise do nothing. */ 165 | imx_pxp_buffer->mapping_refcount++; 166 | } 167 | else 168 | { 169 | /* Buffer is not mapped yet. Call mmap() to perform 170 | * the memory mapping. 
*/ 171 | 172 | int mmap_prot = 0; 173 | int mmap_flags = MAP_SHARED; 174 | void *virtual_address; 175 | 176 | mmap_prot |= (flags & IMX_DMA_BUFFER_MAPPING_FLAG_READ) ? PROT_READ : 0; 177 | mmap_prot |= (flags & IMX_DMA_BUFFER_MAPPING_FLAG_WRITE) ? PROT_WRITE : 0; 178 | 179 | imx_pxp_buffer->map_flags = flags; 180 | 181 | virtual_address = mmap(0, imx_pxp_buffer->size, mmap_prot, mmap_flags, imx_pxp_allocator->pxp_fd, imx_pxp_buffer->physical_address); 182 | if (virtual_address == MAP_FAILED) 183 | { 184 | if (error != NULL) 185 | *error = errno; 186 | } 187 | else 188 | { 189 | imx_pxp_buffer->mapping_refcount = 1; 190 | imx_pxp_buffer->mapped_virtual_address = virtual_address; 191 | } 192 | } 193 | 194 | return imx_pxp_buffer->mapped_virtual_address; 195 | } 196 | 197 | 198 | static void imx_dma_buffer_pxp_allocator_unmap(ImxDmaBufferAllocator *allocator, ImxDmaBuffer *buffer) 199 | { 200 | ImxDmaBufferPxpBuffer *imx_pxp_buffer = (ImxDmaBufferPxpBuffer *)buffer; 201 | 202 | IMX_DMA_BUFFER_UNUSED_PARAM(allocator); 203 | 204 | assert(imx_pxp_buffer != NULL); 205 | assert(imx_pxp_buffer->physical_address != 0); 206 | 207 | if (imx_pxp_buffer->mapped_virtual_address == NULL) 208 | return; 209 | 210 | imx_pxp_buffer->mapping_refcount--; 211 | if (imx_pxp_buffer->mapping_refcount != 0) 212 | return; 213 | 214 | munmap((void *)(imx_pxp_buffer->mapped_virtual_address), imx_pxp_buffer->size); 215 | imx_pxp_buffer->mapped_virtual_address = NULL; 216 | } 217 | 218 | 219 | static imx_physical_address_t imx_dma_buffer_pxp_allocator_get_physical_address(ImxDmaBufferAllocator *allocator, ImxDmaBuffer *buffer) 220 | { 221 | ImxDmaBufferPxpBuffer *imx_pxp_buffer = (ImxDmaBufferPxpBuffer *)buffer; 222 | IMX_DMA_BUFFER_UNUSED_PARAM(allocator); 223 | assert(imx_pxp_buffer != NULL); 224 | return imx_pxp_buffer->aligned_physical_address; 225 | } 226 | 227 | 228 | static int imx_dma_buffer_pxp_allocator_get_fd(ImxDmaBufferAllocator *allocator, ImxDmaBuffer *buffer) 229 | { 230 | IMX_DMA_BUFFER_UNUSED_PARAM(allocator); 231 | IMX_DMA_BUFFER_UNUSED_PARAM(buffer); 232 | return -1; 233 | } 234 | 235 | 236 | static size_t imx_dma_buffer_pxp_allocator_get_size(ImxDmaBufferAllocator *allocator, ImxDmaBuffer *buffer) 237 | { 238 | ImxDmaBufferPxpBuffer *imx_pxp_buffer = (ImxDmaBufferPxpBuffer *)buffer; 239 | IMX_DMA_BUFFER_UNUSED_PARAM(allocator); 240 | assert(imx_pxp_buffer != NULL); 241 | return imx_pxp_buffer->size; 242 | } 243 | 244 | 245 | ImxDmaBufferAllocator* imx_dma_buffer_pxp_allocator_new(int pxp_fd, int *error) 246 | { 247 | ImxDmaBufferPxpAllocator *imx_pxp_allocator = (ImxDmaBufferPxpAllocator *)malloc(sizeof(ImxDmaBufferPxpAllocator)); 248 | imx_pxp_allocator->parent.destroy = imx_dma_buffer_pxp_allocator_destroy; 249 | imx_pxp_allocator->parent.allocate = imx_dma_buffer_pxp_allocator_allocate; 250 | imx_pxp_allocator->parent.deallocate = imx_dma_buffer_pxp_allocator_deallocate; 251 | imx_pxp_allocator->parent.map = imx_dma_buffer_pxp_allocator_map; 252 | imx_pxp_allocator->parent.unmap = imx_dma_buffer_pxp_allocator_unmap; 253 | imx_pxp_allocator->parent.start_sync_session = imx_dma_buffer_noop_start_sync_session_func; 254 | imx_pxp_allocator->parent.stop_sync_session = imx_dma_buffer_noop_stop_sync_session_func; 255 | imx_pxp_allocator->parent.get_physical_address = imx_dma_buffer_pxp_allocator_get_physical_address; 256 | imx_pxp_allocator->parent.get_fd = imx_dma_buffer_pxp_allocator_get_fd; 257 | imx_pxp_allocator->parent.get_size = imx_dma_buffer_pxp_allocator_get_size; 258 | 
imx_pxp_allocator->pxp_fd = pxp_fd; 259 | imx_pxp_allocator->pxp_fd_is_internal = (pxp_fd < 0); 260 | 261 | if (pxp_fd < 0) 262 | { 263 | imx_pxp_allocator->pxp_fd = open("/dev/pxp_device", O_RDWR, 0); 264 | if (imx_pxp_allocator->pxp_fd < 0) 265 | { 266 | if (error != NULL) 267 | *error = errno; 268 | free(imx_pxp_allocator); 269 | return NULL; 270 | } 271 | } 272 | 273 | return (ImxDmaBufferAllocator*)imx_pxp_allocator; 274 | } 275 | 276 | 277 | -------------------------------------------------------------------------------- /imxdmabuffer/imxdmabuffer_dwl_allocator.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | 6 | #include "dwl.h" 7 | 8 | #include 9 | #include "imxdmabuffer.h" 10 | #include "imxdmabuffer_priv.h" 11 | #include "imxdmabuffer_dwl_allocator.h" 12 | 13 | 14 | typedef struct 15 | { 16 | ImxDmaBuffer parent; 17 | 18 | struct DWLLinearMem dwl_linear_mem; 19 | 20 | size_t actual_size; 21 | size_t size; 22 | uint8_t* aligned_virtual_address; 23 | imx_physical_address_t aligned_physical_address; 24 | 25 | /* These are kept around to catch invalid redundant mapping attempts. 26 | * It is good practice to check for those even if the underlying 27 | * allocator (DWL in this case) does not actually need any mapping 28 | * or mapping flags. */ 29 | unsigned int map_flags; 30 | int mapping_refcount; 31 | } 32 | ImxDmaBufferDwlBuffer; 33 | 34 | 35 | typedef struct 36 | { 37 | ImxDmaBufferAllocator parent; 38 | struct DWLInitParam dwl_init_param; 39 | void const *dwl_instance; 40 | } 41 | ImxDmaBufferDwlAllocator; 42 | 43 | 44 | static void imx_dma_buffer_dwl_allocator_destroy(ImxDmaBufferAllocator *allocator); 45 | static ImxDmaBuffer* imx_dma_buffer_dwl_allocator_allocate(ImxDmaBufferAllocator *allocator, size_t size, size_t alignment, int *error); 46 | static void imx_dma_buffer_dwl_allocator_deallocate(ImxDmaBufferAllocator *allocator, ImxDmaBuffer *buffer); 47 | static uint8_t* imx_dma_buffer_dwl_allocator_map(ImxDmaBufferAllocator *allocator, ImxDmaBuffer *buffer, unsigned int flags, int *error); 48 | static void imx_dma_buffer_dwl_allocator_unmap(ImxDmaBufferAllocator *allocator, ImxDmaBuffer *buffer); 49 | static imx_physical_address_t imx_dma_buffer_dwl_allocator_get_physical_address(ImxDmaBufferAllocator *allocator, ImxDmaBuffer *buffer); 50 | static int imx_dma_buffer_dwl_allocator_get_fd(ImxDmaBufferAllocator *allocator, ImxDmaBuffer *buffer); 51 | static size_t imx_dma_buffer_dwl_allocator_get_size(ImxDmaBufferAllocator *allocator, ImxDmaBuffer *buffer); 52 | 53 | 54 | static void imx_dma_buffer_dwl_allocator_destroy(ImxDmaBufferAllocator *allocator) 55 | { 56 | ImxDmaBufferDwlAllocator *imx_dwl_allocator = (ImxDmaBufferDwlAllocator *)allocator; 57 | 58 | assert(imx_dwl_allocator != NULL); 59 | assert(imx_dwl_allocator->dwl_instance != NULL); 60 | 61 | DWLRelease(imx_dwl_allocator->dwl_instance); 62 | 63 | free(imx_dwl_allocator); 64 | } 65 | 66 | 67 | static ImxDmaBuffer* imx_dma_buffer_dwl_allocator_allocate(ImxDmaBufferAllocator *allocator, size_t size, size_t alignment, int *error) 68 | { 69 | size_t actual_size; 70 | ImxDmaBufferDwlBuffer *imx_dwl_buffer; 71 | ImxDmaBufferDwlAllocator *imx_dwl_allocator = (ImxDmaBufferDwlAllocator *)allocator; 72 | 73 | assert(imx_dwl_allocator != NULL); 74 | assert(imx_dwl_allocator->dwl_instance != NULL); 75 | 76 | /* The DWL allocator does not have a parameter for alignment, so we resort to a trick. 77 | * We allocate some extra bytes. 
Then, once allocated, we take the returned physical 78 | * address, and add an offset to it to make sure the address is aligned as requested. 79 | * This modified physical address is stored in aligned_physical_address . The maximum 80 | * offset equals the alignment size, which is why we increase the allocation size by 81 | * the alignment amount. Alignment of 0 or 1 however means "no alignment", so we don't 82 | * actually do this trick in that case. */ 83 | actual_size = size; 84 | if (alignment == 0) 85 | alignment = 1; 86 | if (alignment > 1) 87 | actual_size += alignment; 88 | 89 | /* Allocate system memory for the DMA buffer structure, and initialize its fields. */ 90 | imx_dwl_buffer = (ImxDmaBufferDwlBuffer *)malloc(sizeof(ImxDmaBufferDwlBuffer)); 91 | imx_dwl_buffer->parent.allocator = allocator; 92 | imx_dwl_buffer->actual_size = actual_size; 93 | imx_dwl_buffer->size = size; 94 | imx_dwl_buffer->mapping_refcount = 0; 95 | 96 | /* Initialize the DWL linear memory structure for allocation. DWL_MEM_TYPE_CPU is 97 | * physically contiguous memory that can be accessed with the CPU. 98 | * TODO: There is another type called "secure memory". It is selected by using the 99 | * DWL_MEM_TYPE_SLICE type. Currently, it is unclear how to use it properly. */ 100 | memset(&(imx_dwl_buffer->dwl_linear_mem), 0, sizeof(imx_dwl_buffer->dwl_linear_mem)); 101 | imx_dwl_buffer->dwl_linear_mem.mem_type = DWL_MEM_TYPE_CPU; 102 | 103 | /* Perform the actual allocation. */ 104 | if (DWLMallocLinear(imx_dwl_allocator->dwl_instance, actual_size, &(imx_dwl_buffer->dwl_linear_mem)) < 0) 105 | { 106 | if (error != NULL) 107 | *error = ENOMEM; 108 | goto cleanup; 109 | } 110 | 111 | /* Align the returned address. We also align the virtual address here, which isn't 112 | * strictly necessary (alignment is only required for the physical address), but 113 | * we do it regardless for the sake of consistency. 
*/ 114 | imx_dwl_buffer->aligned_virtual_address = (uint8_t *)IMX_DMA_BUFFER_ALIGN_VAL_TO((uint8_t *)(imx_dwl_buffer->dwl_linear_mem.virtual_address), alignment); 115 | imx_dwl_buffer->aligned_physical_address = (imx_physical_address_t)IMX_DMA_BUFFER_ALIGN_VAL_TO((imx_physical_address_t)(imx_dwl_buffer->dwl_linear_mem.bus_address), alignment); 116 | 117 | finish: 118 | return (ImxDmaBuffer *)imx_dwl_buffer; 119 | 120 | cleanup: 121 | free(imx_dwl_buffer); 122 | imx_dwl_buffer = NULL; 123 | goto finish; 124 | } 125 | 126 | static void imx_dma_buffer_dwl_allocator_deallocate(ImxDmaBufferAllocator *allocator, ImxDmaBuffer *buffer) 127 | { 128 | ImxDmaBufferDwlAllocator *imx_dwl_allocator = (ImxDmaBufferDwlAllocator *)allocator; 129 | ImxDmaBufferDwlBuffer *imx_dwl_buffer = (ImxDmaBufferDwlBuffer *)buffer; 130 | 131 | assert(imx_dwl_buffer != NULL); 132 | assert(imx_dwl_allocator != NULL); 133 | assert(imx_dwl_allocator->dwl_instance != NULL); 134 | 135 | DWLFreeLinear(imx_dwl_allocator->dwl_instance, &(imx_dwl_buffer->dwl_linear_mem)); 136 | 137 | free(imx_dwl_buffer); 138 | } 139 | 140 | static uint8_t* imx_dma_buffer_dwl_allocator_map(ImxDmaBufferAllocator *allocator, ImxDmaBuffer *buffer, unsigned int flags, int *error) 141 | { 142 | ImxDmaBufferDwlBuffer *imx_dwl_buffer = (ImxDmaBufferDwlBuffer *)buffer; 143 | 144 | IMX_DMA_BUFFER_UNUSED_PARAM(allocator); 145 | IMX_DMA_BUFFER_UNUSED_PARAM(error); 146 | 147 | assert(imx_dwl_buffer != NULL); 148 | 149 | if (flags == 0) 150 | flags = IMX_DMA_BUFFER_MAPPING_FLAG_READ | IMX_DMA_BUFFER_MAPPING_FLAG_WRITE; 151 | 152 | /* As mentioned above, we keep the refcount and flags around 153 | * just to check correct API usage. Do this check here. 154 | * (Other allocators perform more steps than this.) */ 155 | if (imx_dwl_buffer->mapping_refcount > 0) 156 | { 157 | assert((imx_dwl_buffer->map_flags & flags & IMX_DMA_BUFFER_MAPPING_READWRITE_FLAG_MASK) == (flags & IMX_DMA_BUFFER_MAPPING_READWRITE_FLAG_MASK)); 158 | imx_dwl_buffer->mapping_refcount++; 159 | } 160 | else 161 | { 162 | imx_dwl_buffer->map_flags = flags; 163 | imx_dwl_buffer->mapping_refcount = 1; 164 | } 165 | 166 | /* DWL allocated memory is always mapped, so we just return the aligned virtual 167 | * address we stored in imx_dma_buffer_dwl_allocator_allocate(). */ 168 | 169 | return imx_dwl_buffer->aligned_virtual_address; 170 | } 171 | 172 | static void imx_dma_buffer_dwl_allocator_unmap(ImxDmaBufferAllocator *allocator, ImxDmaBuffer *buffer) 173 | { 174 | ImxDmaBufferDwlBuffer *imx_dwl_buffer = (ImxDmaBufferDwlBuffer *)buffer; 175 | 176 | IMX_DMA_BUFFER_UNUSED_PARAM(allocator); 177 | 178 | if (imx_dwl_buffer->mapping_refcount > 0) 179 | imx_dwl_buffer->mapping_refcount--; 180 | 181 | /* DWL allocated memory is always mapped, so we don't do anything here. 
*/ 182 | } 183 | 184 | static imx_physical_address_t imx_dma_buffer_dwl_allocator_get_physical_address(ImxDmaBufferAllocator *allocator, ImxDmaBuffer *buffer) 185 | { 186 | ImxDmaBufferDwlBuffer *imx_dwl_buffer = (ImxDmaBufferDwlBuffer *)buffer; 187 | IMX_DMA_BUFFER_UNUSED_PARAM(allocator); 188 | assert(imx_dwl_buffer != NULL); 189 | return imx_dwl_buffer->aligned_physical_address; 190 | } 191 | 192 | static int imx_dma_buffer_dwl_allocator_get_fd(ImxDmaBufferAllocator *allocator, ImxDmaBuffer *buffer) 193 | { 194 | IMX_DMA_BUFFER_UNUSED_PARAM(allocator); 195 | IMX_DMA_BUFFER_UNUSED_PARAM(buffer); 196 | return -1; 197 | } 198 | 199 | static size_t imx_dma_buffer_dwl_allocator_get_size(ImxDmaBufferAllocator *allocator, ImxDmaBuffer *buffer) 200 | { 201 | ImxDmaBufferDwlBuffer *imx_dwl_buffer = (ImxDmaBufferDwlBuffer *)buffer; 202 | IMX_DMA_BUFFER_UNUSED_PARAM(allocator); 203 | assert(imx_dwl_buffer != NULL); 204 | return imx_dwl_buffer->size; 205 | } 206 | 207 | 208 | ImxDmaBufferAllocator* imx_dma_buffer_dwl_allocator_new(int *error) 209 | { 210 | ImxDmaBufferDwlAllocator *imx_dwl_allocator = (ImxDmaBufferDwlAllocator *)malloc(sizeof(ImxDmaBufferDwlAllocator)); 211 | 212 | imx_dwl_allocator->parent.destroy = imx_dma_buffer_dwl_allocator_destroy; 213 | imx_dwl_allocator->parent.allocate = imx_dma_buffer_dwl_allocator_allocate; 214 | imx_dwl_allocator->parent.deallocate = imx_dma_buffer_dwl_allocator_deallocate; 215 | imx_dwl_allocator->parent.map = imx_dma_buffer_dwl_allocator_map; 216 | imx_dwl_allocator->parent.unmap = imx_dma_buffer_dwl_allocator_unmap; 217 | imx_dwl_allocator->parent.start_sync_session = imx_dma_buffer_noop_start_sync_session_func; 218 | imx_dwl_allocator->parent.stop_sync_session = imx_dma_buffer_noop_stop_sync_session_func; 219 | imx_dwl_allocator->parent.get_physical_address = imx_dma_buffer_dwl_allocator_get_physical_address; 220 | imx_dwl_allocator->parent.get_fd = imx_dma_buffer_dwl_allocator_get_fd; 221 | imx_dwl_allocator->parent.get_size = imx_dma_buffer_dwl_allocator_get_size; 222 | 223 | memset(&(imx_dwl_allocator->dwl_init_param), 0, sizeof(imx_dwl_allocator->dwl_init_param)); 224 | 225 | /* Example code from the imx-vpu-hantro and imx-vpuwrap packages indicates that 226 | * for a Hantro G2 decoder, the HEVC client type should be used here, and for 227 | * a G1 decoder, we should use the H264 client type. The decoder version is 228 | * currently selected in the libimxdmabuffer build configuration. 
*/ 229 | #if defined(IMXDMABUFFER_DWL_USE_CLIENT_TYPE_HEVC) 230 | imx_dwl_allocator->dwl_init_param.client_type = DWL_CLIENT_TYPE_HEVC_DEC; 231 | #elif defined(IMXDMABUFFER_DWL_USE_CLIENT_TYPE_H264) 232 | imx_dwl_allocator->dwl_init_param.client_type = DWL_CLIENT_TYPE_H264_DEC; 233 | #else 234 | #error Unknown client type 235 | #endif 236 | imx_dwl_allocator->dwl_instance = DWLInit(&(imx_dwl_allocator->dwl_init_param)); 237 | if (imx_dwl_allocator->dwl_instance == NULL) 238 | { 239 | if (error != NULL) 240 | *error = ENOMEM; 241 | goto cleanup; 242 | } 243 | 244 | finish: 245 | return (ImxDmaBufferAllocator *)imx_dwl_allocator; 246 | 247 | cleanup: 248 | free(imx_dwl_allocator); 249 | imx_dwl_allocator = NULL; 250 | goto finish; 251 | } 252 | -------------------------------------------------------------------------------- /imxdmabuffer/imxdmabuffer.h: -------------------------------------------------------------------------------- 1 | #ifndef IMXDMABUFFER_H 2 | #define IMXDMABUFFER_H 3 | 4 | #include 5 | #include 6 | #include "imxdmabuffer_physaddr.h" 7 | 8 | 9 | #ifdef __cplusplus 10 | extern "C" { 11 | #endif 12 | 13 | 14 | /* ImxDmaBufferMappingFlags: Flags for the ImxDmaBufferAllocator's 15 | * map vfuncs. These flags can be bitwise-OR combined. */ 16 | typedef enum 17 | { 18 | /* Map memory for CPU write access. */ 19 | IMX_DMA_BUFFER_MAPPING_FLAG_WRITE = (1UL << 0), 20 | /* Map memory for CPU read access. */ 21 | IMX_DMA_BUFFER_MAPPING_FLAG_READ = (1UL << 1), 22 | /* Access sync is done manually by explicitly calling 23 | * imx_dma_buffer_start_sync_session() and 24 | * imx_dma_buffer_stop_sync_session(). */ 25 | IMX_DMA_BUFFER_MAPPING_FLAG_MANUAL_SYNC = (1UL << 2) 26 | } 27 | ImxDmaBufferMappingFlags; 28 | 29 | #define IMX_DMA_BUFFER_MAPPING_READWRITE_FLAG_MASK (IMX_DMA_BUFFER_MAPPING_FLAG_READ | IMX_DMA_BUFFER_MAPPING_FLAG_WRITE) 30 | 31 | 32 | typedef struct _ImxDmaBuffer ImxDmaBuffer; 33 | typedef struct _ImxDmaBufferAllocator ImxDmaBufferAllocator; 34 | typedef struct _ImxWrappedDmaBuffer ImxWrappedDmaBuffer; 35 | 36 | 37 | #define IMX_DMA_BUFFER_PADDING 8 38 | 39 | 40 | /* ImxDmaBuffer: 41 | * 42 | * Opaque object containing a DMA buffer (a physically contiguous 43 | * memory block that can be used for transmissions through DMA channels). 44 | * Its structure is defined by the allocator which created the object. 45 | */ 46 | struct _ImxDmaBuffer 47 | { 48 | ImxDmaBufferAllocator *allocator; 49 | }; 50 | 51 | 52 | /* ImxDmaBufferAllocator: 53 | * 54 | * This structure contains function pointers (referred to as "vfuncs") which define an allocator 55 | * for ImxDmaBuffer instances. It is possible to define a custom allocator, which is useful for 56 | * tracing memory allocations, and for hooking up any existing allocation mechanisms. 57 | * 58 | * The vfuncs typically are not called directly from the outside, but by using the corresponding 59 | * imx_dma_buffer_*() functions instead. See the documentation of these functions for more details 60 | * about what the vfuncs do. 
61 | */ 62 | struct _ImxDmaBufferAllocator 63 | { 64 | void (*destroy)(ImxDmaBufferAllocator *allocator); 65 | 66 | ImxDmaBuffer* (*allocate)(ImxDmaBufferAllocator *allocator, size_t size, size_t alignment, int *error); 67 | void (*deallocate)(ImxDmaBufferAllocator *allocator, ImxDmaBuffer *buffer); 68 | 69 | uint8_t* (*map)(ImxDmaBufferAllocator *allocator, ImxDmaBuffer *buffer, unsigned int flags, int *error); 70 | void (*unmap)(ImxDmaBufferAllocator *allocator, ImxDmaBuffer *buffer); 71 | 72 | void (*start_sync_session)(ImxDmaBufferAllocator *allocator, ImxDmaBuffer *buffer); 73 | void (*stop_sync_session)(ImxDmaBufferAllocator *allocator, ImxDmaBuffer *buffer); 74 | 75 | imx_physical_address_t (*get_physical_address)(ImxDmaBufferAllocator *allocator, ImxDmaBuffer *buffer); 76 | int (*get_fd)(ImxDmaBufferAllocator *allocator, ImxDmaBuffer *buffer); 77 | 78 | size_t (*get_size)(ImxDmaBufferAllocator *allocator, ImxDmaBuffer *buffer); 79 | 80 | void* _reserved[IMX_DMA_BUFFER_PADDING - 2]; 81 | }; 82 | 83 | 84 | /* Creates a new DMA buffer allocator. 85 | * 86 | * This uses one of the several available i.MX DMA allocators internally. Which 87 | * one is used is determined by the build configuration of libimxdmabuffer. 88 | * 89 | * @param error If this pointer is non-NULL, and if an error occurs, then the integer 90 | * the pointer refers to is set to an error code from errno.h. If creating 91 | * the allocator succeeds, the integer is not modified. 92 | * @return Pointer to the newly created DMA allocator, or NULL in case of an error. 93 | */ 94 | ImxDmaBufferAllocator* imx_dma_buffer_allocator_new(int *error); 95 | 96 | /* Destroys a previously created DMA buffer allocator. 97 | * 98 | * After this call, the allocator is fully destroyed, and must not be used anymore. 99 | * Also, any existing DMA buffers that have been allocated by this allocator will be 100 | * deallocated. 101 | */ 102 | void imx_dma_buffer_allocator_destroy(ImxDmaBufferAllocator *allocator); 103 | 104 | /* Allocates a DMA buffer. 105 | * 106 | * For deallocating DMA buffers, use imx_dma_buffer_deallocate(). 107 | * 108 | * Allocated buffers can have their physical addresses aligned. The alignment is 109 | * in bytes. An alignment of 1 or 0 means that no alignment is required. The 110 | * alignment is only required for the buffer's physical address, not for mapped 111 | * virtual addresses. Alignment does not reduce the accessible size of the buffer. 112 | * If for example the required alignment is 32 bytes, and the underlying allocation 113 | * mechanism does not accept an alignment parameter, then the allocated buffer will 114 | * internally have a size that is larger than the one specified here, and the 115 | * physical address will be increased if necessary to make it align to 32. 116 | * 117 | * @param allocator Allocator to use. 118 | * @param size Size of the buffer to allocate, in bytes. Must be at least 1. 119 | * @param alignment Physical address alignment, in bytes. 120 | * @param error If this pointer is non-NULL, and if an error occurs, then the integer 121 | * the pointer refers to is set to an error code from errno.h. If allocation 122 | * succeeds, the integer is not modified. 123 | * @return Pointer to the newly allocated DMA buffer if allocation succeeded, 124 | * or NULL in case of an error. 125 | */ 126 | ImxDmaBuffer* imx_dma_buffer_allocate(ImxDmaBufferAllocator *allocator, size_t size, size_t alignment, int *error); 127 | 
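To make the allocation and alignment semantics above concrete, here is a minimal usage sketch (illustrative only, not part of the header; it assumes the installed header is reachable as <imxdmabuffer/imxdmabuffer.h>, and it abbreviates error handling):

    #include <stdio.h>
    #include <imxdmabuffer/imxdmabuffer.h> /* assumed install path */

    int main(void)
    {
        int err = 0;

        /* Create whichever allocator the build configuration selected. */
        ImxDmaBufferAllocator *allocator = imx_dma_buffer_allocator_new(&err);
        if (allocator == NULL)
            return 1;

        /* Allocate 4096 accessible bytes whose physical address is aligned
         * to 256 bytes; the allocator may over-allocate internally. */
        ImxDmaBuffer *buffer = imx_dma_buffer_allocate(allocator, 4096, 256, &err);
        if (buffer == NULL)
        {
            imx_dma_buffer_allocator_destroy(allocator);
            return 1;
        }

        printf("physical address: %" IMX_PHYSICAL_ADDRESS_FORMAT "\n",
               imx_dma_buffer_get_physical_address(buffer));

        imx_dma_buffer_deallocate(buffer);
        imx_dma_buffer_allocator_destroy(allocator);
        return 0;
    }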
128 | /* Deallocates a DMA buffer. 129 | * 130 | * After this call, the buffer is fully deallocated, and must not be accessed anymore. 131 | */ 132 | void imx_dma_buffer_deallocate(ImxDmaBuffer *buffer); 133 | 134 | /* Maps a DMA buffer to the local address space, and returns the virtual address of the mapped region. 135 | * 136 | * Trying to map an already mapped buffer does not re-map. Instead, it increments an 137 | * internal reference counter, and returns the same mapped virtual address as before. 138 | * This means that imx_dma_buffer_unmap() must be called exactly the same number of times 139 | * imx_dma_buffer_map() was called on the same DMA buffer for the buffer to actually be unmapped. 140 | * 141 | * IMPORTANT: Attempts to map an already mapped buffer with different read/write flags are 142 | * only valid if the new flags are a strict subset of the old flags. For example, if the 143 | * buffer was already mapped with the read and write flags, and another, redundant mapping 144 | * attempt is made with only the read flag, then this is valid. Another example: If the buffer 145 | * was already mapped, but with the read flag only, and another, redundant mapping attempt is 146 | * made with only the write flag, then this is invalid. 147 | * 148 | * IMX_DMA_BUFFER_MAPPING_FLAG_MANUAL_SYNC however is not subject to this restriction. 149 | * This flag is only applied to the first map / last unmap. In redundant (un)mapping calls, 150 | * it is ignored. 151 | * 152 | * This function automatically synchronizes access to the mapped region, behaving like an 153 | * implicit imx_dma_buffer_start_sync_session() call. To prevent this behavior, add 154 | * IMX_DMA_BUFFER_MAPPING_FLAG_MANUAL_SYNC to the flags. 155 | * 156 | * @param flags Bitwise OR combination of flags (or 0 if no flags are used, in which case it 157 | * will map in regular read/write mode). See ImxDmaBufferMappingFlags for a list of 158 | * valid flags. 159 | * @param error If this pointer is non-NULL, and an error occurs, then the integer 160 | * the pointer refers to is set to an error code from errno.h. If mapping 161 | * succeeds, the integer is not modified. 162 | * @return Pointer to the mapped region in the virtual address space where 163 | * the data from the DMA buffer can be accessed, or NULL in case of an error. 164 | */ 165 | uint8_t* imx_dma_buffer_map(ImxDmaBuffer *buffer, unsigned int flags, int *error); 166 | 167 | /* Unmaps a DMA buffer. 168 | * 169 | * If the buffer isn't currently mapped, this function does nothing. As explained in 170 | * imx_dma_buffer_map(), the buffer isn't actually unmapped until the internal reference 171 | * counter reaches zero. 172 | * 173 | * This function automatically synchronizes access to the mapped region, behaving like an 174 | * implicit imx_dma_buffer_stop_sync_session() call. To prevent this behavior, add 175 | * IMX_DMA_BUFFER_MAPPING_FLAG_MANUAL_SYNC to the flags passed to imx_dma_buffer_map(). 176 | */ 177 | void imx_dma_buffer_unmap(ImxDmaBuffer *buffer); 178 | 
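The reference counting described above means map/unmap pairs may nest but must balance. A short sketch (illustrative; buffer is assumed to be a DMA buffer allocated as shown earlier):

    int err = 0;

    /* First call actually maps the buffer for read/write access. */
    uint8_t *virt = imx_dma_buffer_map(buffer,
        IMX_DMA_BUFFER_MAPPING_FLAG_READ | IMX_DMA_BUFFER_MAPPING_FLAG_WRITE, &err);

    /* Redundant call with a subset of the original flags: valid; it just
     * increments the internal refcount and returns the same address. */
    uint8_t *same = imx_dma_buffer_map(buffer, IMX_DMA_BUFFER_MAPPING_FLAG_READ, &err);
    /* virt == same at this point */

    /* Two map calls, so two unmap calls; only the second one actually unmaps. */
    imx_dma_buffer_unmap(buffer);
    imx_dma_buffer_unmap(buffer);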
179 | /* Starts a synchronized map access session. 180 | * 181 | * When cached DMA buffers are allocated, it is important to maintain cache coherency. 182 | * Otherwise, data in the CPU cache and data in memory might differ, leading to 183 | * undefined behavior. This function, along with imx_dma_buffer_stop_sync_session(), 184 | * establishes a "session" in which it is guaranteed that coherency will be established 185 | * at the beginning and the end of the session. At the start, the CPU cache will be 186 | * repopulated with the contents of the underlying memory region (if the cache is stale) 187 | * if the IMX_DMA_BUFFER_MAPPING_FLAG_READ flag was passed to imx_dma_buffer_map(). 188 | * When the session stops, the contents of the CPU cache are written to the underlying 189 | * memory region if the IMX_DMA_BUFFER_MAPPING_FLAG_WRITE flag was passed to 190 | * imx_dma_buffer_map(). That way, reading from DMA memory and writing to DMA memory 191 | * both can be done without breaking cache coherency. 192 | * 193 | * Normally, users do not need to call this, since the map and unmap functions will 194 | * do this automatically. If IMX_DMA_BUFFER_MAPPING_FLAG_MANUAL_SYNC was passed to 195 | * imx_dma_buffer_map() though, mapping and unmapping will _not_ automatically 196 | * handle the sync, and users have to call imx_dma_buffer_start_sync_session() and 197 | * imx_dma_buffer_stop_sync_session() manually. This can be useful if synchronization 198 | * needs to happen sometime while the buffer is mapped, for example if the buffer 199 | * is (un)mapped as part of a device initialization / shutdown. In such cases, the 200 | * buffer needs to stay mapped, but cache coherency may have to be maintained at 201 | * some point. 202 | * 203 | * If IMX_DMA_BUFFER_MAPPING_FLAG_MANUAL_SYNC was not passed to imx_dma_buffer_map(), 204 | * this function does nothing. 205 | * 206 | * The buffer must have been mapped before such a session starts, and the session 207 | * must be stopped before the buffer is unmapped. 208 | * 209 | * If the allocator allocates uncached DMA memory, this function does nothing. 210 | */ 211 | void imx_dma_buffer_start_sync_session(ImxDmaBuffer *buffer); 212 | 213 | /* Stops a synchronized map access session. 214 | * 215 | * See imx_dma_buffer_start_sync_session() for an explanation about synchronized 216 | * map access. 217 | * 218 | * If IMX_DMA_BUFFER_MAPPING_FLAG_MANUAL_SYNC was not passed to imx_dma_buffer_map(), 219 | * this function does nothing. 220 | * 221 | * If the allocator allocates uncached DMA memory, this function does nothing. 222 | */ 223 | void imx_dma_buffer_stop_sync_session(ImxDmaBuffer *buffer); 224 | 225 | /* Gets the physical address associated with the DMA buffer. 226 | * 227 | * This address points to the start of the buffer in the physical address space. The 228 | * physical address will be aligned to the value that was specified by the alignment 229 | * argument in the imx_dma_buffer_allocate() function that allocated this DMA buffer. 230 | * 231 | * This function can also be called while the DMA buffer is memory-mapped. 232 | */ 233 | imx_physical_address_t imx_dma_buffer_get_physical_address(ImxDmaBuffer *buffer); 234 | 235 | /* Returns a file descriptor associated with the DMA buffer (if one exists). 236 | * 237 | * If the underlying DMA memory allocator uses file descriptors, then this function 238 | * returns the file descriptor associated with the DMA buffer. If no such file 239 | * descriptor exists, -1 is returned. 240 | * 241 | * This function can also be called while the DMA buffer is memory-mapped. 242 | */ 243 | int imx_dma_buffer_get_fd(ImxDmaBuffer *buffer); 244 | 245 | /* Returns the size of the buffer, in bytes. 246 | * 247 | * This function can also be called while the DMA buffer is memory-mapped. 248 | */ 249 | size_t imx_dma_buffer_get_size(ImxDmaBuffer *buffer); 250 | 251 | 
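A sketch of how the manual sync session functions above fit together for a long-lived mapping (illustrative; the session calls only have an effect if the allocator in use hands out cached DMA memory):

    int err = 0;
    uint8_t *virt = imx_dma_buffer_map(buffer,
        IMX_DMA_BUFFER_MAPPING_FLAG_READ | IMX_DMA_BUFFER_MAPPING_FLAG_WRITE |
        IMX_DMA_BUFFER_MAPPING_FLAG_MANUAL_SYNC, &err);

    /* ... the buffer stays mapped; sometime later the CPU needs access ... */

    imx_dma_buffer_start_sync_session(buffer); /* make the CPU cache coherent */
    virt[0] ^= 0xFF;                           /* read-modify-write through the mapping */
    imx_dma_buffer_stop_sync_session(buffer);  /* flush CPU writes back to memory */

    /* much later */
    imx_dma_buffer_unmap(buffer);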
252 | /* ImxWrappedDmaBuffer: 253 | * 254 | * Structure for wrapping existing DMA buffers. This is useful for interfacing with 255 | * existing buffers that were not allocated by libimxdmabuffer. 256 | * 257 | * First, initialize the structure with imx_dma_buffer_init_wrapped_buffer(). 258 | * Then fill the fd, physical_address, and size values. 259 | * 260 | * This does not take ownership over any passed fd. 261 | * 262 | * The map / unmap function pointers are used in the imx_dma_buffer_map() / imx_dma_buffer_unmap() 263 | * calls. If these function pointers are NULL, no mapping will be done. 264 | * NOTE: imx_dma_buffer_map() will return a NULL pointer in this case. 265 | */ 266 | struct _ImxWrappedDmaBuffer 267 | { 268 | ImxDmaBuffer parent; 269 | 270 | uint8_t* (*map)(ImxWrappedDmaBuffer *wrapped_dma_buffer, unsigned int flags, int *error); 271 | void (*unmap)(ImxWrappedDmaBuffer *wrapped_dma_buffer); 272 | 273 | int fd; 274 | imx_physical_address_t physical_address; 275 | size_t size; 276 | }; 277 | 278 | /* Call for initializing wrapped DMA buffer structures. 279 | * Always call this before further using such a structure. */ 280 | void imx_dma_buffer_init_wrapped_buffer(ImxWrappedDmaBuffer *buffer); 281 | 
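A sketch of wrapping memory that was allocated elsewhere (illustrative; existing_fd, existing_paddr, and existing_size are hypothetical values describing that external memory, and consume_dma_buffer() stands in for any code that accepts an ImxDmaBuffer):

    ImxWrappedDmaBuffer wrapped;

    imx_dma_buffer_init_wrapped_buffer(&wrapped); /* always initialize first */
    wrapped.fd = existing_fd;                     /* -1 if no FD is available */
    wrapped.physical_address = existing_paddr;
    wrapped.size = existing_size;
    /* map / unmap are left NULL here, so imx_dma_buffer_map() would return NULL
     * for this buffer; set them if CPU access through a mapping is needed. */

    consume_dma_buffer((ImxDmaBuffer *)&wrapped);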
282 | 283 | #ifdef __cplusplus 284 | } 285 | #endif 286 | 287 | 288 | #endif /* IMXDMABUFFER_H */ 289 | -------------------------------------------------------------------------------- /imxdmabuffer/imxdmabuffer_ion_allocator.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | 9 | #include 10 | #include 11 | #include 12 | 13 | #include "imxdmabuffer.h" 14 | #include "imxdmabuffer_priv.h" 15 | #include "imxdmabuffer_ion_allocator.h" 16 | 17 | 18 | typedef struct 19 | { 20 | ImxDmaBuffer parent; 21 | 22 | int dmabuf_fd; 23 | imx_physical_address_t physical_address; 24 | size_t size; 25 | uint8_t* mapped_virtual_address; 26 | unsigned int map_flags; 27 | 28 | int mapping_refcount; 29 | } 30 | ImxDmaBufferIonBuffer; 31 | 32 | 33 | typedef struct 34 | { 35 | ImxDmaBufferAllocator parent; 36 | int ion_fd; 37 | int ion_fd_is_internal; 38 | unsigned int ion_heap_id_mask; 39 | unsigned int ion_heap_flags; 40 | } 41 | ImxDmaBufferIonAllocator; 42 | 43 | 44 | static void imx_dma_buffer_ion_allocator_destroy(ImxDmaBufferAllocator *allocator); 45 | static ImxDmaBuffer* imx_dma_buffer_ion_allocator_allocate(ImxDmaBufferAllocator *allocator, size_t size, size_t alignment, int *error); 46 | static void imx_dma_buffer_ion_allocator_deallocate(ImxDmaBufferAllocator *allocator, ImxDmaBuffer *buffer); 47 | static uint8_t* imx_dma_buffer_ion_allocator_map(ImxDmaBufferAllocator *allocator, ImxDmaBuffer *buffer, unsigned int flags, int *error); 48 | static void imx_dma_buffer_ion_allocator_unmap(ImxDmaBufferAllocator *allocator, ImxDmaBuffer *buffer); 49 | static imx_physical_address_t imx_dma_buffer_ion_allocator_get_physical_address(ImxDmaBufferAllocator *allocator, ImxDmaBuffer *buffer); 50 | static int imx_dma_buffer_ion_allocator_get_fd(ImxDmaBufferAllocator *allocator, ImxDmaBuffer *buffer); 51 | static size_t imx_dma_buffer_ion_allocator_get_size(ImxDmaBufferAllocator *allocator, ImxDmaBuffer *buffer); 52 | 53 | 54 | static void imx_dma_buffer_ion_allocator_destroy(ImxDmaBufferAllocator *allocator) 55 | { 56 | ImxDmaBufferIonAllocator *imx_ion_allocator = (ImxDmaBufferIonAllocator *)allocator; 57 | 58 | assert(imx_ion_allocator != NULL); 59 | 60 | if ((imx_ion_allocator->ion_fd >= 0) && imx_ion_allocator->ion_fd_is_internal) 61 | { 62 | close(imx_ion_allocator->ion_fd); 63 | imx_ion_allocator->ion_fd = -1; 64 | } 65 | 66 | free(imx_ion_allocator); 67 | } 68 | 69 | 70 | static ImxDmaBuffer* imx_dma_buffer_ion_allocator_allocate(ImxDmaBufferAllocator *allocator, size_t size, size_t alignment, int *error) 71 | { 72 | int dmabuf_fd = -1; 73 | imx_physical_address_t physical_address; 74 | ImxDmaBufferIonBuffer *imx_ion_buffer; 75 | ImxDmaBufferIonAllocator *imx_ion_allocator = (ImxDmaBufferIonAllocator *)allocator; 76 | 77 | assert(imx_ion_allocator != NULL); 78 | assert(imx_ion_allocator->ion_fd >= 0); 79 | 80 | /* Perform the actual allocation. */ 81 | dmabuf_fd = imx_dma_buffer_ion_allocate_dmabuf(imx_ion_allocator->ion_fd, size, alignment, imx_ion_allocator->ion_heap_id_mask, imx_ion_allocator->ion_heap_flags, error); 82 | if (dmabuf_fd < 0) 83 | return NULL; 84 | 85 | /* Now that we've got the buffer, retrieve its physical address. */ 86 | physical_address = imx_dma_buffer_ion_get_physical_address_from_dmabuf_fd(imx_ion_allocator->ion_fd, dmabuf_fd, error); 87 | if (physical_address == 0) 88 | { 89 | close(dmabuf_fd); 90 | return NULL; 91 | } 92 | 93 | /* Allocate system memory for the DMA buffer structure, and initialize its fields. */ 94 | imx_ion_buffer = (ImxDmaBufferIonBuffer *)malloc(sizeof(ImxDmaBufferIonBuffer)); 95 | imx_ion_buffer->parent.allocator = allocator; 96 | imx_ion_buffer->dmabuf_fd = dmabuf_fd; 97 | imx_ion_buffer->physical_address = physical_address; 98 | imx_ion_buffer->size = size; 99 | imx_ion_buffer->mapped_virtual_address = NULL; 100 | imx_ion_buffer->mapping_refcount = 0; 101 | 102 | return (ImxDmaBuffer *)imx_ion_buffer; 103 | } 104 | 105 | 106 | static void imx_dma_buffer_ion_allocator_deallocate(ImxDmaBufferAllocator *allocator, ImxDmaBuffer *buffer) 107 | { 108 | ImxDmaBufferIonBuffer *imx_ion_buffer = (ImxDmaBufferIonBuffer *)buffer; 109 | 110 | assert(imx_ion_buffer != NULL); 111 | assert(imx_ion_buffer->dmabuf_fd >= 0); 112 | 113 | if (imx_ion_buffer->mapped_virtual_address != NULL) 114 | { 115 | /* Set mapping_refcount to 1 to force an 116 | * imx_dma_buffer_ion_allocator_unmap() to actually unmap the buffer. */ 117 | imx_ion_buffer->mapping_refcount = 1; 118 | imx_dma_buffer_ion_allocator_unmap(allocator, buffer); 119 | } 120 | 121 | close(imx_ion_buffer->dmabuf_fd); 122 | free(imx_ion_buffer); 123 | } 124 | 125 | 126 | static uint8_t* imx_dma_buffer_ion_allocator_map(ImxDmaBufferAllocator *allocator, ImxDmaBuffer *buffer, unsigned int flags, int *error) 127 | { 128 | ImxDmaBufferIonBuffer *imx_ion_buffer = (ImxDmaBufferIonBuffer *)buffer; 129 | 130 | IMX_DMA_BUFFER_UNUSED_PARAM(allocator); 131 | 132 | assert(imx_ion_buffer != NULL); 133 | assert(imx_ion_buffer->dmabuf_fd >= 0); 134 | 135 | if (flags == 0) 136 | flags = IMX_DMA_BUFFER_MAPPING_FLAG_READ | IMX_DMA_BUFFER_MAPPING_FLAG_WRITE; 137 | 138 | if (imx_ion_buffer->mapped_virtual_address != NULL) 139 | { 140 | assert((imx_ion_buffer->map_flags & flags & IMX_DMA_BUFFER_MAPPING_READWRITE_FLAG_MASK) == (flags & IMX_DMA_BUFFER_MAPPING_READWRITE_FLAG_MASK)); 141 | 142 | /* Buffer is already mapped. Just increment the 143 | * refcount and otherwise do nothing. */ 144 | imx_ion_buffer->mapping_refcount++; 145 | } 146 | else 147 | { 148 | /* Buffer is not mapped yet. Call mmap() to perform 149 | * the memory mapping. */ 150 | 151 | int mmap_prot = 0; 152 | int mmap_flags = MAP_SHARED; 153 | void *virtual_address; 154 | 155 | mmap_prot |= (flags & IMX_DMA_BUFFER_MAPPING_FLAG_READ) ? PROT_READ : 0; 156 | mmap_prot |= (flags & IMX_DMA_BUFFER_MAPPING_FLAG_WRITE) ? 
PROT_WRITE : 0; 157 | 158 | imx_ion_buffer->map_flags = flags; 159 | 160 | virtual_address = mmap(0, imx_ion_buffer->size, mmap_prot, mmap_flags, imx_ion_buffer->dmabuf_fd, 0); 161 | if (virtual_address == MAP_FAILED) 162 | { 163 | if (error != NULL) 164 | *error = errno; 165 | } 166 | else 167 | { 168 | imx_ion_buffer->mapping_refcount = 1; 169 | imx_ion_buffer->mapped_virtual_address = virtual_address; 170 | } 171 | } 172 | 173 | return imx_ion_buffer->mapped_virtual_address; 174 | } 175 | 176 | 177 | static void imx_dma_buffer_ion_allocator_unmap(ImxDmaBufferAllocator *allocator, ImxDmaBuffer *buffer) 178 | { 179 | ImxDmaBufferIonBuffer *imx_ion_buffer = (ImxDmaBufferIonBuffer *)buffer; 180 | 181 | IMX_DMA_BUFFER_UNUSED_PARAM(allocator); 182 | 183 | assert(imx_ion_buffer != NULL); 184 | assert(imx_ion_buffer->dmabuf_fd >= 0); 185 | 186 | if (imx_ion_buffer->mapped_virtual_address == NULL) 187 | return; 188 | 189 | imx_ion_buffer->mapping_refcount--; 190 | if (imx_ion_buffer->mapping_refcount != 0) 191 | return; 192 | 193 | munmap((void *)(imx_ion_buffer->mapped_virtual_address), imx_ion_buffer->size); 194 | imx_ion_buffer->mapped_virtual_address = NULL; 195 | } 196 | 197 | 198 | static imx_physical_address_t imx_dma_buffer_ion_allocator_get_physical_address(ImxDmaBufferAllocator *allocator, ImxDmaBuffer *buffer) 199 | { 200 | ImxDmaBufferIonBuffer *imx_ion_buffer = (ImxDmaBufferIonBuffer *)buffer; 201 | IMX_DMA_BUFFER_UNUSED_PARAM(allocator); 202 | assert(imx_ion_buffer != NULL); 203 | return imx_ion_buffer->physical_address; 204 | } 205 | 206 | 207 | static int imx_dma_buffer_ion_allocator_get_fd(ImxDmaBufferAllocator *allocator, ImxDmaBuffer *buffer) 208 | { 209 | ImxDmaBufferIonBuffer *imx_ion_buffer = (ImxDmaBufferIonBuffer *)buffer; 210 | IMX_DMA_BUFFER_UNUSED_PARAM(allocator); 211 | assert(imx_ion_buffer != NULL); 212 | return imx_ion_buffer->dmabuf_fd; 213 | } 214 | 215 | 216 | static size_t imx_dma_buffer_ion_allocator_get_size(ImxDmaBufferAllocator *allocator, ImxDmaBuffer *buffer) 217 | { 218 | ImxDmaBufferIonBuffer *imx_ion_buffer = (ImxDmaBufferIonBuffer *)buffer; 219 | IMX_DMA_BUFFER_UNUSED_PARAM(allocator); 220 | assert(imx_ion_buffer != NULL); 221 | return imx_ion_buffer->size; 222 | } 223 | 224 | 225 | ImxDmaBufferAllocator* imx_dma_buffer_ion_allocator_new(int ion_fd, unsigned int ion_heap_id_mask, unsigned int ion_heap_flags, int *error) 226 | { 227 | ImxDmaBufferIonAllocator *imx_ion_allocator = (ImxDmaBufferIonAllocator *)malloc(sizeof(ImxDmaBufferIonAllocator)); 228 | imx_ion_allocator->parent.destroy = imx_dma_buffer_ion_allocator_destroy; 229 | imx_ion_allocator->parent.allocate = imx_dma_buffer_ion_allocator_allocate; 230 | imx_ion_allocator->parent.deallocate = imx_dma_buffer_ion_allocator_deallocate; 231 | imx_ion_allocator->parent.map = imx_dma_buffer_ion_allocator_map; 232 | imx_ion_allocator->parent.unmap = imx_dma_buffer_ion_allocator_unmap; 233 | imx_ion_allocator->parent.start_sync_session = imx_dma_buffer_noop_start_sync_session_func; 234 | imx_ion_allocator->parent.stop_sync_session = imx_dma_buffer_noop_stop_sync_session_func; 235 | imx_ion_allocator->parent.get_physical_address = imx_dma_buffer_ion_allocator_get_physical_address; 236 | imx_ion_allocator->parent.get_fd = imx_dma_buffer_ion_allocator_get_fd; 237 | imx_ion_allocator->parent.get_size = imx_dma_buffer_ion_allocator_get_size; 238 | imx_ion_allocator->ion_fd = ion_fd; 239 | imx_ion_allocator->ion_fd_is_internal = (ion_fd < 0); 240 | imx_ion_allocator->ion_heap_id_mask = ion_heap_id_mask; 241 
| imx_ion_allocator->ion_heap_flags = ion_heap_flags; 242 | 243 | if (ion_fd < 0) 244 | { 245 | imx_ion_allocator->ion_fd = open("/dev/ion", O_RDONLY); 246 | if (imx_ion_allocator->ion_fd < 0) 247 | { 248 | if (error != NULL) 249 | *error = errno; 250 | free(imx_ion_allocator); 251 | return NULL; 252 | } 253 | } 254 | 255 | return (ImxDmaBufferAllocator*)imx_ion_allocator; 256 | } 257 | 258 | 259 | int imx_dma_buffer_ion_allocator_get_ion_fd(ImxDmaBufferAllocator *allocator) 260 | { 261 | ImxDmaBufferIonAllocator *imx_ion_allocator = (ImxDmaBufferIonAllocator *)allocator; 262 | return imx_ion_allocator->ion_fd; 263 | } 264 | 265 | 266 | 267 | 268 | #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0) 269 | 270 | static unsigned int get_heap_id_mask(int ion_fd, int *error) 271 | { 272 | static unsigned int heap_id_mask = 0; 273 | 274 | /* Starting with kernel 4.14.34, we can iterate over the 275 | * ION heaps and find those with type ION_HEAP_TYPE_DMA. */ 276 | 277 | int i; 278 | int heap_count; 279 | struct ion_heap_query query = { 0 }; 280 | struct ion_heap_data *heap_data = NULL; 281 | 282 | if (heap_id_mask != 0) 283 | return heap_id_mask; 284 | 285 | if ((ioctl(ion_fd, ION_IOC_HEAP_QUERY, &query) < 0) || (query.cnt == 0)) 286 | { 287 | if (error != NULL) 288 | *error = errno; 289 | return 0; 290 | } 291 | 292 | heap_count = query.cnt; 293 | 294 | heap_data = calloc(heap_count, sizeof(struct ion_heap_data)); 295 | query.cnt = heap_count; 296 | query.heaps = (__u64)((uintptr_t)heap_data); 297 | if (ioctl(ion_fd, ION_IOC_HEAP_QUERY, &query) < 0) 298 | { 299 | if (error != NULL) 300 | *error = errno; 301 | free(heap_data); 302 | return 0; 303 | } 304 | 305 | for (i = 0; i < heap_count; ++i) 306 | { 307 | int is_dma_heap = (heap_data[i].type == ION_HEAP_TYPE_DMA); 308 | if (is_dma_heap) 309 | heap_id_mask |= 1u << heap_data[i].heap_id; 310 | } 311 | 312 | free(heap_data); 313 | 314 | return heap_id_mask; 315 | } 316 | 317 | #endif 318 | 319 | 320 | int imx_dma_buffer_ion_allocate_dmabuf(int ion_fd, size_t size, size_t alignment, unsigned int ion_heap_id_mask, unsigned int ion_heap_flags, int *error) 321 | { 322 | #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0) 323 | /* alignment value is unused in newer kernels. See function 324 | * documentation for more about this. */ 325 | IMX_DMA_BUFFER_UNUSED_PARAM(alignment); 326 | #endif 327 | 328 | /* Prior to kernel 4.14.34, we cannot get the FD from the 329 | * allocation data directly, and have to resort to an extra 330 | * ION_IOC_MAP ioctl, which requires the user_handle. */ 331 | #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0) 332 | ion_user_handle_t user_handle; 333 | int user_handle_set = 0; 334 | #endif 335 | int dmabuf_fd = -1; 336 | 337 | assert(ion_fd >= 0); 338 | 339 | #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0) 340 | /* Starting with kernel 4.14.34, we do not need the ion_heap_id_mask 341 | * argument anymore, since we can autodetect the mask, so we ignore 342 | * the argument's value. 
*/ 343 | ion_heap_id_mask = get_heap_id_mask(ion_fd, error); 344 | if (ion_heap_id_mask == 0) 345 | goto finish; 346 | #endif 347 | 348 | { 349 | struct ion_allocation_data allocation_data = 350 | { 351 | .len = size, 352 | #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0) 353 | .align = alignment, 354 | #endif 355 | .heap_id_mask = ion_heap_id_mask, 356 | .flags = ion_heap_flags 357 | }; 358 | 359 | if (ioctl(ion_fd, ION_IOC_ALLOC, &allocation_data) < 0) 360 | { 361 | if (error != NULL) 362 | *error = errno; 363 | goto finish; 364 | } 365 | 366 | #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0) 367 | { 368 | user_handle = allocation_data.handle; 369 | user_handle_set = 1; 370 | 371 | struct ion_fd_data fd_data = 372 | { 373 | .handle = user_handle 374 | }; 375 | 376 | if ((ioctl(ion_fd, ION_IOC_MAP, &fd_data) < 0) || (fd_data.fd < 0)) 377 | { 378 | if (error != NULL) 379 | *error = errno; 380 | goto finish; 381 | } 382 | 383 | dmabuf_fd = fd_data.fd; 384 | } 385 | #else 386 | dmabuf_fd = allocation_data.fd; 387 | #endif 388 | } 389 | 390 | 391 | finish: 392 | 393 | #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0) 394 | if (user_handle_set) 395 | { 396 | struct ion_handle_data handle_data = 397 | { 398 | .handle = user_handle 399 | }; 400 | 401 | ioctl(ion_fd, ION_IOC_FREE, &handle_data); 402 | } 403 | #endif 404 | 405 | return dmabuf_fd; 406 | } 407 | 408 | 409 | imx_physical_address_t imx_dma_buffer_ion_get_physical_address_from_dmabuf_fd(int ion_fd, int dmabuf_fd, int *error) 410 | { 411 | imx_physical_address_t physical_address = 0; 412 | 413 | /* The DMA_BUF_IOCTL_PHYS ioctl is not available 414 | * until kernel version 4.14.34. */ 415 | #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0) 416 | 417 | struct ion_phys_dma_data phys_dma_data = 418 | { 419 | .phys = 0, 420 | .size = 0, 421 | .dmafd = dmabuf_fd 422 | }; 423 | struct ion_custom_data custom_data = 424 | { 425 | .cmd = ION_IOC_PHYS_DMA, 426 | .arg = (unsigned long)(&phys_dma_data) 427 | }; 428 | 429 | assert(ion_fd >= 0); 430 | 431 | if (ioctl(ion_fd, ION_IOC_CUSTOM, &custom_data) < 0) 432 | { 433 | if (error != NULL) 434 | *error = errno; 435 | return 0; 436 | } 437 | 438 | physical_address = (imx_physical_address_t)(phys_dma_data.phys); 439 | 440 | #else 441 | 442 | struct dma_buf_phys dma_phys; 443 | 444 | assert(dmabuf_fd >= 0); 445 | 446 | IMX_DMA_BUFFER_UNUSED_PARAM(ion_fd); 447 | 448 | if (ioctl(dmabuf_fd, DMA_BUF_IOCTL_PHYS, &dma_phys) < 0) 449 | { 450 | if (error != NULL) 451 | *error = errno; 452 | return 0; 453 | } 454 | physical_address = (imx_physical_address_t)(dma_phys.phys); 455 | 456 | #endif 457 | 458 | return physical_address; 459 | } 460 |
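A short usage sketch (added for illustration; not a file from the repository): the two helpers above can be driven standalone, without constructing a full ImxDmaBufferAllocator. The sketch assumes the declarations installed as imxdmabuffer/imxdmabuffer_ion_allocator.h and a kernel that exposes /dev/ion; the heap id mask 0xFFFFFFFF is a placeholder (on kernels >= 4.14.34 the mask and alignment arguments are ignored, as the comments above explain):

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <imxdmabuffer/imxdmabuffer_physaddr.h>
#include <imxdmabuffer/imxdmabuffer_ion_allocator.h>

int main(void)
{
	int err = 0;
	int ion_fd = open("/dev/ion", O_RDONLY);
	if (ion_fd < 0)
		return 1;

	/* 4096 bytes; the 16-byte alignment is only honored on pre-4.14 kernels */
	int dmabuf_fd = imx_dma_buffer_ion_allocate_dmabuf(ion_fd, 4096, 16, 0xFFFFFFFF, 0, &err);
	if (dmabuf_fd < 0)
	{
		close(ion_fd);
		return 1;
	}

	printf("physical address: %" IMX_PHYSICAL_ADDRESS_FORMAT "\n",
	       imx_dma_buffer_ion_get_physical_address_from_dmabuf_fd(ion_fd, dmabuf_fd, &err));

	close(dmabuf_fd);
	close(ion_fd);
	return 0;
}

-------------------------------------------------------------------------------- /wscript: --------------------------------------------------------------------------------
1 | #!/usr/bin/env python3 2 | 3 | 4 | from waflib.Build import BuildContext, CleanContext, InstallContext, UninstallContext, Logs 5 | import os 6 | 7 | top = '.'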
8 | out = 'build' 9 | 10 | 11 | # the code inside the fragment deliberately performs an unsafe implicit float->char cast to trigger a 12 | # compiler warning; sometimes, gcc does not complain about an unsupported parameter *unless* the 13 | # code being compiled causes a warning 14 | c_cflag_check_code = """ 15 | int main() 16 | { 17 | float f = 4.0; 18 | char c = f; 19 | return c - 4; 20 | } 21 | """ 22 | def check_compiler_flag(conf, flag, lang): 23 | return conf.check(fragment = c_cflag_check_code, mandatory = 0, execute = 0, define_ret = 0, msg = 'Checking for compiler switch %s' % flag, cxxflags = conf.env[lang + 'FLAGS'] + [flag], okmsg = 'yes', errmsg = 'no') 24 | def check_compiler_flags_2(conf, cflags, ldflags, msg): 25 | Logs.pprint('NORMAL', msg) 26 | return conf.check(fragment = c_cflag_check_code, mandatory = 0, execute = 0, define_ret = 0, msg = 'Checking if building with these flags works', cxxflags = cflags, ldflags = ldflags, okmsg = 'yes', errmsg = 'no') 27 | 28 | 29 | def add_compiler_flags(conf, env, flags, lang, compiler, uselib = ''): 30 | for flag in reversed(flags): 31 | if type(flag) == type(()): 32 | flag_candidate = flag[0] 33 | flag_alternative = flag[1] 34 | else: 35 | flag_candidate = flag 36 | flag_alternative = None 37 | 38 | if uselib: 39 | flags_pattern = lang + 'FLAGS_' + uselib 40 | else: 41 | flags_pattern = lang + 'FLAGS' 42 | 43 | if check_compiler_flag(conf, flag_candidate, compiler): 44 | env.prepend_value(flags_pattern, [flag_candidate]) 45 | elif flag_alternative: 46 | if check_compiler_flag(conf, flag_alternative, compiler): 47 | env.prepend_value(flags_pattern, [flag_alternative]) 48 | 49 | 50 | def options(opt): 51 | opt.add_option('--enable-debug', action = 'store_true', default = False, help = 'enable debug build [default: disabled]') 52 | opt.add_option('--enable-static', action = 'store_true', default = False, help = 'build static library [default: build shared library]') 53 | opt.add_option('--imx-linux-headers-path', action='store', default='', help='path to i.MX linux headers (where linux/ipu.h etc.
can be found)') 54 | opt.add_option('--with-dma-heap-allocator', action='store', default = 'auto', help = 'build with dma-heap allocator support (valid values: yes/no/auto)') 55 | opt.add_option('--dma-heap-device-node-path', action='store', default='/dev/dma_heap/linux,cma', help='path to dma-heap device node') 56 | opt.add_option('--dma-heap-uncached-memory', action='store_true', default = False, help = 'dma-heap allocator allocates uncached DMA memory (default: allocates cached DMA memory)') 57 | opt.add_option('--with-ion-allocator', action='store', default = 'auto', help = 'build with ION allocator support (valid values: yes/no/auto)') 58 | opt.add_option('--with-dwl-allocator', action='store', default = 'auto', help = 'build with DWL allocator support (valid values: yes/no/auto)') 59 | opt.add_option('--hantro-decoder-version', action='store', default = '', help = 'Hantro decoder version to use for DWL based allocations (valid values: G1 G2)') 60 | opt.add_option('--hantro-headers-path', action='store', default='', help='path to hantro headers (dwl.h codec.h are checked for)') 61 | opt.add_option('--with-ipu-allocator', action='store', default = 'auto', help = 'build with IPU allocator support (valid values: yes/no/auto)') 62 | opt.add_option('--with-g2d-allocator', action='store', default = 'auto', help = 'build with G2D allocator support (valid values: yes/no/auto)') 63 | opt.add_option('--g2d-includes', action = 'store', default = '', help = 'path to the directory where the g2d.h header is') 64 | opt.add_option('--g2d-libs', action = 'store', default = '', help = 'path to the directory where the g2d library is') 65 | opt.add_option('--with-pxp-allocator', action='store', default = 'auto', help = 'build with PxP allocator support (valid values: yes/no/auto)') 66 | opt.load('compiler_c') 67 | opt.load('gnu_dirs') 68 | 69 | 70 | def configure(conf): 71 | conf.load('compiler_c') 72 | conf.load('gnu_dirs') 73 | 74 | 75 | # check and add compiler flags 76 | 77 | if conf.env['CFLAGS'] and conf.env['LINKFLAGS']: 78 | check_compiler_flags_2(conf, conf.env['CFLAGS'], conf.env['LINKFLAGS'], "Testing compiler flags %s and linker flags %s" % (' '.join(conf.env['CFLAGS']), ' '.join(conf.env['LINKFLAGS']))) 79 | elif conf.env['CFLAGS']: 80 | check_compiler_flags_2(conf, conf.env['CFLAGS'], '', "Testing compiler flags %s" % ' '.join(conf.env['CFLAGS'])) 81 | elif conf.env['LINKFLAGS']: 82 | check_compiler_flags_2(conf, '', conf.env['LINKFLAGS'], "Testing linker flags %s" % ' '.join(conf.env['LINKFLAGS'])) 83 | 84 | compiler_flags = ['-Wextra', '-Wall', '-std=gnu99', '-pedantic', '-fPIC', '-DPIC'] 85 | if conf.options.enable_debug: 86 | compiler_flags += ['-O0', '-g3', '-ggdb'] 87 | else: 88 | compiler_flags += ['-O2'] 89 | 90 | add_compiler_flags(conf, conf.env, compiler_flags, 'C', 'C') 91 | 92 | 93 | # misc checks and flags 94 | conf.env['BUILD_STATIC'] = conf.options.enable_static 95 | conf.env['EXTRA_USELIBS'] = [] 96 | conf.env['EXTRA_SOURCE_FILES'] = [] 97 | 98 | 99 | # i.MX linux header checks and flags 100 | if not conf.options.imx_linux_headers_path: 101 | conf.fatal('--imx-linux-headers-path is not set') 102 | conf.env['INCLUDES_IMXHEADERS'] = [os.path.abspath(os.path.expanduser(conf.options.imx_linux_headers_path))] 103 | Logs.pprint('NORMAL', 'i.MX linux headers path: ' + conf.env['INCLUDES_IMXHEADERS'][0]) 104 | 105 | 106 | # dma-heap allocator checks and flags 107 | with_dma_heap_alloc = conf.options.with_dma_heap_allocator 108 | if with_dma_heap_alloc != 'no': 109 | 
dma_heap_supported = conf.check_cc( 110 | fragment = ''' 111 | #include <linux/dma-heap.h> 112 | #include <sys/ioctl.h> 113 | int main() { 114 | struct dma_heap_allocation_data heap_alloc_data; 115 | ioctl(-1, DMA_HEAP_IOCTL_ALLOC, &heap_alloc_data); 116 | return 0; 117 | } 118 | ''', 119 | uselib = 'IMXHEADERS', 120 | mandatory = False, 121 | execute = False, 122 | msg = 'Checking for dma-heap allocator support by testing the presence of the DMA_HEAP_IOCTL_ALLOC ioctl' 123 | ) 124 | if dma_heap_supported: 125 | dma_heap_device_node_path = conf.options.dma_heap_device_node_path 126 | if not dma_heap_device_node_path: 127 | conf.fatal('dma-heap device node path must not be an empty string') 128 | Logs.pprint('NORMAL', 'dma-heap device node path: ' + dma_heap_device_node_path) 129 | conf.msg('dma-heap allocates uncached memory', 'yes' if conf.options.dma_heap_uncached_memory else 'no') 130 | 131 | conf.env['WITH_DMA_HEAP_ALLOCATOR'] = 1 132 | conf.define('IMXDMABUFFER_DMA_HEAP_ALLOCATOR_ENABLED', 1) 133 | conf.define('IMXDMABUFFER_DMA_HEAP_DEVICE_NODE_PATH', dma_heap_device_node_path) 134 | if conf.options.dma_heap_uncached_memory: 135 | conf.define('IMXDMABUFFER_DMA_HEAP_ALLOCATES_UNCACHED_MEMORY', 1) 136 | 137 | conf.env['EXTRA_USELIBS'] += ['IMXHEADERS'] 138 | conf.env['EXTRA_HEADER_FILES'] += ['imxdmabuffer/imxdmabuffer_dma_heap_allocator.h'] 139 | conf.env['EXTRA_SOURCE_FILES'] += ['imxdmabuffer/imxdmabuffer_dma_heap_allocator.c'] 140 | else: 141 | conf.env['WITH_DMA_HEAP_ALLOCATOR'] = 0 142 | if with_dma_heap_alloc == 'yes': 143 | conf.fatal('DMA_HEAP_IOCTL_ALLOC ioctl was not found') 144 | else: 145 | Logs.pprint('NORMAL', 'DMA_HEAP_IOCTL_ALLOC ioctl was not found; disabling dma-heap allocator') 146 | 147 | 148 | # ION allocator checks and flags 149 | with_ion_alloc = conf.options.with_ion_allocator 150 | if with_ion_alloc != 'no': 151 | ion_header_found = conf.check_cc( 152 | fragment = ''' 153 | #include <stddef.h> 154 | #include <linux/ion.h> 155 | int main() { 156 | return 0; 157 | } 158 | ''', 159 | uselib = 'IMXHEADERS', 160 | mandatory = False, 161 | execute = False, 162 | msg = 'Checking for ION allocator support by testing the presence of linux/ion.h' 163 | ) 164 | if ion_header_found: 165 | conf.env['WITH_ION_ALLOCATOR'] = 1 166 | conf.define('IMXDMABUFFER_ION_ALLOCATOR_ENABLED', 1) 167 | conf.env['EXTRA_USELIBS'] += ['IMXHEADERS'] 168 | conf.env['EXTRA_HEADER_FILES'] += ['imxdmabuffer/imxdmabuffer_ion_allocator.h'] 169 | conf.env['EXTRA_SOURCE_FILES'] += ['imxdmabuffer/imxdmabuffer_ion_allocator.c'] 170 | else: 171 | conf.env['WITH_ION_ALLOCATOR'] = 0 172 | if with_ion_alloc == 'yes': 173 | conf.fatal('linux/ion.h was not found in i.MX linux headers path') 174 | else: 175 | Logs.pprint('NORMAL', 'linux/ion.h was not found in i.MX linux headers path; disabling ION allocator') 176 | 177 | 178 | # DWL allocator checks and flags 179 | with_dwl_alloc = conf.options.with_dwl_allocator 180 | if with_dwl_alloc != 'no': 181 | dwl_alloc_enabled = True 182 | auto_check = (with_dwl_alloc == 'auto') 183 | hantro_decoder_version = None 184 | 185 | if not conf.options.hantro_decoder_version: 186 | if auto_check: 187 | Logs.pprint('NORMAL', '--hantro-decoder-version is not set; disabling DWL allocator') 188 | dwl_alloc_enabled = False 189 | else: 190 | conf.fatal('--hantro-decoder-version is not set') 191 | if dwl_alloc_enabled: 192 | hantro_decoder_version = conf.options.hantro_decoder_version if conf.options.hantro_decoder_version in ['G1', 'G2'] else None 193 | if not hantro_decoder_version: 194 | conf.fatal('Invalid Hantro decoder version
"%s" specified' % conf.options.hantro_decoder_version) 195 | Logs.pprint('NORMAL', 'Hantro decoder version: %s' % hantro_decoder_version) 196 | 197 | if dwl_alloc_enabled and not conf.options.hantro_headers_path: 198 | if auto_check: 199 | Logs.pprint('NORMAL', '--hantro-headers-path is not set; disabling DWL allocator') 200 | dwl_alloc_enabled = False 201 | else: 202 | conf.fatal('--hantro-headers-path is not set') 203 | Logs.pprint('NORMAL', 'Hantro headers path: %s' % conf.options.hantro_headers_path) 204 | 205 | if dwl_alloc_enabled: 206 | dwl_header_found = conf.check_cc(uselib_store = 'HANTRO', define_name = '', mandatory = False, includes = [conf.options.hantro_headers_path], header_name = 'dwl.h') 207 | if not dwl_header_found: 208 | if auto_check: 209 | Logs.pprint('NORMAL', 'Could not find dwl.h in path "%s" specified by --hantro-headers-path; disabling DWL allocator' % conf.options.hantro_headers_path) 210 | dwl_alloc_enabled = False 211 | else: 212 | conf.fatal('Could not find dwl.h in path "%s" specified by --hantro-headers-path' % conf.options.hantro_headers_path) 213 | 214 | if dwl_alloc_enabled and not conf.check_cc(uselib_store = 'RT', mandatory = True, lib = 'rt'): 215 | if auto_check: 216 | Logs.pprint('NORMAL', 'Could not find rt library; disabling DWL allocator') 217 | dwl_alloc_enabled = False 218 | else: 219 | conf.fatal('Could not find rt library') 220 | 221 | if dwl_alloc_enabled and not conf.check_cc(uselib_store = 'HANTRO', uselib = ['HANTRO', 'RT'], mandatory = True, lib = 'hantro'): 222 | if auto_check: 223 | Logs.pprint('NORMAL', 'Could not find hantro library; disabling DWL allocator') 224 | dwl_alloc_enabled = False 225 | else: 226 | conf.fatal('Could not find hantro library') 227 | 228 | if dwl_alloc_enabled: 229 | conf.define('IMXDMABUFFER_DWL_ALLOCATOR_ENABLED', 1) 230 | if hantro_decoder_version == 'G2': 231 | conf.define('IMXDMABUFFER_DWL_USE_CLIENT_TYPE_HEVC', 1) 232 | elif hantro_decoder_version == 'G1': 233 | conf.define('IMXDMABUFFER_DWL_USE_CLIENT_TYPE_H264', 1) 234 | else: 235 | conf.fatal('Internal configuration error - unknown Hantro decoder type') 236 | conf.env['EXTRA_USELIBS'] += ['HANTRO', 'RT'] 237 | conf.env['EXTRA_HEADER_FILES'] += ['imxdmabuffer/imxdmabuffer_dwl_allocator.h'] 238 | conf.env['EXTRA_SOURCE_FILES'] += ['imxdmabuffer/imxdmabuffer_dwl_allocator.c'] 239 | 240 | 241 | # IPU allocator checks and flags 242 | with_ipu_alloc = conf.options.with_ipu_allocator 243 | if with_ipu_alloc != 'no': 244 | ipu_header_found = conf.check_cc(fragment = ''' 245 | #include 246 | #include 247 | #include 248 | #include 249 | 250 | int main() { return 0; } 251 | ''', 252 | uselib = 'IMXHEADERS', 253 | mandatory = False, 254 | execute = False, 255 | msg = 'checking for linux/fb.h and the IPU header linux/ipu.h' 256 | ) 257 | if ipu_header_found: 258 | conf.define('IMXDMABUFFER_IPU_ALLOCATOR_ENABLED', 1) 259 | conf.env['EXTRA_USELIBS'] += ['IMXHEADERS'] 260 | conf.env['EXTRA_HEADER_FILES'] += ['imxdmabuffer/imxdmabuffer_ipu_allocator.h'] 261 | conf.env['EXTRA_SOURCE_FILES'] += ['imxdmabuffer/imxdmabuffer_ipu_allocator.c', 'imxdmabuffer/imxdmabuffer_ipu_priv.c'] 262 | else: 263 | if with_ipu_alloc == 'yes': 264 | conf.fatal('linux/fb.h and/or linux/ipu.h were not found in i.MX linux headers path') 265 | else: 266 | Logs.pprint('NORMAL', 'linux/fb.h and/or linux/ipu.h were not found in i.MX linux headers path; disabling IPU allocator') 267 | 268 | 269 | # G2D allocator checks and flags 270 | with_g2d_alloc = conf.options.with_g2d_allocator 271 | if 
with_g2d_alloc != 'no': 272 | g2d_libpath = [conf.options.g2d_libs] if conf.options.g2d_libs else [] 273 | g2d_includes = [conf.options.g2d_includes] if conf.options.g2d_includes else [] 274 | g2d_lib_found = conf.check_cc(mandatory = 0, libpath = g2d_libpath, lib = 'g2d', uselib_store = 'IMXG2D') 275 | g2d_inc_found = conf.check_cc(mandatory = 0, define_name = '', includes = g2d_includes, header_name = 'g2d.h', uselib_store = 'IMXG2D') 276 | if g2d_lib_found and g2d_inc_found: 277 | conf.define('IMXDMABUFFER_G2D_ALLOCATOR_ENABLED', 1) 278 | conf.env['EXTRA_USELIBS'] += ['IMXG2D'] 279 | conf.env['EXTRA_HEADER_FILES'] += ['imxdmabuffer/imxdmabuffer_g2d_allocator.h'] 280 | conf.env['EXTRA_SOURCE_FILES'] += ['imxdmabuffer/imxdmabuffer_g2d_allocator.c'] 281 | else: 282 | if with_g2d_alloc == 'yes': 283 | conf.fatal('G2D not found (library found: %d header found: %d)' % (g2d_lib_found != None, g2d_inc_found != None)) 284 | else: 285 | Logs.pprint('NORMAL', 'G2D not found (library found: %d header found: %d); disabling G2D allocator' % (g2d_lib_found != None, g2d_inc_found != None)) 286 | 287 | 288 | # PxP allocator checks and flags 289 | with_pxp_alloc = conf.options.with_pxp_allocator 290 | if with_pxp_alloc != 'no': 291 | pxp_header_found = conf.check_cc(fragment = ''' 292 | #include <linux/pxp_device.h> 293 | 294 | int main() { return 0; } 295 | ''', 296 | uselib = 'IMXHEADERS', 297 | mandatory = False, 298 | execute = False, 299 | msg = 'checking for linux/pxp_device.h' 300 | ) 301 | if pxp_header_found: 302 | conf.define('IMXDMABUFFER_PXP_ALLOCATOR_ENABLED', 1) 303 | conf.env['EXTRA_USELIBS'] += ['IMXHEADERS'] 304 | conf.env['EXTRA_HEADER_FILES'] += ['imxdmabuffer/imxdmabuffer_pxp_allocator.h'] 305 | conf.env['EXTRA_SOURCE_FILES'] += ['imxdmabuffer/imxdmabuffer_pxp_allocator.c'] 306 | else: 307 | if with_pxp_alloc == 'yes': 308 | conf.fatal('linux/pxp_device.h was not found in i.MX linux headers path') 309 | else: 310 | Logs.pprint('NORMAL', 'linux/pxp_device.h was not found in i.MX linux headers path; disabling PxP allocator') 311 | 312 | 313 | # Process the library version number 314 | version_node = conf.srcnode.find_node('VERSION') 315 | if not version_node: 316 | conf.fatal('Could not open VERSION file') 317 | with open(version_node.abspath()) as x: 318 | version = x.readline().splitlines()[0] 319 | conf.env['IMXDMABUFFER_VERSION'] = version 320 | conf.define('IMXDMABUFFER_VERSION', version) 321 | Logs.pprint('NORMAL', 'libimxdmabuffer version %s' % version) 322 | 323 | 324 | # Write the config header 325 | conf.write_config_header('imxdmabuffer_config.h') 326 | 327 | 328 | def build(bld): 329 | bld( 330 | features = ['c', 'cstlib' if bld.env['BUILD_STATIC'] else 'cshlib'], 331 | includes = ['.'], 332 | uselib = bld.env['EXTRA_USELIBS'], 333 | source = ['imxdmabuffer/imxdmabuffer.c'] + bld.env['EXTRA_SOURCE_FILES'], 334 | name = 'imxdmabuffer', 335 | target = 'imxdmabuffer', 336 | vnum = bld.env['IMXDMABUFFER_VERSION'], 337 | install_path = "${LIBDIR}" 338 | ) 339 | 340 | bld.install_files('${PREFIX}/include/imxdmabuffer/', ['imxdmabuffer_config.h', 'imxdmabuffer/imxdmabuffer.h', 'imxdmabuffer/imxdmabuffer_physaddr.h'] + bld.env['EXTRA_HEADER_FILES']) 341 | 342 | bld( 343 | features = ['subst'], 344 | source = "libimxdmabuffer.pc.in", 345 | target = "libimxdmabuffer.pc", 346 | install_path = "${LIBDIR}/pkgconfig" 347 | ) 348 | 349 | bld( 350 | features = ['c', 'cprogram'], 351 | includes = ['.'], 352 | use = 'imxdmabuffer', 353 | source = ['test/test-alloc.c'], 354 | target = 'test-alloc', 355 |
install_path = None 356 | ) 357 | -------------------------------------------------------------------------------- /imxdmabuffer/imxdmabuffer_dma_heap_allocator.c: -------------------------------------------------------------------------------- 1 | #include <assert.h> 2 | #include <errno.h> 3 | #include <fcntl.h> 4 | #include <stdint.h> 5 | #include <stdlib.h> 6 | #include <string.h> 7 | #include <unistd.h> 8 | #include <sys/mman.h> 9 | 10 | #include <linux/dma-buf.h> 11 | #include <linux/dma-heap.h> 12 | 13 | #include <imxdmabuffer_config.h> 14 | #include "imxdmabuffer.h" 15 | #include "imxdmabuffer_priv.h" 16 | #include "imxdmabuffer_dma_heap_allocator.h" 17 | 18 | 19 | /* XXX: Currently (2022-04-28), DMA-BUF heaps do not synchronize properly in 20 | * NXP BSPs. Not even the DMA_BUF sync ioctl helps, and according to this commit: 21 | * https://source.codeaurora.org/external/imx/gst-plugins-base/commit/?h=MM_04.06.04_2112_L5.15.y&id=6dd8dc7566a76354e3283dc4e2dcecae817bf20e 22 | * it even causes issues (which are not documented, unfortunately). 23 | * 24 | * A workaround is to issue a DMA_BUF_IOCTL_PHYS ioctl, as seen here: 25 | * https://source.codeaurora.org/external/imx/gst-plugins-base/commit/?h=MM_04.06.04_2112_L5.15.y&id=d6ad337837085f8dc1ef29eae6844edbf3a3915f 26 | * This seems to sync CPU caches with DRAM. It is unclear if this will ever change, 27 | * so this workaround, along with the DMA_BUF sync, is kept in code blocks that 28 | * can be enabled/disabled with these #defines. */ 29 | //#define USE_DMA_BUF_SYNC_IOCTL 30 | #define USE_DMA_BUF_PHYS_SYNC_WORKAROUND 31 | 32 | 33 | typedef struct 34 | { 35 | ImxDmaBuffer parent; 36 | 37 | int dmabuf_fd; 38 | imx_physical_address_t physical_address; 39 | size_t size; 40 | uint8_t* mapped_virtual_address; 41 | unsigned int map_flags; 42 | 43 | int mapping_refcount; 44 | int sync_started; 45 | } 46 | ImxDmaBufferDmaHeapBuffer; 47 | 48 | 49 | typedef struct 50 | { 51 | ImxDmaBufferAllocator parent; 52 | int dma_heap_fd; 53 | int dma_heap_fd_is_internal; 54 | unsigned int heap_flags; 55 | unsigned int fd_flags; 56 | int is_cached; 57 | } 58 | ImxDmaBufferDmaHeapAllocator; 59 | 60 | 61 | static void imx_dma_buffer_dma_heap_allocator_destroy(ImxDmaBufferAllocator *allocator); 62 | static ImxDmaBuffer* imx_dma_buffer_dma_heap_allocator_allocate(ImxDmaBufferAllocator *allocator, size_t size, size_t alignment, int *error); 63 | static void imx_dma_buffer_dma_heap_allocator_deallocate(ImxDmaBufferAllocator *allocator, ImxDmaBuffer *buffer); 64 | static uint8_t* imx_dma_buffer_dma_heap_allocator_map(ImxDmaBufferAllocator *allocator, ImxDmaBuffer *buffer, unsigned int flags, int *error); 65 | static void imx_dma_buffer_dma_heap_allocator_unmap(ImxDmaBufferAllocator *allocator, ImxDmaBuffer *buffer); 66 | static void imx_dma_buffer_dma_heap_allocator_start_sync_session(ImxDmaBufferAllocator *allocator, ImxDmaBuffer *buffer); 67 | static void imx_dma_buffer_dma_heap_allocator_start_sync_session_impl(ImxDmaBufferDmaHeapBuffer *imx_dma_heap_buffer); 68 | static void imx_dma_buffer_dma_heap_allocator_stop_sync_session(ImxDmaBufferAllocator *allocator, ImxDmaBuffer *buffer); 69 | static void imx_dma_buffer_dma_heap_allocator_stop_sync_session_impl(ImxDmaBufferDmaHeapBuffer *imx_dma_heap_buffer); 70 | static imx_physical_address_t imx_dma_buffer_dma_heap_allocator_get_physical_address(ImxDmaBufferAllocator *allocator, ImxDmaBuffer *buffer); 71 | static int imx_dma_buffer_dma_heap_allocator_get_fd(ImxDmaBufferAllocator *allocator, ImxDmaBuffer *buffer); 72 | static size_t imx_dma_buffer_dma_heap_allocator_get_size(ImxDmaBufferAllocator *allocator, ImxDmaBuffer *buffer); 73 | 74 |
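/* Illustrative usage sketch, added for this document (not part of the library)
 * and therefore kept in a disabled #if 0 block. It shows how a caller would
 * drive the sync behavior described in the XXX comment above, using the
 * public wrappers that imxdmabuffer.h is assumed to declare around the
 * allocator vtable (imx_dma_buffer_allocate(), imx_dma_buffer_map(), etc.): */
#if 0
static void dma_heap_manual_sync_sketch(void)
{
	int err = 0;
	ImxDmaBufferAllocator *allocator = imx_dma_buffer_dma_heap_allocator_new(
		-1, /* fd < 0: open the device node that was configured at build time */
		IMX_DMA_BUFFER_DMA_HEAP_ALLOCATOR_DEFAULT_HEAP_FLAGS,
		IMX_DMA_BUFFER_DMA_HEAP_ALLOCATOR_DEFAULT_FD_FLAGS,
		&err
	);
	ImxDmaBuffer *buffer = imx_dma_buffer_allocate(allocator, 4096, 0, &err);

	/* MANUAL_SYNC disables the implicit sync session that map()/unmap()
	 * otherwise wrap around cached mappings (see map()/unmap() below). */
	uint8_t *virt = imx_dma_buffer_map(buffer,
		IMX_DMA_BUFFER_MAPPING_FLAG_WRITE | IMX_DMA_BUFFER_MAPPING_FLAG_MANUAL_SYNC, &err);

	imx_dma_buffer_start_sync_session(buffer);
	memset(virt, 0x00, 4096);                 /* CPU writes happen inside the session */
	imx_dma_buffer_stop_sync_session(buffer); /* flushes caches to DRAM (workaround above) */

	imx_dma_buffer_unmap(buffer);
	imx_dma_buffer_deallocate(buffer);
	imx_dma_buffer_allocator_destroy(allocator);
}
#endif
75 | static void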
imx_dma_buffer_dma_heap_allocator_destroy(ImxDmaBufferAllocator *allocator) 76 | { 77 | ImxDmaBufferDmaHeapAllocator *imx_dma_heap_allocator = (ImxDmaBufferDmaHeapAllocator *)allocator; 78 | 79 | assert(imx_dma_heap_allocator != NULL); 80 | 81 | if ((imx_dma_heap_allocator->dma_heap_fd > 0) && imx_dma_heap_allocator->dma_heap_fd_is_internal) 82 | { 83 | close(imx_dma_heap_allocator->dma_heap_fd); 84 | imx_dma_heap_allocator->dma_heap_fd = -1; 85 | } 86 | 87 | free(imx_dma_heap_allocator); 88 | } 89 | 90 | 91 | static ImxDmaBuffer* imx_dma_buffer_dma_heap_allocator_allocate(ImxDmaBufferAllocator *allocator, size_t size, size_t alignment, int *error) 92 | { 93 | int dmabuf_fd = -1; 94 | imx_physical_address_t physical_address; 95 | ImxDmaBufferDmaHeapBuffer *imx_dma_heap_buffer; 96 | ImxDmaBufferDmaHeapAllocator *imx_dma_heap_allocator = (ImxDmaBufferDmaHeapAllocator *)allocator; 97 | 98 | IMX_DMA_BUFFER_UNUSED_PARAM(alignment); 99 | 100 | assert(imx_dma_heap_allocator != NULL); 101 | assert(imx_dma_heap_allocator->dma_heap_fd > 0); 102 | 103 | /* Perform the actual allocation. */ 104 | dmabuf_fd = imx_dma_buffer_dma_heap_allocate_dmabuf( 105 | imx_dma_heap_allocator->dma_heap_fd, 106 | size, 107 | imx_dma_heap_allocator->heap_flags, 108 | imx_dma_heap_allocator->fd_flags, 109 | error 110 | ); 111 | if (dmabuf_fd < 0) 112 | return NULL; 113 | 114 | /* Now that we've got the buffer, retrieve its physical address. */ 115 | physical_address = imx_dma_buffer_dma_heap_get_physical_address_from_dmabuf_fd(dmabuf_fd, error); 116 | if (physical_address == 0) 117 | { 118 | close(dmabuf_fd); 119 | return NULL; 120 | } 121 | 122 | /* Allocate system memory for the DMA buffer structure, and initialize its fields. */ 123 | imx_dma_heap_buffer = (ImxDmaBufferDmaHeapBuffer *)malloc(sizeof(ImxDmaBufferDmaHeapBuffer)); 124 | imx_dma_heap_buffer->parent.allocator = allocator; 125 | imx_dma_heap_buffer->dmabuf_fd = dmabuf_fd; 126 | imx_dma_heap_buffer->physical_address = physical_address; 127 | imx_dma_heap_buffer->size = size; 128 | imx_dma_heap_buffer->mapped_virtual_address = NULL; 129 | imx_dma_heap_buffer->mapping_refcount = 0; 130 | imx_dma_heap_buffer->sync_started = 0; 131 | 132 | return (ImxDmaBuffer *)imx_dma_heap_buffer; 133 | } 134 | 135 | 136 | static void imx_dma_buffer_dma_heap_allocator_deallocate(ImxDmaBufferAllocator *allocator, ImxDmaBuffer *buffer) 137 | { 138 | ImxDmaBufferDmaHeapBuffer *imx_dma_heap_buffer = (ImxDmaBufferDmaHeapBuffer *)buffer; 139 | 140 | assert(imx_dma_heap_buffer != NULL); 141 | assert(imx_dma_heap_buffer->dmabuf_fd > 0); 142 | 143 | if (imx_dma_heap_buffer->mapped_virtual_address != NULL) 144 | { 145 | imx_dma_buffer_dma_heap_allocator_stop_sync_session(allocator, buffer); 146 | 147 | /* Set mapping_refcount to 1 to force an 148 | * imx_dma_buffer_dma_heap_allocator_unmap() to actually unmap the buffer. 
*/ 149 | imx_dma_heap_buffer->mapping_refcount = 1; 150 | imx_dma_buffer_dma_heap_allocator_unmap(allocator, buffer); 151 | } 152 | 153 | close(imx_dma_heap_buffer->dmabuf_fd); 154 | free(imx_dma_heap_buffer); 155 | } 156 | 157 | 158 | static uint8_t* imx_dma_buffer_dma_heap_allocator_map(ImxDmaBufferAllocator *allocator, ImxDmaBuffer *buffer, unsigned int flags, int *error) 159 | { 160 | ImxDmaBufferDmaHeapBuffer *imx_dma_heap_buffer = (ImxDmaBufferDmaHeapBuffer *)buffer; 161 | ImxDmaBufferDmaHeapAllocator *imx_dma_heap_allocator = (ImxDmaBufferDmaHeapAllocator *)allocator; 162 | 163 | IMX_DMA_BUFFER_UNUSED_PARAM(allocator); 164 | 165 | assert(imx_dma_heap_buffer != NULL); 166 | assert(imx_dma_heap_buffer->dmabuf_fd > 0); 167 | 168 | if ((flags & IMX_DMA_BUFFER_MAPPING_READWRITE_FLAG_MASK) == 0) 169 | flags |= IMX_DMA_BUFFER_MAPPING_FLAG_READ | IMX_DMA_BUFFER_MAPPING_FLAG_WRITE; 170 | 171 | if (imx_dma_heap_buffer->mapped_virtual_address != NULL) 172 | { 173 | assert((imx_dma_heap_buffer->map_flags & flags & IMX_DMA_BUFFER_MAPPING_READWRITE_FLAG_MASK) == (flags & IMX_DMA_BUFFER_MAPPING_READWRITE_FLAG_MASK)); 174 | 175 | /* Buffer is already mapped. Just increment the 176 | * refcount and otherwise do nothing. */ 177 | imx_dma_heap_buffer->mapping_refcount++; 178 | } 179 | else 180 | { 181 | /* Buffer is not mapped yet. Call mmap() to perform 182 | * the memory mapping. */ 183 | 184 | int mmap_prot = 0; 185 | int mmap_flags = MAP_SHARED; 186 | void *virtual_address; 187 | 188 | mmap_prot |= (flags & IMX_DMA_BUFFER_MAPPING_FLAG_READ) ? PROT_READ : 0; 189 | mmap_prot |= (flags & IMX_DMA_BUFFER_MAPPING_FLAG_WRITE) ? PROT_WRITE : 0; 190 | 191 | imx_dma_heap_buffer->map_flags = flags; 192 | 193 | virtual_address = mmap(0, imx_dma_heap_buffer->size, mmap_prot, mmap_flags, imx_dma_heap_buffer->dmabuf_fd, 0); 194 | if (virtual_address == MAP_FAILED) 195 | { 196 | if (error != NULL) 197 | *error = errno; 198 | } 199 | else 200 | { 201 | imx_dma_heap_buffer->mapping_refcount = 1; 202 | imx_dma_heap_buffer->mapped_virtual_address = virtual_address; 203 | 204 | if (imx_dma_heap_allocator->is_cached && !(flags & IMX_DMA_BUFFER_MAPPING_FLAG_MANUAL_SYNC)) 205 | imx_dma_buffer_dma_heap_allocator_start_sync_session_impl(imx_dma_heap_buffer); 206 | } 207 | } 208 | 209 | return imx_dma_heap_buffer->mapped_virtual_address; 210 | } 211 | 212 | 213 | static void imx_dma_buffer_dma_heap_allocator_unmap(ImxDmaBufferAllocator *allocator, ImxDmaBuffer *buffer) 214 | { 215 | ImxDmaBufferDmaHeapBuffer *imx_dma_heap_buffer = (ImxDmaBufferDmaHeapBuffer *)buffer; 216 | ImxDmaBufferDmaHeapAllocator *imx_dma_heap_allocator = (ImxDmaBufferDmaHeapAllocator *)allocator; 217 | 218 | IMX_DMA_BUFFER_UNUSED_PARAM(allocator); 219 | 220 | assert(imx_dma_heap_buffer != NULL); 221 | assert(imx_dma_heap_buffer->dmabuf_fd > 0); 222 | 223 | if (imx_dma_heap_buffer->mapped_virtual_address == NULL) 224 | return; 225 | 226 | imx_dma_heap_buffer->mapping_refcount--; 227 | if (imx_dma_heap_buffer->mapping_refcount != 0) 228 | return; 229 | 230 | if (imx_dma_heap_allocator->is_cached && !(imx_dma_heap_buffer->map_flags & IMX_DMA_BUFFER_MAPPING_FLAG_MANUAL_SYNC)) 231 | imx_dma_buffer_dma_heap_allocator_stop_sync_session_impl(imx_dma_heap_buffer); 232 | 233 | munmap((void *)(imx_dma_heap_buffer->mapped_virtual_address), imx_dma_heap_buffer->size); 234 | imx_dma_heap_buffer->mapped_virtual_address = NULL; 235 | } 236 | 237 |
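/* Note on the refcounting above: map() and unmap() calls nest. Each map()
 * must be balanced by one unmap(); mmap()/munmap() only run at the outermost
 * level. A hedged sketch of the caller-visible behavior (not part of the
 * library; wrapper names assumed to come from imxdmabuffer.h):
 *
 *   uint8_t *p1 = imx_dma_buffer_map(buffer, 0, &err); // refcount 0 -> 1, mmap() runs
 *   uint8_t *p2 = imx_dma_buffer_map(buffer, 0, &err); // refcount 1 -> 2, p2 == p1
 *   imx_dma_buffer_unmap(buffer);                      // refcount 2 -> 1, still mapped
 *   imx_dma_buffer_unmap(buffer);                      // refcount 1 -> 0, munmap() runs
 *
 * Passing 0 as flags maps for read and write, since map() above defaults to
 * READ | WRITE when neither mapping flag is given. */
238 | static void imx_dma_buffer_dma_heap_allocator_start_sync_session(ImxDmaBufferAllocator *allocator, ImxDmaBuffer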
*buffer) 239 | { 240 | ImxDmaBufferDmaHeapBuffer *imx_dma_heap_buffer = (ImxDmaBufferDmaHeapBuffer *)buffer; 241 | 242 | IMX_DMA_BUFFER_UNUSED_PARAM(allocator); 243 | 244 | if (imx_dma_heap_buffer->sync_started) 245 | return; 246 | if (!(imx_dma_heap_buffer->map_flags & IMX_DMA_BUFFER_MAPPING_FLAG_MANUAL_SYNC)) 247 | return; 248 | 249 | imx_dma_buffer_dma_heap_allocator_start_sync_session_impl(imx_dma_heap_buffer); 250 | } 251 | 252 | 253 | static void imx_dma_buffer_dma_heap_allocator_start_sync_session_impl(ImxDmaBufferDmaHeapBuffer *imx_dma_heap_buffer) 254 | { 255 | #ifdef USE_DMA_BUF_SYNC_IOCTL 256 | { 257 | struct dma_buf_sync dmabuf_sync; 258 | memset(&dmabuf_sync, 0, sizeof(dmabuf_sync)); 259 | dmabuf_sync.flags = DMA_BUF_SYNC_START; 260 | dmabuf_sync.flags |= (imx_dma_heap_buffer->map_flags & IMX_DMA_BUFFER_MAPPING_FLAG_READ) ? DMA_BUF_SYNC_READ : 0; 261 | dmabuf_sync.flags |= (imx_dma_heap_buffer->map_flags & IMX_DMA_BUFFER_MAPPING_FLAG_WRITE) ? DMA_BUF_SYNC_WRITE : 0; 262 | 263 | ioctl(imx_dma_heap_buffer->dmabuf_fd, DMA_BUF_IOCTL_SYNC, &dmabuf_sync); 264 | } 265 | #endif 266 | 267 | #ifdef USE_DMA_BUF_PHYS_SYNC_WORKAROUND 268 | /* Use the DMA_BUF_IOCTL_PHYS here to force the CPU cache to 269 | * be repopulated with the contents of the actual memory block. 270 | * Otherwise, CPU read operations might use stale cached data. */ 271 | if (imx_dma_heap_buffer->map_flags & IMX_DMA_BUFFER_MAPPING_FLAG_READ) 272 | { 273 | struct dma_buf_phys dma_phys; 274 | ioctl(imx_dma_heap_buffer->dmabuf_fd, DMA_BUF_IOCTL_PHYS, &dma_phys); 275 | } 276 | #endif 277 | 278 | imx_dma_heap_buffer->sync_started = 1; 279 | } 280 | 281 | 282 | static void imx_dma_buffer_dma_heap_allocator_stop_sync_session(ImxDmaBufferAllocator *allocator, ImxDmaBuffer *buffer) 283 | { 284 | ImxDmaBufferDmaHeapBuffer *imx_dma_heap_buffer = (ImxDmaBufferDmaHeapBuffer *)buffer; 285 | 286 | IMX_DMA_BUFFER_UNUSED_PARAM(allocator); 287 | 288 | assert(imx_dma_heap_buffer->mapped_virtual_address != 0); 289 | 290 | if (!imx_dma_heap_buffer->sync_started) 291 | return; 292 | if (!(imx_dma_heap_buffer->map_flags & IMX_DMA_BUFFER_MAPPING_FLAG_MANUAL_SYNC)) 293 | return; 294 | 295 | imx_dma_buffer_dma_heap_allocator_stop_sync_session_impl(imx_dma_heap_buffer); 296 | } 297 | 298 | 299 | static void imx_dma_buffer_dma_heap_allocator_stop_sync_session_impl(ImxDmaBufferDmaHeapBuffer *imx_dma_heap_buffer) 300 | { 301 | #ifdef USE_DMA_BUF_SYNC_IOCTL 302 | { 303 | struct dma_buf_sync dmabuf_sync; 304 | memset(&dmabuf_sync, 0, sizeof(dmabuf_sync)); 305 | dmabuf_sync.flags = DMA_BUF_SYNC_END; 306 | dmabuf_sync.flags |= (imx_dma_heap_buffer->map_flags & IMX_DMA_BUFFER_MAPPING_FLAG_READ) ? DMA_BUF_SYNC_READ : 0; 307 | dmabuf_sync.flags |= (imx_dma_heap_buffer->map_flags & IMX_DMA_BUFFER_MAPPING_FLAG_WRITE) ? DMA_BUF_SYNC_WRITE : 0; 308 | 309 | ioctl(imx_dma_heap_buffer->dmabuf_fd, DMA_BUF_IOCTL_SYNC, &dmabuf_sync); 310 | } 311 | #endif 312 | 313 | #ifdef USE_DMA_BUF_PHYS_SYNC_WORKAROUND 314 | /* Use the DMA_BUF_IOCTL_PHYS here to force the CPU cache to be 315 | * written to the actual memory block. Otherwise, device DMA 316 | * access to memory may not use the data the CPU just wrote. 
*/ 317 | if (imx_dma_heap_buffer->map_flags & IMX_DMA_BUFFER_MAPPING_FLAG_WRITE) 318 | { 319 | struct dma_buf_phys dma_phys; 320 | ioctl(imx_dma_heap_buffer->dmabuf_fd, DMA_BUF_IOCTL_PHYS, &dma_phys); 321 | } 322 | #endif 323 | 324 | imx_dma_heap_buffer->sync_started = 0; 325 | } 326 | 327 | 328 | static imx_physical_address_t imx_dma_buffer_dma_heap_allocator_get_physical_address(ImxDmaBufferAllocator *allocator, ImxDmaBuffer *buffer) 329 | { 330 | ImxDmaBufferDmaHeapBuffer *imx_dma_heap_buffer = (ImxDmaBufferDmaHeapBuffer *)buffer; 331 | IMX_DMA_BUFFER_UNUSED_PARAM(allocator); 332 | assert(imx_dma_heap_buffer != NULL); 333 | return imx_dma_heap_buffer->physical_address; 334 | } 335 | 336 | 337 | static int imx_dma_buffer_dma_heap_allocator_get_fd(ImxDmaBufferAllocator *allocator, ImxDmaBuffer *buffer) 338 | { 339 | ImxDmaBufferDmaHeapBuffer *imx_dma_heap_buffer = (ImxDmaBufferDmaHeapBuffer *)buffer; 340 | IMX_DMA_BUFFER_UNUSED_PARAM(allocator); 341 | assert(imx_dma_heap_buffer != NULL); 342 | return imx_dma_heap_buffer->dmabuf_fd; 343 | } 344 | 345 | 346 | static size_t imx_dma_buffer_dma_heap_allocator_get_size(ImxDmaBufferAllocator *allocator, ImxDmaBuffer *buffer) 347 | { 348 | ImxDmaBufferDmaHeapBuffer *imx_dma_heap_buffer = (ImxDmaBufferDmaHeapBuffer *)buffer; 349 | IMX_DMA_BUFFER_UNUSED_PARAM(allocator); 350 | assert(imx_dma_heap_buffer != NULL); 351 | return imx_dma_heap_buffer->size; 352 | } 353 | 354 | 355 | char const * IMX_DMA_BUFFER_DMA_HEAP_ALLOCATOR_DEFAULT_DMA_HEAP_NODE = "/dev/dma_heap/linux,cma"; 356 | unsigned int const IMX_DMA_BUFFER_DMA_HEAP_ALLOCATOR_DEFAULT_HEAP_FLAGS = DMA_HEAP_VALID_HEAP_FLAGS; 357 | unsigned int const IMX_DMA_BUFFER_DMA_HEAP_ALLOCATOR_DEFAULT_FD_FLAGS = (O_RDWR | O_CLOEXEC); 358 | 359 | 360 | ImxDmaBufferAllocator* imx_dma_buffer_dma_heap_allocator_new( 361 | int dma_heap_fd, 362 | unsigned int heap_flags, 363 | unsigned int fd_flags, 364 | int *error 365 | ) 366 | { 367 | ImxDmaBufferDmaHeapAllocator *imx_dma_heap_allocator; 368 | 369 | imx_dma_heap_allocator = (ImxDmaBufferDmaHeapAllocator *)malloc(sizeof(ImxDmaBufferDmaHeapAllocator)); 370 | imx_dma_heap_allocator->parent.destroy = imx_dma_buffer_dma_heap_allocator_destroy; 371 | imx_dma_heap_allocator->parent.allocate = imx_dma_buffer_dma_heap_allocator_allocate; 372 | imx_dma_heap_allocator->parent.deallocate = imx_dma_buffer_dma_heap_allocator_deallocate; 373 | imx_dma_heap_allocator->parent.map = imx_dma_buffer_dma_heap_allocator_map; 374 | imx_dma_heap_allocator->parent.unmap = imx_dma_buffer_dma_heap_allocator_unmap; 375 | imx_dma_heap_allocator->parent.get_physical_address = imx_dma_buffer_dma_heap_allocator_get_physical_address; 376 | imx_dma_heap_allocator->parent.get_fd = imx_dma_buffer_dma_heap_allocator_get_fd; 377 | imx_dma_heap_allocator->parent.get_size = imx_dma_buffer_dma_heap_allocator_get_size; 378 | imx_dma_heap_allocator->dma_heap_fd = dma_heap_fd; 379 | imx_dma_heap_allocator->dma_heap_fd_is_internal = (dma_heap_fd < 0); 380 | imx_dma_heap_allocator->heap_flags = heap_flags; 381 | imx_dma_heap_allocator->fd_flags = fd_flags; 382 | 383 | #ifdef IMXDMABUFFER_DMA_HEAP_ALLOCATES_UNCACHED_MEMORY 384 | imx_dma_heap_allocator->parent.start_sync_session = imx_dma_buffer_noop_start_sync_session_func; 385 | imx_dma_heap_allocator->parent.stop_sync_session = imx_dma_buffer_noop_stop_sync_session_func; 386 | imx_dma_heap_allocator->is_cached = 0; 387 | #else 388 | imx_dma_heap_allocator->parent.start_sync_session = imx_dma_buffer_dma_heap_allocator_start_sync_session; 389 | 
imx_dma_heap_allocator->parent.stop_sync_session = imx_dma_buffer_dma_heap_allocator_stop_sync_session; 390 | imx_dma_heap_allocator->is_cached = 1; 391 | #endif 392 | 393 | if (dma_heap_fd < 0) 394 | { 395 | imx_dma_heap_allocator->dma_heap_fd = open(IMXDMABUFFER_DMA_HEAP_DEVICE_NODE_PATH, O_RDWR); 396 | if (imx_dma_heap_allocator->dma_heap_fd < 0) 397 | { 398 | if (error != NULL) 399 | *error = errno; 400 | free(imx_dma_heap_allocator); 401 | return NULL; 402 | } 403 | } 404 | 405 | return (ImxDmaBufferAllocator*)imx_dma_heap_allocator; 406 | } 407 | 408 | 409 | ImxDmaBufferAllocator* imx_dma_buffer_dma_heap_allocator_new_from_fd( 410 | int dma_heap_fd, 411 | unsigned int heap_flags, 412 | unsigned int fd_flags, 413 | int is_cached_memory_heap 414 | ) 415 | { 416 | ImxDmaBufferDmaHeapAllocator *imx_dma_heap_allocator; 417 | 418 | assert(dma_heap_fd > 0); 419 | 420 | imx_dma_heap_allocator = (ImxDmaBufferDmaHeapAllocator *)malloc(sizeof(ImxDmaBufferDmaHeapAllocator)); 421 | imx_dma_heap_allocator->parent.destroy = imx_dma_buffer_dma_heap_allocator_destroy; 422 | imx_dma_heap_allocator->parent.allocate = imx_dma_buffer_dma_heap_allocator_allocate; 423 | imx_dma_heap_allocator->parent.deallocate = imx_dma_buffer_dma_heap_allocator_deallocate; 424 | imx_dma_heap_allocator->parent.map = imx_dma_buffer_dma_heap_allocator_map; 425 | imx_dma_heap_allocator->parent.unmap = imx_dma_buffer_dma_heap_allocator_unmap; 426 | imx_dma_heap_allocator->parent.get_physical_address = imx_dma_buffer_dma_heap_allocator_get_physical_address; 427 | imx_dma_heap_allocator->parent.get_fd = imx_dma_buffer_dma_heap_allocator_get_fd; 428 | imx_dma_heap_allocator->parent.get_size = imx_dma_buffer_dma_heap_allocator_get_size; 429 | imx_dma_heap_allocator->dma_heap_fd = dma_heap_fd; 430 | imx_dma_heap_allocator->dma_heap_fd_is_internal = 0; 431 | imx_dma_heap_allocator->heap_flags = heap_flags; 432 | imx_dma_heap_allocator->fd_flags = fd_flags; 433 | imx_dma_heap_allocator->is_cached = !!is_cached_memory_heap; 434 | 435 | if (is_cached_memory_heap) 436 | { 437 | imx_dma_heap_allocator->parent.start_sync_session = imx_dma_buffer_dma_heap_allocator_start_sync_session; 438 | imx_dma_heap_allocator->parent.stop_sync_session = imx_dma_buffer_dma_heap_allocator_stop_sync_session; 439 | } 440 | else 441 | { 442 | imx_dma_heap_allocator->parent.start_sync_session = imx_dma_buffer_noop_start_sync_session_func; 443 | imx_dma_heap_allocator->parent.stop_sync_session = imx_dma_buffer_noop_stop_sync_session_func; 444 | } 445 | 446 | return (ImxDmaBufferAllocator*)imx_dma_heap_allocator; 447 | } 448 | 449 | 450 | int imx_dma_buffer_dma_heap_allocator_get_dma_heap_fd(ImxDmaBufferAllocator *allocator) 451 | { 452 | ImxDmaBufferDmaHeapAllocator *imx_dma_heap_allocator = (ImxDmaBufferDmaHeapAllocator *)allocator; 453 | return imx_dma_heap_allocator->dma_heap_fd; 454 | } 455 | 456 | 457 | int imx_dma_buffer_dma_heap_allocate_dmabuf( 458 | int dma_heap_fd, 459 | size_t size, 460 | unsigned int heap_flags, 461 | unsigned int fd_flags, 462 | int *error 463 | ) 464 | { 465 | int dmabuf_fd = -1; 466 | struct dma_heap_allocation_data heap_alloc_data; 467 | 468 | assert(dma_heap_fd > 0); 469 | assert(size > 0); 470 | 471 | memset(&heap_alloc_data, 0, sizeof(heap_alloc_data)); 472 | 473 | heap_alloc_data.len = size; 474 | heap_alloc_data.heap_flags = heap_flags; 475 | heap_alloc_data.fd_flags = fd_flags; 476 | 477 | if (ioctl(dma_heap_fd, DMA_HEAP_IOCTL_ALLOC, &heap_alloc_data) < 0) 478 | { 479 | if (error != NULL) 480 | *error = errno; 481 | 
goto finish; 482 | } 483 | 484 | dmabuf_fd = heap_alloc_data.fd; 485 | 486 | finish: 487 | return dmabuf_fd; 488 | } 489 | 490 | 491 | imx_physical_address_t imx_dma_buffer_dma_heap_get_physical_address_from_dmabuf_fd(int dmabuf_fd, int *error) 492 | { 493 | struct dma_buf_phys dma_phys; 494 | 495 | assert(dmabuf_fd > 0); 496 | 497 | if (ioctl(dmabuf_fd, DMA_BUF_IOCTL_PHYS, &dma_phys) < 0) 498 | { 499 | if (error != NULL) 500 | *error = errno; 501 | return 0; 502 | } 503 | 504 | return (imx_physical_address_t)(dma_phys.phys); 505 | } 506 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | GNU LESSER GENERAL PUBLIC LICENSE 2 | Version 2.1, February 1999 3 | 4 | Copyright (C) 1991, 1999 Free Software Foundation, Inc. 5 | 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA 6 | Everyone is permitted to copy and distribute verbatim copies 7 | of this license document, but changing it is not allowed. 8 | 9 | (This is the first released version of the Lesser GPL. It also counts 10 | as the successor of the GNU Library Public License, version 2, hence 11 | the version number 2.1.) 12 | 13 | Preamble 14 | 15 | The licenses for most software are designed to take away your 16 | freedom to share and change it. By contrast, the GNU General Public 17 | Licenses are intended to guarantee your freedom to share and change 18 | free software--to make sure the software is free for all its users. 19 | 20 | This license, the Lesser General Public License, applies to some 21 | specially designated software packages--typically libraries--of the 22 | Free Software Foundation and other authors who decide to use it. You 23 | can use it too, but we suggest you first think carefully about whether 24 | this license or the ordinary General Public License is the better 25 | strategy to use in any particular case, based on the explanations below. 26 | 27 | When we speak of free software, we are referring to freedom of use, 28 | not price. Our General Public Licenses are designed to make sure that 29 | you have the freedom to distribute copies of free software (and charge 30 | for this service if you wish); that you receive source code or can get 31 | it if you want it; that you can change the software and use pieces of 32 | it in new free programs; and that you are informed that you can do 33 | these things. 34 | 35 | To protect your rights, we need to make restrictions that forbid 36 | distributors to deny you these rights or to ask you to surrender these 37 | rights. These restrictions translate to certain responsibilities for 38 | you if you distribute copies of the library or if you modify it. 39 | 40 | For example, if you distribute copies of the library, whether gratis 41 | or for a fee, you must give the recipients all the rights that we gave 42 | you. You must make sure that they, too, receive or can get the source 43 | code. If you link other code with the library, you must provide 44 | complete object files to the recipients, so that they can relink them 45 | with the library after making changes to the library and recompiling 46 | it. And you must show them these terms so they know their rights. 47 | 48 | We protect your rights with a two-step method: (1) we copyright the 49 | library, and (2) we offer you this license, which gives you legal 50 | permission to copy, distribute and/or modify the library. 
51 | 52 | To protect each distributor, we want to make it very clear that 53 | there is no warranty for the free library. Also, if the library is 54 | modified by someone else and passed on, the recipients should know 55 | that what they have is not the original version, so that the original 56 | author's reputation will not be affected by problems that might be 57 | introduced by others. 58 | 59 | Finally, software patents pose a constant threat to the existence of 60 | any free program. We wish to make sure that a company cannot 61 | effectively restrict the users of a free program by obtaining a 62 | restrictive license from a patent holder. Therefore, we insist that 63 | any patent license obtained for a version of the library must be 64 | consistent with the full freedom of use specified in this license. 65 | 66 | Most GNU software, including some libraries, is covered by the 67 | ordinary GNU General Public License. This license, the GNU Lesser 68 | General Public License, applies to certain designated libraries, and 69 | is quite different from the ordinary General Public License. We use 70 | this license for certain libraries in order to permit linking those 71 | libraries into non-free programs. 72 | 73 | When a program is linked with a library, whether statically or using 74 | a shared library, the combination of the two is legally speaking a 75 | combined work, a derivative of the original library. The ordinary 76 | General Public License therefore permits such linking only if the 77 | entire combination fits its criteria of freedom. The Lesser General 78 | Public License permits more lax criteria for linking other code with 79 | the library. 80 | 81 | We call this license the "Lesser" General Public License because it 82 | does Less to protect the user's freedom than the ordinary General 83 | Public License. It also provides other free software developers Less 84 | of an advantage over competing non-free programs. These disadvantages 85 | are the reason we use the ordinary General Public License for many 86 | libraries. However, the Lesser license provides advantages in certain 87 | special circumstances. 88 | 89 | For example, on rare occasions, there may be a special need to 90 | encourage the widest possible use of a certain library, so that it becomes 91 | a de-facto standard. To achieve this, non-free programs must be 92 | allowed to use the library. A more frequent case is that a free 93 | library does the same job as widely used non-free libraries. In this 94 | case, there is little to gain by limiting the free library to free 95 | software only, so we use the Lesser General Public License. 96 | 97 | In other cases, permission to use a particular library in non-free 98 | programs enables a greater number of people to use a large body of 99 | free software. For example, permission to use the GNU C Library in 100 | non-free programs enables many more people to use the whole GNU 101 | operating system, as well as its variant, the GNU/Linux operating 102 | system. 103 | 104 | Although the Lesser General Public License is Less protective of the 105 | users' freedom, it does ensure that the user of a program that is 106 | linked with the Library has the freedom and the wherewithal to run 107 | that program using a modified version of the Library. 108 | 109 | The precise terms and conditions for copying, distribution and 110 | modification follow. Pay close attention to the difference between a 111 | "work based on the library" and a "work that uses the library". 
The 112 | former contains code derived from the library, whereas the latter must 113 | be combined with the library in order to run. 114 | 115 | GNU LESSER GENERAL PUBLIC LICENSE 116 | TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 117 | 118 | 0. This License Agreement applies to any software library or other 119 | program which contains a notice placed by the copyright holder or 120 | other authorized party saying it may be distributed under the terms of 121 | this Lesser General Public License (also called "this License"). 122 | Each licensee is addressed as "you". 123 | 124 | A "library" means a collection of software functions and/or data 125 | prepared so as to be conveniently linked with application programs 126 | (which use some of those functions and data) to form executables. 127 | 128 | The "Library", below, refers to any such software library or work 129 | which has been distributed under these terms. A "work based on the 130 | Library" means either the Library or any derivative work under 131 | copyright law: that is to say, a work containing the Library or a 132 | portion of it, either verbatim or with modifications and/or translated 133 | straightforwardly into another language. (Hereinafter, translation is 134 | included without limitation in the term "modification".) 135 | 136 | "Source code" for a work means the preferred form of the work for 137 | making modifications to it. For a library, complete source code means 138 | all the source code for all modules it contains, plus any associated 139 | interface definition files, plus the scripts used to control compilation 140 | and installation of the library. 141 | 142 | Activities other than copying, distribution and modification are not 143 | covered by this License; they are outside its scope. The act of 144 | running a program using the Library is not restricted, and output from 145 | such a program is covered only if its contents constitute a work based 146 | on the Library (independent of the use of the Library in a tool for 147 | writing it). Whether that is true depends on what the Library does 148 | and what the program that uses the Library does. 149 | 150 | 1. You may copy and distribute verbatim copies of the Library's 151 | complete source code as you receive it, in any medium, provided that 152 | you conspicuously and appropriately publish on each copy an 153 | appropriate copyright notice and disclaimer of warranty; keep intact 154 | all the notices that refer to this License and to the absence of any 155 | warranty; and distribute a copy of this License along with the 156 | Library. 157 | 158 | You may charge a fee for the physical act of transferring a copy, 159 | and you may at your option offer warranty protection in exchange for a 160 | fee. 161 | 162 | 2. You may modify your copy or copies of the Library or any portion 163 | of it, thus forming a work based on the Library, and copy and 164 | distribute such modifications or work under the terms of Section 1 165 | above, provided that you also meet all of these conditions: 166 | 167 | a) The modified work must itself be a software library. 168 | 169 | b) You must cause the files modified to carry prominent notices 170 | stating that you changed the files and the date of any change. 171 | 172 | c) You must cause the whole of the work to be licensed at no 173 | charge to all third parties under the terms of this License. 
174 | 175 | d) If a facility in the modified Library refers to a function or a 176 | table of data to be supplied by an application program that uses 177 | the facility, other than as an argument passed when the facility 178 | is invoked, then you must make a good faith effort to ensure that, 179 | in the event an application does not supply such function or 180 | table, the facility still operates, and performs whatever part of 181 | its purpose remains meaningful. 182 | 183 | (For example, a function in a library to compute square roots has 184 | a purpose that is entirely well-defined independent of the 185 | application. Therefore, Subsection 2d requires that any 186 | application-supplied function or table used by this function must 187 | be optional: if the application does not supply it, the square 188 | root function must still compute square roots.) 189 | 190 | These requirements apply to the modified work as a whole. If 191 | identifiable sections of that work are not derived from the Library, 192 | and can be reasonably considered independent and separate works in 193 | themselves, then this License, and its terms, do not apply to those 194 | sections when you distribute them as separate works. But when you 195 | distribute the same sections as part of a whole which is a work based 196 | on the Library, the distribution of the whole must be on the terms of 197 | this License, whose permissions for other licensees extend to the 198 | entire whole, and thus to each and every part regardless of who wrote 199 | it. 200 | 201 | Thus, it is not the intent of this section to claim rights or contest 202 | your rights to work written entirely by you; rather, the intent is to 203 | exercise the right to control the distribution of derivative or 204 | collective works based on the Library. 205 | 206 | In addition, mere aggregation of another work not based on the Library 207 | with the Library (or with a work based on the Library) on a volume of 208 | a storage or distribution medium does not bring the other work under 209 | the scope of this License. 210 | 211 | 3. You may opt to apply the terms of the ordinary GNU General Public 212 | License instead of this License to a given copy of the Library. To do 213 | this, you must alter all the notices that refer to this License, so 214 | that they refer to the ordinary GNU General Public License, version 2, 215 | instead of to this License. (If a newer version than version 2 of the 216 | ordinary GNU General Public License has appeared, then you can specify 217 | that version instead if you wish.) Do not make any other change in 218 | these notices. 219 | 220 | Once this change is made in a given copy, it is irreversible for 221 | that copy, so the ordinary GNU General Public License applies to all 222 | subsequent copies and derivative works made from that copy. 223 | 224 | This option is useful when you wish to copy part of the code of 225 | the Library into a program that is not a library. 226 | 227 | 4. You may copy and distribute the Library (or a portion or 228 | derivative of it, under Section 2) in object code or executable form 229 | under the terms of Sections 1 and 2 above provided that you accompany 230 | it with the complete corresponding machine-readable source code, which 231 | must be distributed under the terms of Sections 1 and 2 above on a 232 | medium customarily used for software interchange. 
233 | 234 | If distribution of object code is made by offering access to copy 235 | from a designated place, then offering equivalent access to copy the 236 | source code from the same place satisfies the requirement to 237 | distribute the source code, even though third parties are not 238 | compelled to copy the source along with the object code. 239 | 240 | 5. A program that contains no derivative of any portion of the 241 | Library, but is designed to work with the Library by being compiled or 242 | linked with it, is called a "work that uses the Library". Such a 243 | work, in isolation, is not a derivative work of the Library, and 244 | therefore falls outside the scope of this License. 245 | 246 | However, linking a "work that uses the Library" with the Library 247 | creates an executable that is a derivative of the Library (because it 248 | contains portions of the Library), rather than a "work that uses the 249 | library". The executable is therefore covered by this License. 250 | Section 6 states terms for distribution of such executables. 251 | 252 | When a "work that uses the Library" uses material from a header file 253 | that is part of the Library, the object code for the work may be a 254 | derivative work of the Library even though the source code is not. 255 | Whether this is true is especially significant if the work can be 256 | linked without the Library, or if the work is itself a library. The 257 | threshold for this to be true is not precisely defined by law. 258 | 259 | If such an object file uses only numerical parameters, data 260 | structure layouts and accessors, and small macros and small inline 261 | functions (ten lines or less in length), then the use of the object 262 | file is unrestricted, regardless of whether it is legally a derivative 263 | work. (Executables containing this object code plus portions of the 264 | Library will still fall under Section 6.) 265 | 266 | Otherwise, if the work is a derivative of the Library, you may 267 | distribute the object code for the work under the terms of Section 6. 268 | Any executables containing that work also fall under Section 6, 269 | whether or not they are linked directly with the Library itself. 270 | 271 | 6. As an exception to the Sections above, you may also combine or 272 | link a "work that uses the Library" with the Library to produce a 273 | work containing portions of the Library, and distribute that work 274 | under terms of your choice, provided that the terms permit 275 | modification of the work for the customer's own use and reverse 276 | engineering for debugging such modifications. 277 | 278 | You must give prominent notice with each copy of the work that the 279 | Library is used in it and that the Library and its use are covered by 280 | this License. You must supply a copy of this License. If the work 281 | during execution displays copyright notices, you must include the 282 | copyright notice for the Library among them, as well as a reference 283 | directing the user to the copy of this License. 
Also, you must do one 284 | of these things: 285 | 286 | a) Accompany the work with the complete corresponding 287 | machine-readable source code for the Library including whatever 288 | changes were used in the work (which must be distributed under 289 | Sections 1 and 2 above); and, if the work is an executable linked 290 | with the Library, with the complete machine-readable "work that 291 | uses the Library", as object code and/or source code, so that the 292 | user can modify the Library and then relink to produce a modified 293 | executable containing the modified Library. (It is understood 294 | that the user who changes the contents of definitions files in the 295 | Library will not necessarily be able to recompile the application 296 | to use the modified definitions.) 297 | 298 | b) Use a suitable shared library mechanism for linking with the 299 | Library. A suitable mechanism is one that (1) uses at run time a 300 | copy of the library already present on the user's computer system, 301 | rather than copying library functions into the executable, and (2) 302 | will operate properly with a modified version of the library, if 303 | the user installs one, as long as the modified version is 304 | interface-compatible with the version that the work was made with. 305 | 306 | c) Accompany the work with a written offer, valid for at 307 | least three years, to give the same user the materials 308 | specified in Subsection 6a, above, for a charge no more 309 | than the cost of performing this distribution. 310 | 311 | d) If distribution of the work is made by offering access to copy 312 | from a designated place, offer equivalent access to copy the above 313 | specified materials from the same place. 314 | 315 | e) Verify that the user has already received a copy of these 316 | materials or that you have already sent this user a copy. 317 | 318 | For an executable, the required form of the "work that uses the 319 | Library" must include any data and utility programs needed for 320 | reproducing the executable from it. However, as a special exception, 321 | the materials to be distributed need not include anything that is 322 | normally distributed (in either source or binary form) with the major 323 | components (compiler, kernel, and so on) of the operating system on 324 | which the executable runs, unless that component itself accompanies 325 | the executable. 326 | 327 | It may happen that this requirement contradicts the license 328 | restrictions of other proprietary libraries that do not normally 329 | accompany the operating system. Such a contradiction means you cannot 330 | use both them and the Library together in an executable that you 331 | distribute. 332 | 333 | 7. You may place library facilities that are a work based on the 334 | Library side-by-side in a single library together with other library 335 | facilities not covered by this License, and distribute such a combined 336 | library, provided that the separate distribution of the work based on 337 | the Library and of the other library facilities is otherwise 338 | permitted, and provided that you do these two things: 339 | 340 | a) Accompany the combined library with a copy of the same work 341 | based on the Library, uncombined with any other library 342 | facilities. This must be distributed under the terms of the 343 | Sections above. 
344 | 345 | b) Give prominent notice with the combined library of the fact 346 | that part of it is a work based on the Library, and explaining 347 | where to find the accompanying uncombined form of the same work. 348 | 349 | 8. You may not copy, modify, sublicense, link with, or distribute 350 | the Library except as expressly provided under this License. Any 351 | attempt otherwise to copy, modify, sublicense, link with, or 352 | distribute the Library is void, and will automatically terminate your 353 | rights under this License. However, parties who have received copies, 354 | or rights, from you under this License will not have their licenses 355 | terminated so long as such parties remain in full compliance. 356 | 357 | 9. You are not required to accept this License, since you have not 358 | signed it. However, nothing else grants you permission to modify or 359 | distribute the Library or its derivative works. These actions are 360 | prohibited by law if you do not accept this License. Therefore, by 361 | modifying or distributing the Library (or any work based on the 362 | Library), you indicate your acceptance of this License to do so, and 363 | all its terms and conditions for copying, distributing or modifying 364 | the Library or works based on it. 365 | 366 | 10. Each time you redistribute the Library (or any work based on the 367 | Library), the recipient automatically receives a license from the 368 | original licensor to copy, distribute, link with or modify the Library 369 | subject to these terms and conditions. You may not impose any further 370 | restrictions on the recipients' exercise of the rights granted herein. 371 | You are not responsible for enforcing compliance by third parties with 372 | this License. 373 | 374 | 11. If, as a consequence of a court judgment or allegation of patent 375 | infringement or for any other reason (not limited to patent issues), 376 | conditions are imposed on you (whether by court order, agreement or 377 | otherwise) that contradict the conditions of this License, they do not 378 | excuse you from the conditions of this License. If you cannot 379 | distribute so as to satisfy simultaneously your obligations under this 380 | License and any other pertinent obligations, then as a consequence you 381 | may not distribute the Library at all. For example, if a patent 382 | license would not permit royalty-free redistribution of the Library by 383 | all those who receive copies directly or indirectly through you, then 384 | the only way you could satisfy both it and this License would be to 385 | refrain entirely from distribution of the Library. 386 | 387 | If any portion of this section is held invalid or unenforceable under any 388 | particular circumstance, the balance of the section is intended to apply, 389 | and the section as a whole is intended to apply in other circumstances. 390 | 391 | It is not the purpose of this section to induce you to infringe any 392 | patents or other property right claims or to contest validity of any 393 | such claims; this section has the sole purpose of protecting the 394 | integrity of the free software distribution system which is 395 | implemented by public license practices. 
Many people have made 396 | generous contributions to the wide range of software distributed 397 | through that system in reliance on consistent application of that 398 | system; it is up to the author/donor to decide if he or she is willing 399 | to distribute software through any other system and a licensee cannot 400 | impose that choice. 401 | 402 | This section is intended to make thoroughly clear what is believed to 403 | be a consequence of the rest of this License. 404 | 405 | 12. If the distribution and/or use of the Library is restricted in 406 | certain countries either by patents or by copyrighted interfaces, the 407 | original copyright holder who places the Library under this License may add 408 | an explicit geographical distribution limitation excluding those countries, 409 | so that distribution is permitted only in or among countries not thus 410 | excluded. In such case, this License incorporates the limitation as if 411 | written in the body of this License. 412 | 413 | 13. The Free Software Foundation may publish revised and/or new 414 | versions of the Lesser General Public License from time to time. 415 | Such new versions will be similar in spirit to the present version, 416 | but may differ in detail to address new problems or concerns. 417 | 418 | Each version is given a distinguishing version number. If the Library 419 | specifies a version number of this License which applies to it and 420 | "any later version", you have the option of following the terms and 421 | conditions either of that version or of any later version published by 422 | the Free Software Foundation. If the Library does not specify a 423 | license version number, you may choose any version ever published by 424 | the Free Software Foundation. 425 | 426 | 14. If you wish to incorporate parts of the Library into other free 427 | programs whose distribution conditions are incompatible with these, 428 | write to the author to ask for permission. For software which is 429 | copyrighted by the Free Software Foundation, write to the Free 430 | Software Foundation; we sometimes make exceptions for this. Our 431 | decision will be guided by the two goals of preserving the free status 432 | of all derivatives of our free software and of promoting the sharing 433 | and reuse of software generally. 434 | 435 | NO WARRANTY 436 | 437 | 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO 438 | WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. 439 | EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR 440 | OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY 441 | KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE 442 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 443 | PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE 444 | LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME 445 | THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 446 | 447 | 16. 
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN 448 | WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY 449 | AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU 450 | FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR 451 | CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE 452 | LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING 453 | RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A 454 | FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF 455 | SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH 456 | DAMAGES. 457 | 458 | END OF TERMS AND CONDITIONS 459 | 460 | How to Apply These Terms to Your New Libraries 461 | 462 | If you develop a new library, and you want it to be of the greatest 463 | possible use to the public, we recommend making it free software that 464 | everyone can redistribute and change. You can do so by permitting 465 | redistribution under these terms (or, alternatively, under the terms of the 466 | ordinary General Public License). 467 | 468 | To apply these terms, attach the following notices to the library. It is 469 | safest to attach them to the start of each source file to most effectively 470 | convey the exclusion of warranty; and each file should have at least the 471 | "copyright" line and a pointer to where the full notice is found. 472 | 473 | {description} 474 | Copyright (C) {year} {fullname} 475 | 476 | This library is free software; you can redistribute it and/or 477 | modify it under the terms of the GNU Lesser General Public 478 | License as published by the Free Software Foundation; either 479 | version 2.1 of the License, or (at your option) any later version. 480 | 481 | This library is distributed in the hope that it will be useful, 482 | but WITHOUT ANY WARRANTY; without even the implied warranty of 483 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 484 | Lesser General Public License for more details. 485 | 486 | You should have received a copy of the GNU Lesser General Public 487 | License along with this library; if not, write to the Free Software 488 | Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 489 | USA 490 | 491 | Also add information on how to contact you by electronic and paper mail. 492 | 493 | You should also get your employer (if you work as a programmer) or your 494 | school, if any, to sign a "copyright disclaimer" for the library, if 495 | necessary. Here is a sample; alter the names: 496 | 497 | Yoyodyne, Inc., hereby disclaims all copyright interest in the 498 | library `Frob' (a library for tweaking knobs) written by James Random 499 | Hacker. 500 | 501 | {signature of Ty Coon}, 1 April 1990 502 | Ty Coon, President of Vice 503 | 504 | That's all there is to it! --------------------------------------------------------------------------------