├── README
├── nvsci_mm
│   ├── Makefile
│   ├── README
│   ├── inc
│   │   └── uapi
│   │       └── linux
│   │           └── nvsci_mm.h
│   ├── nvsci_mm.c
│   └── system_heap.c
└── nvsciipc
    ├── Makefile
    ├── README
    ├── linux
    │   └── nvsciipc_interface.h
    ├── nvsciipc.c
    ├── nvsciipc.h
    └── uapi
        └── linux
            └── nvsciipc_ioctl.h
/README:
--------------------------------------------------------------------------------
1 | This repo contains the following kernel modules:
2 | - Name: NvSci Memory Management Kernel Driver
3 | Version: 1.0
4 | Description: Linux kernel module for memory allocations and secure buffer sharing
5 |
6 | - Name: NvSciIpc kernel driver
7 | Version: 1.0
8 | Description: Linux kernel module for secure buffer sharing
9 |
10 | nvsci_mm v1.0 and nvsciipc v1.0 are part of the NVIDIA DRIVE OS 6.0.8.0 release.
11 |
12 | Contacts:
13 | https://developer.nvidia.com/Contact
14 |
--------------------------------------------------------------------------------
/nvsci_mm/Makefile:
--------------------------------------------------------------------------------
1 | ##################################################################################################
2 | #
3 | # SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
4 | # SPDX-License-Identifier: GPL-2.0-only
5 | #
6 | # This program is free software; you can redistribute it and/or modify it
7 | # under the terms and conditions of the GNU General Public License,
8 | # version 2, as published by the Free Software Foundation.
9 | #
10 | # This program is distributed in the hope it will be useful, but WITHOUT
11 | # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 | # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 | # more details.
14 | #
15 | # You should have received a copy of the GNU General Public License
16 | # along with this program. If not, see <https://www.gnu.org/licenses/>.
17 | #
18 | ##################################################################################################
19 |
20 | EXTRA_CFLAGS=-I$(PWD)/inc
21 | EXTRA_CFLAGS+=-I$(PWD)/../nvsciipc/linux
22 | obj-m += nvsci_mm.o
23 |
--------------------------------------------------------------------------------
/nvsci_mm/README:
--------------------------------------------------------------------------------
1 | NvSci Memory Management Kernel Driver
2 | =====================================
3 |
4 | Description:
5 | Linux kernel module for memory allocations and secure buffer sharing
6 | Version:
7 | 1.0
8 |
9 | Build NvSci MM KMD for DRIVE OS x86
10 | ===================================
11 |
12 | 1) install kernel header package
13 | sudo apt-get install linux-headers-`uname -r`
14 |
15 | Check the desktop kernel version using the "uname -r" command.
16 | The kernel version must be 5.4.0-104 or later.
17 |
18 | 2) build
19 | cd nvsci_mm
20 | make -C /lib/modules/`uname -r`/build M=${PWD} modules
21 |
22 | 3) module parameters:
23 | a) max_pending_exports: the maximum number of simultaneous/active exports by a single client. Default value: UINT_MAX
24 | b) [gid_start:gid_end]: the range of Linux group IDs that will have access to the device node. Default value: [0:0], i.e. only accessible to root.
25 | c) enable_debug: enable debug logs. Set this to any value greater than 0 to enable the logs. Default value: 0
26 |
27 | 4) install NvSci MM KMD
28 | a) with default parameter: sudo insmod nvsci_mm.ko
29 | b) with custom parameters: sudo insmod nvsci_mm.ko max_pending_exports=4096 gid_start=1000 gid_end=2000 enable_debug=1
30 | NOTE: Make sure to remove stale "/dev/nvsci_mm" device nodes before installing the kernel module.
31 |
32 | 5) remove NvSci MM KMD
33 | sudo rmmod nvsci_mm
34 |
35 | 6) clean
36 | make -C /lib/modules/`uname -r`/build M=${PWD} clean
37 |
38 | NOTES
39 | =====
40 | nvsci_mm.c leverages implementation from system_heap.c.
41 | A copy of system_heap.c is maintained here for reference.
42 |
--------------------------------------------------------------------------------
/nvsci_mm/inc/uapi/linux/nvsci_mm.h:
--------------------------------------------------------------------------------
1 | /*
2 | * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3 | * SPDX-License-Identifier: GPL-2.0-only
4 | *
5 | * This program is free software; you can redistribute it and/or modify it
6 | * under the terms and conditions of the GNU General Public License,
7 | * version 2, as published by the Free Software Foundation.
8 | *
9 | * This program is distributed in the hope it will be useful, but WITHOUT
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 | * more details.
13 | *
14 | * You should have received a copy of the GNU General Public License
15 | * along with this program. If not, see <https://www.gnu.org/licenses/>.
16 | */
17 |
18 | #ifndef _NVSCI_MM_H
19 | #define _NVSCI_MM_H
20 | #include <linux/ioctl.h>
21 | #include <linux/types.h>
22 |
23 | #define NVSCI_MM_DEV_NODE "/dev/nvsci_mm"
24 |
25 | #define NVSCI_MM_MAJOR_VERSION (1U)
26 | #define NVSCI_MM_MINOR_VERSION (0U)
27 |
28 | struct nvsci_mm_check_compat_data {
29 | __u64 header;
30 | __u32 result;
31 | };
32 |
33 | struct nvsci_mm_allocation_data {
34 | __u64 header;
35 | __u64 len;
36 | __s32 fd;
37 | __u32 fd_flags;
38 | };
39 |
40 | struct nvsci_mm_get_sciipcid_data{
41 | __u64 header;
42 | __s32 fd;
43 | __u32 fd_flags;
44 | __u64 auth_token;
45 | __u64 sci_ipc_id;
46 | };
47 |
48 | #define NVSCI_MM_HEADER_VERSIONBITS 16
49 | #define NVSCI_MM_HEADER_SIZEBITS 32
50 | #define NVSCI_MM_HEADER_VERSIONMASK ((1U << NVSCI_MM_HEADER_VERSIONBITS) - 1)
51 |
52 | #define NVSCI_MM_SET_HEADER(data) \
53 | (((__u64)sizeof(data) << NVSCI_MM_HEADER_SIZEBITS) | \
54 | (NVSCI_MM_MAJOR_VERSION << NVSCI_MM_HEADER_VERSIONBITS) | (NVSCI_MM_MINOR_VERSION))
55 |
56 | #define GET_MINOR_FROM_HEADER(header) \
57 | ((__u32)((header) & NVSCI_MM_HEADER_VERSIONMASK))
58 |
59 | #define GET_MAJOR_FROM_HEADER(header) \
60 | ((__u32)(((header) >> NVSCI_MM_HEADER_VERSIONBITS) & NVSCI_MM_HEADER_VERSIONMASK))
61 |
62 | #define GET_SIZE_FROM_HEADER(header) \
63 | ((__u32)((header) >> NVSCI_MM_HEADER_SIZEBITS))
64 |
65 | #define NVSCI_MM_SET_DEFAULT_CHECK_COMPAT_DATA(data) \
66 | do { \
67 | (data).header = NVSCI_MM_SET_HEADER(data); \
68 | (data).result = (0); \
69 | } while (1 == 0)
70 |
71 | #define NVSCI_MM_SET_DEFAULT_ALLOCATION_DATA(data) \
72 | do { \
73 | (data).header = NVSCI_MM_SET_HEADER(data); \
74 | (data).len = (0); \
75 | (data).fd = (-1); \
76 | (data).fd_flags = (0); \
77 | } while (1 == 0)
78 |
79 | #define NVSCI_MM_SET_DEFAULT_SCIIPCID_DATA(data) \
80 | do { \
81 | (data).header = NVSCI_MM_SET_HEADER(data); \
82 | (data).fd = (-1); \
83 | (data).fd_flags = (0); \
84 | (data).auth_token = (0); \
85 | (data).sci_ipc_id = (0); \
86 | } while (1 == 0)
87 |
88 | #define NVSCI_MM_IOC_MAGIC 'H'
89 | #define NVSCI_MM_IOCTL_CHECK_COMPAT _IOWR(NVSCI_MM_IOC_MAGIC, 0x0, struct nvsci_mm_check_compat_data)
90 | #define NVSCI_MM_IOCTL_ALLOC _IOWR(NVSCI_MM_IOC_MAGIC, 0x1, struct nvsci_mm_allocation_data)
91 | #define NVSCI_MM_IOCTL_GET_SCIIPCID _IOWR(NVSCI_MM_IOC_MAGIC, 0x2, struct nvsci_mm_get_sciipcid_data)
92 | #define NVSCI_MM_IOCTL_FD_FROM_SCIIPCID _IOWR(NVSCI_MM_IOC_MAGIC, 0x3, struct nvsci_mm_get_sciipcid_data)
93 |
94 | #endif /* _NVSCI_MM_H */
95 |
--------------------------------------------------------------------------------
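
The header above is the whole userspace contract: each ioctl argument begins with a 64-bit header word that packs sizeof(struct) into bits 63:32 and the major/minor version into the low 32 bits, which the driver unpacks with the GET_*_FROM_HEADER macros to reject incompatible callers. A minimal userspace sketch of the check-compat and allocation flow follows; the include path is illustrative (build against this repo's inc/ directory) and error handling is abbreviated.

    /* Hedged sketch: exercises NVSCI_MM_IOCTL_CHECK_COMPAT and
     * NVSCI_MM_IOCTL_ALLOC from the UAPI header above. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <unistd.h>
    #include "uapi/linux/nvsci_mm.h"

    int main(void)
    {
        struct nvsci_mm_check_compat_data compat;
        struct nvsci_mm_allocation_data alloc;
        void *va;
        int dev_fd;

        /* The device node is created with mode 0444, so open read-only;
         * the open still requires root or a gid in [gid_start, gid_end]. */
        dev_fd = open(NVSCI_MM_DEV_NODE, O_RDONLY);
        if (dev_fd < 0) {
            perror("open");
            return 1;
        }

        /* Major versions must match before using any other ioctl */
        NVSCI_MM_SET_DEFAULT_CHECK_COMPAT_DATA(compat);
        if (ioctl(dev_fd, NVSCI_MM_IOCTL_CHECK_COMPAT, &compat) < 0 ||
            !compat.result) {
            fprintf(stderr, "incompatible nvsci_mm version\n");
            close(dev_fd);
            return 1;
        }

        /* Request one page; the driver rounds len up with PAGE_ALIGN() */
        NVSCI_MM_SET_DEFAULT_ALLOCATION_DATA(alloc);
        alloc.len = 4096;
        alloc.fd_flags = O_RDWR;
        if (ioctl(dev_fd, NVSCI_MM_IOCTL_ALLOC, &alloc) == 0) {
            /* The returned fd is a dma-buf; it must be mapped MAP_SHARED */
            va = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
                      alloc.fd, 0);
            if (va != MAP_FAILED) {
                memset(va, 0, 4096);
                munmap(va, 4096);
            }
            close(alloc.fd);
        }

        close(dev_fd);
        return 0;
    }

The same pattern extends to NVSCI_MM_IOCTL_GET_SCIIPCID, where auth_token must come from an NvSciIpc endpoint; the driver records the export in its database so a peer can later redeem the returned sci_ipc_id.
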
/nvsci_mm/nvsci_mm.c:
--------------------------------------------------------------------------------
1 | /*
2 | * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3 | * SPDX-License-Identifier: GPL-2.0-only
4 | *
5 | * This program is free software; you can redistribute it and/or modify it
6 | * under the terms and conditions of the GNU General Public License,
7 | * version 2, as published by the Free Software Foundation.
8 | *
9 | * This program is distributed in the hope it will be useful, but WITHOUT
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 | * more details.
13 | *
14 | * You should have received a copy of the GNU General Public License
15 | * along with this program. If not, see <https://www.gnu.org/licenses/>.
16 | */
17 |
18 | #include <linux/module.h>
19 | #include <linux/cdev.h>
20 | #include <linux/device.h>
21 | #include <linux/fs.h>
22 | #include <linux/file.h>
23 | #include <linux/slab.h>
24 | #include <linux/cred.h>
25 | #include <linux/dma-buf.h>
26 | #include <linux/highmem.h>
27 | #include <linux/vmalloc.h>
28 | #include <linux/uaccess.h>
29 | #include <linux/nospec.h>
30 |
31 | #include "uapi/linux/nvsci_mm.h"
32 | #include "nvsciipc_interface.h"
33 |
34 | atomic_t nvsci_mm_call_level = ATOMIC_INIT(0);
35 |
36 | void PrintLevel(void)
37 | {
38 | int i = 0;
39 | int level = atomic_read(&nvsci_mm_call_level);
40 | for (i = 0; i < level; i++)
41 | pr_cont(" ");
42 | }
43 |
44 | static uint enable_debug = 0;
45 | module_param(enable_debug, uint, 0644);
46 | static uint max_pending_exports = UINT_MAX;
47 | module_param(max_pending_exports, uint, 0644);
48 | static uint gid_start = 0;
49 | module_param(gid_start, uint, 0644);
50 | static uint gid_end = 0;
51 | module_param(gid_end, uint, 0644);
52 |
53 | #define EN atomic_inc(&nvsci_mm_call_level)
54 | #define EX atomic_dec(&nvsci_mm_call_level)
55 | #define PrintLog(fmt, ...) \
56 | do { if (enable_debug) { PrintLevel(); \
57 | pr_info(fmt, ##__VA_ARGS__); } } while (0)
58 |
59 | static const unsigned int nvsci_mm_ioctl_cmds[] = {
60 | NVSCI_MM_IOCTL_CHECK_COMPAT, NVSCI_MM_IOCTL_ALLOC,
61 | NVSCI_MM_IOCTL_GET_SCIIPCID, NVSCI_MM_IOCTL_FD_FROM_SCIIPCID,
62 | };
63 |
64 | struct nvsci_mm_db_entry {
65 | struct rb_node entry;
66 | void *client;
67 | void *handle;
68 | u64 sci_ipc_id;
69 | NvSciIpcEndpointVuid local_vuid;
70 | NvSciIpcEndpointVuid peer_vuid;
71 | u32 fd_flags;
72 | u32 refcount;
73 | };
74 |
75 | struct free_sid_node {
76 | struct list_head list;
77 | u64 sid;
78 | };
79 |
80 | struct nvsci_mm_db {
81 | struct mutex lock;
82 | struct rb_root root;
83 | struct list_head free_sid_list;
84 | };
85 | static struct nvsci_mm_db *nvsci_mm_db_ptr;
86 |
87 | static int dev_ops_open(struct inode *inode, struct file *filp)
88 | {
89 | unsigned int current_gid = current_cred()->gid.val;
90 | int ret = 0;
91 |
92 | EN;
93 |
94 | PrintLog("Device open\n");
95 | /* Provide access only to root and user groups within [gid_start, gid_end] */
96 | if ((current_gid != 0) && ((current_gid < gid_start) || (current_gid > gid_end))) {
97 | ret = -EACCES;
98 | goto out;
99 | }
100 |
101 | nonseekable_open(inode, filp);
102 | out:
103 | EX;
104 | return ret;
105 | }
106 |
107 | static int dev_ops_release(struct inode *inodep, struct file *filp)
108 | {
109 | struct nvsci_mm_db_entry *e;
110 | struct rb_node *n;
111 | bool deletePresent = false;
112 | int ret = 0;
113 |
114 | EN;
115 | PrintLog("Device close\n");
116 | mutex_lock(&nvsci_mm_db_ptr->lock);
117 |
118 | PrintLog("Checking for export entries from this client\n");
119 | do {
120 | deletePresent = false;
121 | for (n = rb_first(&nvsci_mm_db_ptr->root); n; n = rb_next(n)) {
122 | e = rb_entry(n, struct nvsci_mm_db_entry, entry);
123 | if ((struct file *)e->client == filp) {
124 | deletePresent = true;
125 | break;
126 | }
127 | }
128 |
129 | if (deletePresent) {
130 | struct free_sid_node *free_node = kzalloc(sizeof(*free_node), GFP_KERNEL);
131 | if (free_node != NULL) {
132 | free_node->sid = e->sci_ipc_id;
133 | list_add_tail(&free_node->list, &nvsci_mm_db_ptr->free_sid_list);
134 | }
135 | PrintLog("Deleting entry %p %p %llu %llu %llu %u %u\n", e->client, e->handle,
136 | e->sci_ipc_id, e->local_vuid, e->peer_vuid, e->fd_flags, e->refcount);
137 | fput((struct file*)e->handle);
138 | rb_erase(&e->entry, &nvsci_mm_db_ptr->root);
139 | kfree(e);
140 | }
141 | } while(deletePresent);
142 | mutex_unlock(&nvsci_mm_db_ptr->lock);
143 |
144 | EX;
145 | return ret;
146 | }
147 |
148 | struct nvsci_mm_buffer {
149 | struct dma_buf *dmabuf;
150 | size_t size;
151 |
152 | void *priv_virt;
153 | struct mutex lock;
154 | int vmap_cnt;
155 | void *vaddr;
156 | pgoff_t pagecount;
157 | struct page **pages;
158 | struct list_head attachments;
159 |
160 | void (*free)(struct nvsci_mm_buffer *buffer);
161 | };
162 |
163 | static void buffer_free(struct nvsci_mm_buffer *buffer)
164 | {
165 | pgoff_t pg = 0;
166 | EN;
167 |
168 | for (pg = 0; pg < buffer->pagecount; pg++) {
169 | __free_page(buffer->pages[pg]);
170 | }
171 |
172 | kfree(buffer->pages);
173 | PrintLog("Buffer pointer %p\n", buffer);
174 | kfree(buffer);
175 | EX;
176 | }
177 |
178 | struct buffer_attachment {
179 | struct device *dev;
180 | struct sg_table table;
181 | struct list_head list;
182 | };
183 |
184 | static struct sg_table *buffer_ops_map_dma_buf(struct dma_buf_attachment *attachment,
185 | enum dma_data_direction direction)
186 | {
187 | int nents;
188 | struct buffer_attachment *a = attachment->priv;
189 | struct sg_table *table = &(a->table);
190 | struct sg_table *ret = NULL;
191 |
192 | EN;
193 |
194 | nents = dma_map_sg(attachment->dev, table->sgl, table->nents, direction);
195 | if (nents <= 0) {
196 | ret = ERR_PTR(-EINVAL);
197 | goto out;
198 | }
199 |
200 | table->nents = nents;
201 | ret = table;
202 |
203 | out:
204 | EX;
205 | return ret;
206 | }
207 |
208 | static void buffer_ops_unmap_dma_buf(struct dma_buf_attachment *attachment,
209 | struct sg_table *table,
210 | enum dma_data_direction direction)
211 | {
212 | EN;
213 | dma_unmap_sg(attachment->dev, table->sgl, table->nents, direction);
214 | EX;
215 | }
216 |
217 | void * dma_heap_dma_buf_map (struct dma_buf *dmabuf)
218 | {
219 | EN;
220 | EX;
221 | return NULL;
222 | }
223 |
224 | static vm_fault_t buffer_vm_ops_fault(struct vm_fault *vmf)
225 | {
226 | struct nvsci_mm_buffer *buffer = vmf->vma->vm_private_data;
227 | vm_fault_t ret = 0;
228 | EN;
229 |
230 | if (vmf->pgoff >= buffer->pagecount) {
231 | ret = VM_FAULT_SIGBUS;
232 | goto out;
233 | }
234 |
235 | vmf->page = buffer->pages[vmf->pgoff];
236 | get_page(vmf->page);
237 |
238 | out:
239 | EX;
240 | return ret;
241 | }
242 |
243 | static const struct vm_operations_struct buffer_vm_ops = {
244 | .fault = buffer_vm_ops_fault,
245 | };
246 |
247 | static int buffer_ops_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
248 | {
249 | int ret = 0;
250 |
251 | EN;
252 |
253 | if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0) {
254 | ret = -EINVAL;
255 | goto out;
256 | }
257 |
258 | vma->vm_ops = &buffer_vm_ops;
259 | vma->vm_private_data = dmabuf->priv;
260 |
261 | out:
262 | EX;
263 | return ret;
264 | }
265 |
266 | static void buffer_ops_release(struct dma_buf *dmabuf)
267 | {
268 | struct nvsci_mm_buffer *buffer = dmabuf->priv;
269 | EN;
270 |
271 | if (buffer->vmap_cnt > 0) {
272 | WARN(1, "%s: buffer still mapped in the kernel\n", __func__);
273 | vunmap(buffer->vaddr);
274 | }
275 |
276 | buffer->free(buffer);
277 | EX;
278 | }
279 |
280 | static int buffer_ops_attach(struct dma_buf *dmabuf, struct dma_buf_attachment *attachment)
281 | {
282 | struct buffer_attachment *a;
283 | struct nvsci_mm_buffer *buffer = dmabuf->priv;
284 | int ret = 0;
285 | EN;
286 |
287 | a = kzalloc(sizeof(*a), GFP_KERNEL);
288 | if (!a) {
289 | ret = -ENOMEM;
290 | goto out;
291 | }
292 |
293 | ret = sg_alloc_table_from_pages(&a->table, buffer->pages,
294 | buffer->pagecount, 0,
295 | buffer->pagecount << PAGE_SHIFT,
296 | GFP_KERNEL);
297 | if (ret) {
298 | kfree(a);
299 | goto out;
300 | }
301 |
302 | a->dev = attachment->dev;
303 | INIT_LIST_HEAD(&a->list);
304 |
305 | attachment->priv = a;
306 |
307 | mutex_lock(&buffer->lock);
308 | list_add(&a->list, &buffer->attachments);
309 | mutex_unlock(&buffer->lock);
310 |
311 | out:
312 | EX;
313 | return ret;
314 | }
315 |
316 | static void buffer_ops_detach(struct dma_buf *dmabuf,
317 | struct dma_buf_attachment *attachment)
318 | {
319 | struct buffer_attachment *a = attachment->priv;
320 | struct nvsci_mm_buffer *buffer = dmabuf->priv;
321 | EN;
322 |
323 | mutex_lock(&buffer->lock);
324 | list_del(&a->list);
325 | mutex_unlock(&buffer->lock);
326 |
327 | sg_free_table(&a->table);
328 | kfree(a);
329 | EX;
330 | }
331 |
332 | static int buffer_ops_begin_cpu_access(struct dma_buf *dmabuf,
333 | enum dma_data_direction direction)
334 | {
335 | struct nvsci_mm_buffer *buffer = dmabuf->priv;
336 | struct buffer_attachment *a = NULL;
337 | int ret = 0;
338 | EN;
339 |
340 | mutex_lock(&buffer->lock);
341 |
342 | if (buffer->vmap_cnt) {
343 | invalidate_kernel_vmap_range(buffer->vaddr, buffer->size);
344 | }
345 |
346 | list_for_each_entry(a, &buffer->attachments, list) {
347 | dma_sync_sg_for_cpu(a->dev, a->table.sgl, a->table.nents,
348 | direction);
349 | }
350 | mutex_unlock(&buffer->lock);
351 |
352 | EX;
353 | return ret;
354 | }
355 |
356 | static int dma_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
357 | enum dma_data_direction direction)
358 | {
359 | struct nvsci_mm_buffer *buffer = dmabuf->priv;
360 | struct buffer_attachment *a = NULL;
361 | EN;
362 |
363 | mutex_lock(&buffer->lock);
364 |
365 | if (buffer->vmap_cnt) {
366 | flush_kernel_vmap_range(buffer->vaddr, buffer->size);
367 | }
368 |
369 | list_for_each_entry(a, &buffer->attachments, list) {
370 | dma_sync_sg_for_device(a->dev, a->table.sgl, a->table.nents, direction);
371 | }
372 |
373 | mutex_unlock(&buffer->lock);
374 |
375 | EX;
376 | return 0;
377 | }
378 |
379 | static void *buffer_ops_vmap(struct dma_buf *dmabuf)
380 | {
381 | struct nvsci_mm_buffer *buffer = dmabuf->priv;
382 | void *vaddr;
383 | EN;
384 |
385 | mutex_lock(&buffer->lock);
386 | if (buffer->vmap_cnt) {
387 | buffer->vmap_cnt++;
388 | vaddr = buffer->vaddr;
389 | goto out;
390 | }
391 |
392 | vaddr = vmap(buffer->pages, buffer->pagecount, VM_MAP, PAGE_KERNEL);
393 | if (!vaddr) {
394 | vaddr = ERR_PTR(-ENOMEM);
395 | goto out;
396 | }
397 |
398 | buffer->vaddr = vaddr;
399 | buffer->vmap_cnt++;
400 |
401 | out:
402 | mutex_unlock(&buffer->lock);
403 | EX;
404 | return vaddr;
405 | }
406 |
407 | static void buffer_ops_vunmap(struct dma_buf *dmabuf, void *vaddr)
408 | {
409 | struct nvsci_mm_buffer *buffer = dmabuf->priv;
410 | EN;
411 |
412 | mutex_lock(&buffer->lock);
413 |
414 | if (!--buffer->vmap_cnt) {
415 | vunmap(buffer->vaddr);
416 | buffer->vaddr = NULL;
417 | }
418 |
419 | mutex_unlock(&buffer->lock);
420 | EX;
421 | }
422 |
423 | static void *buffer_ops_map(struct dma_buf *buf, unsigned long page_num)
424 | {
425 | struct nvsci_mm_buffer *buffer = buf->priv;
426 | struct page *page = buffer->pages[page_num];
427 | EN;
428 | EX;
429 | return kmap(page);
430 | }
431 |
432 | static void buffer_ops_unmap(struct dma_buf *buf, unsigned long page_num,
433 | void *vaddr)
434 | {
435 | EN;
436 | kunmap(((struct nvsci_mm_buffer *)buf->priv)->pages[page_num]);
437 | EX;
438 | }
439 |
440 | const struct dma_buf_ops buffer_ops = {
441 | .map_dma_buf = buffer_ops_map_dma_buf,
442 | .unmap_dma_buf = buffer_ops_unmap_dma_buf,
443 | .mmap = buffer_ops_mmap,
444 | .release = buffer_ops_release,
445 | .attach = buffer_ops_attach,
446 | .detach = buffer_ops_detach,
447 | .begin_cpu_access = buffer_ops_begin_cpu_access,
448 | .end_cpu_access = dma_heap_dma_buf_end_cpu_access,
449 | .vmap = buffer_ops_vmap,
450 | .vunmap = buffer_ops_vunmap,
451 | .map = buffer_ops_map,
452 | .unmap = buffer_ops_unmap,
453 | };
454 |
455 | static int buffer_allocate(unsigned long len,
456 | unsigned long fd_flags)
457 | {
458 | struct nvsci_mm_buffer *buffer_data;
459 | struct dma_buf *dmabuf;
460 | int ret = -ENOMEM;
461 | pgoff_t pg;
462 | DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
463 |
464 | EN;
465 |
466 | buffer_data = kzalloc(sizeof(*buffer_data), GFP_KERNEL);
467 | if (!buffer_data) {
468 | ret = -ENOMEM;
469 | goto out;
470 | }
471 |
472 | buffer_data->priv_virt = NULL;
473 | mutex_init(&buffer_data->lock);
474 | buffer_data->vmap_cnt = 0;
475 | buffer_data->vaddr = NULL;
476 | buffer_data->pagecount = 0;
477 | buffer_data->pages = NULL;
478 | INIT_LIST_HEAD(&buffer_data->attachments);
479 | buffer_data->free = buffer_free;
480 | buffer_data->size = len;
481 |
482 | buffer_data->pagecount = len / PAGE_SIZE;
483 | buffer_data->pages = kmalloc_array(buffer_data->pagecount, sizeof(*buffer_data->pages), GFP_KERNEL);
484 | if (!buffer_data->pages) {
485 | ret = -ENOMEM;
486 | goto err0;
487 | }
488 |
489 | for (pg = 0; pg < buffer_data->pagecount; pg++) {
490 | if (fatal_signal_pending(current)) {
491 | goto err1;
492 | }
493 |
494 | buffer_data->pages[pg] = alloc_page(GFP_KERNEL | __GFP_ZERO);
495 | if (!buffer_data->pages[pg])
496 | goto err1;
497 | }
498 |
499 | exp_info.owner = THIS_MODULE;
500 | exp_info.ops = &buffer_ops;
501 | exp_info.size = buffer_data->size;
502 | exp_info.flags = fd_flags;
503 | exp_info.priv = buffer_data;
504 |
505 | dmabuf = dma_buf_export(&exp_info);
506 | if (IS_ERR(dmabuf)) {
507 | ret = PTR_ERR(dmabuf);
508 | goto err1;
509 | }
510 |
511 | buffer_data->dmabuf = dmabuf;
512 |
513 | ret = dma_buf_fd(dmabuf, fd_flags);
514 | if (ret < 0) {
515 | dma_buf_put(dmabuf);
516 | goto out;
517 | }
518 |
519 | PrintLog("Buffer pointer %p\n", buffer_data);
520 | PrintLog("dma Buffer pointer %p\n", dmabuf);
521 | PrintLog("dma Buffer file pointer %p\n", dmabuf->file);
522 | PrintLog("dma Buffer file refCount %llu\n", dmabuf->file->f_count.counter);
523 | PrintLog("dma Buffer fd %d\n", ret);
524 |
525 | goto out;
526 |
527 | err1:
528 | while (pg > 0)
529 | __free_page(buffer_data->pages[--pg]);
530 | kfree(buffer_data->pages);
531 | err0:
532 | kfree(buffer_data);
533 | out:
534 | EX;
535 | return ret;
536 | }
537 |
538 | static long nvsci_mm_ioctl_check_compat(struct file *file, void *data)
539 | {
540 | struct nvsci_mm_check_compat_data *compat_data = data;
541 | uint32_t umajorVer = GET_MAJOR_FROM_HEADER(compat_data->header);
542 | uint32_t uminorVer = GET_MINOR_FROM_HEADER(compat_data->header);
543 | long ret = 0;
544 |
545 | EN;
546 |
547 | PrintLog("Userspace major %u\n", umajorVer);
548 | PrintLog("Userspace minor %u\n", uminorVer);
549 | PrintLog("Kernel major %u\n", NVSCI_MM_MAJOR_VERSION);
550 | PrintLog("Kernel minor %u\n", NVSCI_MM_MINOR_VERSION);
551 |
552 | compat_data->result = (NVSCI_MM_MAJOR_VERSION == umajorVer);
553 |
554 | PrintLog("Compatiblity result %u\n", compat_data->result);
555 |
556 | EX;
557 | return ret;
558 | }
559 |
560 | static long nvsci_mm_ioctl_allocate(struct file *file, void *data)
561 | {
562 | struct nvsci_mm_allocation_data *alloc_data = data;
563 | int fd;
564 | long ret = 0;
565 | size_t len = 0;
566 | EN;
567 |
568 | if ((NVSCI_MM_MAJOR_VERSION != GET_MAJOR_FROM_HEADER(alloc_data->header)) ||
569 | (alloc_data->fd >= 0) ||
570 | (alloc_data->len == 0) ||
571 | (alloc_data->fd_flags > O_RDWR)) {
572 | ret = -EINVAL;
573 | goto out;
574 | }
575 |
576 | len = PAGE_ALIGN(alloc_data->len);
577 |
578 | fd = buffer_allocate(len, alloc_data->fd_flags);
579 | if (fd < 0) {
580 | ret = fd;
581 | goto out;
582 | }
583 |
584 | alloc_data->fd = fd;
585 | PrintLog("Requested size %llu\n", alloc_data->len);
586 | PrintLog("Requested fd_flags %x\n", alloc_data->fd_flags);
587 | PrintLog("Allocated size %lu\n", len);
588 |
589 | out:
590 | EX;
591 | return ret;
592 | }
593 |
594 | static NvSciError nvsci_mm_get_validate_map_vuid(
595 | NvSciIpcEndpointAuthToken authToken,
596 | NvSciIpcEndpointVuid *lu_vuid,
597 | NvSciIpcEndpointVuid *pr_vuid)
598 | {
599 | NvSciIpcTopoId pr_topoid;
600 |
601 | NvSciError (*ValidateAuthToken)(
602 | NvSciIpcEndpointAuthToken authToken,
603 | NvSciIpcEndpointVuid *localUserVuid);
604 |
605 | NvSciError (*MapVuid)(
606 | NvSciIpcEndpointVuid localUserVuid,
607 | NvSciIpcTopoId *peerTopoId,
608 | NvSciIpcEndpointVuid *peerUserVuid);
609 | NvSciError sciErr = NvSciError_Success;
610 |
611 | EN;
612 |
613 | ValidateAuthToken = symbol_get(NvSciIpcEndpointValidateAuthTokenLinuxCurrent);
614 | MapVuid = symbol_get(NvSciIpcEndpointMapVuid);
615 |
616 | if (ValidateAuthToken && MapVuid) {
617 | sciErr = ValidateAuthToken(authToken, lu_vuid);
618 | if (sciErr == NvSciError_Success) {
619 | sciErr = MapVuid(*lu_vuid, &pr_topoid, pr_vuid);
620 | }
621 |
622 | symbol_put(NvSciIpcEndpointValidateAuthTokenLinuxCurrent);
623 | symbol_put(NvSciIpcEndpointMapVuid);
624 | } else {
625 | /* Fall back to non-secure sharing */
626 | memset(lu_vuid, 0x0, sizeof(NvSciIpcEndpointVuid));
627 | memset(pr_vuid, 0x0, sizeof(NvSciIpcEndpointVuid));
628 | }
629 |
630 | EX;
631 | return sciErr;
632 | }
633 |
634 | static long nvsci_mm_ioctl_get_sciipcid(struct file *file, void *data)
635 | {
636 | struct nvsci_mm_get_sciipcid_data *get_sciipcid_data = data;
637 | NvSciIpcEndpointVuid pr_vuid;
638 | NvSciIpcEndpointVuid lu_vuid;
639 | NvSciError sciErr = NvSciError_Success;
640 | struct nvsci_mm_db_entry *entry = NULL;
641 | struct nvsci_mm_db_entry *temp = NULL;
642 | struct rb_node *parent = NULL;
643 | struct rb_node *node = NULL;
644 | struct rb_node **link = NULL;
645 | struct fd f = {0};
646 | struct file *fd_file = NULL;
647 | long ret = 0;
648 | static atomic_t unq_id = { 0 };
649 | uint32_t current_pending_exports = 0U;
650 |
651 | EN;
652 |
653 | if (NVSCI_MM_MAJOR_VERSION != GET_MAJOR_FROM_HEADER(get_sciipcid_data->header) ||
654 | (get_sciipcid_data->fd_flags > O_RDWR)) {
655 | ret = -EINVAL;
656 | goto out;
657 | }
658 |
659 | f = fdget(get_sciipcid_data->fd);
660 | if (f.file == NULL) {
661 | ret = -EINVAL;
662 | goto out;
663 | }
664 | fd_file = f.file;
665 |
666 | sciErr = nvsci_mm_get_validate_map_vuid(get_sciipcid_data->auth_token, &lu_vuid, &pr_vuid);
667 | if (sciErr != NvSciError_Success) {
668 | ret = -EINVAL;
669 | goto put_file;
670 | }
671 |
672 | PrintLog("Peer vuid %llu\n", pr_vuid);
673 | PrintLog("Local vuid %llu\n", lu_vuid);
674 |
675 | mutex_lock(&nvsci_mm_db_ptr->lock);
676 |
677 | for (node = rb_first(&nvsci_mm_db_ptr->root); node; node = rb_next(node)) {
678 | entry = rb_entry(node, struct nvsci_mm_db_entry, entry);
679 | if (file == entry->client) {
680 | current_pending_exports += entry->refcount;
681 | }
682 | }
683 | if (max_pending_exports <= current_pending_exports) {
684 | ret = -EINVAL;
685 | goto unlock;
686 | }
687 |
688 | for (node = rb_first(&nvsci_mm_db_ptr->root); node; node = rb_next(node)) {
689 | entry = rb_entry(node, struct nvsci_mm_db_entry, entry);
690 | if ((entry != NULL) &&
691 | (file == entry->client) &&
692 | (fd_file == entry->handle) &&
693 | (lu_vuid == entry->local_vuid) &&
694 | (pr_vuid == entry->peer_vuid) &&
695 | (get_sciipcid_data->fd_flags == entry->fd_flags)) {
696 | break;
697 | }
698 | entry = NULL;
699 | }
700 |
701 | if (entry) {
702 | entry->refcount++;
703 | get_sciipcid_data->sci_ipc_id = entry->sci_ipc_id;
704 | PrintLog("Found exsisting entry %p %p %llu %llu %llu %u %u\n", entry->client,
705 | entry->handle, entry->sci_ipc_id, entry->local_vuid, entry->peer_vuid,
706 | entry->fd_flags, entry->refcount);
707 | goto unlock;
708 | } else {
709 | if (!list_empty(&nvsci_mm_db_ptr->free_sid_list)) {
710 | struct free_sid_node *fnode = list_first_entry(
711 | &nvsci_mm_db_ptr->free_sid_list, struct free_sid_node, list);
712 | get_sciipcid_data->sci_ipc_id = fnode->sid;
713 | list_del(&fnode->list);
714 | kfree(fnode);
715 | PrintLog("Reusing sid %llu\n", get_sciipcid_data->sci_ipc_id);
716 | } else {
717 | get_sciipcid_data->sci_ipc_id = atomic_add_return(2, &unq_id);
718 | PrintLog("Generated new sid %llu\n", get_sciipcid_data->sci_ipc_id);
719 | }
720 |
721 | entry = kzalloc(sizeof(*entry), GFP_KERNEL);
722 | if (entry == NULL) {
723 | ret = -ENOMEM;
724 | goto unlock;
725 | }
726 |
727 | get_file(fd_file);
728 |
729 | entry->client = file;
730 | entry->handle = fd_file;
731 | entry->local_vuid = lu_vuid;
732 | entry->peer_vuid = pr_vuid;
733 | entry->fd_flags = get_sciipcid_data->fd_flags;
734 | entry->sci_ipc_id = get_sciipcid_data->sci_ipc_id;
735 | entry->refcount = 1;
736 |
737 | link = &nvsci_mm_db_ptr->root.rb_node;
738 |
739 | while(*link) {
740 | parent = *link;
741 | temp = rb_entry(parent, struct nvsci_mm_db_entry, entry);
742 | link = (temp->sci_ipc_id > get_sciipcid_data->sci_ipc_id) ?
743 | (&parent->rb_left) : (&parent->rb_right);
744 | }
745 | rb_link_node(&entry->entry, parent, link);
746 | rb_insert_color(&entry->entry, &nvsci_mm_db_ptr->root);
747 | PrintLog("Added entry %p %p %llu %llu %llu %u %u\n", entry->client, entry->handle,
748 | entry->sci_ipc_id, entry->local_vuid, entry->peer_vuid, entry->fd_flags,
749 | entry->refcount);
750 | }
751 |
752 | PrintLog("dma Buffer file pointer %p\n", fd_file);
753 | PrintLog("dma Buffer file refCount %d\n", (int)fd_file->f_count.counter);
754 |
755 | unlock:
756 | mutex_unlock(&nvsci_mm_db_ptr->lock);
757 | put_file:
758 | fdput(f);
759 | out:
760 | EX;
761 | return ret;
762 | }
763 |
764 | static long nvsci_mm_ioctl_fd_from_sci_ipc_id(struct file *file, void *data)
765 | {
766 | struct nvsci_mm_get_sciipcid_data *get_sciipcid_data = data;
767 | NvSciIpcEndpointVuid pr_vuid;
768 | NvSciIpcEndpointVuid lu_vuid;
769 | NvSciError sciErr = NvSciError_Success;
770 | struct nvsci_mm_db_entry *entry = NULL;
771 | struct file* filp = NULL;
772 | struct rb_node *node = NULL;
773 | struct free_sid_node *free_node = NULL;
774 | int ret = 0;
775 |
776 | EN;
777 |
778 | if (NVSCI_MM_MAJOR_VERSION != GET_MAJOR_FROM_HEADER(get_sciipcid_data->header)) {
779 | ret = -EINVAL;
780 | goto out;
781 | }
782 |
783 | if (get_sciipcid_data->fd_flags > O_RDWR) {
784 | ret = -EINVAL;
785 | goto out;
786 |
787 | }
788 |
789 | sciErr = nvsci_mm_get_validate_map_vuid(get_sciipcid_data->auth_token, &lu_vuid, &pr_vuid);
790 | if (sciErr != NvSciError_Success) {
791 | ret = -EINVAL;
792 | goto out;
793 | }
794 |
795 | PrintLog("Peer vuid %llu\n", pr_vuid);
796 | PrintLog("Local vuid %llu\n", lu_vuid);
797 |
798 | mutex_lock(&nvsci_mm_db_ptr->lock);
799 |
800 | for (node = rb_first(&nvsci_mm_db_ptr->root); node; node = rb_next(node)) {
801 | entry = rb_entry(node, struct nvsci_mm_db_entry, entry);
802 | if ((entry != NULL) &&
803 | (entry->sci_ipc_id == get_sciipcid_data->sci_ipc_id)) {
804 | break;
805 | }
806 | entry = NULL;
807 | }
808 |
809 | if (entry == NULL || (lu_vuid != entry->peer_vuid) ||
810 | (pr_vuid != entry->local_vuid) ||
811 | (get_sciipcid_data->fd_flags != entry->fd_flags)) {
812 | ret = -EINVAL;
813 | PrintLog("No entry for %p %p %llu %llu %llu %u %u\n", NULL, NULL,
814 | get_sciipcid_data->sci_ipc_id, pr_vuid, lu_vuid, 0x00, 0x00);
815 | goto unlock;
816 | }
817 |
818 | filp = (struct file*)entry->handle;
819 | get_sciipcid_data->fd = get_unused_fd_flags(entry->fd_flags);
820 | if (get_sciipcid_data->fd < 0) {
821 | ret = -EFAULT;
822 | goto unlock;
823 | }
824 |
825 | PrintLog("Found entry %p %p %llu %llu %llu %u %u\n", entry->client, entry->handle,
826 | entry->sci_ipc_id, entry->local_vuid, entry->peer_vuid, entry->fd_flags,
827 | entry->refcount);
828 | get_file(filp);
829 | fd_install(get_sciipcid_data->fd, filp);
830 | entry->refcount--;
831 | if (entry->refcount == 0) {
832 | fput(filp);
833 | rb_erase(&entry->entry, &nvsci_mm_db_ptr->root);
834 | PrintLog("Deleted entry %p %p %llu %llu %llu %u %u\n", entry->client, entry->handle,
835 | entry->sci_ipc_id, entry->local_vuid, entry->peer_vuid, entry->fd_flags,
836 | entry->refcount);
837 | free_node = kzalloc(sizeof(*free_node), GFP_KERNEL);
838 | if (free_node == NULL) {
839 | ret = -ENOMEM;
840 | kfree(entry);
841 | goto unlock;
842 | }
843 |
844 | free_node->sid = entry->sci_ipc_id;
845 | list_add_tail(&free_node->list, &nvsci_mm_db_ptr->free_sid_list);
846 | PrintLog("Recycling sid %llu\n", get_sciipcid_data->sci_ipc_id);
847 | kfree(entry);
848 | }
849 |
850 | PrintLog("dma Buffer file pointer %p\n", filp);
851 | PrintLog("dma Buffer file refCount %d\n", (int)filp->f_count.counter);
852 |
853 | unlock:
854 | mutex_unlock(&nvsci_mm_db_ptr->lock);
855 | out:
856 | EX;
857 | return ret;
858 | }
859 |
860 | static long dev_ops_ioctl(struct file *filp, unsigned int ucmd, unsigned long arg)
861 | {
862 | long ret = 0;
863 | char stack_kdata[128];
864 | unsigned int kcmd;
865 | int nr = _IOC_NR(ucmd);
866 | char *kdata = stack_kdata;
867 | unsigned int in_size, out_size, drv_size, ksize;
868 |
869 | EN;
870 |
871 | if (nr >= ARRAY_SIZE(nvsci_mm_ioctl_cmds)) {
872 | ret = -EINVAL;
873 | goto out;
874 | }
875 |
876 | nr = array_index_nospec(nr, ARRAY_SIZE(nvsci_mm_ioctl_cmds));
877 | /* Get the kernel ioctl cmd that matches */
878 | kcmd = nvsci_mm_ioctl_cmds[nr];
879 |
880 | /* Figure out the delta between user cmd size and kernel cmd size */
881 | drv_size = _IOC_SIZE(kcmd);
882 | out_size = _IOC_SIZE(ucmd);
883 | in_size = out_size;
884 | if ((ucmd & kcmd & IOC_IN) == 0)
885 | in_size = 0;
886 | if ((ucmd & kcmd & IOC_OUT) == 0)
887 | out_size = 0;
888 | ksize = max(max(in_size, out_size), drv_size);
889 |
890 | /* If necessary, allocate buffer for ioctl argument */
891 | if (ksize > sizeof(stack_kdata)) {
892 | kdata = kmalloc(ksize, GFP_KERNEL);
893 | if (!kdata) {
894 | ret = -ENOMEM;
895 | goto out;
896 | }
897 | }
898 |
899 | if (copy_from_user(kdata, (void __user *)arg, in_size) != 0) {
900 | ret = -EFAULT;
901 | goto err;
902 | }
903 |
904 | /* zero out any difference between the kernel/user structure size */
905 | if (ksize > in_size)
906 | memset(kdata + in_size, 0, ksize - in_size);
907 |
908 | switch (ucmd) {
909 | case NVSCI_MM_IOCTL_CHECK_COMPAT:
910 | ret = nvsci_mm_ioctl_check_compat(filp, kdata);
911 | break;
912 | case NVSCI_MM_IOCTL_ALLOC:
913 | ret = nvsci_mm_ioctl_allocate(filp, kdata);
914 | break;
915 | case NVSCI_MM_IOCTL_GET_SCIIPCID:
916 | ret = nvsci_mm_ioctl_get_sciipcid(filp, kdata);
917 | break;
918 | case NVSCI_MM_IOCTL_FD_FROM_SCIIPCID:
919 | ret = nvsci_mm_ioctl_fd_from_sci_ipc_id(filp, kdata);
920 | break;
921 | default:
922 | ret = -ENOTTY;
923 | goto err;
924 | }
925 |
926 | if (copy_to_user((void __user *)arg, kdata, out_size) != 0) {
927 | ret = -EFAULT;
928 | goto err;
929 | }
930 |
931 | err:
932 | if (kdata != stack_kdata)
933 | kfree(kdata);
934 | out:
935 | EX;
936 | return ret;
937 | }
938 |
939 | static ssize_t dev_ops_write(struct file *filp, const char __user *buf,
940 | size_t len, loff_t *ppos)
941 | {
942 | EN;
943 | PrintLog("Device write\n");
944 | EX;
945 | return len;
946 | }
947 |
948 | static ssize_t dev_ops_read(struct file *filp, char __user *buf,
949 | size_t count, loff_t *f_pos)
950 | {
951 | struct rb_node *node = NULL;
952 | struct nvsci_mm_db_entry *entry;
953 | struct free_sid_node *fnode, *temp;
954 | ssize_t ret = 0;
955 |
956 | EN;
957 |
958 | PrintLog("Device read\n");
959 | PrintLog("Module refCount %d\n", module_refcount(THIS_MODULE));
960 | PrintLog("Module filePointer %p\n", filp);
961 | PrintLog("Module file refCount %d\n", (int)filp->f_count.counter);
962 |
963 | mutex_lock(&nvsci_mm_db_ptr->lock);
964 | PrintLog("DB\n");
965 | for (node = rb_first(&nvsci_mm_db_ptr->root); node; node = rb_next(node)) {
966 | entry = rb_entry(node, struct nvsci_mm_db_entry, entry);
967 | PrintLog("%p %p %llu %llu %llu %u %u\n", entry->client, entry->handle,
968 | entry->sci_ipc_id, entry->local_vuid, entry->peer_vuid, entry->fd_flags, entry->refcount);
969 | }
970 |
971 | PrintLog("Free sids\n");
972 | list_for_each_entry_safe(fnode, temp, &nvsci_mm_db_ptr->free_sid_list, list) {
973 | PrintLog("%llu\n", fnode->sid);
974 | }
975 |
976 | mutex_unlock(&nvsci_mm_db_ptr->lock);
977 |
978 | EX;
979 | return ret;
980 | }
981 |
982 | static const struct file_operations nvsci_mm_dev_fops = {
983 | .owner = THIS_MODULE,
984 | .open = dev_ops_open,
985 | .release = dev_ops_release,
986 | .unlocked_ioctl = dev_ops_ioctl,
987 | .write = dev_ops_write,
988 | .read = dev_ops_read,
989 | .llseek = no_llseek,
990 | };
991 |
992 | struct {
993 | struct cdev cdev;
994 | dev_t dev;
995 | struct class *dev_class;
996 | struct device *device;
997 | } nvsci_mm_dev;
998 |
999 | static int nvsci_mm_db_init(void)
1000 | {
1001 | int ret = 0;
1002 | EN;
1003 |
1004 | nvsci_mm_db_ptr = kzalloc(sizeof(*nvsci_mm_db_ptr), GFP_KERNEL);
1005 | if (nvsci_mm_db_ptr == NULL) {
1006 | ret = -ENOMEM;
1007 | goto out;
1008 | }
1009 |
1010 | nvsci_mm_db_ptr->root = RB_ROOT;
1011 | INIT_LIST_HEAD(&nvsci_mm_db_ptr->free_sid_list);
1012 | mutex_init(&nvsci_mm_db_ptr->lock);
1013 |
1014 | out:
1015 | EX;
1016 | return ret;
1017 | }
1018 |
1019 | static void nvsci_mm_db_deinit(void)
1020 | {
1021 | struct nvsci_mm_db_entry *e;
1022 | struct free_sid_node *fnode, *temp;
1023 | struct rb_node *n;
1024 | EN;
1025 |
1026 | mutex_lock(&nvsci_mm_db_ptr->lock);
1027 | while ((n = rb_first(&nvsci_mm_db_ptr->root))) {
1028 | e = rb_entry(n, struct nvsci_mm_db_entry, entry);
1029 | rb_erase(&e->entry, &nvsci_mm_db_ptr->root);
1030 | kfree(e);
1031 | }
1032 |
1033 | list_for_each_entry_safe(fnode, temp, &nvsci_mm_db_ptr->free_sid_list, list) {
1034 | list_del(&fnode->list);
1035 | kfree(fnode);
1036 | }
1037 |
1038 | mutex_unlock(&nvsci_mm_db_ptr->lock);
1039 | kfree(nvsci_mm_db_ptr);
1040 | nvsci_mm_db_ptr = NULL;
1041 |
1042 | EX;
1043 | }
1044 |
1045 | static char *nvsci_mm_devnode(struct device *dev, umode_t *mode)
1046 | {
1047 | char *ret = NULL;
1048 |
1049 | if (!mode) {
1050 | goto out;
1051 | }
1052 |
1053 | *mode = 0444;
1054 |
1055 | out:
1056 | return ret;
1057 | }
1058 |
1059 | static int __init nvsci_mm_init(void)
1060 | {
1061 | int devno = 0;
1062 | int err = 0;
1063 |
1064 | EN;
1065 |
1066 | memset(&nvsci_mm_dev, 0x0, sizeof(nvsci_mm_dev));
1067 |
1068 | err = alloc_chrdev_region(&nvsci_mm_dev.dev, 0, 1, "nvsci_mm");
1069 | if (err < 0) {
1070 | pr_err("alloc_chrdev_region failed %d\n", err);
1071 | goto out;
1072 | }
1073 |
1074 | devno = MKDEV(MAJOR(nvsci_mm_dev.dev), MINOR(nvsci_mm_dev.dev));
1075 |
1076 | cdev_init(&nvsci_mm_dev.cdev, &nvsci_mm_dev_fops);
1077 |
1078 | nvsci_mm_dev.cdev.owner = THIS_MODULE;
1079 | nvsci_mm_dev.cdev.ops = &nvsci_mm_dev_fops;
1080 |
1081 | err = cdev_add(&nvsci_mm_dev.cdev, devno, 1);
1082 | if (err) {
1083 | pr_err("cdev_add failed %d\n", err);
1084 | goto free_chrdev;
1085 | }
1086 |
1087 | nvsci_mm_dev.dev_class = class_create(THIS_MODULE, "nvsci_mm");
1088 | if (IS_ERR_OR_NULL(nvsci_mm_dev.dev_class)) {
1089 | err = PTR_ERR(nvsci_mm_dev.dev_class);
1090 | pr_err("class_create failed %d\n", err);
1091 | goto free_cdev;
1092 | }
1093 |
1094 | nvsci_mm_dev.dev_class->devnode = nvsci_mm_devnode;
1095 |
1096 | nvsci_mm_dev.device = device_create(nvsci_mm_dev.dev_class,
1097 | NULL, nvsci_mm_dev.dev, NULL, "nvsci_mm");
1098 | if (IS_ERR(nvsci_mm_dev.device)) {
1099 | err = PTR_ERR(nvsci_mm_dev.device);
1100 | pr_err("device_create failed %d\n", err);
1101 | goto free_class;
1102 | }
1103 |
1104 | err = nvsci_mm_db_init();
1105 | if (err) {
1106 | pr_err("db init failed %d\n", err);
1107 | goto free_device;
1108 | }
1109 |
1110 | PrintLog("DB init passedi\n");
1111 |
1112 | goto out;
1113 |
1114 | free_device:
1115 | /* device_destroy() also unregisters the device */
1116 | device_destroy(nvsci_mm_dev.dev_class, nvsci_mm_dev.dev);
1117 | nvsci_mm_dev.device = NULL;
1118 |
1119 | free_class:
1120 | class_destroy(nvsci_mm_dev.dev_class);
1121 | nvsci_mm_dev.dev_class = NULL;
1122 |
1123 | free_cdev:
1124 | cdev_del(&nvsci_mm_dev.cdev);
1125 |
1126 | free_chrdev:
1127 | unregister_chrdev_region(nvsci_mm_dev.dev, 1);
1128 |
1129 | out:
1130 | EX;
1131 | return err;
1132 | }
1133 |
1134 | static void __exit nvsci_mm_exit(void)
1135 | {
1136 | EN;
1137 |
1138 | nvsci_mm_db_deinit();
1139 | /* device_destroy() also unregisters the device */
1140 | device_destroy(nvsci_mm_dev.dev_class, nvsci_mm_dev.dev);
1141 | class_destroy(nvsci_mm_dev.dev_class);
1142 | cdev_del(&nvsci_mm_dev.cdev);
1143 | unregister_chrdev_region(nvsci_mm_dev.dev, 1);
1144 | memset(&nvsci_mm_dev, 0x0, sizeof(nvsci_mm_dev));
1145 |
1146 | PrintLog("DB deinit passed\n");
1147 |
1148 | EX;
1149 | }
1150 |
1151 | module_init(nvsci_mm_init);
1152 | module_exit(nvsci_mm_exit);
1153 |
1154 | MODULE_DESCRIPTION("NVIDIA NvSci Memory Management Driver");
1155 | MODULE_AUTHOR("NVIDIA Corporation");
1156 | MODULE_LICENSE("GPL v2");
1157 |
--------------------------------------------------------------------------------
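
nvsci_mm.c keeps its export database in an rb-tree keyed by sci_ipc_id: NVSCI_MM_IOCTL_GET_SCIIPCID validates the exporter's auth token, records (client, dma-buf file, local/peer VUID, fd_flags) with a refcount, and NVSCI_MM_IOCTL_FD_FROM_SCIIPCID on the peer endpoint installs a new fd for that dma-buf only when the importer's local VUID matches the recorded peer VUID and vice versa. A hedged sketch of the importer side, assuming sci_ipc_id and the importer's own auth token have already been exchanged over an NvSciIpc channel (the transport is not shown):

    /* Importer-side sketch for NVSCI_MM_IOCTL_FD_FROM_SCIIPCID. */
    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include "uapi/linux/nvsci_mm.h"

    int import_buffer(int dev_fd, __u64 sci_ipc_id, __u64 my_auth_token)
    {
        struct nvsci_mm_get_sciipcid_data data;

        NVSCI_MM_SET_DEFAULT_SCIIPCID_DATA(data);
        data.sci_ipc_id = sci_ipc_id;    /* received from the exporter */
        data.auth_token = my_auth_token; /* importer's own endpoint token */
        data.fd_flags = O_RDWR;          /* must match the exporter's flags */

        /* On success the driver installs a new fd referencing the
         * exporter's dma-buf and decrements the export refcount,
         * recycling the id once it reaches zero, so each export is
         * redeemable exactly once per refcount. */
        if (ioctl(dev_fd, NVSCI_MM_IOCTL_FD_FROM_SCIIPCID, &data) < 0)
            return -1;
        return data.fd;
    }
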
/nvsci_mm/system_heap.c:
--------------------------------------------------------------------------------
1 | // SPDX-License-Identifier: GPL-2.0
2 | /*
3 | * DMABUF System heap exporter
4 | *
5 | * Copyright (C) 2011 Google, Inc.
6 | * Copyright (C) 2019, 2020 Linaro Ltd.
7 | *
8 | * Portions based off of Andrew Davis' SRAM heap:
9 | * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
10 | * Andrew F. Davis <afd@ti.com>
11 | */
12 |
13 | #include <linux/dma-buf.h>
14 | #include <linux/dma-mapping.h>
15 | #include <linux/dma-heap.h>
16 | #include <linux/err.h>
17 | #include <linux/highmem.h>
18 | #include <linux/mm.h>
19 | #include <linux/module.h>
20 | #include <linux/scatterlist.h>
21 | #include <linux/slab.h>
22 | #include <linux/vmalloc.h>
23 |
24 | static struct dma_heap *sys_heap;
25 |
26 | struct system_heap_buffer {
27 | struct dma_heap *heap;
28 | struct list_head attachments;
29 | struct mutex lock;
30 | unsigned long len;
31 | struct sg_table sg_table;
32 | int vmap_cnt;
33 | void *vaddr;
34 | };
35 |
36 | struct dma_heap_attachment {
37 | struct device *dev;
38 | struct sg_table *table;
39 | struct list_head list;
40 | bool mapped;
41 | };
42 |
43 | #define LOW_ORDER_GFP (GFP_HIGHUSER | __GFP_ZERO | __GFP_COMP)
44 | #define MID_ORDER_GFP (LOW_ORDER_GFP | __GFP_NOWARN)
45 | #define HIGH_ORDER_GFP (((GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN \
46 | | __GFP_NORETRY) & ~__GFP_RECLAIM) \
47 | | __GFP_COMP)
48 | static gfp_t order_flags[] = {HIGH_ORDER_GFP, MID_ORDER_GFP, LOW_ORDER_GFP};
49 | /*
50 | * The selection of the orders used for allocation (1MB, 64K, 4K) is designed
51 | * to match with the sizes often found in IOMMUs. Using order 4 pages instead
52 | * of order 0 pages can significantly improve the performance of many IOMMUs
53 | * by reducing TLB pressure and time spent updating page tables.
54 | */
55 | static const unsigned int orders[] = {8, 4, 0};
56 | #define NUM_ORDERS ARRAY_SIZE(orders)
57 |
58 | static struct sg_table *dup_sg_table(struct sg_table *table)
59 | {
60 | struct sg_table *new_table;
61 | int ret, i;
62 | struct scatterlist *sg, *new_sg;
63 |
64 | new_table = kzalloc(sizeof(*new_table), GFP_KERNEL);
65 | if (!new_table)
66 | return ERR_PTR(-ENOMEM);
67 |
68 | ret = sg_alloc_table(new_table, table->orig_nents, GFP_KERNEL);
69 | if (ret) {
70 | kfree(new_table);
71 | return ERR_PTR(-ENOMEM);
72 | }
73 |
74 | new_sg = new_table->sgl;
75 | for_each_sgtable_sg(table, sg, i) {
76 | sg_set_page(new_sg, sg_page(sg), sg->length, sg->offset);
77 | new_sg = sg_next(new_sg);
78 | }
79 |
80 | return new_table;
81 | }
82 |
83 | static int system_heap_attach(struct dma_buf *dmabuf,
84 | struct dma_buf_attachment *attachment)
85 | {
86 | struct system_heap_buffer *buffer = dmabuf->priv;
87 | struct dma_heap_attachment *a;
88 | struct sg_table *table;
89 |
90 | a = kzalloc(sizeof(*a), GFP_KERNEL);
91 | if (!a)
92 | return -ENOMEM;
93 |
94 | table = dup_sg_table(&buffer->sg_table);
95 | if (IS_ERR(table)) {
96 | kfree(a);
97 | return -ENOMEM;
98 | }
99 |
100 | a->table = table;
101 | a->dev = attachment->dev;
102 | INIT_LIST_HEAD(&a->list);
103 | a->mapped = false;
104 |
105 | attachment->priv = a;
106 |
107 | mutex_lock(&buffer->lock);
108 | list_add(&a->list, &buffer->attachments);
109 | mutex_unlock(&buffer->lock);
110 |
111 | return 0;
112 | }
113 |
114 | static void system_heap_detach(struct dma_buf *dmabuf,
115 | struct dma_buf_attachment *attachment)
116 | {
117 | struct system_heap_buffer *buffer = dmabuf->priv;
118 | struct dma_heap_attachment *a = attachment->priv;
119 |
120 | mutex_lock(&buffer->lock);
121 | list_del(&a->list);
122 | mutex_unlock(&buffer->lock);
123 |
124 | sg_free_table(a->table);
125 | kfree(a->table);
126 | kfree(a);
127 | }
128 |
129 | static struct sg_table *system_heap_map_dma_buf(struct dma_buf_attachment *attachment,
130 | enum dma_data_direction direction)
131 | {
132 | struct dma_heap_attachment *a = attachment->priv;
133 | struct sg_table *table = a->table;
134 | int ret;
135 |
136 | ret = dma_map_sgtable(attachment->dev, table, direction, 0);
137 | if (ret)
138 | return ERR_PTR(ret);
139 |
140 | a->mapped = true;
141 | return table;
142 | }
143 |
144 | static void system_heap_unmap_dma_buf(struct dma_buf_attachment *attachment,
145 | struct sg_table *table,
146 | enum dma_data_direction direction)
147 | {
148 | struct dma_heap_attachment *a = attachment->priv;
149 |
150 | a->mapped = false;
151 | dma_unmap_sgtable(attachment->dev, table, direction, 0);
152 | }
153 |
154 | static int system_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
155 | enum dma_data_direction direction)
156 | {
157 | struct system_heap_buffer *buffer = dmabuf->priv;
158 | struct dma_heap_attachment *a;
159 |
160 | mutex_lock(&buffer->lock);
161 |
162 | if (buffer->vmap_cnt)
163 | invalidate_kernel_vmap_range(buffer->vaddr, buffer->len);
164 |
165 | list_for_each_entry(a, &buffer->attachments, list) {
166 | if (!a->mapped)
167 | continue;
168 | dma_sync_sgtable_for_cpu(a->dev, a->table, direction);
169 | }
170 | mutex_unlock(&buffer->lock);
171 |
172 | return 0;
173 | }
174 |
175 | static int system_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
176 | enum dma_data_direction direction)
177 | {
178 | struct system_heap_buffer *buffer = dmabuf->priv;
179 | struct dma_heap_attachment *a;
180 |
181 | mutex_lock(&buffer->lock);
182 |
183 | if (buffer->vmap_cnt)
184 | flush_kernel_vmap_range(buffer->vaddr, buffer->len);
185 |
186 | list_for_each_entry(a, &buffer->attachments, list) {
187 | if (!a->mapped)
188 | continue;
189 | dma_sync_sgtable_for_device(a->dev, a->table, direction);
190 | }
191 | mutex_unlock(&buffer->lock);
192 |
193 | return 0;
194 | }
195 |
196 | static int system_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
197 | {
198 | struct system_heap_buffer *buffer = dmabuf->priv;
199 | struct sg_table *table = &buffer->sg_table;
200 | unsigned long addr = vma->vm_start;
201 | struct sg_page_iter piter;
202 | int ret;
203 |
204 | for_each_sgtable_page(table, &piter, vma->vm_pgoff) {
205 | struct page *page = sg_page_iter_page(&piter);
206 |
207 | ret = remap_pfn_range(vma, addr, page_to_pfn(page), PAGE_SIZE,
208 | vma->vm_page_prot);
209 | if (ret)
210 | return ret;
211 | addr += PAGE_SIZE;
212 | if (addr >= vma->vm_end)
213 | return 0;
214 | }
215 | return 0;
216 | }
217 |
218 | static void *system_heap_do_vmap(struct system_heap_buffer *buffer)
219 | {
220 | struct sg_table *table = &buffer->sg_table;
221 | int npages = PAGE_ALIGN(buffer->len) / PAGE_SIZE;
222 | struct page **pages = vmalloc(sizeof(struct page *) * npages);
223 | struct page **tmp = pages;
224 | struct sg_page_iter piter;
225 | void *vaddr;
226 |
227 | if (!pages)
228 | return ERR_PTR(-ENOMEM);
229 |
230 | for_each_sgtable_page(table, &piter, 0) {
231 | WARN_ON(tmp - pages >= npages);
232 | *tmp++ = sg_page_iter_page(&piter);
233 | }
234 |
235 | vaddr = vmap(pages, npages, VM_MAP, PAGE_KERNEL);
236 | vfree(pages);
237 |
238 | if (!vaddr)
239 | return ERR_PTR(-ENOMEM);
240 |
241 | return vaddr;
242 | }
243 |
244 | static int system_heap_vmap(struct dma_buf *dmabuf, struct iosys_map *map)
245 | {
246 | struct system_heap_buffer *buffer = dmabuf->priv;
247 | void *vaddr;
248 | int ret = 0;
249 |
250 | mutex_lock(&buffer->lock);
251 | if (buffer->vmap_cnt) {
252 | buffer->vmap_cnt++;
253 | iosys_map_set_vaddr(map, buffer->vaddr);
254 | goto out;
255 | }
256 |
257 | vaddr = system_heap_do_vmap(buffer);
258 | if (IS_ERR(vaddr)) {
259 | ret = PTR_ERR(vaddr);
260 | goto out;
261 | }
262 |
263 | buffer->vaddr = vaddr;
264 | buffer->vmap_cnt++;
265 | iosys_map_set_vaddr(map, buffer->vaddr);
266 | out:
267 | mutex_unlock(&buffer->lock);
268 |
269 | return ret;
270 | }
271 |
272 | static void system_heap_vunmap(struct dma_buf *dmabuf, struct iosys_map *map)
273 | {
274 | struct system_heap_buffer *buffer = dmabuf->priv;
275 |
276 | mutex_lock(&buffer->lock);
277 | if (!--buffer->vmap_cnt) {
278 | vunmap(buffer->vaddr);
279 | buffer->vaddr = NULL;
280 | }
281 | mutex_unlock(&buffer->lock);
282 | iosys_map_clear(map);
283 | }
284 |
285 | static void system_heap_dma_buf_release(struct dma_buf *dmabuf)
286 | {
287 | struct system_heap_buffer *buffer = dmabuf->priv;
288 | struct sg_table *table;
289 | struct scatterlist *sg;
290 | int i;
291 |
292 | table = &buffer->sg_table;
293 | for_each_sgtable_sg(table, sg, i) {
294 | struct page *page = sg_page(sg);
295 |
296 | __free_pages(page, compound_order(page));
297 | }
298 | sg_free_table(table);
299 | kfree(buffer);
300 | }
301 |
302 | static const struct dma_buf_ops system_heap_buf_ops = {
303 | .attach = system_heap_attach,
304 | .detach = system_heap_detach,
305 | .map_dma_buf = system_heap_map_dma_buf,
306 | .unmap_dma_buf = system_heap_unmap_dma_buf,
307 | .begin_cpu_access = system_heap_dma_buf_begin_cpu_access,
308 | .end_cpu_access = system_heap_dma_buf_end_cpu_access,
309 | .mmap = system_heap_mmap,
310 | .vmap = system_heap_vmap,
311 | .vunmap = system_heap_vunmap,
312 | .release = system_heap_dma_buf_release,
313 | };
314 |
315 | static struct page *alloc_largest_available(unsigned long size,
316 | unsigned int max_order)
317 | {
318 | struct page *page;
319 | int i;
320 |
321 | for (i = 0; i < NUM_ORDERS; i++) {
322 | if (size < (PAGE_SIZE << orders[i]))
323 | continue;
324 | if (max_order < orders[i])
325 | continue;
326 |
327 | page = alloc_pages(order_flags[i], orders[i]);
328 | if (!page)
329 | continue;
330 | return page;
331 | }
332 | return NULL;
333 | }
334 |
335 | static struct dma_buf *system_heap_allocate(struct dma_heap *heap,
336 | unsigned long len,
337 | unsigned long fd_flags,
338 | unsigned long heap_flags)
339 | {
340 | struct system_heap_buffer *buffer;
341 | DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
342 | unsigned long size_remaining = len;
343 | unsigned int max_order = orders[0];
344 | struct dma_buf *dmabuf;
345 | struct sg_table *table;
346 | struct scatterlist *sg;
347 | struct list_head pages;
348 | struct page *page, *tmp_page;
349 | int i, ret = -ENOMEM;
350 |
351 | buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
352 | if (!buffer)
353 | return ERR_PTR(-ENOMEM);
354 |
355 | INIT_LIST_HEAD(&buffer->attachments);
356 | mutex_init(&buffer->lock);
357 | buffer->heap = heap;
358 | buffer->len = len;
359 |
360 | INIT_LIST_HEAD(&pages);
361 | i = 0;
362 | while (size_remaining > 0) {
363 | /*
364 | * Avoid trying to allocate memory if the process
365 | * has been killed by SIGKILL
366 | */
367 | if (fatal_signal_pending(current)) {
368 | ret = -EINTR;
369 | goto free_buffer;
370 | }
371 |
372 | page = alloc_largest_available(size_remaining, max_order);
373 | if (!page)
374 | goto free_buffer;
375 |
376 | list_add_tail(&page->lru, &pages);
377 | size_remaining -= page_size(page);
378 | max_order = compound_order(page);
379 | i++;
380 | }
381 |
382 | table = &buffer->sg_table;
383 | if (sg_alloc_table(table, i, GFP_KERNEL))
384 | goto free_buffer;
385 |
386 | sg = table->sgl;
387 | list_for_each_entry_safe(page, tmp_page, &pages, lru) {
388 | sg_set_page(sg, page, page_size(page), 0);
389 | sg = sg_next(sg);
390 | list_del(&page->lru);
391 | }
392 |
393 | /* create the dmabuf */
394 | exp_info.exp_name = dma_heap_get_name(heap);
395 | exp_info.ops = &system_heap_buf_ops;
396 | exp_info.size = buffer->len;
397 | exp_info.flags = fd_flags;
398 | exp_info.priv = buffer;
399 | dmabuf = dma_buf_export(&exp_info);
400 | if (IS_ERR(dmabuf)) {
401 | ret = PTR_ERR(dmabuf);
402 | goto free_pages;
403 | }
404 | return dmabuf;
405 |
406 | free_pages:
407 | for_each_sgtable_sg(table, sg, i) {
408 | struct page *p = sg_page(sg);
409 |
410 | __free_pages(p, compound_order(p));
411 | }
412 | sg_free_table(table);
413 | free_buffer:
414 | list_for_each_entry_safe(page, tmp_page, &pages, lru)
415 | __free_pages(page, compound_order(page));
416 | kfree(buffer);
417 |
418 | return ERR_PTR(ret);
419 | }
420 |
421 | static const struct dma_heap_ops system_heap_ops = {
422 | .allocate = system_heap_allocate,
423 | };
424 |
425 | static int system_heap_create(void)
426 | {
427 | struct dma_heap_export_info exp_info;
428 |
429 | exp_info.name = "system";
430 | exp_info.ops = &system_heap_ops;
431 | exp_info.priv = NULL;
432 |
433 | sys_heap = dma_heap_add(&exp_info);
434 | if (IS_ERR(sys_heap))
435 | return PTR_ERR(sys_heap);
436 |
437 | return 0;
438 | }
439 | module_init(system_heap_create);
440 | MODULE_LICENSE("GPL v2");
441 |
--------------------------------------------------------------------------------
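
The allocation strategy in system_heap.c above is worth tracing once: alloc_largest_available() walks orders[] = {8, 4, 0} (1MB, 64K, 4K with 4K pages) and max_order only ratchets downward, so a request for 1MB + 8K becomes one order-8 chunk followed by two order-0 pages. A small standalone simulation of that selection loop (not kernel code; 4K pages assumed):

    /* Simulates system_heap's order-selection loop for one request. */
    #include <stdio.h>

    #define SIM_PAGE_SIZE 4096UL

    static const unsigned int orders[] = {8, 4, 0};
    #define NUM_ORDERS (sizeof(orders) / sizeof(orders[0]))

    int main(void)
    {
        unsigned long size_remaining = (1UL << 20) + 2 * SIM_PAGE_SIZE; /* 1MB + 8K */
        unsigned int max_order = orders[0];
        unsigned int i;

        while (size_remaining > 0) {
            for (i = 0; i < NUM_ORDERS; i++) {
                if (size_remaining < (SIM_PAGE_SIZE << orders[i]))
                    continue;
                if (max_order < orders[i])
                    continue;
                break;
            }
            if (i == NUM_ORDERS) /* remainder smaller than a page */
                break;
            printf("alloc order-%u chunk (%lu bytes)\n",
                   orders[i], SIM_PAGE_SIZE << orders[i]);
            size_remaining -= SIM_PAGE_SIZE << orders[i];
            max_order = orders[i]; /* orders only ratchet downward */
        }
        return 0;
    }

Running it prints one order-8 chunk (1048576 bytes) and two order-0 chunks (4096 bytes each), matching the IOMMU-friendly large-page-first behavior the comment in the source describes.
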
/nvsciipc/Makefile:
--------------------------------------------------------------------------------
1 | # SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
2 | # SPDX-License-Identifier: GPL-2.0-only
3 | #
4 | # This program is free software; you can redistribute it and/or modify it
5 | # under the terms and conditions of the GNU General Public License,
6 | # version 2, as published by the Free Software Foundation.
7 | #
8 | # This program is distributed in the hope it will be useful, but WITHOUT
9 | # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 | # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 | # more details.
12 | #
13 | # You should have received a copy of the GNU General Public License
14 | # along with this program. If not, see <https://www.gnu.org/licenses/>.
15 | ccflags-y += -Werror
16 | ccflags-y += -I$(PWD)
17 | obj-m := nvsciipc.o
18 |
--------------------------------------------------------------------------------
/nvsciipc/README:
--------------------------------------------------------------------------------
1 | NvSciIpc kernel driver
2 | ======================
3 |
4 | Description:
5 | Linux kernel module for secure buffer sharing
6 | Version:
7 | 1.0
8 |
9 | Build NvSciIpc KMD for DRIVE OS x86
10 | ===================================
11 |
12 | 1) install kernel header package
13 | sudo apt-get install linux-headers-`uname -r`
14 |
15 | Check the desktop kernel version using the "uname -r" command.
16 | The kernel version must be 5.4.0-104 or later.
17 |
18 | 2) build
19 | cd nvsciipc
20 | make -C /lib/modules/`uname -r`/build M=${PWD} modules
21 |
22 | 3) install NvSciIpc KMD
23 | sudo make -C /lib/modules/`uname -r`/build M=${PWD} modules_install
24 | sudo depmod -a
25 |
26 | nvsciipc.ko will be installed as /lib/modules/`uname -r`/extra/nvsciipc.ko
27 |
28 | [NOTE] If kernel module installation fails due to a missing signing key, follow the steps below
29 |
30 | ** create x509.genkey
31 | echo -e "[ req ] \n\
32 | default_bits = 4096 \n\
33 | distinguished_name = req_distinguished_name \n\
34 | prompt = no \n\
35 | x509_extensions = myexts \n\
36 | \n\
37 | [ req_distinguished_name ] \n\
38 | CN = Modules \n\
39 | \n\
40 | [ myexts ] \n\
41 | basicConstraints=critical,CA:FALSE \n\
42 | keyUsage=digitalSignature \n\
43 | subjectKeyIdentifier=hash \n\
44 | authorityKeyIdentifier=keyid" > x509.genkey
45 |
46 | ** generate signing key
47 | openssl req -new -nodes -utf8 -sha512 -days 36500 -batch -x509 -config x509.genkey -outform DER -out signing_key.x509 -keyout signing_key.pem
48 |
49 | ** move signing key to kernel module folder of desktop
50 | sudo mv signing_key.* /lib/modules/`uname -r`/build/certs/
51 |
52 | 4) clean
53 | make -C /lib/modules/`uname -r`/build M=${PWD} clean
54 |
55 | 5) load NvSciIpc KMD
56 | You can load NvSciIpc KMD during desktop boot.
57 | sudo vi /etc/modules-load.d/modules.conf
58 | add "nvsciipc" to this file
59 |
60 | To load the KMD manually, run "sudo insmod nvsciipc.ko"
61 |
--------------------------------------------------------------------------------
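
nvsciipc exports the endpoint-validation symbols that nvsci_mm consumes; a dependent module resolves them at runtime with symbol_get() so it can fall back to non-secure sharing when nvsciipc.ko is not loaded (see nvsci_mm_get_validate_map_vuid() in nvsci_mm.c). A minimal kernel-side sketch of that consumer pattern, with the signature taken from that call site (NvSciIpcEndpointAuthToken and NvSciIpcEndpointVuid are typedefs from nvsciipc_interface.h):

    /* Hedged sketch of a consumer of nvsciipc's exported symbols,
     * mirroring the symbol_get() fallback in nvsci_mm.c. */
    #include <linux/module.h>
    #include "nvsciipc_interface.h"

    static NvSciError validate_token(NvSciIpcEndpointAuthToken token,
                                     NvSciIpcEndpointVuid *vuid)
    {
        NvSciError (*fn)(NvSciIpcEndpointAuthToken,
                         NvSciIpcEndpointVuid *);
        NvSciError err;

        /* Resolves only while nvsciipc.ko is loaded, and pins the
         * module until the matching symbol_put(). */
        fn = symbol_get(NvSciIpcEndpointValidateAuthTokenLinuxCurrent);
        if (!fn)
            return NvSciError_NotInitialized;

        err = fn(token, vuid);
        symbol_put(NvSciIpcEndpointValidateAuthTokenLinuxCurrent);
        return err;
    }

This runtime lookup is what lets nvsci_mm degrade gracefully instead of taking a hard module dependency on nvsciipc.
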
/nvsciipc/linux/nvsciipc_interface.h:
--------------------------------------------------------------------------------
1 | /*
2 | * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3 | * SPDX-License-Identifier: GPL-2.0-only
4 | *
5 | * This program is free software; you can redistribute it and/or modify it
6 | * under the terms and conditions of the GNU General Public License,
7 | * version 2, as published by the Free Software Foundation.
8 | *
9 | * This program is distributed in the hope it will be useful, but WITHOUT
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 | * more details.
13 | *
14 | * You should have received a copy of the GNU General Public License
15 | * along with this program. If not, see <https://www.gnu.org/licenses/>.
16 | */
17 |
18 | #ifndef __NVSCIIPC_INTERFACE_H__
19 | #define __NVSCIIPC_INTERFACE_H__
20 |
21 | /** Invalid VUID definition */
22 | #define NVSCIIPC_ENDPOINT_VUID_INVALID 0U
23 | /** Invalid authentication token definition */
24 | #define NVSCIIPC_ENDPOINT_AUTHTOKEN_INVALID 0U
25 | /** current self SOC ID */
26 | #define NVSCIIPC_SELF_SOCID 0xFFFFFFFFU
27 | /** current self VM ID */
28 | #define NVSCIIPC_SELF_VMID 0xFFFFFFFFU
29 |
30 | typedef enum NvSciErrorRec {
31 | /* Range 0x00000000 - 0x00FFFFFF : Common errors
32 | * This range is used for errors common to all NvSci libraries.
33 | */
34 |
35 | /** [EOK] No error */
36 | NvSciError_Success = 0x00000000,
37 |
38 | /** Unidentified error with no additional info */
39 | NvSciError_Unknown = 0x00000001,
40 |
41 | /* Generic errors */
42 | /** [ENOSYS] Feature is not implemented */
43 | NvSciError_NotImplemented = 0x00000010,
44 | /** [ENOTSUP] Feature is not supported */
45 | NvSciError_NotSupported = 0x00000011,
46 | /** [EACCES] Access to resource denied */
47 | NvSciError_AccessDenied = 0x00000020,
48 | /** [EPERM] No permission to perform operation */
49 | NvSciError_NotPermitted = 0x00000021,
50 | /** Resource is in wrong state to perform operation */
51 | NvSciError_InvalidState = 0x00000022,
52 | /** Requested operation is not legal */
53 | NvSciError_InvalidOperation = 0x00000023,
54 | /** Required resource is not initialized */
55 | NvSciError_NotInitialized = 0x00000024,
56 | /** [ENOMEM] Not enough memory */
57 | NvSciError_InsufficientMemory = 0x00000030,
58 | /** Not enough (non-memory) resources */
59 | NvSciError_InsufficientResource = 0x00000031,
60 | /** Resource failed */
61 | NvSciError_ResourceError = 0x00000032,
62 |
63 | /* Function parameter errors */
64 | /** [EINVAL] Invalid parameter value */
65 | NvSciError_BadParameter = 0x00000100,
66 | /** [EFAULT] Invalid address */
67 | NvSciError_BadAddress = 0x00000101,
68 | /** [E2BIG] Parameter list too long */
69 | NvSciError_TooBig = 0x00000102,
70 | /** [EOVERFLOW] Value too large for data type */
71 | NvSciError_Overflow = 0x00000103,
72 |
73 | /* Timing/temporary errors */
74 | /** [ETIMEDOUT] Operation timed out */
75 | NvSciError_Timeout = 0x00000200,
76 | /** [EAGAIN] Resource unavailable. Try again. */
77 | NvSciError_TryItAgain = 0x00000201,
78 | /** [EBUSY] Resource is busy */
79 | NvSciError_Busy = 0x00000202,
80 | /** [EINTR] An interrupt occurred */
81 | NvSciError_InterruptedCall = 0x00000203,
82 |
83 | /* Device errors */
84 | /** [ENODEV] No such device */
85 | NvSciError_NoSuchDevice = 0x00001000,
86 | /** [ENOSPC] No space left on device */
87 | NvSciError_NoSpace = 0x00001001,
88 | /** [ENXIO] No such device or address */
89 | NvSciError_NoSuchDevAddr = 0x00001002,
90 | /** [EIO] Input/output error */
91 | NvSciError_IO = 0x00001003,
92 | /** [ENOTTY] Inappropriate I/O control operation */
93 | NvSciError_InvalidIoctlNum = 0x00001004,
94 |
95 | /* File system errors */
96 | /** [ENOENT] No such file or directory */
97 | NvSciError_NoSuchEntry = 0x00001100,
98 | /** [EBADF] Bad file descriptor */
99 | NvSciError_BadFileDesc = 0x00001101,
100 | /** [EBADFSYS] Corrupted file system detected */
101 | NvSciError_CorruptedFileSys = 0x00001102,
102 | /** [EEXIST] File already exists */
103 | NvSciError_FileExists = 0x00001103,
104 | /** [EISDIR] File is a directory */
105 | NvSciError_IsDirectory = 0x00001104,
106 | /** [EROFS] Read-only file system */
107 | NvSciError_ReadOnlyFileSys = 0x00001105,
108 | /** [ETXTBSY] Text file is busy */
109 | NvSciError_TextFileBusy = 0x00001106,
110 | /** [ENAMETOOLONG] File name is too long */
111 | NvSciError_FileNameTooLong = 0x00001107,
112 | /** [EFBIG] File is too large */
113 | NvSciError_FileTooBig = 0x00001108,
114 | /** [ELOOP] Too many levels of symbolic links */
115 | NvSciError_TooManySymbolLinks = 0x00001109,
116 | /** [EMFILE] Too many open files in process */
117 | NvSciError_TooManyOpenFiles = 0x0000110A,
118 | /** [ENFILE] Too many open files in system */
119 | NvSciError_FileTableOverflow = 0x0000110B,
120 | /** End of file reached */
121 | NvSciError_EndOfFile = 0x0000110C,
122 |
123 |
124 | /* Communication errors */
125 | /** [ECONNRESET] Connection was closed or lost */
126 | NvSciError_ConnectionReset = 0x00001200,
127 | /** [EALREADY] Pending connection is already in progress */
128 | NvSciError_AlreadyInProgress = 0x00001201,
129 | /** [ENODATA] No message data available */
130 | NvSciError_NoData = 0x00001202,
131 | /** [ENOMSG] No message of the desired type available */
132 | NvSciError_NoDesiredMessage = 0x00001203,
133 | /** [EMSGSIZE] Message is too large */
134 | NvSciError_MessageSize = 0x00001204,
135 | /** [ENOREMOTE] Remote node doesn't exist */
136 | NvSciError_NoRemote = 0x00001205,
137 |
138 | /* Process/thread errors */
139 | /** [ESRCH] No such process */
140 | NvSciError_NoSuchProcess = 0x00002000,
141 |
142 | /* Mutex errors */
143 | /** [ENOTRECOVERABLE] Mutex damaged by previous owner's death */
144 | NvSciError_MutexNotRecoverable = 0x00002100,
145 | /** [EOWNERDEAD] Previous owner died while holding mutex */
146 | NvSciError_LockOwnerDead = 0x00002101,
147 | /** [EDEADLK] Taking ownership would cause deadlock */
148 | NvSciError_ResourceDeadlock = 0x00002102,
149 |
150 | /** End of range for common error codes */
151 | NvSciError_CommonEnd = 0x00FFFFFF,
152 |
153 | /* Range 0x04000000 - 0x04FFFFFF : NvSciIpc errors */
154 | /** Unidentified NvSciIpc error with no additional info */
155 | NvSciError_NvSciIpcUnknown = 0x04000000,
156 | /** End of range for NvSciIpc errors */
157 | NvSciError_NvSciIpcEnd = 0x04FFFFFF,
158 | } NvSciError;
159 |
160 | /**
161 | * @brief Handle to the IPC endpoint.
162 | */
163 | typedef uint64_t NvSciIpcEndpoint;
164 |
165 |
166 | /**
167 | * @brief VUID (VM unique ID) of the IPC endpoint.
168 | */
169 | typedef uint64_t NvSciIpcEndpointVuid;
170 |
171 | /**
172 | * @brief Authentication token of the IPC endpoint.
173 | */
174 | typedef uint64_t NvSciIpcEndpointAuthToken;
175 |
176 | /**
177 | * @brief Defines topology ID of the IPC endpoint.
178 | */
179 | typedef struct {
180 | /*! Holds SOC ID */
181 | uint32_t SocId;
182 | /*! Holds VMID */
183 | uint32_t VmId;
184 | } NvSciIpcTopoId;
185 |
186 | /**********************************************************************/
187 | /*********************** Function Definitions *************************/
188 | /**********************************************************************/
189 | NvSciError NvSciIpcEndpointGetAuthToken(NvSciIpcEndpoint handle,
190 | NvSciIpcEndpointAuthToken *authToken);
191 |
192 | NvSciError NvSciIpcEndpointValidateAuthTokenLinuxCurrent(
193 | NvSciIpcEndpointAuthToken authToken,
194 | NvSciIpcEndpointVuid *localUserVuid);
195 |
196 | NvSciError NvSciIpcEndpointMapVuid(NvSciIpcEndpointVuid localUserVuid,
197 | NvSciIpcTopoId *peerTopoId, NvSciIpcEndpointVuid *peerUserVuid);
198 |
199 | NvSciError NvSciIpcEndpointGetVuid(NvSciIpcEndpoint handle,
200 | NvSciIpcEndpointVuid *vuid);
201 |
202 | #endif /* __NVSCIIPC_INTERFACE_H__ */
203 |
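NOTE (usage sketch): the four functions above are exported for other kernel
modules (e.g. nvsci_mm) rather than for userspace. A hypothetical in-kernel
consumer could turn a client-supplied auth token into the peer endpoint's
VUID as below; the include path, function name, and the -EINVAL mapping are
assumptions for illustration, not part of this interface.

    #include <linux/errno.h>
    #include <linux/nvsciipc_interface.h>

    static int example_resolve_peer(NvSciIpcEndpointAuthToken token,
                                    NvSciIpcEndpointVuid *peer_vuid)
    {
            NvSciIpcEndpointVuid local_vuid;
            NvSciIpcTopoId peer_topo;
            NvSciError err;

            /* token -> VUID of the local endpoint backing the token */
            err = NvSciIpcEndpointValidateAuthTokenLinuxCurrent(token,
                                                                &local_vuid);
            if (err != NvSciError_Success)
                    return -EINVAL;

            /* local VUID -> peer topology ID and peer VUID */
            err = NvSciIpcEndpointMapVuid(local_vuid, &peer_topo, peer_vuid);
            if (err != NvSciError_Success)
                    return -EINVAL;

            return 0;
    }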
--------------------------------------------------------------------------------
/nvsciipc/nvsciipc.c:
--------------------------------------------------------------------------------
1 | /*
2 | * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3 | * SPDX-License-Identifier: GPL-2.0-only
4 | *
5 | * This program is free software; you can redistribute it and/or modify it
6 | * under the terms and conditions of the GNU General Public License,
7 | * version 2, as published by the Free Software Foundation.
8 | *
9 | * This program is distributed in the hope it will be useful, but WITHOUT
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 | * more details.
13 | *
14 | * You should have received a copy of the GNU General Public License
15 | * along with this program. If not, see <https://www.gnu.org/licenses/>.
16 | */
17 |
18 | /*
19 | * This is the NvSciIpc kernel driver. At present its only use is to
20 | * support the secure buffer sharing use case across processes.
21 | */
22 |
23 | #include <linux/kernel.h>
24 | #include <linux/module.h>
25 | #include <linux/init.h>
26 | #include <linux/platform_device.h>
27 | #include <linux/device.h>
28 | #include <linux/cdev.h>
29 | #include <linux/fs.h>
30 | #include <linux/file.h>
31 | #include <linux/slab.h>
32 | #include <linux/string.h>
33 | #include <linux/mutex.h>
34 | #include <linux/cred.h>
35 | #include <linux/err.h>
36 | #include <linux/types.h>
37 | #include <linux/uaccess.h>
38 |
39 | #include "nvsciipc.h"
40 |
41 | /* enable this to debug the auth APIs via ioctl */
42 | #define DEBUG_AUTH_API 1
43 | #define DEBUG_VALIDATE_TOKEN 0
44 |
45 | DEFINE_MUTEX(nvsciipc_mutex);
46 |
47 | static struct platform_device *nvsciipc_pdev;
48 | static struct nvsciipc *ctx;
49 |
50 | NvSciError NvSciIpcEndpointGetAuthToken(NvSciIpcEndpoint handle,
51 | NvSciIpcEndpointAuthToken *authToken)
52 | {
53 | INFO("Not supported in KMD; implemented in the userspace library\n");
54 |
55 | return NvSciError_NotSupported;
56 | }
57 | EXPORT_SYMBOL(NvSciIpcEndpointGetAuthToken);
58 |
59 | NvSciError NvSciIpcEndpointGetVuid(NvSciIpcEndpoint handle,
60 | NvSciIpcEndpointVuid *vuid)
61 | {
62 | INFO("Not supported in KMD; implemented in the userspace library\n");
63 |
64 | return NvSciError_NotSupported;
65 | }
66 | EXPORT_SYMBOL(NvSciIpcEndpointGetVuid);
67 |
68 | NvSciError NvSciIpcEndpointValidateAuthTokenLinuxCurrent(
69 | NvSciIpcEndpointAuthToken authToken,
70 | NvSciIpcEndpointVuid *localUserVuid)
71 | {
72 | struct fd f;
73 | struct file *filp;
74 | int i, ret, devlen;
75 | char node[NVSCIIPC_MAX_EP_NAME+16];
76 |
77 | if ((ctx == NULL) || (ctx->set_db_f != true)) {
78 | ERR("not initialized\n");
79 | return NvSciError_NotInitialized;
80 | }
81 |
82 | f = fdget((int)authToken); /* the auth token is an fd to an endpoint device node */
83 | if (!f.file) {
84 | ERR("invalid auth token\n");
85 | return NvSciError_BadParameter;
86 | }
87 | filp = f.file;
88 |
89 | devlen = strlen(filp->f_path.dentry->d_name.name);
90 | #if DEBUG_VALIDATE_TOKEN
91 | INFO("token: %lld, dev name: %s, devlen: %d\n", authToken,
92 | filp->f_path.dentry->d_name.name, devlen);
93 | #endif
94 |
95 | for (i = 0; i < ctx->num_eps; i++) {
96 | ret = snprintf(node, sizeof(node), "%s%d",
97 | ctx->db[i]->dev_name, ctx->db[i]->id);
98 |
99 | if ((ret < 0) || (ret != devlen))
100 | continue;
101 |
102 | #if DEBUG_VALIDATE_TOKEN
103 | INFO("node:%s, vuid:0x%llx\n", node, ctx->db[i]->vuid);
104 | #endif
105 | /* compare node name itself only (w/o directory) */
106 | if (!strncmp(filp->f_path.dentry->d_name.name, node, ret)) {
107 | *localUserVuid = ctx->db[i]->vuid;
108 | break;
109 | }
110 | }
111 |
112 | if (i == ctx->num_eps) {
113 | fdput(f);
114 | ERR("wrong auth token passed\n");
115 | return NvSciError_BadParameter;
116 | }
117 |
118 | fdput(f);
119 |
120 | return NvSciError_Success;
121 | }
122 | EXPORT_SYMBOL(NvSciIpcEndpointValidateAuthTokenLinuxCurrent);
123 |
124 | NvSciError NvSciIpcEndpointMapVuid(NvSciIpcEndpointVuid localUserVuid,
125 | NvSciIpcTopoId *peerTopoId, NvSciIpcEndpointVuid *peerUserVuid)
126 | {
127 | uint32_t backend = NVSCIIPC_BACKEND_UNKNOWN;
128 | struct nvsciipc_config_entry *entry;
129 | int i;
130 | NvSciError ret;
131 |
132 | if ((peerTopoId == NULL) || (peerUserVuid == NULL)) {
133 | ERR("Invalid parameter\n");
134 | return NvSciError_BadParameter;
135 | }
136 |
137 | if ((ctx == NULL) || (ctx->set_db_f != true)) {
138 | ERR("not initialized\n");
139 | return NvSciError_NotInitialized;
140 | }
141 |
142 | for (i = 0; i < ctx->num_eps; i++) {
143 | if (ctx->db[i]->vuid == localUserVuid) {
144 | backend = ctx->db[i]->backend;
145 | entry = ctx->db[i];
146 | break;
147 | }
148 | }
149 |
150 | if (i == ctx->num_eps) {
151 | ERR("wrong localUserVuid passed\n");
152 | return NvSciError_BadParameter;
153 | }
154 |
155 | switch (backend) {
156 | case NVSCIIPC_BACKEND_ITC:
157 | case NVSCIIPC_BACKEND_IPC:
158 | peerTopoId->SocId = NVSCIIPC_SELF_SOCID;
159 | peerTopoId->VmId = NVSCIIPC_SELF_VMID;
160 | *peerUserVuid = (localUserVuid ^ 1UL); /* paired endpoint VUIDs differ only in bit 0 */
161 | ret = NvSciError_Success;
162 | break;
163 | default:
164 | ret = NvSciError_NotSupported;
165 | break;
166 | }
167 |
168 | return ret;
169 | }
170 | EXPORT_SYMBOL(NvSciIpcEndpointMapVuid);
171 |
172 | static int nvsciipc_dev_open(struct inode *inode, struct file *filp)
173 | {
174 | struct nvsciipc *ctx = container_of(inode->i_cdev,
175 | struct nvsciipc, cdev);
176 |
177 | filp->private_data = ctx;
178 |
179 | return 0;
180 | }
181 |
182 | static void nvsciipc_free_db(struct nvsciipc *ctx)
183 | {
184 | int i;
185 |
186 | if ((ctx->num_eps != 0) && (ctx->set_db_f == true)) {
187 | for (i = 0; i < ctx->num_eps; i++)
188 | kfree(ctx->db[i]);
189 |
190 | kfree(ctx->db);
191 | }
192 |
193 | ctx->num_eps = 0;
194 | }
195 |
196 | static int nvsciipc_dev_release(struct inode *inode, struct file *filp)
197 | {
198 | filp->private_data = NULL;
199 |
200 | return 0;
201 | }
202 |
203 | #if DEBUG_AUTH_API
204 | static int nvsciipc_ioctl_validate_auth_token(struct nvsciipc *ctx,
205 | unsigned int cmd, unsigned long arg)
206 | {
207 | struct nvsciipc_validate_auth_token op;
208 | NvSciError err;
209 | int32_t ret = 0;
210 |
211 | if ((ctx->num_eps == 0) || (ctx->set_db_f != true)) {
212 | ERR("need to set endpoint database first\n");
213 | ret = -EPERM;
214 | goto exit;
215 | }
216 |
217 | if (copy_from_user(&op, (void __user *)arg, _IOC_SIZE(cmd))) {
218 | ERR("%s : copy_from_user failed\n", __func__);
219 | ret = -EFAULT;
220 | goto exit;
221 | }
222 |
223 | err = NvSciIpcEndpointValidateAuthTokenLinuxCurrent(op.auth_token,
224 | &op.local_vuid);
225 | if (err != NvSciError_Success) {
226 | ERR("%s : 0x%x\n", __func__, err);
227 | ret = -EINVAL;
228 | goto exit;
229 | }
230 |
231 | if (copy_to_user((void __user *)arg, &op, _IOC_SIZE(cmd))) {
232 | ERR("%s : copy_to_user failed\n", __func__);
233 | ret = -EFAULT;
234 | goto exit;
235 | }
236 |
237 | exit:
238 | return ret;
239 | }
240 |
241 | static int nvsciipc_ioctl_map_vuid(struct nvsciipc *ctx, unsigned int cmd,
242 | unsigned long arg)
243 | {
244 | struct nvsciipc_map_vuid op;
245 | NvSciError err;
246 | int32_t ret = 0;
247 |
248 | if ((ctx->num_eps == 0) || (ctx->set_db_f != true)) {
249 | ERR("need to set endpoint database first\n");
250 | ret = -EPERM;
251 | goto exit;
252 | }
253 |
254 | if (copy_from_user(&op, (void __user *)arg, _IOC_SIZE(cmd))) {
255 | ERR("%s : copy_from_user failed\n", __func__);
256 | ret = -EFAULT;
257 | goto exit;
258 | }
259 |
260 | err = NvSciIpcEndpointMapVuid(op.vuid, (NvSciIpcTopoId *)&op.peer_topoid,
261 | &op.peer_vuid);
262 | if (err != NvSciError_Success) {
263 | ERR("%s : 0x%x\n", __func__, err);
264 | ret = -EINVAL;
265 | goto exit;
266 | }
267 |
268 | if (copy_to_user((void __user *)arg, &op, _IOC_SIZE(cmd))) {
269 | ERR("%s : copy_to_user failed\n", __func__);
270 | ret = -EFAULT;
271 | goto exit;
272 | }
273 |
274 | exit:
275 | return ret;
276 | }
277 | #endif /* DEBUG_AUTH_API */
278 |
279 | static int nvsciipc_ioctl_get_db_by_name(struct nvsciipc *ctx, unsigned int cmd,
280 | unsigned long arg)
281 | {
282 | struct nvsciipc_get_db_by_name get_db;
283 | int i;
284 |
285 | if ((ctx->num_eps == 0) || (ctx->set_db_f != true)) {
286 | ERR("need to set endpoint database first\n");
287 | return -EPERM;
288 | }
289 |
290 | if (copy_from_user(&get_db, (void __user *)arg, _IOC_SIZE(cmd))) {
291 | ERR("%s : copy_from_user failed\n", __func__);
292 | return -EFAULT;
293 | }
294 |
295 | /* read operation */
296 | for (i = 0; i < ctx->num_eps; i++) {
297 | if (!strncmp(get_db.ep_name, ctx->db[i]->ep_name,
298 | NVSCIIPC_MAX_EP_NAME)) {
299 | get_db.entry = *ctx->db[i];
300 | get_db.idx = i;
301 | break;
302 | }
303 | }
304 |
305 | if (i == ctx->num_eps) {
306 | return -ENOENT;
307 | } else if (copy_to_user((void __user *)arg, &get_db,
308 | _IOC_SIZE(cmd))) {
309 | ERR("%s : copy_to_user failed\n", __func__);
310 | return -EFAULT;
311 | }
312 |
313 | return 0;
314 | }
315 |
316 | static int nvsciipc_ioctl_get_db_by_vuid(struct nvsciipc *ctx, unsigned int cmd,
317 | unsigned long arg)
318 | {
319 | struct nvsciipc_get_db_by_vuid get_db;
320 | int i;
321 |
322 | if ((ctx->num_eps == 0) || (ctx->set_db_f != true)) {
323 | ERR("need to set endpoint database first\n");
324 | return -EPERM;
325 | }
326 |
327 | if (copy_from_user(&get_db, (void __user *)arg, _IOC_SIZE(cmd))) {
328 | ERR("%s : copy_from_user failed\n", __func__);
329 | return -EFAULT;
330 | }
331 |
332 | /* read operation */
333 | for (i = 0; i < ctx->num_eps; i++) {
334 | if (get_db.vuid == ctx->db[i]->vuid) {
335 | get_db.entry = *ctx->db[i];
336 | get_db.idx = i;
337 | break;
338 | }
339 | }
340 |
341 | if (i == ctx->num_eps) {
342 | return -ENOENT;
343 | } else if (copy_to_user((void __user *)arg, &get_db,
344 | _IOC_SIZE(cmd))) {
345 | ERR("%s : copy_to_user failed\n", __func__);
346 | return -EFAULT;
347 | }
348 |
349 | return 0;
350 | }
351 |
352 | static int nvsciipc_ioctl_get_vuid(struct nvsciipc *ctx, unsigned int cmd,
353 | unsigned long arg)
354 | {
355 | struct nvsciipc_get_vuid get_vuid;
356 | int i;
357 |
358 | if ((ctx->num_eps == 0) || (ctx->set_db_f != true)) {
359 | ERR("need to set endpoint database first\n");
360 | return -EPERM;
361 | }
362 |
363 | if (copy_from_user(&get_vuid, (void __user *)arg, _IOC_SIZE(cmd))) {
364 | ERR("%s : copy_from_user failed\n", __func__);
365 | return -EFAULT;
366 | }
367 |
368 | /* read operation */
369 | for (i = 0; i < ctx->num_eps; i++) {
370 | if (!strncmp(get_vuid.ep_name, ctx->db[i]->ep_name,
371 | NVSCIIPC_MAX_EP_NAME)) {
372 | get_vuid.vuid = ctx->db[i]->vuid;
373 | break;
374 | }
375 | }
376 |
377 | if (i == ctx->num_eps) {
378 | return -ENOENT;
379 | } else if (copy_to_user((void __user *)arg, &get_vuid,
380 | _IOC_SIZE(cmd))) {
381 | ERR("%s : copy_to_user failed\n", __func__);
382 | return -EFAULT;
383 | }
384 |
385 | return 0;
386 | }
387 |
388 | static int nvsciipc_ioctl_set_db(struct nvsciipc *ctx, unsigned int cmd,
389 | unsigned long arg)
390 | {
391 | struct nvsciipc_db user_db;
392 | struct nvsciipc_config_entry **entry_ptr;
393 | int ret = 0;
394 | int i;
395 |
396 | /* check root user */
397 | if (current_cred()->uid.val != 0) {
398 | ERR("no permission to set db\n");
399 | return -EPERM;
400 | }
401 |
402 | if ((ctx->num_eps != 0) || (ctx->set_db_f == true)) {
403 | ERR("nvsciipc db is set already\n");
404 | return -EPERM;
405 | }
406 |
407 | if (copy_from_user(&user_db, (void __user *)arg, _IOC_SIZE(cmd))) {
408 | ERR("copying user db failed\n");
409 | return -EFAULT;
410 | }
411 |
412 | if (user_db.num_eps <= 0) {
413 | ERR("invalid value passed for num_eps\n");
414 | return -EINVAL;
415 | }
416 |
417 | ctx->num_eps = user_db.num_eps;
418 |
419 | entry_ptr = kcalloc(ctx->num_eps,
420 | sizeof(struct nvsciipc_config_entry *),
421 | GFP_KERNEL); /* kcalloc guards against count * size overflow */
422 |
423 | if (entry_ptr == NULL) {
424 | ERR("memory allocation for entry_ptr failed\n");
425 | ret = -ENOMEM;
426 | goto ptr_error;
427 | }
428 |
429 | ret = copy_from_user(entry_ptr, (void __user *)user_db.entry,
430 | ctx->num_eps * sizeof(struct nvsciipc_config_entry *));
431 | if (ret != 0) { /* copy_from_user returns bytes not copied, never < 0 */
432 | ERR("copying entry ptr failed\n");
433 | ret = -EFAULT;
434 | goto ptr_error;
435 | }
436 |
437 | ctx->db = kcalloc(ctx->num_eps,
438 | sizeof(struct nvsciipc_config_entry *),
439 | GFP_KERNEL);
440 |
441 | if (ctx->db == NULL) {
442 | ERR("memory allocation for ctx->db failed\n");
443 | ret = -ENOMEM;
444 | goto ptr_error;
445 | }
446 |
447 | for (i = 0; i < ctx->num_eps; i++) {
448 | ctx->db[i] = (struct nvsciipc_config_entry *)
449 | kzalloc(sizeof(struct nvsciipc_config_entry),
450 | GFP_KERNEL);
451 |
452 | if (ctx->db[i] == NULL) {
453 | ERR("memory allocation for ctx->db[%d] failed\n", i);
454 | ret = -ENOMEM;
455 | goto ptr_error;
456 | }
457 |
458 | ret = copy_from_user(ctx->db[i], (void __user *)entry_ptr[i],
459 | sizeof(struct nvsciipc_config_entry));
460 | if (ret != 0) {
461 | ERR("copying config entry failed\n");
462 | ret = -EFAULT;
463 | goto ptr_error;
464 | }
465 | }
466 |
467 | kfree(entry_ptr);
468 |
469 | ctx->set_db_f = true;
470 |
471 | return ret;
472 |
473 | ptr_error:
474 | if (ctx->db != NULL) {
475 | for (i = 0; i < ctx->num_eps; i++) {
476 | if (ctx->db[i] != NULL) {
477 | memset(ctx->db[i], 0, sizeof(struct nvsciipc_config_entry));
478 | kfree(ctx->db[i]);
479 | }
480 | }
481 |
482 | kfree(ctx->db);
483 | ctx->db = NULL;
484 | }
485 |
486 | if (entry_ptr != NULL)
487 | kfree(entry_ptr);
488 |
489 | ctx->num_eps = 0;
490 |
491 | return ret;
492 | }
493 |
494 | static int nvsciipc_ioctl_get_dbsize(struct nvsciipc *ctx, unsigned int cmd,
495 | unsigned long arg)
496 | {
497 | int32_t ret = 0;
498 |
499 | if (ctx->set_db_f != true) {
500 | ERR("need to set endpoint database first\n");
501 | ret = -EPERM;
502 | goto exit;
503 | }
504 |
505 | if (copy_to_user((void __user *)arg, (void *)&ctx->num_eps,
506 | _IOC_SIZE(cmd))) {
507 | ERR("%s : copy_to_user failed\n", __func__);
508 | ret = -EFAULT;
509 | goto exit;
510 | }
511 |
512 | DBG("%s : entry count: %d\n", __func__, ctx->num_eps);
513 |
514 | exit:
515 | return ret;
516 | }
517 |
518 | static long nvsciipc_dev_ioctl(struct file *filp, unsigned int cmd,
519 | unsigned long arg)
520 | {
521 | struct nvsciipc *ctx = filp->private_data;
522 | long ret = 0;
523 |
524 | if (_IOC_TYPE(cmd) != NVSCIIPC_IOCTL_MAGIC) {
525 | ERR("%s: not a nvsciipc ioctl\n", __func__);
526 | ret = -ENOTTY;
527 | goto exit;
528 | }
529 |
530 | if (_IOC_NR(cmd) > NVSCIIPC_IOCTL_NUMBER_MAX) {
531 | ERR("%s: wrong nvsciipc ioctl cmd: 0x%x\n", __func__, cmd);
532 | ret = -ENOTTY;
533 | goto exit;
534 | }
535 |
536 | switch (cmd) {
537 | case NVSCIIPC_IOCTL_SET_DB:
538 | mutex_lock(&nvsciipc_mutex);
539 | ret = nvsciipc_ioctl_set_db(ctx, cmd, arg);
540 | mutex_unlock(&nvsciipc_mutex);
541 | break;
542 | case NVSCIIPC_IOCTL_GET_VUID:
543 | ret = nvsciipc_ioctl_get_vuid(ctx, cmd, arg);
544 | break;
545 | case NVSCIIPC_IOCTL_GET_DB_BY_NAME:
546 | ret = nvsciipc_ioctl_get_db_by_name(ctx, cmd, arg);
547 | break;
548 | case NVSCIIPC_IOCTL_GET_DB_BY_VUID:
549 | ret = nvsciipc_ioctl_get_db_by_vuid(ctx, cmd, arg);
550 | break;
551 | case NVSCIIPC_IOCTL_GET_DB_SIZE:
552 | ret = nvsciipc_ioctl_get_dbsize(ctx, cmd, arg);
553 | break;
554 | #if DEBUG_AUTH_API
555 | case NVSCIIPC_IOCTL_VALIDATE_AUTH_TOKEN:
556 | ret = nvsciipc_ioctl_validate_auth_token(ctx, cmd, arg);
557 | break;
558 | case NVSCIIPC_IOCTL_MAP_VUID:
559 | ret = nvsciipc_ioctl_map_vuid(ctx, cmd, arg);
560 | break;
561 | #endif /* DEBUG_AUTH_API */
562 | case NVSCIIPC_IOCTL_GET_VMID:
563 | ret = -EFAULT; /* not implemented in this driver */
564 | break;
565 | default:
566 | ERR("unrecognised ioctl cmd: 0x%x\n", cmd);
567 | ret = -ENOTTY;
568 | break;
569 | }
570 |
571 | exit:
572 | return ret;
573 | }
574 |
575 | static ssize_t nvsciipc_dbg_read(struct file *filp, char __user *buf,
576 | size_t count, loff_t *f_pos)
577 | {
578 | struct nvsciipc *ctx = filp->private_data;
579 | int i;
580 |
581 | /* check root user */
582 | if (current_cred()->uid.val != 0) {
583 | ERR("no permission to read db\n");
584 | return -EPERM;
585 | }
586 |
587 | if (ctx->set_db_f != true) {
588 | ERR("need to set endpoint database first\n");
589 | return -EPERM;
590 | }
591 |
592 | for (i = 0; i < ctx->num_eps; i++) {
593 | INFO("EP[%03d]: ep_name: %s, dev_name: %s, backend: %u, nframes: %u, "
594 | "frame_size: %u, id: %u\n", i,
595 | ctx->db[i]->ep_name,
596 | ctx->db[i]->dev_name,
597 | ctx->db[i]->backend,
598 | ctx->db[i]->nframes,
599 | ctx->db[i]->frame_size,
600 | ctx->db[i]->id);
601 | }
602 |
603 | return 0;
604 | }
605 |
606 | static const struct file_operations nvsciipc_fops = {
607 | .owner = THIS_MODULE,
608 | .open = nvsciipc_dev_open,
609 | .release = nvsciipc_dev_release,
610 | .unlocked_ioctl = nvsciipc_dev_ioctl,
611 | .llseek = no_llseek,
612 | .read = nvsciipc_dbg_read,
613 | };
614 |
615 | static int nvsciipc_probe(struct platform_device *pdev)
616 | {
617 | int ret = 0;
618 |
619 | if (pdev == NULL) {
620 | ERR("invalid platform device\n");
621 | ret = -EINVAL;
622 | goto error;
623 | }
624 |
625 | ctx = devm_kzalloc(&pdev->dev, sizeof(struct nvsciipc), GFP_KERNEL);
626 | if (ctx == NULL) {
627 | ERR("devm_kzalloc failed for nvsciipc\n");
628 | ret = -ENOMEM;
629 | goto error;
630 | }
631 | ctx->set_db_f = false;
632 |
633 | ctx->dev = &(pdev->dev);
634 | platform_set_drvdata(pdev, ctx);
635 |
636 | ctx->nvsciipc_class = class_create(THIS_MODULE, MODULE_NAME);
637 | if (IS_ERR(ctx->nvsciipc_class)) {
638 | ERR("failed to create class: %ld\n",
639 | PTR_ERR(ctx->nvsciipc_class));
640 | ret = PTR_ERR(ctx->nvsciipc_class);
641 | goto error;
642 | }
643 |
644 | ret = alloc_chrdev_region(&(ctx->dev_t), 0, 1, MODULE_NAME);
645 | if (ret != 0) {
646 | ERR("alloc_chrdev_region() failed\n");
647 | goto error;
648 | }
649 |
650 | ctx->dev_t = MKDEV(MAJOR(ctx->dev_t), 0);
651 | cdev_init(&ctx->cdev, &nvsciipc_fops);
652 | ctx->cdev.owner = THIS_MODULE;
653 |
654 | ret = cdev_add(&(ctx->cdev), ctx->dev_t, 1);
655 | if (ret != 0) {
656 | ERR("cdev_add() failed\n");
657 | goto error;
658 | }
659 |
660 | if (snprintf(ctx->device_name, (MAX_NAME_SIZE - 1), "%s", MODULE_NAME) < 0) {
661 | pr_err("snprintf() failed\n");
662 | ret = -ENOMEM;
663 | goto error;
664 | }
665 |
666 | ctx->device = device_create(ctx->nvsciipc_class, NULL,
667 | ctx->dev_t, ctx,
668 | ctx->device_name, 0);
669 | if (IS_ERR(ctx->device)) {
670 | ret = PTR_ERR(ctx->device);
671 | ERR("device_create() failed\n");
672 | goto error;
673 | }
674 | dev_set_drvdata(ctx->device, ctx);
675 |
676 | INFO("loaded module\n");
677 |
678 | return ret;
679 |
680 | error:
681 | nvsciipc_cleanup(ctx);
682 |
683 | return ret;
684 | }
685 |
686 | static void nvsciipc_cleanup(struct nvsciipc *ctx)
687 | {
688 | if (ctx == NULL)
689 | return;
690 |
691 | nvsciipc_free_db(ctx);
692 |
693 | if (ctx->nvsciipc_class && ctx->dev_t)
694 | device_destroy(ctx->nvsciipc_class, ctx->dev_t);
695 |
696 | if (ctx->device != NULL) {
697 | cdev_del(&ctx->cdev);
698 | ctx->device = NULL;
699 | }
700 |
701 | if (ctx->dev_t) {
702 | unregister_chrdev_region(ctx->dev_t, 1);
703 | ctx->dev_t = 0;
704 | }
705 |
706 | if (ctx->nvsciipc_class) {
707 | class_destroy(ctx->nvsciipc_class);
708 | ctx->nvsciipc_class = NULL;
709 | }
710 |
711 | devm_kfree(ctx->dev, ctx);
712 | ctx = NULL;
713 | }
714 |
715 | static int nvsciipc_remove(struct platform_device *pdev)
716 | {
717 | struct nvsciipc *ctx = NULL;
718 |
719 | if (pdev == NULL) {
720 | ERR("%s: pdev is NULL\n", __func__);
721 | goto exit;
722 | }
723 |
724 | ctx = (struct nvsciipc *)platform_get_drvdata(pdev);
725 | if (ctx == NULL) {
726 | ERR("%s: ctx is NULL\n", __func__);
727 | goto exit;
728 | }
729 |
730 | nvsciipc_cleanup(ctx);
731 |
732 | exit:
733 | INFO("Unloaded module\n");
734 |
735 | return 0;
736 | }
737 |
738 | static struct platform_driver nvsciipc_driver = {
739 | .probe = nvsciipc_probe,
740 | .remove = nvsciipc_remove,
741 | .driver = {
742 | .name = MODULE_NAME,
743 | },
744 | };
745 |
746 | static int __init nvsciipc_module_init(void)
747 | {
748 | int ret;
749 |
750 | ret = platform_driver_register(&nvsciipc_driver);
751 | if (ret) {
752 | ERR("%s: platform_driver_register: %d\n", __func__, ret);
753 | return ret;
754 | }
755 |
756 | nvsciipc_pdev = platform_device_register_simple(MODULE_NAME, -1,
757 | NULL, 0);
758 | if (IS_ERR(nvsciipc_pdev)) {
759 | ERR("%s: platform_device_register_simple\n", __func__);
760 | platform_driver_unregister(&nvsciipc_driver);
761 | return PTR_ERR(nvsciipc_pdev);
762 | }
763 |
764 | return 0;
765 | }
766 |
767 | static void __exit nvsciipc_module_deinit(void)
768 | {
769 | /* calls nvsciipc_remove() internally */
770 | platform_device_unregister(nvsciipc_pdev);
771 |
772 | platform_driver_unregister(&nvsciipc_driver);
773 | }
774 |
775 | module_init(nvsciipc_module_init);
776 | module_exit(nvsciipc_module_deinit);
777 |
778 | MODULE_LICENSE("GPL v2");
779 | MODULE_AUTHOR("Nvidia Corporation");
780 |
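NOTE (usage sketch): nvsciipc_ioctl_set_db() expects a struct nvsciipc_db
whose entry field points to a userspace array of pointers, one per
nvsciipc_config_entry, and the database can be set exactly once, by root. A
minimal sketch of such a privileged init step follows; the endpoint names,
field values, and uapi header path are illustrative assumptions.

    #include <fcntl.h>
    #include <stdint.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include "uapi/linux/nvsciipc_ioctl.h"

    int example_set_db(void)
    {
            struct nvsciipc_config_entry entry;
            struct nvsciipc_config_entry *entries[1] = { &entry };
            struct nvsciipc_db db = { .num_eps = 1, .entry = entries };
            int fd, ret;

            memset(&entry, 0, sizeof(entry));
            strncpy(entry.ep_name, "example_ep_0", NVSCIIPC_MAX_EP_NAME - 1);
            strncpy(entry.dev_name, "example_shm", NVSCIIPC_MAX_EP_NAME - 1);
            entry.backend = 1;      /* inter-process (IPC) backend */
            entry.nframes = 16;
            entry.frame_size = 1024;
            entry.id = 0;
            entry.vuid = 0x1;

            fd = open("/dev/nvsciipc", O_RDWR); /* SET_DB requires root */
            if (fd < 0)
                    return -1;
            ret = ioctl(fd, NVSCIIPC_IOCTL_SET_DB, &db);
            close(fd);
            return ret;
    }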
--------------------------------------------------------------------------------
/nvsciipc/nvsciipc.h:
--------------------------------------------------------------------------------
1 | /*
2 | * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3 | * SPDX-License-Identifier: GPL-2.0-only
4 | *
5 | * This program is free software; you can redistribute it and/or modify it
6 | * under the terms and conditions of the GNU General Public License,
7 | * version 2, as published by the Free Software Foundation.
8 | *
9 | * This program is distributed in the hope it will be useful, but WITHOUT
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 | * more details.
13 | *
14 | * You should have received a copy of the GNU General Public License
15 | * along with this program. If not, see <https://www.gnu.org/licenses/>.
16 | */
17 |
18 | #ifndef __NVSCIIPC_KERNEL_H__
19 | #define __NVSCIIPC_KERNEL_H__
20 |
21 | #include <linux/nvsciipc_interface.h>
22 | #include <uapi/linux/nvsciipc_ioctl.h>
23 |
24 | #define ERR(...) pr_err("nvsciipc: " __VA_ARGS__)
25 | #define INFO(...) pr_info("nvsciipc: " __VA_ARGS__)
26 | #define DBG(...) pr_debug("nvsciipc: " __VA_ARGS__)
27 |
28 | #define MODULE_NAME "nvsciipc"
29 | #define MAX_NAME_SIZE 64
30 |
31 | #define NVSCIIPC_BACKEND_ITC 0U
32 | #define NVSCIIPC_BACKEND_IPC 1U
33 | #define NVSCIIPC_BACKEND_IVC 2U
34 | #define NVSCIIPC_BACKEND_C2C_PCIE 3U
35 | #define NVSCIIPC_BACKEND_C2C_NPM 4U
36 | #define NVSCIIPC_BACKEND_UNKNOWN 0xFFFFFFFFU
37 |
38 | struct nvsciipc {
39 | struct device *dev;
40 |
41 | dev_t dev_t;
42 | struct class *nvsciipc_class;
43 | struct cdev cdev;
44 | struct device *device;
45 | char device_name[MAX_NAME_SIZE];
46 |
47 | int num_eps;
48 | struct nvsciipc_config_entry **db;
49 | volatile bool set_db_f;
50 | };
51 |
52 | struct vuid_bitfield_64 {
53 | uint64_t index : 16;
54 | uint64_t type : 4;
55 | uint64_t vmid : 8;
56 | uint64_t socid : 28;
57 | uint64_t reserved : 8;
58 | };
59 |
60 | union nvsciipc_vuid_64 {
61 | uint64_t value;
62 | struct vuid_bitfield_64 bit;
63 | };
64 |
65 | /***********************************************************************/
66 | /********************* Functions declaration ***************************/
67 | /***********************************************************************/
68 |
69 | static void nvsciipc_cleanup(struct nvsciipc *ctx);
70 |
71 | static int nvsciipc_dev_open(struct inode *inode, struct file *filp);
72 | static int nvsciipc_dev_release(struct inode *inode, struct file *filp);
73 | static long nvsciipc_dev_ioctl(struct file *filp, unsigned int cmd,
74 | unsigned long arg);
75 | static int nvsciipc_ioctl_get_vuid(struct nvsciipc *ctx, unsigned int cmd,
76 | unsigned long arg);
77 | static int nvsciipc_ioctl_set_db(struct nvsciipc *ctx, unsigned int cmd,
78 | unsigned long arg);
79 |
80 | #endif /* __NVSCIIPC_KERNEL_H__ */
81 |
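NOTE (usage sketch): with the usual little-endian bitfield allocation,
union nvsciipc_vuid_64 above lays the VUID out as bits [15:0] endpoint
index, [19:16] backend type, [27:20] VMID, [55:28] SOC ID, and [63:56]
reserved. A kernel-side decode helper might look like this; the function
name is illustrative.

    static void example_decode_vuid(uint64_t raw)
    {
            union nvsciipc_vuid_64 vuid;

            vuid.value = raw;
            pr_info("nvsciipc: index=%u type=%u vmid=%u socid=%u\n",
                    (uint32_t)vuid.bit.index,
                    (uint32_t)vuid.bit.type,
                    (uint32_t)vuid.bit.vmid,
                    (uint32_t)vuid.bit.socid);
    }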
--------------------------------------------------------------------------------
/nvsciipc/uapi/linux/nvsciipc_ioctl.h:
--------------------------------------------------------------------------------
1 | /*
2 | * SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3 | * SPDX-License-Identifier: GPL-2.0-only
4 | *
5 | * This program is free software; you can redistribute it and/or modify it
6 | * under the terms and conditions of the GNU General Public License,
7 | * version 2, as published by the Free Software Foundation.
8 | *
9 | * This program is distributed in the hope it will be useful, but WITHOUT
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 | * more details.
13 | *
14 | * You should have received a copy of the GNU General Public License
15 | * along with this program. If not, see <https://www.gnu.org/licenses/>.
16 | */
17 |
18 | #ifndef __NVSCIIPC_IOCTL_H__
19 | #define __NVSCIIPC_IOCTL_H__
20 |
21 | #include <linux/ioctl.h>
22 |
23 | #define NVSCIIPC_MAJOR_VERSION (1U)
24 | #define NVSCIIPC_MINOR_VERSION (0U)
25 |
26 | #define NVSCIIPC_MAX_EP_NAME 64U
27 | #define NVSCIIPC_MAX_RDMA_NAME 64U
28 | #define NVSCIIPC_MAX_IP_NAME 16U
29 |
30 | struct nvsciipc_config_entry {
31 | /* endpoint name */
32 | char ep_name[NVSCIIPC_MAX_EP_NAME];
33 | /* node name for shm/sem */
34 | char dev_name[NVSCIIPC_MAX_EP_NAME];
35 | uint32_t backend; /* backend type */
36 | uint32_t nframes; /* frame count */
37 | uint32_t frame_size; /* frame size */
38 | /* ep id for inter-Proc/Thread
39 | * queue id for inter-VM
40 | * dev id for inter-Chip
41 | */
42 | uint32_t id;
43 | uint64_t vuid; /* VM-wide unique id */
44 | char rdma_dev_name[NVSCIIPC_MAX_RDMA_NAME];
45 | char remote_ip[NVSCIIPC_MAX_IP_NAME];
46 | uint32_t remote_port;
47 | uint32_t local_port;
48 | uint32_t peer_vmid;
49 | };
50 |
51 | struct nvsciipc_db {
52 | int num_eps;
53 | struct nvsciipc_config_entry **entry;
54 | };
55 |
56 | struct nvsciipc_get_vuid {
57 | char ep_name[NVSCIIPC_MAX_EP_NAME];
58 | uint64_t vuid;
59 | };
60 |
61 | struct nvsciipc_get_db_by_name {
62 | char ep_name[NVSCIIPC_MAX_EP_NAME];
63 | struct nvsciipc_config_entry entry;
64 | uint32_t idx;
65 | };
66 |
67 | struct nvsciipc_get_db_by_vuid {
68 | uint64_t vuid;
69 | struct nvsciipc_config_entry entry;
70 | uint32_t idx;
71 | };
72 |
73 | /* for userspace level test, debugging purpose only */
74 | struct nvsciipc_validate_auth_token {
75 | uint32_t auth_token;
76 | uint64_t local_vuid;
77 | };
78 |
79 | /* NvSciIpcTopoId type */
80 | struct nvsciipc_topoid {
81 | uint32_t socid;
82 | uint32_t vmid;
83 | };
84 |
85 | /* for userspace level test, debugging purpose only */
86 | struct nvsciipc_map_vuid {
87 | uint64_t vuid;
88 | struct nvsciipc_topoid peer_topoid;
89 | uint64_t peer_vuid;
90 | };
91 |
92 | /* IOCTL magic number - see ioctl-number.txt for availability */
93 | #define NVSCIIPC_IOCTL_MAGIC 0xC3
94 |
95 | #define NVSCIIPC_IOCTL_SET_DB \
96 | _IOW(NVSCIIPC_IOCTL_MAGIC, 1, struct nvsciipc_db)
97 |
98 | #define NVSCIIPC_IOCTL_GET_VUID \
99 | _IOWR(NVSCIIPC_IOCTL_MAGIC, 2, struct nvsciipc_get_vuid)
100 |
101 | #define NVSCIIPC_IOCTL_GET_DB_BY_NAME \
102 | _IOWR(NVSCIIPC_IOCTL_MAGIC, 3, struct nvsciipc_get_db_by_name)
103 |
104 | #define NVSCIIPC_IOCTL_GET_DB_BY_VUID \
105 | _IOWR(NVSCIIPC_IOCTL_MAGIC, 4, struct nvsciipc_get_db_by_vuid)
106 |
107 | #define NVSCIIPC_IOCTL_GET_DB_SIZE \
108 | _IOR(NVSCIIPC_IOCTL_MAGIC, 5, uint32_t)
109 |
110 | /* debugging purpose only */
111 | #define NVSCIIPC_IOCTL_VALIDATE_AUTH_TOKEN \
112 | _IOWR(NVSCIIPC_IOCTL_MAGIC, 6, struct nvsciipc_validate_auth_token)
113 |
114 | /* debugging purpose only */
115 | #define NVSCIIPC_IOCTL_MAP_VUID \
116 | _IOWR(NVSCIIPC_IOCTL_MAGIC, 7, struct nvsciipc_map_vuid)
117 |
118 | #define NVSCIIPC_IOCTL_GET_VMID \
119 | _IOWR(NVSCIIPC_IOCTL_MAGIC, 8, uint32_t)
120 |
121 | #define NVSCIIPC_IOCTL_NUMBER_MAX 8
122 |
123 | #endif /* __NVSCIIPC_IOCTL_H__ */
124 |
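NOTE (usage sketch): once the database has been populated via
NVSCIIPC_IOCTL_SET_DB, any client within the permitted group can query it.
A minimal lookup of an endpoint's VUID by name follows; the endpoint name
and header path are illustrative assumptions.

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include "uapi/linux/nvsciipc_ioctl.h"

    int main(void)
    {
            struct nvsciipc_get_vuid op;
            int fd = open("/dev/nvsciipc", O_RDWR);

            if (fd < 0)
                    return 1;
            memset(&op, 0, sizeof(op));
            strncpy(op.ep_name, "example_ep_0", NVSCIIPC_MAX_EP_NAME - 1);
            if (ioctl(fd, NVSCIIPC_IOCTL_GET_VUID, &op) == 0)
                    printf("vuid: 0x%llx\n", (unsigned long long)op.vuid);
            close(fd);
            return 0;
    }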
--------------------------------------------------------------------------------