// SPDX-License-Identifier: GPL-2.0-only
/*
 * NVIDIA Tegra DRM GEM helper functions
 *
 * Copyright (C) 2012 Sascha Hauer, Pengutronix
 * Copyright (C) 2013-2015 NVIDIA CORPORATION, All rights reserved.
 *
 * Based on the GEM/CMA helpers
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 */

#include <linux/dma-buf.h>
#include <linux/iommu.h>

#include <drm/drm_drv.h>
#include <drm/drm_prime.h>
#include <drm/tegra_drm.h>

#include "drm.h"
#include "gem.h"

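/*
 * The host1x_bo get/put callbacks simply forward reference counting to the
 * embedded GEM object.
 */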
static void tegra_bo_put(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	drm_gem_object_put(&obj->gem);
}

/* XXX move this into lib/scatterlist.c? */
static int sg_alloc_table_from_sg(struct sg_table *sgt, struct scatterlist *sg,
				  unsigned int nents, gfp_t gfp_mask)
{
	struct scatterlist *dst;
	unsigned int i;
	int err;

	err = sg_alloc_table(sgt, nents, gfp_mask);
	if (err < 0)
		return err;

	dst = sgt->sgl;

	for (i = 0; i < nents; i++) {
		sg_set_page(dst, sg_page(sg), sg->length, 0);
		dst = sg_next(dst);
		sg = sg_next(sg);
	}

	return 0;
}

static struct sg_table *tegra_bo_pin(struct device *dev, struct host1x_bo *bo,
				     dma_addr_t *phys)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
	struct sg_table *sgt;
	int err;

	/*
	 * If we've manually mapped the buffer object through the IOMMU, make
	 * sure to return the IOVA address of our mapping.
	 *
	 * Similarly, for buffers that have been allocated by the DMA API the
	 * physical address can be used for devices that are not attached to
	 * an IOMMU. For these devices, callers must pass a valid pointer via
	 * the @phys argument.
	 *
	 * Imported buffers were also already mapped at import time, so the
	 * existing mapping can be reused.
	 */
	if (phys) {
		*phys = obj->iova;
		return NULL;
	}

	/*
	 * If we don't have a mapping for this buffer yet, return an SG table
	 * so that host1x can do the mapping for us via the DMA API.
	 */
	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return ERR_PTR(-ENOMEM);

	if (obj->pages) {
		/*
		 * If the buffer object was allocated from the explicit IOMMU
		 * API code paths, construct an SG table from the pages.
		 */
		err = sg_alloc_table_from_pages(sgt, obj->pages, obj->num_pages,
						0, obj->gem.size, GFP_KERNEL);
		if (err < 0)
			goto free;
	} else if (obj->sgt) {
		/*
		 * If the buffer object already has an SG table but no pages
		 * were allocated for it, it means the buffer was imported and
		 * the SG table needs to be copied to avoid overwriting any
		 * other potential users of the original SG table.
		 */
		err = sg_alloc_table_from_sg(sgt, obj->sgt->sgl,
					     obj->sgt->orig_nents, GFP_KERNEL);
		if (err < 0)
			goto free;
	} else {
		/*
		 * If the buffer object had no pages allocated and if it was
		 * not imported, it had to be allocated with the DMA API, so
		 * the DMA API helper can be used.
		 */
		err = dma_get_sgtable(dev, sgt, obj->vaddr, obj->iova,
				      obj->gem.size);
		if (err < 0)
			goto free;
	}

	return sgt;

free:
	kfree(sgt);
	return ERR_PTR(err);
}

static void tegra_bo_unpin(struct device *dev, struct sg_table *sgt)
{
	if (sgt) {
		sg_free_table(sgt);
		kfree(sgt);
	}
}

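/*
 * Return a kernel virtual address for the buffer: the existing vaddr for
 * DMA API allocations, a dma-buf vmap() for imported buffers, or a fresh
 * write-combined vmap() of the pages for IOMMU-backed allocations.
 */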
static void *tegra_bo_mmap(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	if (obj->vaddr)
		return obj->vaddr;
	else if (obj->gem.import_attach)
		return dma_buf_vmap(obj->gem.import_attach->dmabuf);
	else
		return vmap(obj->pages, obj->num_pages, VM_MAP,
			    pgprot_writecombine(PAGE_KERNEL));
}

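/*
 * Undo tegra_bo_mmap(); the kernel mapping of a DMA API allocation is
 * permanent, so there is nothing to release in that case.
 */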
static void tegra_bo_munmap(struct host1x_bo *bo, void *addr)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	if (obj->vaddr)
		return;
	else if (obj->gem.import_attach)
		dma_buf_vunmap(obj->gem.import_attach->dmabuf, addr);
	else
		vunmap(addr);
}

static struct host1x_bo *tegra_bo_get(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	drm_gem_object_get(&obj->gem);

	return bo;
}

static const struct host1x_bo_ops tegra_bo_ops = {
	.get = tegra_bo_get,
	.put = tegra_bo_put,
	.pin = tegra_bo_pin,
	.unpin = tegra_bo_unpin,
	.mmap = tegra_bo_mmap,
	.munmap = tegra_bo_munmap,
};

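/*
 * Reserve a node in the tegra->mm IOVA allocator and map the buffer's SG
 * table into the shared IOMMU domain at that address. On success, bo->iova
 * and bo->size describe the resulting mapping.
 */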
static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo)
{
	int prot = IOMMU_READ | IOMMU_WRITE;
	int err;

	if (bo->mm)
		return -EBUSY;

	bo->mm = kzalloc(sizeof(*bo->mm), GFP_KERNEL);
	if (!bo->mm)
		return -ENOMEM;

	mutex_lock(&tegra->mm_lock);

	err = drm_mm_insert_node_generic(&tegra->mm,
					 bo->mm, bo->gem.size, PAGE_SIZE, 0, 0);
	if (err < 0) {
		dev_err(tegra->drm->dev, "out of I/O virtual memory: %d\n",
			err);
		goto unlock;
	}

	bo->iova = bo->mm->start;

	bo->size = iommu_map_sgtable(tegra->domain, bo->iova, bo->sgt, prot);
	if (!bo->size) {
		dev_err(tegra->drm->dev, "failed to map buffer\n");
		err = -ENOMEM;
		goto remove;
	}

	mutex_unlock(&tegra->mm_lock);

	return 0;

remove:
	drm_mm_remove_node(bo->mm);
unlock:
	mutex_unlock(&tegra->mm_lock);
	kfree(bo->mm);
	return err;
}

static int tegra_bo_iommu_unmap(struct tegra_drm *tegra, struct tegra_bo *bo)
{
	if (!bo->mm)
		return 0;

	mutex_lock(&tegra->mm_lock);
	iommu_unmap(tegra->domain, bo->iova, bo->size);
	drm_mm_remove_node(bo->mm);
	mutex_unlock(&tegra->mm_lock);

	kfree(bo->mm);

	return 0;
}

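/*
 * Allocate and initialize the parts common to all tegra_bo objects: the
 * host1x_bo interface, the GEM object of page-aligned size and its mmap
 * offset. Backing storage is left to the caller.
 */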
static struct tegra_bo *tegra_bo_alloc_object(struct drm_device *drm,
					      size_t size)
{
	struct tegra_bo *bo;
	int err;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	host1x_bo_init(&bo->base, &tegra_bo_ops);
	size = round_up(size, PAGE_SIZE);

	err = drm_gem_object_init(drm, &bo->gem, size);
	if (err < 0)
		goto free;

	err = drm_gem_create_mmap_offset(&bo->gem);
	if (err < 0)
		goto release;

	return bo;

release:
	drm_gem_object_release(&bo->gem);
free:
	kfree(bo);
	return ERR_PTR(err);
}

static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo)
{
	if (bo->pages) {
		dma_unmap_sgtable(drm->dev, bo->sgt, DMA_FROM_DEVICE, 0);
		drm_gem_put_pages(&bo->gem, bo->pages, true, true);
		sg_free_table(bo->sgt);
		kfree(bo->sgt);
	} else if (bo->vaddr) {
		dma_free_wc(drm->dev, bo->gem.size, bo->vaddr, bo->iova);
	}
}

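/*
 * Back the GEM object with shmem pages, build an SG table from them and
 * map it for the device. This is the allocation path used when an IOMMU
 * domain is available.
 */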
static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo)
{
	int err;

	bo->pages = drm_gem_get_pages(&bo->gem);
	if (IS_ERR(bo->pages))
		return PTR_ERR(bo->pages);

	bo->num_pages = bo->gem.size >> PAGE_SHIFT;

	bo->sgt = drm_prime_pages_to_sg(bo->gem.dev, bo->pages, bo->num_pages);
	if (IS_ERR(bo->sgt)) {
		err = PTR_ERR(bo->sgt);
		goto put_pages;
	}

	err = dma_map_sgtable(drm->dev, bo->sgt, DMA_FROM_DEVICE, 0);
	if (err)
		goto free_sgt;

	return 0;

free_sgt:
	sg_free_table(bo->sgt);
	kfree(bo->sgt);
put_pages:
	drm_gem_put_pages(&bo->gem, bo->pages, false, false);
	return err;
}

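/*
 * Pick the backing store for a new buffer: discontiguous shmem pages
 * mapped through the IOMMU when a domain exists, otherwise a contiguous
 * write-combined DMA API allocation.
 */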
static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo)
{
	struct tegra_drm *tegra = drm->dev_private;
	int err;

	if (tegra->domain) {
		err = tegra_bo_get_pages(drm, bo);
		if (err < 0)
			return err;

		err = tegra_bo_iommu_map(tegra, bo);
		if (err < 0) {
			tegra_bo_free(drm, bo);
			return err;
		}
	} else {
		size_t size = bo->gem.size;

		bo->vaddr = dma_alloc_wc(drm->dev, size, &bo->iova,
					 GFP_KERNEL | __GFP_NOWARN);
		if (!bo->vaddr) {
			dev_err(drm->dev,
				"failed to allocate buffer of size %zu\n",
				size);
			return -ENOMEM;
		}
	}

	return 0;
}

struct tegra_bo *tegra_bo_create(struct drm_device *drm, size_t size,
				 unsigned long flags)
{
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_alloc_object(drm, size);
	if (IS_ERR(bo))
		return bo;

	err = tegra_bo_alloc(drm, bo);
	if (err < 0)
		goto release;

	if (flags & DRM_TEGRA_GEM_CREATE_TILED)
		bo->tiling.mode = TEGRA_BO_TILING_MODE_TILED;

	if (flags & DRM_TEGRA_GEM_CREATE_BOTTOM_UP)
		bo->flags |= TEGRA_BO_BOTTOM_UP;

	return bo;

release:
	drm_gem_object_release(&bo->gem);
	kfree(bo);
	return ERR_PTR(err);
}

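/*
 * Create a buffer and a userspace handle for it. The handle keeps the
 * object alive, so the creation reference is dropped before returning.
 */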
struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
					     struct drm_device *drm,
					     size_t size,
					     unsigned long flags,
					     u32 *handle)
{
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_create(drm, size, flags);
	if (IS_ERR(bo))
		return bo;

	err = drm_gem_handle_create(file, &bo->gem, handle);
	if (err) {
		tegra_bo_free_object(&bo->gem);
		return ERR_PTR(err);
	}

	drm_gem_object_put(&bo->gem);

	return bo;
}

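/*
 * Wrap a foreign dma-buf in a tegra_bo: attach to the buffer, map it for
 * DMA and, if an IOMMU domain exists, install an IOVA mapping. The
 * attachment holds its own reference to the dma-buf.
 */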
static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
					struct dma_buf *buf)
{
	struct tegra_drm *tegra = drm->dev_private;
	struct dma_buf_attachment *attach;
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_alloc_object(drm, buf->size);
	if (IS_ERR(bo))
		return bo;

	attach = dma_buf_attach(buf, drm->dev);
	if (IS_ERR(attach)) {
		err = PTR_ERR(attach);
		goto free;
	}

	get_dma_buf(buf);

	bo->sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
	if (IS_ERR(bo->sgt)) {
		err = PTR_ERR(bo->sgt);
		goto detach;
	}

	if (tegra->domain) {
		err = tegra_bo_iommu_map(tegra, bo);
		if (err < 0)
			goto detach;
	}

	bo->gem.import_attach = attach;

	return bo;

detach:
	if (!IS_ERR_OR_NULL(bo->sgt))
		dma_buf_unmap_attachment(attach, bo->sgt, DMA_TO_DEVICE);

	dma_buf_detach(buf, attach);
	dma_buf_put(buf);
free:
	drm_gem_object_release(&bo->gem);
	kfree(bo);
	return ERR_PTR(err);
}

void tegra_bo_free_object(struct drm_gem_object *gem)
{
	struct tegra_drm *tegra = gem->dev->dev_private;
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (tegra->domain)
		tegra_bo_iommu_unmap(tegra, bo);

	if (gem->import_attach) {
		dma_buf_unmap_attachment(gem->import_attach, bo->sgt,
					 DMA_TO_DEVICE);
		drm_prime_gem_destroy(gem, NULL);
	} else {
		tegra_bo_free(gem->dev, bo);
	}

	drm_gem_object_release(gem);
	kfree(bo);
}

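/*
 * Dumb buffer creation: round the pitch up to the driver's pitch alignment
 * before sizing the allocation.
 */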
int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
			 struct drm_mode_create_dumb *args)
{
	unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	struct tegra_drm *tegra = drm->dev_private;
	struct tegra_bo *bo;

	args->pitch = round_up(min_pitch, tegra->pitch_align);
	args->size = args->pitch * args->height;

	bo = tegra_bo_create_with_handle(file, drm, args->size, 0,
					 &args->handle);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	return 0;
}

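/*
 * Page fault handler for mmap()ed buffers. Only page-backed buffers fault
 * pages in lazily here; DMA API allocations are mapped upfront in
 * __tegra_gem_mmap() and should never fault.
 */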
static vm_fault_t tegra_bo_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *gem = vma->vm_private_data;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct page *page;
	pgoff_t offset;

	if (!bo->pages)
		return VM_FAULT_SIGBUS;

	offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
	page = bo->pages[offset];

	return vmf_insert_page(vma, vmf->address, page);
}

const struct vm_operations_struct tegra_bo_vm_ops = {
	.fault = tegra_bo_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

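/*
 * Set up a userspace mapping: DMA API allocations are mapped immediately
 * via dma_mmap_wc(), while page-backed buffers are populated lazily
 * through tegra_bo_fault().
 */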
int __tegra_gem_mmap(struct drm_gem_object *gem, struct vm_area_struct *vma)
{
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (!bo->pages) {
		unsigned long vm_pgoff = vma->vm_pgoff;
		int err;

		/*
		 * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(),
		 * and set the vm_pgoff (used as a fake buffer offset by DRM)
		 * to 0 as we want to map the whole buffer.
		 */
		vma->vm_flags &= ~VM_PFNMAP;
		vma->vm_pgoff = 0;

		err = dma_mmap_wc(gem->dev->dev, vma, bo->vaddr, bo->iova,
				  gem->size);
		if (err < 0) {
			drm_gem_vm_close(vma);
			return err;
		}

		vma->vm_pgoff = vm_pgoff;
	} else {
		pgprot_t prot = vm_get_page_prot(vma->vm_flags);

		vma->vm_flags |= VM_MIXEDMAP;
		vma->vm_flags &= ~VM_PFNMAP;

		vma->vm_page_prot = pgprot_writecombine(prot);
	}

	return 0;
}

int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct drm_gem_object *gem;
	int err;

	err = drm_gem_mmap(file, vma);
	if (err < 0)
		return err;

	gem = vma->vm_private_data;

	return __tegra_gem_mmap(gem, vma);
}

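/*
 * dma-buf exporter side of mapping: build a fresh SG table for the
 * attaching device, from the pages array when one exists or via
 * dma_get_sgtable() otherwise, then map it in the requested direction.
 */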
static struct sg_table *
tegra_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
			    enum dma_data_direction dir)
{
	struct drm_gem_object *gem = attach->dmabuf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct sg_table *sgt;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	if (bo->pages) {
		if (sg_alloc_table_from_pages(sgt, bo->pages, bo->num_pages,
					      0, gem->size, GFP_KERNEL) < 0)
			goto free;
	} else {
		if (dma_get_sgtable(attach->dev, sgt, bo->vaddr, bo->iova,
				    gem->size) < 0)
			goto free;
	}

	if (dma_map_sgtable(attach->dev, sgt, dir, 0))
		goto free;

	return sgt;

free:
	sg_free_table(sgt);
	kfree(sgt);
	return NULL;
}

static void tegra_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
					  struct sg_table *sgt,
					  enum dma_data_direction dir)
{
	struct drm_gem_object *gem = attach->dmabuf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (bo->pages)
		dma_unmap_sgtable(attach->dev, sgt, dir, 0);

	sg_free_table(sgt);
	kfree(sgt);
}

static void tegra_gem_prime_release(struct dma_buf *buf)
{
	drm_gem_dmabuf_release(buf);
}

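/*
 * CPU access bracketing: page-backed buffers may be cached, so their SG
 * table is synced for the CPU before access and back to the device after.
 * Contiguous allocations come from dma_alloc_wc() and need no sync.
 */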
static int tegra_gem_prime_begin_cpu_access(struct dma_buf *buf,
					    enum dma_data_direction direction)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct drm_device *drm = gem->dev;

	if (bo->pages)
		dma_sync_sgtable_for_cpu(drm->dev, bo->sgt, DMA_FROM_DEVICE);

	return 0;
}

static int tegra_gem_prime_end_cpu_access(struct dma_buf *buf,
					  enum dma_data_direction direction)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct drm_device *drm = gem->dev;

	if (bo->pages)
		dma_sync_sgtable_for_device(drm->dev, bo->sgt, DMA_TO_DEVICE);

	return 0;
}

static int tegra_gem_prime_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
{
	struct drm_gem_object *gem = buf->priv;
	int err;

	err = drm_gem_mmap_obj(gem, gem->size, vma);
	if (err < 0)
		return err;

	return __tegra_gem_mmap(gem, vma);
}

static void *tegra_gem_prime_vmap(struct dma_buf *buf)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);

	return bo->vaddr;
}

static void tegra_gem_prime_vunmap(struct dma_buf *buf, void *vaddr)
{
}

static const struct dma_buf_ops tegra_gem_prime_dmabuf_ops = {
	.map_dma_buf = tegra_gem_prime_map_dma_buf,
	.unmap_dma_buf = tegra_gem_prime_unmap_dma_buf,
	.release = tegra_gem_prime_release,
	.begin_cpu_access = tegra_gem_prime_begin_cpu_access,
	.end_cpu_access = tegra_gem_prime_end_cpu_access,
	.mmap = tegra_gem_prime_mmap,
	.vmap = tegra_gem_prime_vmap,
	.vunmap = tegra_gem_prime_vunmap,
};

struct dma_buf *tegra_gem_prime_export(struct drm_gem_object *gem,
				       int flags)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.exp_name = KBUILD_MODNAME;
	exp_info.owner = gem->dev->driver->fops->owner;
	exp_info.ops = &tegra_gem_prime_dmabuf_ops;
	exp_info.size = gem->size;
	exp_info.flags = flags;
	exp_info.priv = gem;

	return drm_gem_dmabuf_export(gem->dev, &exp_info);
}

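/*
 * Importing one of our own exports on the same device is short-circuited:
 * the existing GEM object is referenced directly instead of attaching
 * through the dma-buf machinery.
 */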
struct drm_gem_object *tegra_gem_prime_import(struct drm_device *drm,
					      struct dma_buf *buf)
{
	struct tegra_bo *bo;

	if (buf->ops == &tegra_gem_prime_dmabuf_ops) {
		struct drm_gem_object *gem = buf->priv;

		if (gem->dev == drm) {
			drm_gem_object_get(gem);
			return gem;
		}
	}

	bo = tegra_bo_import(drm, buf);
	if (IS_ERR(bo))
		return ERR_CAST(bo);

	return &bo->gem;
}