// SPDX-License-Identifier: GPL-2.0-only
/*
 * Framework for buffer objects that can be shared across devices/subsystems.
 *
 * Copyright(C) 2011 Linaro Limited. All rights reserved.
 * Author: Sumit Semwal <sumit.semwal@ti.com>
 *
 * Many thanks to the linaro-mm-sig list, and especially
 * Arnd Bergmann <arnd@arndb.de>, Rob Clark <rob@ti.com> and
 * Daniel Vetter <daniel@ffwll.ch> for their support in creation and
 * refining of this idea.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/dma-buf.h>
#include <linux/dma-fence.h>
#include <linux/anon_inodes.h>
#include <linux/export.h>
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/dma-resv.h>
#include <linux/mm.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>

#include <uapi/linux/dma-buf.h>
#include <uapi/linux/magic.h>

static inline int is_dma_buf_file(struct file *);

struct dma_buf_list {
	struct list_head head;
	struct mutex lock;
};

static struct dma_buf_list db_list;
static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen)
{
	struct dma_buf *dmabuf;
	char name[DMA_BUF_NAME_LEN];
	size_t ret = 0;

	dmabuf = dentry->d_fsdata;
	spin_lock(&dmabuf->name_lock);
	if (dmabuf->name)
		ret = strlcpy(name, dmabuf->name, DMA_BUF_NAME_LEN);
	spin_unlock(&dmabuf->name_lock);

	return dynamic_dname(dentry, buffer, buflen, "/%s:%s",
			     dentry->d_name.name, ret > 0 ? name : "");
}
static void dma_buf_release(struct dentry *dentry)
{
	struct dma_buf *dmabuf;

	dmabuf = dentry->d_fsdata;
	if (unlikely(!dmabuf))
		return;

	BUG_ON(dmabuf->vmapping_counter);

	/*
	 * Any fences that a dma-buf poll can wait on should be signaled
	 * before releasing dma-buf. This is the responsibility of each
	 * driver that uses the reservation objects.
	 *
	 * If you hit this BUG() it means someone dropped their ref to the
	 * dma-buf while still having pending operations on the buffer.
	 */
	BUG_ON(dmabuf->cb_shared.active || dmabuf->cb_excl.active);

	dmabuf->ops->release(dmabuf);

	if (dmabuf->resv == (struct dma_resv *)&dmabuf[1])
		dma_resv_fini(dmabuf->resv);

	WARN_ON(!list_empty(&dmabuf->attachments));
	module_put(dmabuf->owner);
	kfree(dmabuf->name);
	kfree(dmabuf);
}
static int dma_buf_file_release(struct inode *inode, struct file *file)
{
	struct dma_buf *dmabuf;

	if (!is_dma_buf_file(file))
		return -EINVAL;

	dmabuf = file->private_data;

	mutex_lock(&db_list.lock);
	list_del(&dmabuf->list_node);
	mutex_unlock(&db_list.lock);

	return 0;
}

static const struct dentry_operations dma_buf_dentry_ops = {
	.d_dname = dmabuffs_dname,
	.d_release = dma_buf_release,
};

static struct vfsmount *dma_buf_mnt;

static int dma_buf_fs_init_context(struct fs_context *fc)
{
	struct pseudo_fs_context *ctx;

	ctx = init_pseudo(fc, DMA_BUF_MAGIC);
	if (!ctx)
		return -ENOMEM;
	ctx->dops = &dma_buf_dentry_ops;
	return 0;
}

static struct file_system_type dma_buf_fs_type = {
	.name = "dmabuf",
	.init_fs_context = dma_buf_fs_init_context,
	.kill_sb = kill_anon_super,
};

static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
{
	struct dma_buf *dmabuf;

	if (!is_dma_buf_file(file))
		return -EINVAL;

	dmabuf = file->private_data;

	/* check if buffer supports mmap */
	if (!dmabuf->ops->mmap)
		return -EINVAL;

	/* check for overflowing the buffer's size */
	if (vma->vm_pgoff + vma_pages(vma) >
	    dmabuf->size >> PAGE_SHIFT)
		return -EINVAL;

	return dmabuf->ops->mmap(dmabuf, vma);
}

static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
{
	struct dma_buf *dmabuf;
	loff_t base;

	if (!is_dma_buf_file(file))
		return -EBADF;

	dmabuf = file->private_data;

	/* only support discovering the end of the buffer,
	   but also allow SEEK_SET to maintain the idiomatic
	   SEEK_END(0), SEEK_CUR(0) pattern */
	if (whence == SEEK_END)
		base = dmabuf->size;
	else if (whence == SEEK_SET)
		base = 0;
	else
		return -EINVAL;

	if (offset != 0)
		return -EINVAL;

	return base + offset;
}
/**
 * DOC: implicit fence polling
 *
 * To support cross-device and cross-driver synchronization of buffer access,
 * implicit fences (represented internally in the kernel with &struct dma_fence)
 * can be attached to a &dma_buf. The glue for that and a few related things are
 * provided in the &dma_resv structure.
 *
 * Userspace can query the state of these implicitly tracked fences using poll()
 * and related system calls:
 *
 * - Checking for EPOLLIN, i.e. read access, can be used to query the state of
 *   the most recent write or exclusive fence.
 *
 * - Checking for EPOLLOUT, i.e. write access, can be used to query the state of
 *   all attached fences, shared and exclusive ones.
 *
 * Note that this only signals the completion of the respective fences, i.e. the
 * DMA transfers are complete. Cache flushing and any other necessary
 * preparations before CPU access can begin still need to happen.
 */
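
/*
 * A minimal userspace sketch of the polling described above, assuming an
 * already-exported dma-buf file descriptor ("dmabuf_fd" is hypothetical,
 * error handling omitted)::
 *
 *	struct pollfd pfd = {
 *		.fd = dmabuf_fd,
 *		.events = POLLIN,	// wait on the exclusive/write fence
 *	};
 *
 *	// Blocks until the most recent write has completed; polling for
 *	// POLLOUT instead waits on all attached fences.
 *	poll(&pfd, 1, -1);
 */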

static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct dma_buf_poll_cb_t *dcb = (struct dma_buf_poll_cb_t *)cb;
	unsigned long flags;

	spin_lock_irqsave(&dcb->poll->lock, flags);
	wake_up_locked_poll(dcb->poll, dcb->active);
	dcb->active = 0;
	spin_unlock_irqrestore(&dcb->poll->lock, flags);
}

static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
{
	struct dma_buf *dmabuf;
	struct dma_resv *resv;
	struct dma_resv_list *fobj;
	struct dma_fence *fence_excl;
	__poll_t events;
	unsigned shared_count, seq;

	dmabuf = file->private_data;
	if (!dmabuf || !dmabuf->resv)
		return EPOLLERR;

	resv = dmabuf->resv;

	poll_wait(file, &dmabuf->poll, poll);

	events = poll_requested_events(poll) & (EPOLLIN | EPOLLOUT);
	if (!events)
		return 0;

retry:
	seq = read_seqcount_begin(&resv->seq);
	rcu_read_lock();

	fobj = rcu_dereference(resv->fence);
	if (fobj)
		shared_count = fobj->shared_count;
	else
		shared_count = 0;
	fence_excl = rcu_dereference(resv->fence_excl);
	if (read_seqcount_retry(&resv->seq, seq)) {
		rcu_read_unlock();
		goto retry;
	}

	if (fence_excl && (!(events & EPOLLOUT) || shared_count == 0)) {
		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_excl;
		__poll_t pevents = EPOLLIN;

		if (shared_count == 0)
			pevents |= EPOLLOUT;

		spin_lock_irq(&dmabuf->poll.lock);
		if (dcb->active) {
			dcb->active |= pevents;
			events &= ~pevents;
		} else
			dcb->active = pevents;
		spin_unlock_irq(&dmabuf->poll.lock);

		if (events & pevents) {
			if (!dma_fence_get_rcu(fence_excl)) {
				/* force a recheck */
				events &= ~pevents;
				dma_buf_poll_cb(NULL, &dcb->cb);
			} else if (!dma_fence_add_callback(fence_excl, &dcb->cb,
							   dma_buf_poll_cb)) {
				events &= ~pevents;
				dma_fence_put(fence_excl);
			} else {
				/*
				 * No callback queued, wake up any additional
				 * waiters.
				 */
				dma_fence_put(fence_excl);
				dma_buf_poll_cb(NULL, &dcb->cb);
			}
		}
	}

	if ((events & EPOLLOUT) && shared_count > 0) {
		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_shared;
		int i;

		/* Only queue a new callback if no event has fired yet */
		spin_lock_irq(&dmabuf->poll.lock);
		if (dcb->active)
			events &= ~EPOLLOUT;
		else
			dcb->active = EPOLLOUT;
		spin_unlock_irq(&dmabuf->poll.lock);

		if (!(events & EPOLLOUT))
			goto out;

		for (i = 0; i < shared_count; ++i) {
			struct dma_fence *fence = rcu_dereference(fobj->shared[i]);

			if (!dma_fence_get_rcu(fence)) {
				/*
				 * fence refcount dropped to zero, this means
				 * that fobj has been freed
				 *
				 * call dma_buf_poll_cb and force a recheck!
				 */
				events &= ~EPOLLOUT;
				dma_buf_poll_cb(NULL, &dcb->cb);
				break;
			}
			if (!dma_fence_add_callback(fence, &dcb->cb,
						    dma_buf_poll_cb)) {
				dma_fence_put(fence);
				events &= ~EPOLLOUT;
				break;
			}
			dma_fence_put(fence);
		}

		/* No callback queued, wake up any additional waiters. */
		if (i == shared_count)
			dma_buf_poll_cb(NULL, &dcb->cb);
	}

out:
	rcu_read_unlock();
	return events;
}
/**
 * dma_buf_set_name - Set a name on a specific dma_buf to track usage.
 * The name of the dma-buf buffer can only be set when the dma-buf is not
 * attached to any devices. It could theoretically support changing the
 * name of the dma-buf if the same piece of memory is used for multiple
 * purposes by different devices.
 *
 * @dmabuf: [in]	dmabuf buffer that will be renamed.
 * @buf:    [in]	A piece of userspace memory that contains the name of
 *			the dma-buf.
 *
 * Returns 0 on success. If the dma-buf buffer is already attached to
 * devices, return -EBUSY.
 *
 */
static long dma_buf_set_name(struct dma_buf *dmabuf, const char __user *buf)
{
	char *name = strndup_user(buf, DMA_BUF_NAME_LEN);
	long ret = 0;

	if (IS_ERR(name))
		return PTR_ERR(name);

	dma_resv_lock(dmabuf->resv, NULL);
	if (!list_empty(&dmabuf->attachments)) {
		ret = -EBUSY;
		kfree(name);
		goto out_unlock;
	}
	spin_lock(&dmabuf->name_lock);
	kfree(dmabuf->name);
	dmabuf->name = name;
	spin_unlock(&dmabuf->name_lock);

out_unlock:
	dma_resv_unlock(dmabuf->resv);
	return ret;
}
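
/*
 * Userspace reaches this through the DMA_BUF_SET_NAME ioctl (the _A/_B
 * entries in the ioctl switch below are its fixed-size variants); a hedged
 * sketch, with "fd" an assumed dma-buf file descriptor and error handling
 * omitted::
 *
 *	ioctl(fd, DMA_BUF_SET_NAME, "my-buffer");
 *
 * The name then shows up in /proc/<pid>/fdinfo and in the debugfs bufinfo
 * listing, which is the debugging use case this interface targets.
 */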

static long dma_buf_ioctl(struct file *file,
			  unsigned int cmd, unsigned long arg)
{
	struct dma_buf *dmabuf;
	struct dma_buf_sync sync;
	enum dma_data_direction direction;
	int ret;

	dmabuf = file->private_data;

	switch (cmd) {
	case DMA_BUF_IOCTL_SYNC:
		if (copy_from_user(&sync, (void __user *) arg, sizeof(sync)))
			return -EFAULT;

		if (sync.flags & ~DMA_BUF_SYNC_VALID_FLAGS_MASK)
			return -EINVAL;

		switch (sync.flags & DMA_BUF_SYNC_RW) {
		case DMA_BUF_SYNC_READ:
			direction = DMA_FROM_DEVICE;
			break;
		case DMA_BUF_SYNC_WRITE:
			direction = DMA_TO_DEVICE;
			break;
		case DMA_BUF_SYNC_RW:
			direction = DMA_BIDIRECTIONAL;
			break;
		default:
			return -EINVAL;
		}

		if (sync.flags & DMA_BUF_SYNC_END)
			ret = dma_buf_end_cpu_access(dmabuf, direction);
		else
			ret = dma_buf_begin_cpu_access(dmabuf, direction);

		return ret;

	case DMA_BUF_SET_NAME_A:
	case DMA_BUF_SET_NAME_B:
		return dma_buf_set_name(dmabuf, (const char __user *)arg);

	default:
		return -ENOTTY;
	}
}

static void dma_buf_show_fdinfo(struct seq_file *m, struct file *file)
{
	struct dma_buf *dmabuf = file->private_data;

	seq_printf(m, "size:\t%zu\n", dmabuf->size);
	/* Don't count the temporary reference taken inside procfs seq_show */
	seq_printf(m, "count:\t%ld\n", file_count(dmabuf->file) - 1);
	seq_printf(m, "exp_name:\t%s\n", dmabuf->exp_name);
	spin_lock(&dmabuf->name_lock);
	if (dmabuf->name)
		seq_printf(m, "name:\t%s\n", dmabuf->name);
	spin_unlock(&dmabuf->name_lock);
}

static const struct file_operations dma_buf_fops = {
	.release	= dma_buf_file_release,
	.mmap		= dma_buf_mmap_internal,
	.llseek		= dma_buf_llseek,
	.poll		= dma_buf_poll,
	.unlocked_ioctl	= dma_buf_ioctl,
	.compat_ioctl	= compat_ptr_ioctl,
	.show_fdinfo	= dma_buf_show_fdinfo,
};

/*
 * is_dma_buf_file - Check if struct file* is associated with dma_buf
 */
static inline int is_dma_buf_file(struct file *file)
{
	return file->f_op == &dma_buf_fops;
}

static struct file *dma_buf_getfile(struct dma_buf *dmabuf, int flags)
{
	struct file *file;
	struct inode *inode = alloc_anon_inode(dma_buf_mnt->mnt_sb);

	if (IS_ERR(inode))
		return ERR_CAST(inode);

	inode->i_size = dmabuf->size;
	inode_set_bytes(inode, dmabuf->size);

	file = alloc_file_pseudo(inode, dma_buf_mnt, "dmabuf",
				 flags, &dma_buf_fops);
	if (IS_ERR(file))
		goto err_alloc_file;
	file->f_flags = flags & (O_ACCMODE | O_NONBLOCK);
	file->private_data = dmabuf;
	file->f_path.dentry->d_fsdata = dmabuf;

	return file;

err_alloc_file:
	iput(inode);
	return file;
}
/**
 * DOC: dma buf device access
 *
 * For device DMA access to a shared DMA buffer the usual sequence of operations
 * is fairly simple (a condensed sketch follows this comment):
 *
 * 1. The exporter defines its exporter instance using
 *    DEFINE_DMA_BUF_EXPORT_INFO() and calls dma_buf_export() to wrap a private
 *    buffer object into a &dma_buf. It then exports that &dma_buf to userspace
 *    as a file descriptor by calling dma_buf_fd().
 *
 * 2. Userspace passes this file descriptor to all drivers it wants this buffer
 *    to share with: first the file descriptor is converted to a &dma_buf using
 *    dma_buf_get(). Then the buffer is attached to the device using
 *    dma_buf_attach().
 *
 *    Up to this stage the exporter is still free to migrate or reallocate the
 *    backing storage.
 *
 * 3. Once the buffer is attached to all devices userspace can initiate DMA
 *    access to the shared buffer. In the kernel this is done by calling
 *    dma_buf_map_attachment() and dma_buf_unmap_attachment().
 *
 * 4. Once a driver is done with a shared buffer it needs to call
 *    dma_buf_detach() (after cleaning up any mappings) and then release the
 *    reference acquired with dma_buf_get() by calling dma_buf_put().
 *
 * For the detailed semantics exporters are expected to implement see
 * &dma_buf_ops.
 */
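
/*
 * A condensed sketch of steps 1-4 above. "my_exporter_ops" and "my_priv"
 * are hypothetical stand-ins for a real driver's pieces, and all error
 * handling is omitted::
 *
 *	// Exporter side
 *	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 *	exp_info.ops = &my_exporter_ops;
 *	exp_info.size = size;
 *	exp_info.priv = my_priv;
 *	dmabuf = dma_buf_export(&exp_info);
 *	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
 *
 *	// Importer side, after receiving the fd from userspace
 *	dmabuf = dma_buf_get(fd);
 *	attach = dma_buf_attach(dmabuf, dev);
 *	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
 *
 *	// ... device DMA using sgt ...
 *
 *	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
 *	dma_buf_detach(dmabuf, attach);
 *	dma_buf_put(dmabuf);
 */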

/**
 * dma_buf_export - Creates a new dma_buf, and associates an anon file
 * with this buffer, so it can be exported.
 * Also connects the allocator-specific data and ops to the buffer.
 * Additionally, provides a name string for the exporter; useful in debugging.
 *
 * @exp_info:	[in]	holds all the export related information provided
 *			by the exporter. See &struct dma_buf_export_info
 *			for further details.
 *
 * Returns, on success, a newly created dma_buf object, which wraps the
 * supplied private data and operations for dma_buf_ops. On either missing
 * ops or an error in allocating struct dma_buf, will return a negative error.
 *
 * For most cases the easiest way to create @exp_info is through the
 * %DEFINE_DMA_BUF_EXPORT_INFO macro.
 */
struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
{
	struct dma_buf *dmabuf;
	struct dma_resv *resv = exp_info->resv;
	struct file *file;
	size_t alloc_size = sizeof(struct dma_buf);
	int ret;

	if (!exp_info->resv)
		alloc_size += sizeof(struct dma_resv);
	else
		/* prevent &dma_buf[1] == dma_buf->resv */
		alloc_size += 1;

	if (WARN_ON(!exp_info->priv
			  || !exp_info->ops
			  || !exp_info->ops->map_dma_buf
			  || !exp_info->ops->unmap_dma_buf
			  || !exp_info->ops->release)) {
		return ERR_PTR(-EINVAL);
	}

	if (WARN_ON(exp_info->ops->cache_sgt_mapping &&
		    (exp_info->ops->pin || exp_info->ops->unpin)))
		return ERR_PTR(-EINVAL);

	if (WARN_ON(!exp_info->ops->pin != !exp_info->ops->unpin))
		return ERR_PTR(-EINVAL);

	if (!try_module_get(exp_info->owner))
		return ERR_PTR(-ENOENT);

	dmabuf = kzalloc(alloc_size, GFP_KERNEL);
	if (!dmabuf) {
		ret = -ENOMEM;
		goto err_module;
	}

	dmabuf->priv = exp_info->priv;
	dmabuf->ops = exp_info->ops;
	dmabuf->size = exp_info->size;
	dmabuf->exp_name = exp_info->exp_name;
	dmabuf->owner = exp_info->owner;
	spin_lock_init(&dmabuf->name_lock);
	init_waitqueue_head(&dmabuf->poll);
	dmabuf->cb_excl.poll = dmabuf->cb_shared.poll = &dmabuf->poll;
	dmabuf->cb_excl.active = dmabuf->cb_shared.active = 0;

	if (!resv) {
		resv = (struct dma_resv *)&dmabuf[1];
		dma_resv_init(resv);
	}
	dmabuf->resv = resv;

	file = dma_buf_getfile(dmabuf, exp_info->flags);
	if (IS_ERR(file)) {
		ret = PTR_ERR(file);
		goto err_dmabuf;
	}

	file->f_mode |= FMODE_LSEEK;
	dmabuf->file = file;

	mutex_init(&dmabuf->lock);
	INIT_LIST_HEAD(&dmabuf->attachments);

	mutex_lock(&db_list.lock);
	list_add(&dmabuf->list_node, &db_list.head);
	mutex_unlock(&db_list.lock);

	return dmabuf;

err_dmabuf:
	kfree(dmabuf);
err_module:
	module_put(exp_info->owner);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(dma_buf_export);

/**
 * dma_buf_fd - returns a file descriptor for the given dma_buf
 * @dmabuf:	[in]	pointer to dma_buf for which fd is required.
 * @flags:	[in]	flags to give to fd
 *
 * On success, returns an associated 'fd'. Else, returns error.
 */
int dma_buf_fd(struct dma_buf *dmabuf, int flags)
{
	int fd;

	if (!dmabuf || !dmabuf->file)
		return -EINVAL;

	fd = get_unused_fd_flags(flags);
	if (fd < 0)
		return fd;

	fd_install(fd, dmabuf->file);

	return fd;
}
EXPORT_SYMBOL_GPL(dma_buf_fd);

/**
 * dma_buf_get - returns the dma_buf structure related to an fd
 * @fd:	[in]	fd associated with the dma_buf to be returned
 *
 * On success, returns the dma_buf structure associated with an fd; uses
 * the file's refcounting done by fget() to increase the refcount. Returns
 * ERR_PTR otherwise.
 */
struct dma_buf *dma_buf_get(int fd)
{
	struct file *file;

	file = fget(fd);

	if (!file)
		return ERR_PTR(-EBADF);

	if (!is_dma_buf_file(file)) {
		fput(file);
		return ERR_PTR(-EINVAL);
	}

	return file->private_data;
}
EXPORT_SYMBOL_GPL(dma_buf_get);

/**
 * dma_buf_put - decreases refcount of the buffer
 * @dmabuf:	[in]	buffer to reduce refcount of
 *
 * Uses file's refcounting done implicitly by fput().
 *
 * If, as a result of this call, the refcount becomes 0, the 'release' file
 * operation related to this fd is called. It calls &dma_buf_ops.release vfunc
 * in turn, and frees the memory allocated for dmabuf when exported.
 */
void dma_buf_put(struct dma_buf *dmabuf)
{
	if (WARN_ON(!dmabuf || !dmabuf->file))
		return;

	fput(dmabuf->file);
}
EXPORT_SYMBOL_GPL(dma_buf_put);

/**
 * dma_buf_dynamic_attach - Add the device to dma_buf's attachments list;
 * optionally calls attach() of dma_buf_ops to allow device-specific attach
 * functionality
 * @dmabuf:		[in]	buffer to attach device to.
 * @dev:		[in]	device to be attached.
 * @importer_ops:	[in]	importer operations for the attachment
 * @importer_priv:	[in]	importer private pointer for the attachment
 *
 * Returns struct dma_buf_attachment pointer for this attachment. Attachments
 * must be cleaned up by calling dma_buf_detach().
 *
 * Returns:
 *
 * A pointer to newly created &dma_buf_attachment on success, or a negative
 * error code wrapped into a pointer on failure.
 *
 * Note that this can fail if the backing storage of @dmabuf is in a place not
 * accessible to @dev, and cannot be moved to a more suitable place. This is
 * indicated with the error code -EBUSY.
 */
struct dma_buf_attachment *
dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
		       const struct dma_buf_attach_ops *importer_ops,
		       void *importer_priv)
{
	struct dma_buf_attachment *attach;
	int ret;

	if (WARN_ON(!dmabuf || !dev))
		return ERR_PTR(-EINVAL);

	if (WARN_ON(importer_ops && !importer_ops->move_notify))
		return ERR_PTR(-EINVAL);

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return ERR_PTR(-ENOMEM);

	attach->dev = dev;
	attach->dmabuf = dmabuf;
	if (importer_ops)
		attach->peer2peer = importer_ops->allow_peer2peer;
	attach->importer_ops = importer_ops;
	attach->importer_priv = importer_priv;

	if (dmabuf->ops->attach) {
		ret = dmabuf->ops->attach(dmabuf, attach);
		if (ret)
			goto err_attach;
	}
	dma_resv_lock(dmabuf->resv, NULL);
	list_add(&attach->node, &dmabuf->attachments);
	dma_resv_unlock(dmabuf->resv);

	/* When either the importer or the exporter can't handle dynamic
	 * mappings we cache the mapping here to avoid issues with the
	 * reservation object lock.
	 */
	if (dma_buf_attachment_is_dynamic(attach) !=
	    dma_buf_is_dynamic(dmabuf)) {
		struct sg_table *sgt;

		if (dma_buf_is_dynamic(attach->dmabuf)) {
			dma_resv_lock(attach->dmabuf->resv, NULL);
			ret = dma_buf_pin(attach);
			if (ret)
				goto err_unlock;
		}

		sgt = dmabuf->ops->map_dma_buf(attach, DMA_BIDIRECTIONAL);
		if (!sgt)
			sgt = ERR_PTR(-ENOMEM);
		if (IS_ERR(sgt)) {
			ret = PTR_ERR(sgt);
			goto err_unpin;
		}
		if (dma_buf_is_dynamic(attach->dmabuf))
			dma_resv_unlock(attach->dmabuf->resv);
		attach->sgt = sgt;
		attach->dir = DMA_BIDIRECTIONAL;
	}

	return attach;

err_attach:
	kfree(attach);
	return ERR_PTR(ret);

err_unpin:
	if (dma_buf_is_dynamic(attach->dmabuf))
		dma_buf_unpin(attach);

err_unlock:
	if (dma_buf_is_dynamic(attach->dmabuf))
		dma_resv_unlock(attach->dmabuf->resv);

	dma_buf_detach(dmabuf, attach);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(dma_buf_dynamic_attach);
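
/*
 * A minimal sketch of a dynamic importer using this interface;
 * "my_move_notify" and "my_priv" are hypothetical. The move_notify
 * callback runs with the reservation lock held and must invalidate this
 * importer's cached mappings (re-creating them later is left to the
 * driver)::
 *
 *	static void my_move_notify(struct dma_buf_attachment *attach)
 *	{
 *		// tear down any mappings cached for attach->importer_priv
 *	}
 *
 *	static const struct dma_buf_attach_ops my_importer_ops = {
 *		.allow_peer2peer = true,
 *		.move_notify = my_move_notify,
 *	};
 *
 *	attach = dma_buf_dynamic_attach(dmabuf, dev, &my_importer_ops,
 *					my_priv);
 */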

/**
 * dma_buf_attach - Wrapper for dma_buf_dynamic_attach
 * @dmabuf:	[in]	buffer to attach device to.
 * @dev:	[in]	device to be attached.
 *
 * Wrapper to call dma_buf_dynamic_attach() for drivers which still use a static
 * mapping.
 */
struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
					  struct device *dev)
{
	return dma_buf_dynamic_attach(dmabuf, dev, NULL, NULL);
}
EXPORT_SYMBOL_GPL(dma_buf_attach);
/**
 * dma_buf_detach - Remove the given attachment from dmabuf's attachments list;
 * optionally calls detach() of dma_buf_ops for device-specific detach
 * @dmabuf:	[in]	buffer to detach from.
 * @attach:	[in]	attachment to be detached; is freed after this call.
 *
 * Clean up a device attachment obtained by calling dma_buf_attach().
 */
void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
{
	if (WARN_ON(!dmabuf || !attach))
		return;

	if (attach->sgt) {
		if (dma_buf_is_dynamic(attach->dmabuf))
			dma_resv_lock(attach->dmabuf->resv, NULL);

		dmabuf->ops->unmap_dma_buf(attach, attach->sgt, attach->dir);

		if (dma_buf_is_dynamic(attach->dmabuf)) {
			dma_buf_unpin(attach);
			dma_resv_unlock(attach->dmabuf->resv);
		}
	}

	dma_resv_lock(dmabuf->resv, NULL);
	list_del(&attach->node);
	dma_resv_unlock(dmabuf->resv);
	if (dmabuf->ops->detach)
		dmabuf->ops->detach(dmabuf, attach);

	kfree(attach);
}
EXPORT_SYMBOL_GPL(dma_buf_detach);

/**
 * dma_buf_pin - Lock down the DMA-buf
 *
 * @attach:	[in]	attachment which should be pinned
 *
 * Returns:
 * 0 on success, negative error code on failure.
 */
int dma_buf_pin(struct dma_buf_attachment *attach)
{
	struct dma_buf *dmabuf = attach->dmabuf;
	int ret = 0;

	dma_resv_assert_held(dmabuf->resv);

	if (dmabuf->ops->pin)
		ret = dmabuf->ops->pin(attach);

	return ret;
}
EXPORT_SYMBOL_GPL(dma_buf_pin);

/**
 * dma_buf_unpin - Remove lock from DMA-buf
 *
 * @attach:	[in]	attachment which should be unpinned
 */
void dma_buf_unpin(struct dma_buf_attachment *attach)
{
	struct dma_buf *dmabuf = attach->dmabuf;

	dma_resv_assert_held(dmabuf->resv);

	if (dmabuf->ops->unpin)
		dmabuf->ops->unpin(attach);
}
EXPORT_SYMBOL_GPL(dma_buf_unpin);
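
/*
 * A short sketch of the expected pin/unpin usage: both must be called with
 * the reservation lock held, as the assertions above enforce (error
 * handling omitted)::
 *
 *	dma_resv_lock(dmabuf->resv, NULL);
 *	ret = dma_buf_pin(attach);
 *	// ... set up long-lived DMA while the buffer cannot move ...
 *	dma_buf_unpin(attach);
 *	dma_resv_unlock(dmabuf->resv);
 */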

/**
 * dma_buf_map_attachment - Returns the scatterlist table of the attachment;
 * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the
 * dma_buf_ops.
 * @attach:	[in]	attachment whose scatterlist is to be returned
 * @direction:	[in]	direction of DMA transfer
 *
 * Returns sg_table containing the scatterlist to be returned; returns ERR_PTR
 * on error. May return -EINTR if it is interrupted by a signal.
 *
 * A mapping must be unmapped by using dma_buf_unmap_attachment(). Note that
 * the underlying backing storage is pinned for as long as a mapping exists,
 * therefore users/importers should not hold onto a mapping for undue amounts of
 * time.
 */
struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
					enum dma_data_direction direction)
{
	struct sg_table *sg_table;
	int r;

	might_sleep();

	if (WARN_ON(!attach || !attach->dmabuf))
		return ERR_PTR(-EINVAL);

	if (dma_buf_attachment_is_dynamic(attach))
		dma_resv_assert_held(attach->dmabuf->resv);

	if (attach->sgt) {
		/*
		 * Two mappings with different directions for the same
		 * attachment are not allowed.
		 */
		if (attach->dir != direction &&
		    attach->dir != DMA_BIDIRECTIONAL)
			return ERR_PTR(-EBUSY);

		return attach->sgt;
	}

	if (dma_buf_is_dynamic(attach->dmabuf)) {
		dma_resv_assert_held(attach->dmabuf->resv);
		if (!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) {
			r = dma_buf_pin(attach);
			if (r)
				return ERR_PTR(r);
		}
	}

	sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
	if (!sg_table)
		sg_table = ERR_PTR(-ENOMEM);

	if (IS_ERR(sg_table) && dma_buf_is_dynamic(attach->dmabuf) &&
	    !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
		dma_buf_unpin(attach);

	if (!IS_ERR(sg_table) && attach->dmabuf->ops->cache_sgt_mapping) {
		attach->sgt = sg_table;
		attach->dir = direction;
	}

	return sg_table;
}
EXPORT_SYMBOL_GPL(dma_buf_map_attachment);

/**
 * dma_buf_unmap_attachment - unmaps and decreases usecount of the buffer;
 * might deallocate the associated scatterlist. Is a wrapper for
 * unmap_dma_buf() of dma_buf_ops.
 * @attach:	[in]	attachment to unmap buffer from
 * @sg_table:	[in]	scatterlist info of the buffer to unmap
 * @direction:	[in]	direction of DMA transfer
 *
 * This unmaps a DMA mapping for @attach obtained by dma_buf_map_attachment().
 */
void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
			      struct sg_table *sg_table,
			      enum dma_data_direction direction)
{
	might_sleep();

	if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
		return;

	if (dma_buf_attachment_is_dynamic(attach))
		dma_resv_assert_held(attach->dmabuf->resv);

	if (attach->sgt == sg_table)
		return;

	if (dma_buf_is_dynamic(attach->dmabuf))
		dma_resv_assert_held(attach->dmabuf->resv);

	attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);

	if (dma_buf_is_dynamic(attach->dmabuf) &&
	    !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
		dma_buf_unpin(attach);
}
EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);

/**
 * dma_buf_move_notify - notify attachments that DMA-buf is moving
 *
 * @dmabuf:	[in]	buffer which is moving
 *
 * Informs all attachments that they need to destroy and recreate all their
 * mappings.
 */
void dma_buf_move_notify(struct dma_buf *dmabuf)
{
	struct dma_buf_attachment *attach;

	dma_resv_assert_held(dmabuf->resv);

	list_for_each_entry(attach, &dmabuf->attachments, node)
		if (attach->importer_ops)
			attach->importer_ops->move_notify(attach);
}
EXPORT_SYMBOL_GPL(dma_buf_move_notify);
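
/*
 * Exporter-side sketch (with a hypothetical "my_evict" helper): an
 * exporter that wants to move the backing storage is expected to do so
 * under the reservation lock and tell the importers first, roughly::
 *
 *	dma_resv_lock(dmabuf->resv, NULL);
 *	dma_buf_move_notify(dmabuf);
 *	my_evict(dmabuf->priv);	// actually move/evict the backing storage
 *	dma_resv_unlock(dmabuf->resv);
 */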

/**
 * DOC: cpu access
 *
 * There are multiple reasons for supporting CPU access to a dma buffer object:
 *
 * - Fallback operations in the kernel, for example when a device is connected
 *   over USB and the kernel needs to shuffle the data around first before
 *   sending it away. Cache coherency is handled by bracketing any transactions
 *   with calls to dma_buf_begin_cpu_access() and dma_buf_end_cpu_access().
 *
 *   Since most kernel-internal dma-buf accesses need the entire buffer, a
 *   vmap interface is introduced. Note that on very old 32-bit architectures
 *   vmalloc space might be limited and result in vmap calls failing.
 *
 *   Interfaces::
 *      void \*dma_buf_vmap(struct dma_buf \*dmabuf)
 *      void dma_buf_vunmap(struct dma_buf \*dmabuf, void \*vaddr)
 *
 *   The vmap call can fail if there is no vmap support in the exporter, or if
 *   it runs out of vmalloc space. Fallback to kmap should be implemented. Note
 *   that the dma-buf layer keeps a reference count for all vmap access and
 *   calls down into the exporter's vmap function only when no vmapping exists,
 *   and only unmaps it once. Protection against concurrent vmap/vunmap calls is
 *   provided by taking the dma_buf->lock mutex.
 *
 * - For full compatibility on the importer side with existing userspace
 *   interfaces, which might already support mmap'ing buffers. This is needed in
 *   many processing pipelines (e.g. feeding a software rendered image into a
 *   hardware pipeline, thumbnail creation, snapshots, ...). Also, Android's ION
 *   framework already supported this and for DMA buffer file descriptors to
 *   replace ION buffers mmap support was needed.
 *
 *   There are no special interfaces, userspace simply calls mmap on the dma-buf
 *   fd. But like for CPU access there's a need to bracket the actual access,
 *   which is handled by the ioctl (DMA_BUF_IOCTL_SYNC). Note that
 *   DMA_BUF_IOCTL_SYNC can fail with -EAGAIN or -EINTR, in which case it must
 *   be restarted.
 *
 *   Some systems might need some sort of cache coherency management e.g. when
 *   CPU and GPU domains are being accessed through dma-buf at the same time.
 *   To circumvent this problem there are begin/end coherency markers, that
 *   forward directly to existing dma-buf device drivers vfunc hooks. Userspace
 *   can make use of those markers through the DMA_BUF_IOCTL_SYNC ioctl. The
 *   sequence would be used like the following (a minimal userspace sketch is
 *   included after this comment):
 *
 *     - mmap dma-buf fd
 *     - for each drawing/upload cycle in CPU 1. SYNC_START ioctl, 2. read/write
 *       to mmap area 3. SYNC_END ioctl. This can be repeated as often as you
 *       want (with the new data being consumed by say the GPU or the scanout
 *       device)
 *     - munmap once you don't need the buffer any more
 *
 *    For correctness and optimal performance, it is always required to use
 *    SYNC_START and SYNC_END before and after, respectively, when accessing the
 *    mapped address. Userspace cannot rely on coherent access, even when there
 *    are systems where it just works without calling these ioctls.
 *
 * - And as a CPU fallback in userspace processing pipelines.
 *
 *   Similar to the motivation for kernel cpu access it is again important that
 *   the userspace code of a given importing subsystem can use the same
 *   interfaces with an imported dma-buf buffer object as with a native buffer
 *   object. This is especially important for drm where the userspace part of
 *   contemporary OpenGL, X, and other drivers is huge, and reworking them to
 *   use a different way to mmap a buffer would be rather invasive.
 *
 *   The assumption in the current dma-buf interfaces is that redirecting the
 *   initial mmap is all that's needed. A survey of some of the existing
 *   subsystems shows that no driver seems to do any nefarious thing like
 *   syncing up with outstanding asynchronous processing on the device or
 *   allocating special resources at fault time. So hopefully this is good
 *   enough, since adding interfaces to intercept pagefaults and allow pte
 *   shootdowns would increase the complexity quite a bit.
 *
 *   Interface::
 *      int dma_buf_mmap(struct dma_buf \*, struct vm_area_struct \*,
 *		       unsigned long);
 *
 *   If the importing subsystem simply provides a special-purpose mmap call to
 *   set up a mapping in userspace, calling do_mmap with dma_buf->file will
 *   equally achieve that for a dma-buf object.
 */
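
/*
 * A minimal userspace sketch of the SYNC_START/SYNC_END bracketing
 * described above ("fd" is an assumed dma-buf file descriptor; error
 * handling and the final munmap are omitted)::
 *
 *	struct dma_buf_sync sync = { 0 };
 *	char *map = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, 0);
 *
 *	sync.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_RW;
 *	ioctl(fd, DMA_BUF_IOCTL_SYNC, &sync);
 *
 *	// ... CPU reads/writes through "map" ...
 *
 *	sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_RW;
 *	ioctl(fd, DMA_BUF_IOCTL_SYNC, &sync);
 */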

static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
				      enum dma_data_direction direction)
{
	bool write = (direction == DMA_BIDIRECTIONAL ||
		      direction == DMA_TO_DEVICE);
	struct dma_resv *resv = dmabuf->resv;
	long ret;

	/* Wait on any implicit rendering fences */
	ret = dma_resv_wait_timeout_rcu(resv, write, true,
					MAX_SCHEDULE_TIMEOUT);
	if (ret < 0)
		return ret;

	return 0;
}
/**
 * dma_buf_begin_cpu_access - Must be called before accessing a dma_buf from the
 * cpu in the kernel context. Calls begin_cpu_access to allow exporter-specific
 * preparations. Coherency is only guaranteed in the specified range for the
 * specified access direction.
 * @dmabuf:	[in]	buffer to prepare cpu access for.
 * @direction:	[in]	direction of access.
 *
 * After the cpu access is complete the caller should call
 * dma_buf_end_cpu_access(). Only when cpu access is bracketed by both calls is
 * it guaranteed to be coherent with other DMA access.
 *
 * Can return negative error values, returns 0 on success.
 */
int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
			     enum dma_data_direction direction)
{
	int ret = 0;

	if (WARN_ON(!dmabuf))
		return -EINVAL;

	if (dmabuf->ops->begin_cpu_access)
		ret = dmabuf->ops->begin_cpu_access(dmabuf, direction);

	/* Ensure that all fences are waited upon - but we first allow
	 * the native handler the chance to do so more efficiently if it
	 * chooses. A double invocation here will be a reasonably cheap no-op.
	 */
	if (ret == 0)
		ret = __dma_buf_begin_cpu_access(dmabuf, direction);

	return ret;
}
EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access);

/**
 * dma_buf_end_cpu_access - Must be called after accessing a dma_buf from the
 * cpu in the kernel context. Calls end_cpu_access to allow exporter-specific
 * actions. Coherency is only guaranteed in the specified range for the
 * specified access direction.
 * @dmabuf:	[in]	buffer to complete cpu access for.
 * @direction:	[in]	direction of access.
 *
 * This terminates CPU access started with dma_buf_begin_cpu_access().
 *
 * Can return negative error values, returns 0 on success.
 */
int dma_buf_end_cpu_access(struct dma_buf *dmabuf,
			   enum dma_data_direction direction)
{
	int ret = 0;

	WARN_ON(!dmabuf);

	if (dmabuf->ops->end_cpu_access)
		ret = dmabuf->ops->end_cpu_access(dmabuf, direction);

	return ret;
}
EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access);

/**
 * dma_buf_mmap - Set up a userspace mmap with the given vma
 * @dmabuf:	[in]	buffer that should back the vma
 * @vma:	[in]	vma for the mmap
 * @pgoff:	[in]	offset in pages where this mmap should start within the
 *			dma-buf buffer.
 *
 * This function adjusts the passed in vma so that it points at the file of the
 * dma_buf operation. It also adjusts the starting pgoff and does bounds
 * checking on the size of the vma. Then it calls the exporter's mmap function
 * to set up the mapping.
 *
 * Can return negative error values, returns 0 on success.
 */
int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
		 unsigned long pgoff)
{
	struct file *oldfile;
	int ret;

	if (WARN_ON(!dmabuf || !vma))
		return -EINVAL;

	/* check if buffer supports mmap */
	if (!dmabuf->ops->mmap)
		return -EINVAL;

	/* check for offset overflow */
	if (pgoff + vma_pages(vma) < pgoff)
		return -EOVERFLOW;

	/* check for overflowing the buffer's size */
	if (pgoff + vma_pages(vma) >
	    dmabuf->size >> PAGE_SHIFT)
		return -EINVAL;

	/* readjust the vma */
	get_file(dmabuf->file);
	oldfile = vma->vm_file;
	vma->vm_file = dmabuf->file;
	vma->vm_pgoff = pgoff;

	ret = dmabuf->ops->mmap(dmabuf, vma);
	if (ret) {
		/* restore old parameters on failure */
		vma->vm_file = oldfile;
		fput(dmabuf->file);
	} else {
		if (oldfile)
			fput(oldfile);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(dma_buf_mmap);
/**
 * dma_buf_vmap - Create virtual mapping for the buffer object into kernel
 * address space. Same restrictions as for vmap and friends apply.
 * @dmabuf:	[in]	buffer to vmap
 *
 * This call may fail due to lack of virtual mapping address space.
 * These calls are optional in drivers. The intended use is mapping objects
 * linearly into kernel address space for frequently accessed objects.
 * Please attempt to use kmap/kunmap before thinking about these interfaces.
 *
 * Returns NULL on error.
 */
void *dma_buf_vmap(struct dma_buf *dmabuf)
{
	void *ptr;

	if (WARN_ON(!dmabuf))
		return NULL;

	if (!dmabuf->ops->vmap)
		return NULL;

	mutex_lock(&dmabuf->lock);
	if (dmabuf->vmapping_counter) {
		dmabuf->vmapping_counter++;
		BUG_ON(!dmabuf->vmap_ptr);
		ptr = dmabuf->vmap_ptr;
		goto out_unlock;
	}

	BUG_ON(dmabuf->vmap_ptr);

	ptr = dmabuf->ops->vmap(dmabuf);
	if (WARN_ON_ONCE(IS_ERR(ptr)))
		ptr = NULL;
	if (!ptr)
		goto out_unlock;

	dmabuf->vmap_ptr = ptr;
	dmabuf->vmapping_counter = 1;

out_unlock:
	mutex_unlock(&dmabuf->lock);
	return ptr;
}
EXPORT_SYMBOL_GPL(dma_buf_vmap);
/**
 * dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap.
 * @dmabuf:	[in]	buffer to vunmap
 * @vaddr:	[in]	vmap to vunmap
 */
void dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
{
	if (WARN_ON(!dmabuf))
		return;

	BUG_ON(!dmabuf->vmap_ptr);
	BUG_ON(dmabuf->vmapping_counter == 0);
	BUG_ON(dmabuf->vmap_ptr != vaddr);

	mutex_lock(&dmabuf->lock);
	if (--dmabuf->vmapping_counter == 0) {
		if (dmabuf->ops->vunmap)
			dmabuf->ops->vunmap(dmabuf, vaddr);
		dmabuf->vmap_ptr = NULL;
	}
	mutex_unlock(&dmabuf->lock);
}
EXPORT_SYMBOL_GPL(dma_buf_vunmap);
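
/*
 * A short sketch of the vmap/vunmap pairing for kernel CPU access;
 * "dmabuf" is an assumed, already-imported buffer, and the access itself
 * should still be bracketed with dma_buf_begin_cpu_access() and
 * dma_buf_end_cpu_access()::
 *
 *	void *vaddr = dma_buf_vmap(dmabuf);
 *
 *	if (vaddr) {
 *		// ... CPU access through vaddr ...
 *		dma_buf_vunmap(dmabuf, vaddr);
 *	}
 */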

#ifdef CONFIG_DEBUG_FS
static int dma_buf_debug_show(struct seq_file *s, void *unused)
{
	int ret;
	struct dma_buf *buf_obj;
	struct dma_buf_attachment *attach_obj;
	struct dma_resv *robj;
	struct dma_resv_list *fobj;
	struct dma_fence *fence;
	unsigned seq;
	int count = 0, attach_count, shared_count, i;
	size_t size = 0;

	ret = mutex_lock_interruptible(&db_list.lock);

	if (ret)
		return ret;

	seq_puts(s, "\nDma-buf Objects:\n");
	seq_printf(s, "%-8s\t%-8s\t%-8s\t%-8s\texp_name\t%-8s\n",
		   "size", "flags", "mode", "count", "ino");

	list_for_each_entry(buf_obj, &db_list.head, list_node) {

		ret = dma_resv_lock_interruptible(buf_obj->resv, NULL);
		if (ret)
			goto error_unlock;

		seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\t%08lu\t%s\n",
				buf_obj->size,
				buf_obj->file->f_flags, buf_obj->file->f_mode,
				file_count(buf_obj->file),
				buf_obj->exp_name,
				file_inode(buf_obj->file)->i_ino,
				buf_obj->name ?: "");

		robj = buf_obj->resv;
		while (true) {
			seq = read_seqcount_begin(&robj->seq);
			rcu_read_lock();
			fobj = rcu_dereference(robj->fence);
			shared_count = fobj ? fobj->shared_count : 0;
			fence = rcu_dereference(robj->fence_excl);
			if (!read_seqcount_retry(&robj->seq, seq))
				break;
			rcu_read_unlock();
		}

		if (fence)
			seq_printf(s, "\tExclusive fence: %s %s %ssignalled\n",
				   fence->ops->get_driver_name(fence),
				   fence->ops->get_timeline_name(fence),
				   dma_fence_is_signaled(fence) ? "" : "un");
		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			if (!dma_fence_get_rcu(fence))
				continue;
			seq_printf(s, "\tShared fence: %s %s %ssignalled\n",
				   fence->ops->get_driver_name(fence),
				   fence->ops->get_timeline_name(fence),
				   dma_fence_is_signaled(fence) ? "" : "un");
			dma_fence_put(fence);
		}
		rcu_read_unlock();

		seq_puts(s, "\tAttached Devices:\n");
		attach_count = 0;

		list_for_each_entry(attach_obj, &buf_obj->attachments, node) {
			seq_printf(s, "\t%s\n", dev_name(attach_obj->dev));
			attach_count++;
		}
		dma_resv_unlock(buf_obj->resv);

		seq_printf(s, "Total %d devices attached\n\n",
			   attach_count);

		count++;
		size += buf_obj->size;
	}

	seq_printf(s, "\nTotal %d objects, %zu bytes\n", count, size);

	mutex_unlock(&db_list.lock);
	return 0;

error_unlock:
	mutex_unlock(&db_list.lock);
	return ret;
}

DEFINE_SHOW_ATTRIBUTE(dma_buf_debug);

static struct dentry *dma_buf_debugfs_dir;

static int dma_buf_init_debugfs(void)
{
	struct dentry *d;
	int err = 0;

	d = debugfs_create_dir("dma_buf", NULL);
	if (IS_ERR(d))
		return PTR_ERR(d);

	dma_buf_debugfs_dir = d;

	d = debugfs_create_file("bufinfo", S_IRUGO, dma_buf_debugfs_dir,
				NULL, &dma_buf_debug_fops);
	if (IS_ERR(d)) {
		pr_debug("dma_buf: debugfs: failed to create node bufinfo\n");
		debugfs_remove_recursive(dma_buf_debugfs_dir);
		dma_buf_debugfs_dir = NULL;
		err = PTR_ERR(d);
	}

	return err;
}

static void dma_buf_uninit_debugfs(void)
{
	debugfs_remove_recursive(dma_buf_debugfs_dir);
}
#else
static inline int dma_buf_init_debugfs(void)
{
	return 0;
}
static inline void dma_buf_uninit_debugfs(void)
{
}
#endif

static int __init dma_buf_init(void)
{
	dma_buf_mnt = kern_mount(&dma_buf_fs_type);
	if (IS_ERR(dma_buf_mnt))
		return PTR_ERR(dma_buf_mnt);

	mutex_init(&db_list.lock);
	INIT_LIST_HEAD(&db_list.head);
	dma_buf_init_debugfs();
	return 0;
}
subsys_initcall(dma_buf_init);

static void __exit dma_buf_deinit(void)
{
	dma_buf_uninit_debugfs();
	kern_unmount(dma_buf_mnt);
}
__exitcall(dma_buf_deinit);