/*
 * Copyright 2017 Red Hat
 * Parts ported from amdgpu (fence wait code).
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *
 */

/**
 * DOC: Overview
 *
 * DRM synchronisation objects (syncobj, see struct &drm_syncobj) provide a
 * container for a synchronization primitive which can be used by userspace
 * to explicitly synchronize GPU commands, can be shared between userspace
 * processes, and can be shared between different DRM drivers.
 * Their primary use-case is to implement Vulkan fences and semaphores.
 * The syncobj userspace API provides ioctls for several operations:
 *
 * - Creation and destruction of syncobjs
 * - Import and export of syncobjs to/from a syncobj file descriptor
 * - Import and export a syncobj's underlying fence to/from a sync file
 * - Reset a syncobj (set its fence to NULL)
 * - Signal a syncobj (set a trivially signaled fence)
 * - Wait for a syncobj's fence to appear and be signaled
 *
 * At its core, a syncobj is simply a wrapper around a pointer to a struct
 * &dma_fence which may be NULL.
 * When a syncobj is first created, its pointer is either NULL or a pointer
 * to an already signaled fence depending on whether the
 * &DRM_SYNCOBJ_CREATE_SIGNALED flag is passed to
 * &DRM_IOCTL_SYNCOBJ_CREATE.
 * When GPU work which signals a syncobj is enqueued in a DRM driver,
 * the syncobj fence is replaced with a fence which will be signaled by the
 * completion of that work.
 * When GPU work which waits on a syncobj is enqueued in a DRM driver, the
 * driver retrieves the syncobj's current fence at the time the work is
 * enqueued and waits on that fence before submitting the work to hardware.
 * If the syncobj's fence is NULL, the enqueue operation is expected to fail.
 * All manipulation of the syncobj's fence happens in terms of the current
 * fence at the time the ioctl is called by userspace regardless of whether
 * that operation is an immediate host-side operation (signal or reset) or
 * an operation which is enqueued in some driver queue.
 * &DRM_IOCTL_SYNCOBJ_RESET and &DRM_IOCTL_SYNCOBJ_SIGNAL can be used to
 * manipulate a syncobj from the host by resetting its pointer to NULL or
 * setting its pointer to a fence which is already signaled.
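 *
 * As a concrete illustration, a userspace client could create a pre-signaled
 * syncobj roughly as follows (a minimal sketch assuming an already-open DRM
 * file descriptor drm_fd; error handling elided):
 *
 *     struct drm_syncobj_create create = {
 *             .flags = DRM_SYNCOBJ_CREATE_SIGNALED,
 *     };
 *     int ret = ioctl(drm_fd, DRM_IOCTL_SYNCOBJ_CREATE, &create);
 *     // on success, create.handle names the new syncobj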
 *
 *
 * Host-side wait on syncobjs
 * --------------------------
 *
 * &DRM_IOCTL_SYNCOBJ_WAIT takes an array of syncobj handles and does a
 * host-side wait on all of the syncobj fences simultaneously.
 * If &DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL is set, the wait ioctl will wait on
 * all of the syncobj fences to be signaled before it returns.
 * Otherwise, it returns once at least one syncobj fence has been signaled
 * and the index of a signaled fence is written back to the client.
 *
 * Unlike the enqueued GPU work dependencies, which fail if they see a NULL
 * fence in a syncobj, if &DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT is set,
 * the host-side wait will first wait for the syncobj to receive a non-NULL
 * fence and then wait on that fence.
 * If &DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT is not set and any one of the
 * syncobjs in the array has a NULL fence, -EINVAL will be returned.
 * Assuming the syncobj starts off with a NULL fence, this allows a client
 * to do a host wait in one thread (or process) which waits on GPU work
 * submitted in another thread (or process) without having to manually
 * synchronize between the two.
 * This requirement is inherited from the Vulkan fence API.
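 *
 * From userspace, a single-handle wait might look like this (a sketch only;
 * handle and deadline_ns are assumed to exist, error handling elided):
 *
 *     struct drm_syncobj_wait wait = {
 *             .handles = (__u64)(uintptr_t)&handle,
 *             .timeout_nsec = deadline_ns,   // absolute CLOCK_MONOTONIC time
 *             .count_handles = 1,
 *             .flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
 *     };
 *     int ret = ioctl(drm_fd, DRM_IOCTL_SYNCOBJ_WAIT, &wait);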
 *
 *
 * Import/export of syncobjs
 * -------------------------
 *
 * &DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE and &DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD
 * provide two mechanisms for import/export of syncobjs.
 *
 * The first lets the client import or export an entire syncobj to a file
 * descriptor.
 * These fds are opaque and have no use other than passing the syncobj
 * between processes.
 * All exported file descriptors and any syncobj handles created as a
 * result of importing those file descriptors own a reference to the
 * same underlying struct &drm_syncobj and the syncobj can be used
 * persistently across all the processes with which it is shared.
 * The syncobj is freed only once the last reference is dropped.
 * Unlike dma-buf, importing a syncobj creates a new handle (with its own
 * reference) for every import instead of de-duplicating.
 * The primary use-case of this persistent import/export is for shared
 * Vulkan fences and semaphores.
 *
 * The second import/export mechanism, which is indicated by
 * &DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE or
 * &DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE, lets the client
 * import/export the syncobj's current fence from/to a &sync_file.
 * When a syncobj is exported to a sync file, that sync file wraps the
 * syncobj's fence at the time of export and any later signal or reset
 * operations on the syncobj will not affect the exported sync file.
 * When a sync file is imported into a syncobj, the syncobj's fence is set
 * to the fence wrapped by that sync file.
 * Because sync files are immutable, resetting or signaling the syncobj
 * will not affect any sync files whose fences have been imported into the
 * syncobj.
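 *
 * For instance, exporting the current fence to a sync file could be done
 * like this (again a sketch; handle and drm_fd are assumed, no error
 * handling):
 *
 *     struct drm_syncobj_handle args = {
 *             .handle = handle,
 *             .flags = DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE,
 *     };
 *     int ret = ioctl(drm_fd, DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, &args);
 *     // on success, args.fd is a sync file wrapping the current fence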
 */

#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/sched/signal.h>
#include <linux/sync_file.h>
#include <linux/uaccess.h>

#include <drm/drm.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_print.h>
#include <drm/drm_syncobj.h>

#include "drm_internal.h"

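/*
 * Bookkeeping for one host-side waiter: an entry sits on the syncobj's
 * cb_list until a fence matching the requested timeline point shows up.
 */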
141struct syncobj_wait_entry {
142 struct list_head node;
143 struct task_struct *task;
144 struct dma_fence *fence;
145 struct dma_fence_cb fence_cb;
146 u64 point;
147};
148
149static void syncobj_wait_syncobj_func(struct drm_syncobj *syncobj,
150 struct syncobj_wait_entry *wait);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000151
/**
 * drm_syncobj_find - lookup and reference a sync object.
 * @file_private: drm file private pointer
 * @handle: sync object handle to lookup.
 *
 * Returns a reference to the syncobj pointed to by handle or NULL. The
 * reference must be released by calling drm_syncobj_put().
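 *
 * A typical driver-side lookup might be (sketch only, error handling
 * elided):
 *
 *     struct drm_syncobj *syncobj = drm_syncobj_find(file_priv, handle);
 *
 *     if (!syncobj)
 *             return -ENOENT;
 *     // ... use the syncobj ...
 *     drm_syncobj_put(syncobj);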
 */
struct drm_syncobj *drm_syncobj_find(struct drm_file *file_private,
                                     u32 handle)
{
        struct drm_syncobj *syncobj;

        spin_lock(&file_private->syncobj_table_lock);

        /* Check if we currently have a reference on the object */
        syncobj = idr_find(&file_private->syncobj_idr, handle);
        if (syncobj)
                drm_syncobj_get(syncobj);

        spin_unlock(&file_private->syncobj_table_lock);

        return syncobj;
}
EXPORT_SYMBOL(drm_syncobj_find);

static void drm_syncobj_fence_add_wait(struct drm_syncobj *syncobj,
                                       struct syncobj_wait_entry *wait)
{
        struct dma_fence *fence;

        if (wait->fence)
                return;

        spin_lock(&syncobj->lock);
        /* We've already tried once to get a fence and failed. Now that we
         * have the lock, try one more time just to be sure we don't add a
         * callback when a fence has already been set.
         */
        fence = dma_fence_get(rcu_dereference_protected(syncobj->fence, 1));
        if (!fence || dma_fence_chain_find_seqno(&fence, wait->point)) {
                dma_fence_put(fence);
                list_add_tail(&wait->node, &syncobj->cb_list);
        } else if (!fence) {
                /* dma_fence_chain_find_seqno() may set fence to NULL when
                 * the requested point is already signaled.
                 */
                wait->fence = dma_fence_get_stub();
        } else {
                wait->fence = fence;
        }
        spin_unlock(&syncobj->lock);
}

static void drm_syncobj_remove_wait(struct drm_syncobj *syncobj,
                                    struct syncobj_wait_entry *wait)
{
        if (!wait->node.next)
                return;

        spin_lock(&syncobj->lock);
        list_del_init(&wait->node);
        spin_unlock(&syncobj->lock);
}

/**
 * drm_syncobj_add_point - add new timeline point to the syncobj
 * @syncobj: sync object to add timeline point to
 * @chain: chain node to use to add the point
 * @fence: fence to encapsulate in the chain node
 * @point: sequence number to use for the point
 *
 * Add the chain node as a new timeline point to the syncobj.
 */
void drm_syncobj_add_point(struct drm_syncobj *syncobj,
                           struct dma_fence_chain *chain,
                           struct dma_fence *fence,
                           uint64_t point)
{
        struct syncobj_wait_entry *cur, *tmp;
        struct dma_fence *prev;

        dma_fence_get(fence);

        spin_lock(&syncobj->lock);

        prev = drm_syncobj_fence_get(syncobj);
        /* Adding an unordered point to the timeline could cause the payload
         * returned from query_ioctl to be 0!
         */
        if (prev && prev->seqno >= point)
                DRM_ERROR("You are adding an unordered point to timeline!\n");
        dma_fence_chain_init(chain, prev, fence, point);
        rcu_assign_pointer(syncobj->fence, &chain->base);

        list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node)
                syncobj_wait_syncobj_func(syncobj, cur);
        spin_unlock(&syncobj->lock);

        /* Walk the chain once to trigger garbage collection */
        dma_fence_chain_for_each(fence, prev);
        dma_fence_put(prev);
}
EXPORT_SYMBOL(drm_syncobj_add_point);
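
/*
 * A hedged driver-side sketch of adding a timeline point (hypothetical
 * snippet, mirroring drm_syncobj_transfer_to_timeline() below): the chain
 * node is allocated up front so that drm_syncobj_add_point() itself cannot
 * fail.
 *
 *     struct dma_fence_chain *chain;
 *
 *     chain = kzalloc(sizeof(*chain), GFP_KERNEL);
 *     if (!chain)
 *             return -ENOMEM;
 *     drm_syncobj_add_point(syncobj, chain, fence, point);
 */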

/**
 * drm_syncobj_replace_fence - replace fence in a sync object.
 * @syncobj: Sync object to replace fence in
 * @fence: fence to install in the sync object.
 *
 * This replaces the fence on a sync object.
 */
void drm_syncobj_replace_fence(struct drm_syncobj *syncobj,
                               struct dma_fence *fence)
{
        struct dma_fence *old_fence;
        struct syncobj_wait_entry *cur, *tmp;

        if (fence)
                dma_fence_get(fence);

        spin_lock(&syncobj->lock);

        old_fence = rcu_dereference_protected(syncobj->fence,
                                              lockdep_is_held(&syncobj->lock));
        rcu_assign_pointer(syncobj->fence, fence);

        if (fence != old_fence) {
                list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node)
                        syncobj_wait_syncobj_func(syncobj, cur);
        }

        spin_unlock(&syncobj->lock);

        dma_fence_put(old_fence);
}
EXPORT_SYMBOL(drm_syncobj_replace_fence);

/**
 * drm_syncobj_assign_null_handle - assign a stub fence to the sync object
 * @syncobj: sync object to assign the fence on
 *
 * Assign an already signaled stub fence to the sync object.
 */
static void drm_syncobj_assign_null_handle(struct drm_syncobj *syncobj)
{
        struct dma_fence *fence = dma_fence_get_stub();

        drm_syncobj_replace_fence(syncobj, fence);
        dma_fence_put(fence);
}

/* 5s default for wait submission */
#define DRM_SYNCOBJ_WAIT_FOR_SUBMIT_TIMEOUT 5000000000ULL
/**
 * drm_syncobj_find_fence - lookup and reference the fence in a sync object
 * @file_private: drm file private pointer
 * @handle: sync object handle to lookup.
 * @point: timeline point
 * @flags: DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT or 0
 * @fence: out parameter for the fence
 *
 * This is just a convenience function that combines drm_syncobj_find() and
 * drm_syncobj_fence_get().
 *
 * Returns 0 on success or a negative error value on failure. On success @fence
 * contains a reference to the fence, which must be released by calling
 * dma_fence_put().
 */
int drm_syncobj_find_fence(struct drm_file *file_private,
                           u32 handle, u64 point, u64 flags,
                           struct dma_fence **fence)
{
        struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle);
        struct syncobj_wait_entry wait;
        u64 timeout = nsecs_to_jiffies64(DRM_SYNCOBJ_WAIT_FOR_SUBMIT_TIMEOUT);
        int ret;

        if (!syncobj)
                return -ENOENT;

        *fence = drm_syncobj_fence_get(syncobj);

        if (*fence) {
                ret = dma_fence_chain_find_seqno(fence, point);
                if (!ret)
                        goto out;
                dma_fence_put(*fence);
        } else {
                ret = -EINVAL;
        }

        if (!(flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT))
                goto out;

        memset(&wait, 0, sizeof(wait));
        wait.task = current;
        wait.point = point;
        drm_syncobj_fence_add_wait(syncobj, &wait);

        do {
                set_current_state(TASK_INTERRUPTIBLE);
                if (wait.fence) {
                        ret = 0;
                        break;
                }
                if (timeout == 0) {
                        ret = -ETIME;
                        break;
                }

                if (signal_pending(current)) {
                        ret = -ERESTARTSYS;
                        break;
                }

                timeout = schedule_timeout(timeout);
        } while (1);

        __set_current_state(TASK_RUNNING);
        *fence = wait.fence;

        if (wait.node.next)
                drm_syncobj_remove_wait(syncobj, &wait);

out:
        drm_syncobj_put(syncobj);

        return ret;
}
EXPORT_SYMBOL(drm_syncobj_find_fence);

/**
 * drm_syncobj_free - free a sync object.
 * @kref: kref to free.
 *
 * Only to be called from kref_put in drm_syncobj_put.
 */
void drm_syncobj_free(struct kref *kref)
{
        struct drm_syncobj *syncobj = container_of(kref,
                                                   struct drm_syncobj,
                                                   refcount);
        drm_syncobj_replace_fence(syncobj, NULL);
        kfree(syncobj);
}
EXPORT_SYMBOL(drm_syncobj_free);

/**
 * drm_syncobj_create - create a new syncobj
 * @out_syncobj: returned syncobj
 * @flags: DRM_SYNCOBJ_* flags
 * @fence: if non-NULL, the syncobj will represent this fence
 *
 * This is the first function to create a sync object. After creating, drivers
 * probably want to make it available to userspace, either through
 * drm_syncobj_get_handle() or drm_syncobj_get_fd().
 *
 * Returns 0 on success or a negative error value on failure.
 */
int drm_syncobj_create(struct drm_syncobj **out_syncobj, uint32_t flags,
                       struct dma_fence *fence)
{
        struct drm_syncobj *syncobj;

        syncobj = kzalloc(sizeof(struct drm_syncobj), GFP_KERNEL);
        if (!syncobj)
                return -ENOMEM;

        kref_init(&syncobj->refcount);
        INIT_LIST_HEAD(&syncobj->cb_list);
        spin_lock_init(&syncobj->lock);

        if (flags & DRM_SYNCOBJ_CREATE_SIGNALED)
                drm_syncobj_assign_null_handle(syncobj);

        if (fence)
                drm_syncobj_replace_fence(syncobj, fence);

        *out_syncobj = syncobj;
        return 0;
}
EXPORT_SYMBOL(drm_syncobj_create);
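
/*
 * A minimal sketch of the usual driver pattern for exposing a new syncobj
 * to userspace (hypothetical snippet; drm_syncobj_create_as_handle() below
 * is the in-file equivalent, error handling elided):
 *
 *     struct drm_syncobj *syncobj;
 *     u32 handle;
 *
 *     drm_syncobj_create(&syncobj, 0, NULL);
 *     drm_syncobj_get_handle(file_private, syncobj, &handle);
 *     drm_syncobj_put(syncobj);
 */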

/**
 * drm_syncobj_get_handle - get a handle from a syncobj
 * @file_private: drm file private pointer
 * @syncobj: Sync object to export
 * @handle: out parameter with the new handle
 *
 * Exports a sync object created with drm_syncobj_create() as a handle on
 * @file_private to userspace.
 *
 * Returns 0 on success or a negative error value on failure.
 */
int drm_syncobj_get_handle(struct drm_file *file_private,
                           struct drm_syncobj *syncobj, u32 *handle)
{
        int ret;

        /* take a reference to put in the idr */
        drm_syncobj_get(syncobj);

        idr_preload(GFP_KERNEL);
        spin_lock(&file_private->syncobj_table_lock);
        ret = idr_alloc(&file_private->syncobj_idr, syncobj, 1, 0, GFP_NOWAIT);
        spin_unlock(&file_private->syncobj_table_lock);

        idr_preload_end();

        if (ret < 0) {
                drm_syncobj_put(syncobj);
                return ret;
        }

        *handle = ret;
        return 0;
}
EXPORT_SYMBOL(drm_syncobj_get_handle);

static int drm_syncobj_create_as_handle(struct drm_file *file_private,
                                        u32 *handle, uint32_t flags)
{
        int ret;
        struct drm_syncobj *syncobj;

        ret = drm_syncobj_create(&syncobj, flags, NULL);
        if (ret)
                return ret;

        ret = drm_syncobj_get_handle(file_private, syncobj, handle);
        drm_syncobj_put(syncobj);
        return ret;
}

static int drm_syncobj_destroy(struct drm_file *file_private,
                               u32 handle)
{
        struct drm_syncobj *syncobj;

        spin_lock(&file_private->syncobj_table_lock);
        syncobj = idr_remove(&file_private->syncobj_idr, handle);
        spin_unlock(&file_private->syncobj_table_lock);

        if (!syncobj)
                return -EINVAL;

        drm_syncobj_put(syncobj);
        return 0;
}

static int drm_syncobj_file_release(struct inode *inode, struct file *file)
{
        struct drm_syncobj *syncobj = file->private_data;

        drm_syncobj_put(syncobj);
        return 0;
}

static const struct file_operations drm_syncobj_file_fops = {
        .release = drm_syncobj_file_release,
};

/**
 * drm_syncobj_get_fd - get a file descriptor from a syncobj
 * @syncobj: Sync object to export
 * @p_fd: out parameter with the new file descriptor
 *
 * Exports a sync object created with drm_syncobj_create() as a file descriptor.
 *
 * Returns 0 on success or a negative error value on failure.
 */
int drm_syncobj_get_fd(struct drm_syncobj *syncobj, int *p_fd)
{
        struct file *file;
        int fd;

        fd = get_unused_fd_flags(O_CLOEXEC);
        if (fd < 0)
                return fd;

        file = anon_inode_getfile("syncobj_file",
                                  &drm_syncobj_file_fops,
                                  syncobj, 0);
        if (IS_ERR(file)) {
                put_unused_fd(fd);
                return PTR_ERR(file);
        }

        drm_syncobj_get(syncobj);
        fd_install(fd, file);

        *p_fd = fd;
        return 0;
}
EXPORT_SYMBOL(drm_syncobj_get_fd);

static int drm_syncobj_handle_to_fd(struct drm_file *file_private,
                                    u32 handle, int *p_fd)
{
        struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle);
        int ret;

        if (!syncobj)
                return -EINVAL;

        ret = drm_syncobj_get_fd(syncobj, p_fd);
        drm_syncobj_put(syncobj);
        return ret;
}

static int drm_syncobj_fd_to_handle(struct drm_file *file_private,
                                    int fd, u32 *handle)
{
        struct drm_syncobj *syncobj;
        struct fd f = fdget(fd);
        int ret;

        if (!f.file)
                return -EINVAL;

        if (f.file->f_op != &drm_syncobj_file_fops) {
                fdput(f);
                return -EINVAL;
        }

        /* take a reference to put in the idr */
        syncobj = f.file->private_data;
        drm_syncobj_get(syncobj);

        idr_preload(GFP_KERNEL);
        spin_lock(&file_private->syncobj_table_lock);
        ret = idr_alloc(&file_private->syncobj_idr, syncobj, 1, 0, GFP_NOWAIT);
        spin_unlock(&file_private->syncobj_table_lock);
        idr_preload_end();

        if (ret > 0) {
                *handle = ret;
                ret = 0;
        } else
                drm_syncobj_put(syncobj);

        fdput(f);
        return ret;
}

static int drm_syncobj_import_sync_file_fence(struct drm_file *file_private,
                                              int fd, int handle)
{
        struct dma_fence *fence = sync_file_get_fence(fd);
        struct drm_syncobj *syncobj;

        if (!fence)
                return -EINVAL;

        syncobj = drm_syncobj_find(file_private, handle);
        if (!syncobj) {
                dma_fence_put(fence);
                return -ENOENT;
        }

        drm_syncobj_replace_fence(syncobj, fence);
        dma_fence_put(fence);
        drm_syncobj_put(syncobj);
        return 0;
}

static int drm_syncobj_export_sync_file(struct drm_file *file_private,
                                        int handle, int *p_fd)
{
        int ret;
        struct dma_fence *fence;
        struct sync_file *sync_file;
        int fd = get_unused_fd_flags(O_CLOEXEC);

        if (fd < 0)
                return fd;

        ret = drm_syncobj_find_fence(file_private, handle, 0, 0, &fence);
        if (ret)
                goto err_put_fd;

        sync_file = sync_file_create(fence);

        dma_fence_put(fence);

        if (!sync_file) {
                ret = -EINVAL;
                goto err_put_fd;
        }

        fd_install(fd, sync_file->file);

        *p_fd = fd;
        return 0;
err_put_fd:
        put_unused_fd(fd);
        return ret;
}

/**
 * drm_syncobj_open - initializes syncobj file-private structures at devnode open time
 * @file_private: drm file-private structure to set up
 *
 * Called at device open time, sets up the structure for handling refcounting
 * of sync objects.
 */
void
drm_syncobj_open(struct drm_file *file_private)
{
        idr_init_base(&file_private->syncobj_idr, 1);
        spin_lock_init(&file_private->syncobj_table_lock);
}

static int
drm_syncobj_release_handle(int id, void *ptr, void *data)
{
        struct drm_syncobj *syncobj = ptr;

        drm_syncobj_put(syncobj);
        return 0;
}

/**
 * drm_syncobj_release - release file-private sync object resources
 * @file_private: drm file-private structure to clean up
 *
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects held by this filp.
 */
void
drm_syncobj_release(struct drm_file *file_private)
{
        idr_for_each(&file_private->syncobj_idr,
                     &drm_syncobj_release_handle, file_private);
        idr_destroy(&file_private->syncobj_idr);
}

int
drm_syncobj_create_ioctl(struct drm_device *dev, void *data,
                         struct drm_file *file_private)
{
        struct drm_syncobj_create *args = data;

        if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
                return -EOPNOTSUPP;

        /* CREATE_SIGNALED is the only valid flag for now */
        if (args->flags & ~DRM_SYNCOBJ_CREATE_SIGNALED)
                return -EINVAL;

        return drm_syncobj_create_as_handle(file_private,
                                            &args->handle, args->flags);
}

int
drm_syncobj_destroy_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *file_private)
{
        struct drm_syncobj_destroy *args = data;

        if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
                return -EOPNOTSUPP;

        /* make sure padding is empty */
        if (args->pad)
                return -EINVAL;
        return drm_syncobj_destroy(file_private, args->handle);
}

int
drm_syncobj_handle_to_fd_ioctl(struct drm_device *dev, void *data,
                               struct drm_file *file_private)
{
        struct drm_syncobj_handle *args = data;

        if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
                return -EOPNOTSUPP;

        if (args->pad)
                return -EINVAL;

        if (args->flags != 0 &&
            args->flags != DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE)
                return -EINVAL;

        if (args->flags & DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE)
                return drm_syncobj_export_sync_file(file_private, args->handle,
                                                    &args->fd);

        return drm_syncobj_handle_to_fd(file_private, args->handle,
                                        &args->fd);
}

int
drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data,
                               struct drm_file *file_private)
{
        struct drm_syncobj_handle *args = data;

        if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
                return -EOPNOTSUPP;

        if (args->pad)
                return -EINVAL;

        if (args->flags != 0 &&
            args->flags != DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE)
                return -EINVAL;

        if (args->flags & DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE)
                return drm_syncobj_import_sync_file_fence(file_private,
                                                          args->fd,
                                                          args->handle);

        return drm_syncobj_fd_to_handle(file_private, args->fd,
                                        &args->handle);
}

static int drm_syncobj_transfer_to_timeline(struct drm_file *file_private,
                                            struct drm_syncobj_transfer *args)
{
        struct drm_syncobj *timeline_syncobj = NULL;
        struct dma_fence *fence;
        struct dma_fence_chain *chain;
        int ret;

        timeline_syncobj = drm_syncobj_find(file_private, args->dst_handle);
        if (!timeline_syncobj)
                return -ENOENT;

        ret = drm_syncobj_find_fence(file_private, args->src_handle,
                                     args->src_point, args->flags,
                                     &fence);
        if (ret)
                goto err;
        chain = kzalloc(sizeof(struct dma_fence_chain), GFP_KERNEL);
        if (!chain) {
                ret = -ENOMEM;
                goto err1;
        }
        drm_syncobj_add_point(timeline_syncobj, chain, fence, args->dst_point);
err1:
        dma_fence_put(fence);
err:
        drm_syncobj_put(timeline_syncobj);

        return ret;
}

static int
drm_syncobj_transfer_to_binary(struct drm_file *file_private,
                               struct drm_syncobj_transfer *args)
{
        struct drm_syncobj *binary_syncobj = NULL;
        struct dma_fence *fence;
        int ret;

        binary_syncobj = drm_syncobj_find(file_private, args->dst_handle);
        if (!binary_syncobj)
                return -ENOENT;
        ret = drm_syncobj_find_fence(file_private, args->src_handle,
                                     args->src_point, args->flags, &fence);
        if (ret)
                goto err;
        drm_syncobj_replace_fence(binary_syncobj, fence);
        dma_fence_put(fence);
err:
        drm_syncobj_put(binary_syncobj);

        return ret;
}

int
drm_syncobj_transfer_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *file_private)
{
        struct drm_syncobj_transfer *args = data;
        int ret;

        if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE))
                return -EOPNOTSUPP;

        if (args->pad)
                return -EINVAL;

        if (args->dst_point)
                ret = drm_syncobj_transfer_to_timeline(file_private, args);
        else
                ret = drm_syncobj_transfer_to_binary(file_private, args);

        return ret;
}

static void syncobj_wait_fence_func(struct dma_fence *fence,
                                    struct dma_fence_cb *cb)
{
        struct syncobj_wait_entry *wait =
                container_of(cb, struct syncobj_wait_entry, fence_cb);

        wake_up_process(wait->task);
}

static void syncobj_wait_syncobj_func(struct drm_syncobj *syncobj,
                                      struct syncobj_wait_entry *wait)
{
        struct dma_fence *fence;

        /* This happens inside the syncobj lock */
        fence = rcu_dereference_protected(syncobj->fence,
                                          lockdep_is_held(&syncobj->lock));
        dma_fence_get(fence);
        if (!fence || dma_fence_chain_find_seqno(&fence, wait->point)) {
                dma_fence_put(fence);
                return;
        } else if (!fence) {
                /* The requested point is already signaled. */
                wait->fence = dma_fence_get_stub();
        } else {
                wait->fence = fence;
        }

        wake_up_process(wait->task);
        list_del_init(&wait->node);
}

static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
                                                  void __user *user_points,
                                                  uint32_t count,
                                                  uint32_t flags,
                                                  signed long timeout,
                                                  uint32_t *idx)
{
        struct syncobj_wait_entry *entries;
        struct dma_fence *fence;
        uint64_t *points;
        uint32_t signaled_count, i;

        points = kmalloc_array(count, sizeof(*points), GFP_KERNEL);
        if (points == NULL)
                return -ENOMEM;

        if (!user_points) {
                memset(points, 0, count * sizeof(uint64_t));

        } else if (copy_from_user(points, user_points,
                                  sizeof(uint64_t) * count)) {
                timeout = -EFAULT;
                goto err_free_points;
        }

        entries = kcalloc(count, sizeof(*entries), GFP_KERNEL);
        if (!entries) {
                timeout = -ENOMEM;
                goto err_free_points;
        }
        /* Walk the list of sync objects and initialize entries. We do
         * this up-front so that we can properly return -EINVAL if there is
         * a syncobj with a missing fence and then never have the chance of
         * returning -EINVAL again.
         */
        signaled_count = 0;
        for (i = 0; i < count; ++i) {
                struct dma_fence *fence;

                entries[i].task = current;
                entries[i].point = points[i];
                fence = drm_syncobj_fence_get(syncobjs[i]);
                if (!fence || dma_fence_chain_find_seqno(&fence, points[i])) {
                        dma_fence_put(fence);
                        if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
                                continue;
                        } else {
                                timeout = -EINVAL;
                                goto cleanup_entries;
                        }
                }

                if (fence)
                        entries[i].fence = fence;
                else
                        entries[i].fence = dma_fence_get_stub();

                if ((flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE) ||
                    dma_fence_is_signaled(entries[i].fence)) {
                        if (signaled_count == 0 && idx)
                                *idx = i;
                        signaled_count++;
                }
        }

        if (signaled_count == count ||
            (signaled_count > 0 &&
             !(flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL)))
                goto cleanup_entries;

        /* There's a very annoying laxness in the dma_fence API here, in
         * that backends are not required to automatically report when a
         * fence is signaled prior to fence->ops->enable_signaling() being
         * called. So here if we fail to match signaled_count, we need to
         * fall through and try a 0 timeout wait!
         */

        if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
                for (i = 0; i < count; ++i)
                        drm_syncobj_fence_add_wait(syncobjs[i], &entries[i]);
        }

        do {
                set_current_state(TASK_INTERRUPTIBLE);

                signaled_count = 0;
                for (i = 0; i < count; ++i) {
                        fence = entries[i].fence;
                        if (!fence)
                                continue;

                        if ((flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE) ||
                            dma_fence_is_signaled(fence) ||
                            (!entries[i].fence_cb.func &&
                             dma_fence_add_callback(fence,
                                                    &entries[i].fence_cb,
                                                    syncobj_wait_fence_func))) {
                                /* The fence has been signaled */
                                if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL) {
                                        signaled_count++;
                                } else {
                                        if (idx)
                                                *idx = i;
                                        goto done_waiting;
                                }
                        }
                }

                if (signaled_count == count)
                        goto done_waiting;

                if (timeout == 0) {
                        timeout = -ETIME;
                        goto done_waiting;
                }

                if (signal_pending(current)) {
                        timeout = -ERESTARTSYS;
                        goto done_waiting;
                }

                timeout = schedule_timeout(timeout);
        } while (1);

done_waiting:
        __set_current_state(TASK_RUNNING);

cleanup_entries:
        for (i = 0; i < count; ++i) {
                drm_syncobj_remove_wait(syncobjs[i], &entries[i]);
                if (entries[i].fence_cb.func)
                        dma_fence_remove_callback(entries[i].fence,
                                                  &entries[i].fence_cb);
                dma_fence_put(entries[i].fence);
        }
        kfree(entries);

err_free_points:
        kfree(points);

        return timeout;
}

/**
 * drm_timeout_abs_to_jiffies - calculate jiffies timeout from absolute value
 *
 * @timeout_nsec: timeout nsec component in ns, 0 for poll
 *
 * Calculate the timeout in jiffies from an absolute time in sec/nsec.
 */
signed long drm_timeout_abs_to_jiffies(int64_t timeout_nsec)
{
        ktime_t abs_timeout, now;
        u64 timeout_ns, timeout_jiffies64;

        /* a 0 timeout means poll - absolute 0 doesn't seem valid */
        if (timeout_nsec == 0)
                return 0;

        abs_timeout = ns_to_ktime(timeout_nsec);
        now = ktime_get();

        if (!ktime_after(abs_timeout, now))
                return 0;

        timeout_ns = ktime_to_ns(ktime_sub(abs_timeout, now));

        timeout_jiffies64 = nsecs_to_jiffies64(timeout_ns);
        /* clamp timeout to avoid infinite timeout */
        if (timeout_jiffies64 >= MAX_SCHEDULE_TIMEOUT - 1)
                return MAX_SCHEDULE_TIMEOUT - 1;

        return timeout_jiffies64 + 1;
}
EXPORT_SYMBOL(drm_timeout_abs_to_jiffies);
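
/*
 * Usage sketch (as in the wait ioctls below): convert the user-supplied
 * absolute deadline into a relative jiffies budget before the wait loop.
 *
 *     signed long timeout = drm_timeout_abs_to_jiffies(args->timeout_nsec);
 *     // timeout == 0 means poll; positive values bound the wait
 */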

static int drm_syncobj_array_wait(struct drm_device *dev,
                                  struct drm_file *file_private,
                                  struct drm_syncobj_wait *wait,
                                  struct drm_syncobj_timeline_wait *timeline_wait,
                                  struct drm_syncobj **syncobjs, bool timeline)
{
        signed long timeout = 0;
        uint32_t first = ~0;

        if (!timeline) {
                timeout = drm_timeout_abs_to_jiffies(wait->timeout_nsec);
                timeout = drm_syncobj_array_wait_timeout(syncobjs,
                                                         NULL,
                                                         wait->count_handles,
                                                         wait->flags,
                                                         timeout, &first);
                if (timeout < 0)
                        return timeout;
                wait->first_signaled = first;
        } else {
                timeout = drm_timeout_abs_to_jiffies(timeline_wait->timeout_nsec);
                timeout = drm_syncobj_array_wait_timeout(syncobjs,
                                                         u64_to_user_ptr(timeline_wait->points),
                                                         timeline_wait->count_handles,
                                                         timeline_wait->flags,
                                                         timeout, &first);
                if (timeout < 0)
                        return timeout;
                timeline_wait->first_signaled = first;
        }
        return 0;
}

static int drm_syncobj_array_find(struct drm_file *file_private,
                                  void __user *user_handles,
                                  uint32_t count_handles,
                                  struct drm_syncobj ***syncobjs_out)
{
        uint32_t i, *handles;
        struct drm_syncobj **syncobjs;
        int ret;

        handles = kmalloc_array(count_handles, sizeof(*handles), GFP_KERNEL);
        if (handles == NULL)
                return -ENOMEM;

        if (copy_from_user(handles, user_handles,
                           sizeof(uint32_t) * count_handles)) {
                ret = -EFAULT;
                goto err_free_handles;
        }

        syncobjs = kmalloc_array(count_handles, sizeof(*syncobjs), GFP_KERNEL);
        if (syncobjs == NULL) {
                ret = -ENOMEM;
                goto err_free_handles;
        }

        for (i = 0; i < count_handles; i++) {
                syncobjs[i] = drm_syncobj_find(file_private, handles[i]);
                if (!syncobjs[i]) {
                        ret = -ENOENT;
                        goto err_put_syncobjs;
                }
        }

        kfree(handles);
        *syncobjs_out = syncobjs;
        return 0;

err_put_syncobjs:
        while (i-- > 0)
                drm_syncobj_put(syncobjs[i]);
        kfree(syncobjs);
err_free_handles:
        kfree(handles);

        return ret;
}

static void drm_syncobj_array_free(struct drm_syncobj **syncobjs,
                                   uint32_t count)
{
        uint32_t i;

        for (i = 0; i < count; i++)
                drm_syncobj_put(syncobjs[i]);
        kfree(syncobjs);
}

int
drm_syncobj_wait_ioctl(struct drm_device *dev, void *data,
                       struct drm_file *file_private)
{
        struct drm_syncobj_wait *args = data;
        struct drm_syncobj **syncobjs;
        int ret = 0;

        if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
                return -EOPNOTSUPP;

        if (args->flags & ~(DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL |
                            DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT))
                return -EINVAL;

        if (args->count_handles == 0)
                return -EINVAL;

        ret = drm_syncobj_array_find(file_private,
                                     u64_to_user_ptr(args->handles),
                                     args->count_handles,
                                     &syncobjs);
        if (ret < 0)
                return ret;

        ret = drm_syncobj_array_wait(dev, file_private,
                                     args, NULL, syncobjs, false);

        drm_syncobj_array_free(syncobjs, args->count_handles);

        return ret;
}

int
drm_syncobj_timeline_wait_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file_private)
{
        struct drm_syncobj_timeline_wait *args = data;
        struct drm_syncobj **syncobjs;
        int ret = 0;

        if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE))
                return -EOPNOTSUPP;

        if (args->flags & ~(DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL |
                            DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT |
                            DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE))
                return -EINVAL;

        if (args->count_handles == 0)
                return -EINVAL;

        ret = drm_syncobj_array_find(file_private,
                                     u64_to_user_ptr(args->handles),
                                     args->count_handles,
                                     &syncobjs);
        if (ret < 0)
                return ret;

        ret = drm_syncobj_array_wait(dev, file_private,
                                     NULL, args, syncobjs, true);

        drm_syncobj_array_free(syncobjs, args->count_handles);

        return ret;
}

int
drm_syncobj_reset_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file_private)
{
        struct drm_syncobj_array *args = data;
        struct drm_syncobj **syncobjs;
        uint32_t i;
        int ret;

        if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
                return -EOPNOTSUPP;

        if (args->pad != 0)
                return -EINVAL;

        if (args->count_handles == 0)
                return -EINVAL;

        ret = drm_syncobj_array_find(file_private,
                                     u64_to_user_ptr(args->handles),
                                     args->count_handles,
                                     &syncobjs);
        if (ret < 0)
                return ret;

        for (i = 0; i < args->count_handles; i++)
                drm_syncobj_replace_fence(syncobjs[i], NULL);

        drm_syncobj_array_free(syncobjs, args->count_handles);

        return 0;
}

int
drm_syncobj_signal_ioctl(struct drm_device *dev, void *data,
                         struct drm_file *file_private)
{
        struct drm_syncobj_array *args = data;
        struct drm_syncobj **syncobjs;
        uint32_t i;
        int ret;

        if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
                return -EOPNOTSUPP;

        if (args->pad != 0)
                return -EINVAL;

        if (args->count_handles == 0)
                return -EINVAL;

        ret = drm_syncobj_array_find(file_private,
                                     u64_to_user_ptr(args->handles),
                                     args->count_handles,
                                     &syncobjs);
        if (ret < 0)
                return ret;

        for (i = 0; i < args->count_handles; i++)
                drm_syncobj_assign_null_handle(syncobjs[i]);

        drm_syncobj_array_free(syncobjs, args->count_handles);

        return ret;
}

int
drm_syncobj_timeline_signal_ioctl(struct drm_device *dev, void *data,
                                  struct drm_file *file_private)
{
        struct drm_syncobj_timeline_array *args = data;
        struct drm_syncobj **syncobjs;
        struct dma_fence_chain **chains;
        uint64_t *points;
        uint32_t i, j;
        int ret;

        if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE))
                return -EOPNOTSUPP;

        if (args->pad != 0)
                return -EINVAL;

        if (args->count_handles == 0)
                return -EINVAL;

        ret = drm_syncobj_array_find(file_private,
                                     u64_to_user_ptr(args->handles),
                                     args->count_handles,
                                     &syncobjs);
        if (ret < 0)
                return ret;

        points = kmalloc_array(args->count_handles, sizeof(*points),
                               GFP_KERNEL);
        if (!points) {
                ret = -ENOMEM;
                goto out;
        }
        if (!u64_to_user_ptr(args->points)) {
                memset(points, 0, args->count_handles * sizeof(uint64_t));
        } else if (copy_from_user(points, u64_to_user_ptr(args->points),
                                  sizeof(uint64_t) * args->count_handles)) {
                ret = -EFAULT;
                goto err_points;
        }

        chains = kmalloc_array(args->count_handles, sizeof(void *), GFP_KERNEL);
        if (!chains) {
                ret = -ENOMEM;
                goto err_points;
        }
        for (i = 0; i < args->count_handles; i++) {
                chains[i] = kzalloc(sizeof(struct dma_fence_chain), GFP_KERNEL);
                if (!chains[i]) {
                        for (j = 0; j < i; j++)
                                kfree(chains[j]);
                        ret = -ENOMEM;
                        goto err_chains;
                }
        }

        for (i = 0; i < args->count_handles; i++) {
                struct dma_fence *fence = dma_fence_get_stub();

                drm_syncobj_add_point(syncobjs[i], chains[i],
                                      fence, points[i]);
                dma_fence_put(fence);
        }
err_chains:
        kfree(chains);
err_points:
        kfree(points);
out:
        drm_syncobj_array_free(syncobjs, args->count_handles);

        return ret;
}

int drm_syncobj_query_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *file_private)
{
        struct drm_syncobj_timeline_array *args = data;
        struct drm_syncobj **syncobjs;
        uint64_t __user *points = u64_to_user_ptr(args->points);
        uint32_t i;
        int ret;

        if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE))
                return -EOPNOTSUPP;

        if (args->pad != 0)
                return -EINVAL;

        if (args->count_handles == 0)
                return -EINVAL;

        ret = drm_syncobj_array_find(file_private,
                                     u64_to_user_ptr(args->handles),
                                     args->count_handles,
                                     &syncobjs);
        if (ret < 0)
                return ret;

        for (i = 0; i < args->count_handles; i++) {
                struct dma_fence_chain *chain;
                struct dma_fence *fence;
                uint64_t point;

                fence = drm_syncobj_fence_get(syncobjs[i]);
                chain = to_dma_fence_chain(fence);
                if (chain) {
                        struct dma_fence *iter, *last_signaled = NULL;

                        dma_fence_chain_for_each(iter, fence) {
                                if (iter->context != fence->context) {
                                        dma_fence_put(iter);
                                        /* It is most likely that the timeline
                                         * has unordered points.
                                         */
                                        break;
                                }
                                dma_fence_put(last_signaled);
                                last_signaled = dma_fence_get(iter);
                        }
                        point = dma_fence_is_signaled(last_signaled) ?
                                last_signaled->seqno :
                                to_dma_fence_chain(last_signaled)->prev_seqno;
                        dma_fence_put(last_signaled);
                } else {
                        point = 0;
                }
                ret = copy_to_user(&points[i], &point, sizeof(uint64_t));
                ret = ret ? -EFAULT : 0;
                if (ret)
                        break;
        }
        drm_syncobj_array_free(syncobjs, args->count_handles);

        return ret;
}