/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/mem_encrypt.h>
#include <linux/pagevec.h>

#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_print.h>
#include <drm/drm_vma_manager.h>

#include "drm_internal.h"

/** @file drm_gem.c
 *
 * This file provides some of the base ioctls and library routines for
 * the graphics memory manager implemented by each device driver.
 *
 * Because various devices have different requirements in terms of
 * synchronization and migration strategies, implementing that is left up to
 * the driver, and all that the general API provides should be generic --
 * allocating objects, reading/writing data with the CPU, freeing objects.
 * Even there, platform-dependent optimizations for reading/writing data with
 * the CPU mean we'll likely hook those out to driver-specific calls. However,
 * the DRI2 implementation wants to have at least allocate/mmap be generic.
 *
 * The goal was to have swap-backed object allocation managed through
 * struct file. However, file descriptors as handles to a struct file have
 * two major failings:
 * - Process limits prevent more than 1024 or so being used at a time by
 *   default.
 * - Inability to allocate high fds will aggravate the X Server's select()
 *   handling, and likely that of many GL client applications as well.
 *
 * This led to a plan of using our own integer IDs (called handles, following
 * DRM terminology) to mimic fds, and implement the fd syscalls we need as
 * ioctls. The objects themselves will still include the struct file so
 * that we can transition to fds if the required kernel infrastructure shows
 * up at a later date, and as our interface with shmfs for memory allocation.
 */

/**
 * drm_gem_init - Initialize the GEM device fields
 * @dev: drm_device structure to initialize
 */
int
drm_gem_init(struct drm_device *dev)
{
	struct drm_vma_offset_manager *vma_offset_manager;

	mutex_init(&dev->object_name_lock);
	idr_init_base(&dev->object_name_idr, 1);

	vma_offset_manager = kzalloc(sizeof(*vma_offset_manager), GFP_KERNEL);
	if (!vma_offset_manager) {
		DRM_ERROR("out of memory\n");
		return -ENOMEM;
	}

	dev->vma_offset_manager = vma_offset_manager;
	drm_vma_offset_manager_init(vma_offset_manager,
				    DRM_FILE_PAGE_OFFSET_START,
				    DRM_FILE_PAGE_OFFSET_SIZE);

	return 0;
}

void
drm_gem_destroy(struct drm_device *dev)
{
	drm_vma_offset_manager_destroy(dev->vma_offset_manager);
	kfree(dev->vma_offset_manager);
	dev->vma_offset_manager = NULL;
}

/**
 * drm_gem_object_init - initialize an allocated shmem-backed GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * shmfs backing store.
 */
int drm_gem_object_init(struct drm_device *dev,
			struct drm_gem_object *obj, size_t size)
{
	struct file *filp;

	drm_gem_private_object_init(dev, obj, size);

	filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
	if (IS_ERR(filp))
		return PTR_ERR(filp);

	obj->filp = filp;

	return 0;
}
EXPORT_SYMBOL(drm_gem_object_init);
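
/*
 * A minimal usage sketch (not part of this file, "my_bo"/"my_bo_create" are
 * hypothetical): a driver's buffer-object creation path typically embeds
 * struct drm_gem_object in its own BO type, attaches shmem backing with
 * drm_gem_object_init(), and publishes the object last with
 * drm_gem_handle_create():
 *
 *	struct my_bo {
 *		struct drm_gem_object base;
 *	};
 *
 *	static int my_bo_create(struct drm_device *dev, struct drm_file *file,
 *				size_t size, u32 *handle)
 *	{
 *		struct my_bo *bo;
 *		int ret;
 *
 *		size = PAGE_ALIGN(size);
 *		bo = kzalloc(sizeof(*bo), GFP_KERNEL);
 *		if (!bo)
 *			return -ENOMEM;
 *
 *		ret = drm_gem_object_init(dev, &bo->base, size);
 *		if (ret) {
 *			kfree(bo);
 *			return ret;
 *		}
 *
 *		ret = drm_gem_handle_create(file, &bo->base, handle);
 *		// Drop the creation reference; the handle now owns one.
 *		drm_gem_object_put_unlocked(&bo->base);
 *		return ret;
 *	}
 */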

/**
 * drm_gem_private_object_init - initialize an allocated private GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * no GEM provided backing store. Instead the caller is responsible for
 * backing the object and handling it.
 */
void drm_gem_private_object_init(struct drm_device *dev,
				 struct drm_gem_object *obj, size_t size)
{
	BUG_ON((size & (PAGE_SIZE - 1)) != 0);

	obj->dev = dev;
	obj->filp = NULL;

	kref_init(&obj->refcount);
	obj->handle_count = 0;
	obj->size = size;
	dma_resv_init(&obj->_resv);
	if (!obj->resv)
		obj->resv = &obj->_resv;

	drm_vma_node_reset(&obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_private_object_init);
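
/*
 * An illustrative sketch (hypothetical "my_gem_import" helper): private
 * objects are the right choice when the backing storage comes from
 * somewhere else, e.g. an imported dma-buf, so shmem must not be attached.
 * Note the size passed in must be page-aligned, per the BUG_ON() above:
 *
 *	struct drm_gem_object *
 *	my_gem_import(struct drm_device *dev, struct dma_buf *dma_buf)
 *	{
 *		struct drm_gem_object *obj;
 *
 *		obj = kzalloc(sizeof(*obj), GFP_KERNEL);
 *		if (!obj)
 *			return ERR_PTR(-ENOMEM);
 *
 *		drm_gem_private_object_init(dev, obj, PAGE_ALIGN(dma_buf->size));
 *		// The driver then attaches and maps the dma-buf itself.
 *		return obj;
 *	}
 */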

static void
drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
{
	/*
	 * Note: obj->dma_buf can't disappear as long as we still hold a
	 * handle reference in obj->handle_count.
	 */
	mutex_lock(&filp->prime.lock);
	if (obj->dma_buf) {
		drm_prime_remove_buf_handle_locked(&filp->prime,
						   obj->dma_buf);
	}
	mutex_unlock(&filp->prime.lock);
}

/**
 * drm_gem_object_handle_free - release resources bound to userspace handles
 * @obj: GEM object to clean up.
 *
 * Called after the last handle to the object has been closed
 *
 * Removes any name for the object. Note that this must be
 * called before drm_gem_object_free or we'll be touching
 * freed memory
 */
static void drm_gem_object_handle_free(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	/* Remove any name for this object */
	if (obj->name) {
		idr_remove(&dev->object_name_idr, obj->name);
		obj->name = 0;
	}
}

static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
{
	/* Unbreak the reference cycle if we have an exported dma_buf. */
	if (obj->dma_buf) {
		dma_buf_put(obj->dma_buf);
		obj->dma_buf = NULL;
	}
}

static void
drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	bool final = false;

	if (WARN_ON(obj->handle_count == 0))
		return;

	/*
	 * Must bump handle count first as this may be the last
	 * ref, in which case the object would disappear before we
	 * checked for a name
	 */

	mutex_lock(&dev->object_name_lock);
	if (--obj->handle_count == 0) {
		drm_gem_object_handle_free(obj);
		drm_gem_object_exported_dma_buf_free(obj);
		final = true;
	}
	mutex_unlock(&dev->object_name_lock);

	if (final)
		drm_gem_object_put_unlocked(obj);
}

/*
 * Called at device or object close to release the file's
 * handle references on objects.
 */
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
	struct drm_file *file_priv = data;
	struct drm_gem_object *obj = ptr;
	struct drm_device *dev = obj->dev;

	if (obj->funcs && obj->funcs->close)
		obj->funcs->close(obj, file_priv);
	else if (dev->driver->gem_close_object)
		dev->driver->gem_close_object(obj, file_priv);

	drm_gem_remove_prime_handles(obj, file_priv);
	drm_vma_node_revoke(&obj->vma_node, file_priv);

	drm_gem_object_handle_put_unlocked(obj);

	return 0;
}

/**
 * drm_gem_handle_delete - deletes the given file-private handle
 * @filp: drm file-private structure to use for the handle look up
 * @handle: userspace handle to delete
 *
 * Removes the GEM handle from the @filp lookup table which has been added with
 * drm_gem_handle_create(). If this is the last handle also cleans up linked
 * resources like GEM names.
 */
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
	struct drm_gem_object *obj;

	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_replace(&filp->object_idr, NULL, handle);
	spin_unlock(&filp->table_lock);
	if (IS_ERR_OR_NULL(obj))
		return -EINVAL;

	/* Release driver's reference and decrement refcount. */
	drm_gem_object_release_handle(handle, obj, filp);

	/* And finally make the handle available for future allocations. */
	spin_lock(&filp->table_lock);
	idr_remove(&filp->object_idr, handle);
	spin_unlock(&filp->table_lock);

	return 0;
}
EXPORT_SYMBOL(drm_gem_handle_delete);

/**
 * drm_gem_dumb_map_offset - return the fake mmap offset for a gem object
 * @file: drm file-private structure containing the gem object
 * @dev: corresponding drm_device
 * @handle: gem object handle
 * @offset: return location for the fake mmap offset
 *
 * This implements the &drm_driver.dumb_map_offset kms driver callback for
 * drivers which use gem to manage their backing storage.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
			    u32 handle, u64 *offset)
{
	struct drm_gem_object *obj;
	int ret;

	obj = drm_gem_object_lookup(file, handle);
	if (!obj)
		return -ENOENT;

	/* Don't allow imported objects to be mapped */
	if (obj->import_attach) {
		ret = -EINVAL;
		goto out;
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto out;

	*offset = drm_vma_node_offset_addr(&obj->vma_node);
out:
	drm_gem_object_put_unlocked(obj);

	return ret;
}
EXPORT_SYMBOL_GPL(drm_gem_dumb_map_offset);
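
/*
 * A sketch of how a driver would wire this up (illustrative only, the
 * "my_driver"/"my_dumb_create" names are hypothetical): the helper plugs
 * directly into the &drm_driver.dumb_map_offset hook, so userspace can turn
 * a dumb-buffer handle into an mmap()-able offset:
 *
 *	static struct drm_driver my_driver = {
 *		.driver_features = DRIVER_GEM | DRIVER_MODESET,
 *		.dumb_create	 = my_dumb_create,
 *		.dumb_map_offset = drm_gem_dumb_map_offset,
 *		.dumb_destroy	 = drm_gem_dumb_destroy,
 *	};
 */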

/**
 * drm_gem_dumb_destroy - dumb fb callback helper for gem based drivers
 * @file: drm file-private structure to remove the dumb handle from
 * @dev: corresponding drm_device
 * @handle: the dumb handle to remove
 *
 * This implements the &drm_driver.dumb_destroy kms driver callback for drivers
 * which use gem to manage their backing storage.
 */
int drm_gem_dumb_destroy(struct drm_file *file,
			 struct drm_device *dev,
			 uint32_t handle)
{
	return drm_gem_handle_delete(file, handle);
}
EXPORT_SYMBOL(drm_gem_dumb_destroy);

/**
 * drm_gem_handle_create_tail - internal function to create a handle
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * This expects the &drm_device.object_name_lock to be held already and will
 * drop it before returning. Used to avoid races in establishing new handles
 * when importing an object from either a flink name or a dma-buf.
 *
 * Handles must be released again through drm_gem_handle_delete(). This is done
 * when userspace closes @file_priv for all attached handles, or through the
 * GEM_CLOSE ioctl for individual handles.
 */
int
drm_gem_handle_create_tail(struct drm_file *file_priv,
			   struct drm_gem_object *obj,
			   u32 *handlep)
{
	struct drm_device *dev = obj->dev;
	u32 handle;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->object_name_lock));
	if (obj->handle_count++ == 0)
		drm_gem_object_get(obj);

	/*
	 * Get the user-visible handle using idr. Preload and perform
	 * allocation under our spinlock.
	 */
	idr_preload(GFP_KERNEL);
	spin_lock(&file_priv->table_lock);

	ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);

	spin_unlock(&file_priv->table_lock);
	idr_preload_end();

	mutex_unlock(&dev->object_name_lock);
	if (ret < 0)
		goto err_unref;

	handle = ret;

	ret = drm_vma_node_allow(&obj->vma_node, file_priv);
	if (ret)
		goto err_remove;

	if (obj->funcs && obj->funcs->open) {
		ret = obj->funcs->open(obj, file_priv);
		if (ret)
			goto err_revoke;
	} else if (dev->driver->gem_open_object) {
		ret = dev->driver->gem_open_object(obj, file_priv);
		if (ret)
			goto err_revoke;
	}

	*handlep = handle;
	return 0;

err_revoke:
	drm_vma_node_revoke(&obj->vma_node, file_priv);
err_remove:
	spin_lock(&file_priv->table_lock);
	idr_remove(&file_priv->object_idr, handle);
	spin_unlock(&file_priv->table_lock);
err_unref:
	drm_gem_object_handle_put_unlocked(obj);
	return ret;
}

/**
 * drm_gem_handle_create - create a gem handle for an object
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * Create a handle for this object. This adds a handle reference to the object,
 * which includes a regular reference count. Callers will likely want to
 * dereference the object afterwards.
 *
 * Since this publishes @obj to userspace it must be fully set up by this point,
 * drivers must call this last in their buffer object creation callbacks.
 */
int drm_gem_handle_create(struct drm_file *file_priv,
			  struct drm_gem_object *obj,
			  u32 *handlep)
{
	mutex_lock(&obj->dev->object_name_lock);

	return drm_gem_handle_create_tail(file_priv, obj, handlep);
}
EXPORT_SYMBOL(drm_gem_handle_create);


/**
 * drm_gem_free_mmap_offset - release a fake mmap offset for an object
 * @obj: obj in question
 *
 * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
 *
 * Note that drm_gem_object_release() already calls this function, so drivers
 * don't have to take care of releasing the mmap offset themselves when freeing
 * the GEM object.
 */
void
drm_gem_free_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	drm_vma_offset_remove(dev->vma_offset_manager, &obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_free_mmap_offset);

/**
 * drm_gem_create_mmap_offset_size - create a fake mmap offset for an object
 * @obj: obj in question
 * @size: the virtual size
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call. The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj, in cases where
 * the virtual size differs from the physical size (ie. &drm_gem_object.size).
 * Otherwise just use drm_gem_create_mmap_offset().
 *
 * This function is idempotent and handles an already allocated mmap offset
 * transparently. Drivers do not need to check for this case.
 */
int
drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
{
	struct drm_device *dev = obj->dev;

	return drm_vma_offset_add(dev->vma_offset_manager, &obj->vma_node,
				  size / PAGE_SIZE);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset_size);

/**
 * drm_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call. The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 *
 * Drivers can call drm_gem_free_mmap_offset() before freeing @obj to release
 * the fake offset again.
 */
int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
{
	return drm_gem_create_mmap_offset_size(obj, obj->size);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset);
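
/*
 * From userspace the fake offset is consumed with a plain mmap(2) on the
 * DRM fd. A hedged sketch (the map-offset ioctl and its struct are
 * driver-specific; "DRM_IOCTL_MY_MMAP_OFFSET" is hypothetical):
 *
 *	struct drm_my_mmap_offset args = { .handle = handle };
 *	void *ptr;
 *
 *	if (ioctl(drm_fd, DRM_IOCTL_MY_MMAP_OFFSET, &args))
 *		return -errno;
 *
 *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   drm_fd, args.offset);
 *	if (ptr == MAP_FAILED)
 *		return -errno;
 */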

/*
 * Move pages to appropriate lru and release the pagevec, decrementing the
 * ref count of those pages.
 */
static void drm_gem_check_release_pagevec(struct pagevec *pvec)
{
	check_move_unevictable_pages(pvec);
	__pagevec_release(pvec);
	cond_resched();
}

/**
 * drm_gem_get_pages - helper to allocate backing pages for a GEM object
 * from shmem
 * @obj: obj in question
 *
 * This reads the page-array of the shmem-backing storage of the given gem
 * object. An array of pages is returned. If a page is not allocated or
 * swapped-out, this will allocate/swap-in the required pages. Note that the
 * whole object is covered by the page-array and pinned in memory.
 *
 * Use drm_gem_put_pages() to release the array and unpin all pages.
 *
 * This uses the GFP-mask set on the shmem-mapping (see mapping_set_gfp_mask()).
 * If you require other GFP-masks, you have to do those allocations yourself.
 *
 * Note that you are not allowed to change gfp-zones during runtime. That is,
 * shmem_read_mapping_page_gfp() must be called with the same gfp_zone(gfp) as
 * set during initialization. If you have special zone constraints, set them
 * after drm_gem_object_init() via mapping_set_gfp_mask(). shmem-core takes care
 * to keep pages in the required zone during swap-in.
 */
struct page **drm_gem_get_pages(struct drm_gem_object *obj)
{
	struct address_space *mapping;
	struct page *p, **pages;
	struct pagevec pvec;
	int i, npages;

	/* This is the shared memory object that backs the GEM resource */
	mapping = obj->filp->f_mapping;

	/* We already BUG_ON() for non-page-aligned sizes in
	 * drm_gem_object_init(), so we should never hit this unless
	 * driver author is doing something really wrong:
	 */
	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

	npages = obj->size >> PAGE_SHIFT;

	pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (pages == NULL)
		return ERR_PTR(-ENOMEM);

	mapping_set_unevictable(mapping);

	for (i = 0; i < npages; i++) {
		p = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(p))
			goto fail;
		pages[i] = p;

		/* Make sure shmem keeps __GFP_DMA32 allocated pages in the
		 * correct region during swapin. Note that this requires
		 * __GFP_DMA32 to be set in mapping_gfp_mask(inode->i_mapping)
		 * so shmem can relocate pages during swapin if required.
		 */
		BUG_ON(mapping_gfp_constraint(mapping, __GFP_DMA32) &&
		       (page_to_pfn(p) >= 0x00100000UL));
	}

	return pages;

fail:
	mapping_clear_unevictable(mapping);
	pagevec_init(&pvec);
	while (i--) {
		if (!pagevec_add(&pvec, pages[i]))
			drm_gem_check_release_pagevec(&pvec);
	}
	if (pagevec_count(&pvec))
		drm_gem_check_release_pagevec(&pvec);

	kvfree(pages);
	return ERR_CAST(p);
}
EXPORT_SYMBOL(drm_gem_get_pages);
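
/*
 * A minimal pin/unpin sketch under stated assumptions (hypothetical
 * "my_bo_pin" helper; DMA-mapping details elided): drivers typically fetch
 * the page array once, build their DMA mapping from it, and later return
 * the pages with matching dirty/accessed hints:
 *
 *	static int my_bo_pin(struct drm_gem_object *obj)
 *	{
 *		struct page **pages;
 *
 *		pages = drm_gem_get_pages(obj);
 *		if (IS_ERR(pages))
 *			return PTR_ERR(pages);
 *
 *		// ... build an sg_table / DMA mapping from "pages" ...
 *
 *		// Once the mapping is torn down, unpin; the device may
 *		// have written the pages, so mark them dirty:
 *		drm_gem_put_pages(obj, pages, true, false);
 *		return 0;
 *	}
 */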

/**
 * drm_gem_put_pages - helper to free backing pages for a GEM object
 * @obj: obj in question
 * @pages: pages to free
 * @dirty: if true, pages will be marked as dirty
 * @accessed: if true, the pages will be marked as accessed
 */
void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
		       bool dirty, bool accessed)
{
	int i, npages;
	struct address_space *mapping;
	struct pagevec pvec;

	mapping = file_inode(obj->filp)->i_mapping;
	mapping_clear_unevictable(mapping);

	/* We already BUG_ON() for non-page-aligned sizes in
	 * drm_gem_object_init(), so we should never hit this unless
	 * driver author is doing something really wrong:
	 */
	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

	npages = obj->size >> PAGE_SHIFT;

	pagevec_init(&pvec);
	for (i = 0; i < npages; i++) {
		if (!pages[i])
			continue;

		if (dirty)
			set_page_dirty(pages[i]);

		if (accessed)
			mark_page_accessed(pages[i]);

		/* Undo the reference we took when populating the table */
		if (!pagevec_add(&pvec, pages[i]))
			drm_gem_check_release_pagevec(&pvec);
	}
	if (pagevec_count(&pvec))
		drm_gem_check_release_pagevec(&pvec);

	kvfree(pages);
}
EXPORT_SYMBOL(drm_gem_put_pages);

static int objects_lookup(struct drm_file *filp, u32 *handle, int count,
			  struct drm_gem_object **objs)
{
	int i, ret = 0;
	struct drm_gem_object *obj;

	spin_lock(&filp->table_lock);

	for (i = 0; i < count; i++) {
		/* Check if we currently have a reference on the object */
		obj = idr_find(&filp->object_idr, handle[i]);
		if (!obj) {
			ret = -ENOENT;
			break;
		}
		drm_gem_object_get(obj);
		objs[i] = obj;
	}
	spin_unlock(&filp->table_lock);

	return ret;
}

/**
 * drm_gem_objects_lookup - look up GEM objects from an array of handles
 * @filp: DRM file private data
 * @bo_handles: user pointer to array of userspace handles
 * @count: size of handle array
 * @objs_out: returned pointer to array of drm_gem_object pointers
 *
 * Takes an array of userspace handles and returns a newly allocated array of
 * GEM objects.
 *
 * For a single handle lookup, use drm_gem_object_lookup().
 *
 * Returns:
 *
 * @objs_out filled in with GEM object pointers. Returned GEM objects need to
 * be released with drm_gem_object_put(). -ENOENT is returned on a lookup
 * failure. 0 is returned on success.
 */
int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles,
			   int count, struct drm_gem_object ***objs_out)
{
	int ret;
	u32 *handles;
	struct drm_gem_object **objs;

	if (!count)
		return 0;

	objs = kvmalloc_array(count, sizeof(struct drm_gem_object *),
			      GFP_KERNEL | __GFP_ZERO);
	if (!objs)
		return -ENOMEM;

	handles = kvmalloc_array(count, sizeof(u32), GFP_KERNEL);
	if (!handles) {
		ret = -ENOMEM;
		goto out;
	}

	if (copy_from_user(handles, bo_handles, count * sizeof(u32))) {
		ret = -EFAULT;
		DRM_DEBUG("Failed to copy in GEM handles\n");
		goto out;
	}

	ret = objects_lookup(filp, handles, count, objs);
	*objs_out = objs;

out:
	kvfree(handles);
	return ret;
}
EXPORT_SYMBOL(drm_gem_objects_lookup);
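
/*
 * An illustrative submit-ioctl sketch (the "args->bo_handles"/"bo_count"
 * uapi fields are hypothetical): a job-submission ioctl usually resolves
 * the user's handle array in one call, uses the objects, then drops every
 * reference:
 *
 *	struct drm_gem_object **objs;
 *	int i, ret;
 *
 *	ret = drm_gem_objects_lookup(file_priv,
 *				     u64_to_user_ptr(args->bo_handles),
 *				     args->bo_count, &objs);
 *	if (ret)
 *		return ret;
 *
 *	// ... queue the job against objs[0..bo_count-1] ...
 *
 *	for (i = 0; i < args->bo_count; i++)
 *		drm_gem_object_put_unlocked(objs[i]);
 *	kvfree(objs);
 */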

/**
 * drm_gem_object_lookup - look up a GEM object from its handle
 * @filp: DRM file private data
 * @handle: userspace handle
 *
 * Returns:
 *
 * A reference to the object named by the handle if such exists on @filp, NULL
 * otherwise.
 *
 * If looking up an array of handles, use drm_gem_objects_lookup().
 */
struct drm_gem_object *
drm_gem_object_lookup(struct drm_file *filp, u32 handle)
{
	struct drm_gem_object *obj = NULL;

	objects_lookup(filp, &handle, 1, &obj);
	return obj;
}
EXPORT_SYMBOL(drm_gem_object_lookup);

/**
 * drm_gem_dma_resv_wait - Wait on the shared and/or exclusive fences of a
 * GEM object's reservation object.
 * @filep: DRM file private data
 * @handle: userspace handle
 * @wait_all: if true, wait on all fences, else wait on just exclusive fence
 * @timeout: timeout value in jiffies or zero to return immediately
 *
 * Returns:
 *
 * Returns -ERESTARTSYS if interrupted, -ETIME if the wait timed out, or
 * 0 on success.
 */
long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle,
			   bool wait_all, unsigned long timeout)
{
	long ret;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(filep, handle);
	if (!obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", handle);
		return -EINVAL;
	}

	ret = dma_resv_wait_timeout_rcu(obj->resv, wait_all,
					true, timeout);
	if (ret == 0)
		ret = -ETIME;
	else if (ret > 0)
		ret = 0;

	drm_gem_object_put_unlocked(obj);

	return ret;
}
EXPORT_SYMBOL(drm_gem_dma_resv_wait);
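
/*
 * A hedged wait-ioctl sketch (the "drm_my_wait" uapi struct and its fields
 * are hypothetical): drivers that expose a BO-wait ioctl can delegate the
 * heavy lifting to this helper:
 *
 *	int my_wait_ioctl(struct drm_device *dev, void *data,
 *			  struct drm_file *file_priv)
 *	{
 *		struct drm_my_wait *args = data;
 *		unsigned long timeout = usecs_to_jiffies(args->timeout_us);
 *
 *		return drm_gem_dma_resv_wait(file_priv, args->handle,
 *					     true, timeout);
 *	}
 */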

/**
 * drm_gem_close_ioctl - implementation of the GEM_CLOSE ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Releases the handle to an mm object.
 */
int
drm_gem_close_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_close *args = data;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -EOPNOTSUPP;

	ret = drm_gem_handle_delete(file_priv, args->handle);

	return ret;
}

/**
 * drm_gem_flink_ioctl - implementation of the GEM_FLINK ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Create a global name for an object, returning the name.
 *
 * Note that the name does not hold a reference; when the object
 * is freed, the name goes away.
 */
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_flink *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -EOPNOTSUPP;

	obj = drm_gem_object_lookup(file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;

	mutex_lock(&dev->object_name_lock);
	/* prevent races with concurrent gem_close. */
	if (obj->handle_count == 0) {
		ret = -ENOENT;
		goto err;
	}

	if (!obj->name) {
		ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_KERNEL);
		if (ret < 0)
			goto err;

		obj->name = ret;
	}

	args->name = (uint64_t) obj->name;
	ret = 0;

err:
	mutex_unlock(&dev->object_name_lock);
	drm_gem_object_put_unlocked(obj);
	return ret;
}

/**
 * drm_gem_open_ioctl - implementation of the GEM_OPEN ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Open an object using the global name, returning a handle and the size.
 *
 * This handle (of course) holds a reference to the object, so the object
 * will not go away until the handle is deleted.
 */
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	struct drm_gem_open *args = data;
	struct drm_gem_object *obj;
	int ret;
	u32 handle;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -EOPNOTSUPP;

	mutex_lock(&dev->object_name_lock);
	obj = idr_find(&dev->object_name_idr, (int) args->name);
	if (obj) {
		drm_gem_object_get(obj);
	} else {
		mutex_unlock(&dev->object_name_lock);
		return -ENOENT;
	}

	/* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
	ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
	drm_gem_object_put_unlocked(obj);
	if (ret)
		return ret;

	args->handle = handle;
	args->size = obj->size;

	return 0;
}

/**
 * drm_gem_open - initializes GEM file-private structures at devnode open time
 * @dev: drm_device which is being opened by userspace
 * @file_private: drm file-private structure to set up
 *
 * Called at device open time, sets up the structure for handling refcounting
 * of mm objects.
 */
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
	idr_init_base(&file_private->object_idr, 1);
	spin_lock_init(&file_private->table_lock);
}

/**
 * drm_gem_release - release file-private GEM resources
 * @dev: drm_device which is being closed by userspace
 * @file_private: drm file-private structure to clean up
 *
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
	idr_for_each(&file_private->object_idr,
		     &drm_gem_object_release_handle, file_private);
	idr_destroy(&file_private->object_idr);
}

/**
 * drm_gem_object_release - release GEM buffer object resources
 * @obj: GEM buffer object
 *
 * This releases any structures and resources used by @obj and is the inverse
 * of drm_gem_object_init().
 */
void
drm_gem_object_release(struct drm_gem_object *obj)
{
	WARN_ON(obj->dma_buf);

	if (obj->filp)
		fput(obj->filp);

	dma_resv_fini(&obj->_resv);
	drm_gem_free_mmap_offset(obj);
}
EXPORT_SYMBOL(drm_gem_object_release);

/**
 * drm_gem_object_free - free a GEM object
 * @kref: kref of the object to free
 *
 * Called after the last reference to the object has been lost.
 * Must be called holding &drm_device.struct_mutex.
 *
 * Frees the object
 */
void
drm_gem_object_free(struct kref *kref)
{
	struct drm_gem_object *obj =
		container_of(kref, struct drm_gem_object, refcount);
	struct drm_device *dev = obj->dev;

	if (obj->funcs) {
		obj->funcs->free(obj);
	} else if (dev->driver->gem_free_object_unlocked) {
		dev->driver->gem_free_object_unlocked(obj);
	} else if (dev->driver->gem_free_object) {
		WARN_ON(!mutex_is_locked(&dev->struct_mutex));

		dev->driver->gem_free_object(obj);
	}
}
EXPORT_SYMBOL(drm_gem_object_free);

/**
 * drm_gem_object_put_unlocked - drop a GEM buffer object reference
 * @obj: GEM buffer object
 *
 * This releases a reference to @obj. Callers must not hold the
 * &drm_device.struct_mutex lock when calling this function.
 *
 * See also __drm_gem_object_put().
 */
void
drm_gem_object_put_unlocked(struct drm_gem_object *obj)
{
	struct drm_device *dev;

	if (!obj)
		return;

	dev = obj->dev;

	if (dev->driver->gem_free_object) {
		might_lock(&dev->struct_mutex);
		if (kref_put_mutex(&obj->refcount, drm_gem_object_free,
				   &dev->struct_mutex))
			mutex_unlock(&dev->struct_mutex);
	} else {
		kref_put(&obj->refcount, drm_gem_object_free);
	}
}
EXPORT_SYMBOL(drm_gem_object_put_unlocked);

/**
 * drm_gem_object_put - release a GEM buffer object reference
 * @obj: GEM buffer object
 *
 * This releases a reference to @obj. Callers must hold the
 * &drm_device.struct_mutex lock when calling this function, even when the
 * driver doesn't use &drm_device.struct_mutex for anything.
 *
 * For drivers not encumbered with legacy locking use
 * drm_gem_object_put_unlocked() instead.
 */
void
drm_gem_object_put(struct drm_gem_object *obj)
{
	if (obj) {
		WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

		kref_put(&obj->refcount, drm_gem_object_free);
	}
}
EXPORT_SYMBOL(drm_gem_object_put);

/**
 * drm_gem_vm_open - vma->ops->open implementation for GEM
 * @vma: VM area structure
 *
 * This function implements the #vm_operations_struct open() callback for GEM
 * drivers. This must be used together with drm_gem_vm_close().
 */
void drm_gem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_get(obj);
}
EXPORT_SYMBOL(drm_gem_vm_open);

/**
 * drm_gem_vm_close - vma->ops->close implementation for GEM
 * @vma: VM area structure
 *
 * This function implements the #vm_operations_struct close() callback for GEM
 * drivers. This must be used together with drm_gem_vm_open().
 */
void drm_gem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_put_unlocked(obj);
}
EXPORT_SYMBOL(drm_gem_vm_close);

/**
 * drm_gem_mmap_obj - memory map a GEM object
 * @obj: the GEM object to map
 * @obj_size: the object size to be mapped, in bytes
 * @vma: VMA for the area to be mapped
 *
 * Set up the VMA to prepare mapping of the GEM object using the gem_vm_ops
 * provided by the driver. Depending on their requirements, drivers can either
 * provide a fault handler in their gem_vm_ops (in which case any accesses to
 * the object will be trapped, to perform migration, GTT binding, surface
 * register allocation, or performance monitoring), or mmap the buffer memory
 * synchronously after calling drm_gem_mmap_obj.
 *
 * This function is mainly intended to implement the DMABUF mmap operation, when
 * the GEM object is not looked up based on its fake offset. To implement the
 * DRM mmap operation, drivers should use the drm_gem_mmap() function.
 *
 * drm_gem_mmap_obj() assumes the user is granted access to the buffer while
 * drm_gem_mmap() prevents unprivileged users from mapping random objects. So
 * callers must verify access restrictions before calling this helper.
 *
 * Return 0 on success or -EINVAL if the object size is smaller than the VMA
 * size, or if no gem_vm_ops are provided.
 */
int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
		     struct vm_area_struct *vma)
{
	struct drm_device *dev = obj->dev;

	/* Check for valid size. */
	if (obj_size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (obj->funcs && obj->funcs->vm_ops)
		vma->vm_ops = obj->funcs->vm_ops;
	else if (dev->driver->gem_vm_ops)
		vma->vm_ops = dev->driver->gem_vm_ops;
	else
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_private_data = obj;
	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);

	/* Take a ref for this mapping of the object, so that the fault
	 * handler can dereference the mmap offset's pointer to the object.
	 * This reference is cleaned up by the corresponding vm_close
	 * (which should happen whether the vma was created by this call, or
	 * by a vm_open due to mremap or partial unmap or whatever).
	 */
	drm_gem_object_get(obj);

	return 0;
}
EXPORT_SYMBOL(drm_gem_mmap_obj);

/**
 * drm_gem_mmap - memory map routine for GEM objects
 * @filp: DRM file pointer
 * @vma: VMA for the area to be mapped
 *
 * If a driver supports GEM object mapping, mmap calls on the DRM file
 * descriptor will end up here.
 *
 * Look up the GEM object based on the offset passed in (vma->vm_pgoff will
 * contain the fake offset we created when the GTT map ioctl was called on
 * the object) and map it with a call to drm_gem_mmap_obj().
 *
 * If the caller is not granted access to the buffer object, the mmap will fail
 * with EACCES. Please see the vma manager for more information.
 */
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_gem_object *obj = NULL;
	struct drm_vma_offset_node *node;
	int ret;

	if (drm_dev_is_unplugged(dev))
		return -ENODEV;

	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
						  vma->vm_pgoff,
						  vma_pages(vma));
	if (likely(node)) {
		obj = container_of(node, struct drm_gem_object, vma_node);
		/*
		 * When the object is being freed, after it hits 0-refcnt it
		 * proceeds to tear down the object. In the process it will
		 * attempt to remove the VMA offset and so acquire this
		 * mgr->vm_lock. Therefore if we find an object with a 0-refcnt
		 * that matches our range, we know it is in the process of being
		 * destroyed and will be freed as soon as we release the lock -
		 * so we have to check for the 0-refcnted object and treat it as
		 * invalid.
		 */
		if (!kref_get_unless_zero(&obj->refcount))
			obj = NULL;
	}
	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);

	if (!obj)
		return -EINVAL;

	if (!drm_vma_node_is_allowed(node, priv)) {
		drm_gem_object_put_unlocked(obj);
		return -EACCES;
	}

	if (node->readonly) {
		if (vma->vm_flags & VM_WRITE) {
			drm_gem_object_put_unlocked(obj);
			return -EINVAL;
		}

		vma->vm_flags &= ~VM_MAYWRITE;
	}

	ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT,
			       vma);

	drm_gem_object_put_unlocked(obj);

	return ret;
}
EXPORT_SYMBOL(drm_gem_mmap);
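
/*
 * A hedged wiring sketch: GEM-based drivers typically point their
 * file_operations .mmap at this routine (the "my_driver_fops" name is
 * hypothetical; many drivers instead get an equivalent table from the
 * DEFINE_DRM_GEM_FOPS() macro in <drm/drm_gem.h>):
 *
 *	static const struct file_operations my_driver_fops = {
 *		.owner		= THIS_MODULE,
 *		.open		= drm_open,
 *		.release	= drm_release,
 *		.unlocked_ioctl	= drm_ioctl,
 *		.mmap		= drm_gem_mmap,
 *		.poll		= drm_poll,
 *		.read		= drm_read,
 *	};
 */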

void drm_gem_print_info(struct drm_printer *p, unsigned int indent,
			const struct drm_gem_object *obj)
{
	drm_printf_indent(p, indent, "name=%d\n", obj->name);
	drm_printf_indent(p, indent, "refcount=%u\n",
			  kref_read(&obj->refcount));
	drm_printf_indent(p, indent, "start=%08lx\n",
			  drm_vma_node_start(&obj->vma_node));
	drm_printf_indent(p, indent, "size=%zu\n", obj->size);
	drm_printf_indent(p, indent, "imported=%s\n",
			  obj->import_attach ? "yes" : "no");

	if (obj->funcs && obj->funcs->print_info)
		obj->funcs->print_info(p, indent, obj);
	else if (obj->dev->driver->gem_print_info)
		obj->dev->driver->gem_print_info(p, indent, obj);
}

int drm_gem_pin(struct drm_gem_object *obj)
{
	if (obj->funcs && obj->funcs->pin)
		return obj->funcs->pin(obj);
	else if (obj->dev->driver->gem_prime_pin)
		return obj->dev->driver->gem_prime_pin(obj);
	else
		return 0;
}

void drm_gem_unpin(struct drm_gem_object *obj)
{
	if (obj->funcs && obj->funcs->unpin)
		obj->funcs->unpin(obj);
	else if (obj->dev->driver->gem_prime_unpin)
		obj->dev->driver->gem_prime_unpin(obj);
}

void *drm_gem_vmap(struct drm_gem_object *obj)
{
	void *vaddr;

	if (obj->funcs && obj->funcs->vmap)
		vaddr = obj->funcs->vmap(obj);
	else if (obj->dev->driver->gem_prime_vmap)
		vaddr = obj->dev->driver->gem_prime_vmap(obj);
	else
		vaddr = ERR_PTR(-EOPNOTSUPP);

	if (!vaddr)
		vaddr = ERR_PTR(-ENOMEM);

	return vaddr;
}

void drm_gem_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	if (!vaddr)
		return;

	if (obj->funcs && obj->funcs->vunmap)
		obj->funcs->vunmap(obj, vaddr);
	else if (obj->dev->driver->gem_prime_vunmap)
		obj->dev->driver->gem_prime_vunmap(obj, vaddr);
}

/**
 * drm_gem_lock_reservations - Sets up the ww context and acquires
 * the lock on an array of GEM objects.
 *
 * Once you've locked your reservations, you'll want to set up space
 * for your shared fences (if applicable), submit your job, then
 * drm_gem_unlock_reservations().
 *
 * @objs: drm_gem_objects to lock
 * @count: Number of objects in @objs
 * @acquire_ctx: struct ww_acquire_ctx that will be initialized as
 * part of tracking this set of locked reservations.
 */
int
drm_gem_lock_reservations(struct drm_gem_object **objs, int count,
			  struct ww_acquire_ctx *acquire_ctx)
{
	int contended = -1;
	int i, ret;

	ww_acquire_init(acquire_ctx, &reservation_ww_class);

retry:
	if (contended != -1) {
		struct drm_gem_object *obj = objs[contended];

		ret = dma_resv_lock_slow_interruptible(obj->resv,
						       acquire_ctx);
		if (ret) {
			ww_acquire_done(acquire_ctx);
			return ret;
		}
	}

	for (i = 0; i < count; i++) {
		if (i == contended)
			continue;

		ret = dma_resv_lock_interruptible(objs[i]->resv,
						  acquire_ctx);
		if (ret) {
			int j;

			for (j = 0; j < i; j++)
				dma_resv_unlock(objs[j]->resv);

			if (contended != -1 && contended >= i)
				dma_resv_unlock(objs[contended]->resv);

			if (ret == -EDEADLK) {
				contended = i;
				goto retry;
			}

			ww_acquire_done(acquire_ctx);
			return ret;
		}
	}

	ww_acquire_done(acquire_ctx);

	return 0;
}
EXPORT_SYMBOL(drm_gem_lock_reservations);

void
drm_gem_unlock_reservations(struct drm_gem_object **objs, int count,
			    struct ww_acquire_ctx *acquire_ctx)
{
	int i;

	for (i = 0; i < count; i++)
		dma_resv_unlock(objs[i]->resv);

	ww_acquire_fini(acquire_ctx);
}
EXPORT_SYMBOL(drm_gem_unlock_reservations);
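
/*
 * A minimal lock/submit/unlock sketch under stated assumptions (the
 * "my_push_job" step and fence bookkeeping are hypothetical): the ww
 * context lets the whole BO set be locked deadlock-free, and the unlock
 * must use the same context:
 *
 *	struct ww_acquire_ctx ctx;
 *	int ret;
 *
 *	ret = drm_gem_lock_reservations(objs, count, &ctx);
 *	if (ret)
 *		return ret;
 *
 *	// ... reserve fence slots, add fences to objs[i]->resv,
 *	//     then my_push_job(...) ...
 *
 *	drm_gem_unlock_reservations(objs, count, &ctx);
 */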

/**
 * drm_gem_fence_array_add - Adds the fence to an array of fences to be
 * waited on, deduplicating fences from the same context.
 *
 * @fence_array: array of dma_fence * for the job to block on.
 * @fence: the dma_fence to add to the list of dependencies.
 *
 * Returns:
 * 0 on success, or an error on failing to expand the array.
 */
int drm_gem_fence_array_add(struct xarray *fence_array,
			    struct dma_fence *fence)
{
	struct dma_fence *entry;
	unsigned long index;
	u32 id = 0;
	int ret;

	if (!fence)
		return 0;

	/* Deduplicate if we already depend on a fence from the same context.
	 * This lets the size of the array of deps scale with the number of
	 * engines involved, rather than the number of BOs.
	 */
	xa_for_each(fence_array, index, entry) {
		if (entry->context != fence->context)
			continue;

		if (dma_fence_is_later(fence, entry)) {
			dma_fence_put(entry);
			xa_store(fence_array, index, fence, GFP_KERNEL);
		} else {
			dma_fence_put(fence);
		}
		return 0;
	}

	ret = xa_alloc(fence_array, &id, fence, xa_limit_32b, GFP_KERNEL);
	if (ret != 0)
		dma_fence_put(fence);

	return ret;
}
EXPORT_SYMBOL(drm_gem_fence_array_add);

/**
 * drm_gem_fence_array_add_implicit - Adds the implicit dependencies tracked
 * in the GEM object's reservation object to an array of dma_fences for use in
 * scheduling a rendering job.
 *
 * This should be called after drm_gem_lock_reservations() on your array of
 * GEM objects used in the job but before updating the reservations with your
 * own fences.
 *
 * @fence_array: array of dma_fence * for the job to block on.
 * @obj: the gem object to add new dependencies from.
 * @write: whether the job might write the object (so we need to depend on
 * shared fences in the reservation object).
 */
int drm_gem_fence_array_add_implicit(struct xarray *fence_array,
				     struct drm_gem_object *obj,
				     bool write)
{
	int ret;
	struct dma_fence **fences;
	unsigned int i, fence_count;

	if (!write) {
		struct dma_fence *fence =
			dma_resv_get_excl_rcu(obj->resv);

		return drm_gem_fence_array_add(fence_array, fence);
	}

	ret = dma_resv_get_fences_rcu(obj->resv, NULL,
				      &fence_count, &fences);
	if (ret || !fence_count)
		return ret;

	for (i = 0; i < fence_count; i++) {
		ret = drm_gem_fence_array_add(fence_array, fences[i]);
		if (ret)
			break;
	}

	for (; i < fence_count; i++)
		dma_fence_put(fences[i]);
	kfree(fences);
	return ret;
}
EXPORT_SYMBOL(drm_gem_fence_array_add_implicit);
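
/*
 * A hedged dependency-gathering sketch (the per-BO "write[i]" flags are
 * hypothetical; the xarray must be initialized with XA_FLAGS_ALLOC for
 * xa_alloc() in drm_gem_fence_array_add() to work):
 *
 *	struct xarray deps;
 *	int i, ret = 0;
 *
 *	xa_init_flags(&deps, XA_FLAGS_ALLOC);
 *
 *	for (i = 0; i < count; i++) {
 *		ret = drm_gem_fence_array_add_implicit(&deps, objs[i],
 *						       write[i]);
 *		if (ret)
 *			break;
 *	}
 *
 *	// The scheduler then waits on every fence in "deps" before running
 *	// the job, dropping each reference with dma_fence_put().
 */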