/*
 * videobuf2-vmalloc.c - vmalloc memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/io.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/refcount.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-vmalloc.h>
#include <media/videobuf2-memops.h>

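/*
 * Per-buffer state shared by all three buffer types (MMAP, USERPTR and
 * DMABUF). Only the fields relevant to the type in use are populated:
 * @vec is set for USERPTR buffers, @dbuf for imported DMABUF buffers,
 * and @handler/@refcount track the mmap() users of MMAP buffers.
 */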
struct vb2_vmalloc_buf {
	void				*vaddr;
	struct frame_vector		*vec;
	enum dma_data_direction		dma_dir;
	unsigned long			size;
	refcount_t			refcount;
	struct vb2_vmarea_handler	handler;
	struct dma_buf			*dbuf;
};

static void vb2_vmalloc_put(void *buf_priv);

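/*
 * MMAP support: allocate the buffer with vmalloc_user() so the pages are
 * zeroed and can later be mapped into userspace with
 * remap_vmalloc_range(). The buffer starts with a refcount of one, held
 * by the vb2 core; each userspace mapping takes an extra reference via
 * the vm_area handler set up below.
 */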
static void *vb2_vmalloc_alloc(struct device *dev, unsigned long attrs,
			       unsigned long size, enum dma_data_direction dma_dir,
			       gfp_t gfp_flags)
{
	struct vb2_vmalloc_buf *buf;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL | gfp_flags);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->size = size;
	buf->vaddr = vmalloc_user(buf->size);
	if (!buf->vaddr) {
		pr_debug("vmalloc of size %ld failed\n", buf->size);
		kfree(buf);
		return ERR_PTR(-ENOMEM);
	}

	buf->dma_dir = dma_dir;
	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_vmalloc_put;
	buf->handler.arg = buf;

	refcount_set(&buf->refcount, 1);
	return buf;
}

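/*
 * Drop one reference; the vmalloc area and the bookkeeping structure are
 * freed once the last user (the vb2 core, an outstanding mmap() or a
 * DMABUF export) is gone.
 */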
static void vb2_vmalloc_put(void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;

	if (refcount_dec_and_test(&buf->refcount)) {
		vfree(buf->vaddr);
		kfree(buf);
	}
}

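/*
 * USERPTR support: pin the user pages with a frame vector and build a
 * kernel mapping for them. Normal pages are mapped with vm_map_ram();
 * PFN-only ranges (memory without struct pages) are accepted only when
 * physically contiguous, in which case they are ioremap()ed instead.
 */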
static void *vb2_vmalloc_get_userptr(struct device *dev, unsigned long vaddr,
				     unsigned long size,
				     enum dma_data_direction dma_dir)
{
	struct vb2_vmalloc_buf *buf;
	struct frame_vector *vec;
	int n_pages, offset, i;
	int ret = -ENOMEM;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dma_dir = dma_dir;
	offset = vaddr & ~PAGE_MASK;
	buf->size = size;
	vec = vb2_create_framevec(vaddr, size);
	if (IS_ERR(vec)) {
		ret = PTR_ERR(vec);
		goto fail_pfnvec_create;
	}
	buf->vec = vec;
	n_pages = frame_vector_count(vec);
	if (frame_vector_to_pages(vec) < 0) {
		unsigned long *nums = frame_vector_pfns(vec);

		/*
		 * We cannot get page pointers for these pfns. Check memory is
		 * physically contiguous and use direct mapping.
		 */
		for (i = 1; i < n_pages; i++)
			if (nums[i - 1] + 1 != nums[i])
				goto fail_map;
		buf->vaddr = (__force void *)
			ioremap(__pfn_to_phys(nums[0]), size + offset);
	} else {
		buf->vaddr = vm_map_ram(frame_vector_pages(vec), n_pages, -1);
	}

	if (!buf->vaddr)
		goto fail_map;
	buf->vaddr += offset;
	return buf;

fail_map:
	vb2_destroy_framevec(vec);
fail_pfnvec_create:
	kfree(buf);

	return ERR_PTR(ret);
}

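/*
 * Undo vb2_vmalloc_get_userptr(): tear down the kernel mapping, mark the
 * pages dirty if the device may have written to them, and unpin them.
 */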
static void vb2_vmalloc_put_userptr(void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;
	unsigned long vaddr = (unsigned long)buf->vaddr & PAGE_MASK;
	unsigned int i;
	struct page **pages;
	unsigned int n_pages;

	if (!buf->vec->is_pfns) {
		n_pages = frame_vector_count(buf->vec);
		pages = frame_vector_pages(buf->vec);
		if (vaddr)
			vm_unmap_ram((void *)vaddr, n_pages);
		if (buf->dma_dir == DMA_FROM_DEVICE ||
		    buf->dma_dir == DMA_BIDIRECTIONAL)
			for (i = 0; i < n_pages; i++)
				set_page_dirty_lock(pages[i]);
	} else {
		iounmap((__force void __iomem *)buf->vaddr);
	}
	vb2_destroy_framevec(buf->vec);
	kfree(buf);
}

static void *vb2_vmalloc_vaddr(void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;

	if (!buf->vaddr) {
		pr_err("Address of an unallocated plane requested or cannot map user pointer\n");
		return NULL;
	}

	return buf->vaddr;
}

static unsigned int vb2_vmalloc_num_users(void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;

	return refcount_read(&buf->refcount);
}

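/*
 * Map an MMAP buffer into userspace. remap_vmalloc_range() inserts all
 * pages of the vmalloc area into the VMA; the common vm_area handler
 * then keeps the buffer alive for as long as any mapping exists.
 */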
static int vb2_vmalloc_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_vmalloc_buf *buf = buf_priv;
	int ret;

	if (!buf) {
		pr_err("No memory to map\n");
		return -EINVAL;
	}

	ret = remap_vmalloc_range(vma, buf->vaddr, 0);
	if (ret) {
		pr_err("Remapping vmalloc memory, error: %d\n", ret);
		return ret;
	}

	/*
	 * Make sure that vm_areas for 2 buffers won't be merged together
	 */
	vma->vm_flags |= VM_DONTEXPAND;

	/*
	 * Use common vm_area operations to track buffer refcount.
	 */
	vma->vm_private_data = &buf->handler;
	vma->vm_ops = &vb2_common_vm_ops;

	vma->vm_ops->open(vma);

	return 0;
}

#ifdef CONFIG_HAS_DMA
/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

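/*
 * One scatterlist per attachment: the table is built from the vmalloc
 * pages at attach time and (re)mapped to the attached device on demand.
 * @dma_dir caches the direction of the current mapping, with DMA_NONE
 * meaning "not mapped".
 */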
struct vb2_vmalloc_attachment {
	struct sg_table sgt;
	enum dma_data_direction dma_dir;
};

static int vb2_vmalloc_dmabuf_ops_attach(struct dma_buf *dbuf,
					 struct dma_buf_attachment *dbuf_attach)
{
	struct vb2_vmalloc_attachment *attach;
	struct vb2_vmalloc_buf *buf = dbuf->priv;
	int num_pages = PAGE_ALIGN(buf->size) / PAGE_SIZE;
	struct sg_table *sgt;
	struct scatterlist *sg;
	void *vaddr = buf->vaddr;
	int ret;
	int i;

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return -ENOMEM;

	sgt = &attach->sgt;
	ret = sg_alloc_table(sgt, num_pages, GFP_KERNEL);
	if (ret) {
		kfree(attach);
		return ret;
	}
	for_each_sgtable_sg(sgt, sg, i) {
		struct page *page = vmalloc_to_page(vaddr);

		if (!page) {
			sg_free_table(sgt);
			kfree(attach);
			return -ENOMEM;
		}
		sg_set_page(sg, page, PAGE_SIZE, 0);
		vaddr += PAGE_SIZE;
	}

	attach->dma_dir = DMA_NONE;
	dbuf_attach->priv = attach;
	return 0;
}

static void vb2_vmalloc_dmabuf_ops_detach(struct dma_buf *dbuf,
					  struct dma_buf_attachment *db_attach)
{
	struct vb2_vmalloc_attachment *attach = db_attach->priv;
	struct sg_table *sgt;

	if (!attach)
		return;

	sgt = &attach->sgt;

	/* release the scatterlist cache */
	if (attach->dma_dir != DMA_NONE)
		dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir, 0);
	sg_free_table(sgt);
	kfree(attach);
	db_attach->priv = NULL;
}

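/*
 * Map the attachment's scatterlist for DMA. A previous mapping is reused
 * if the direction matches; otherwise it is torn down and the table is
 * remapped with the new direction, serialized by the dmabuf lock.
 */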
static struct sg_table *vb2_vmalloc_dmabuf_ops_map(
	struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
{
	struct vb2_vmalloc_attachment *attach = db_attach->priv;
	/* stealing dmabuf mutex to serialize map/unmap operations */
	struct mutex *lock = &db_attach->dmabuf->lock;
	struct sg_table *sgt;

	mutex_lock(lock);

	sgt = &attach->sgt;
	/* return previously mapped sg table */
	if (attach->dma_dir == dma_dir) {
		mutex_unlock(lock);
		return sgt;
	}

	/* release any previous cache */
	if (attach->dma_dir != DMA_NONE) {
		dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir, 0);
		attach->dma_dir = DMA_NONE;
	}

	/* mapping to the client with new direction */
	if (dma_map_sgtable(db_attach->dev, sgt, dma_dir, 0)) {
		pr_err("failed to map scatterlist\n");
		mutex_unlock(lock);
		return ERR_PTR(-EIO);
	}

	attach->dma_dir = dma_dir;

	mutex_unlock(lock);

	return sgt;
}

static void vb2_vmalloc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
	struct sg_table *sgt, enum dma_data_direction dma_dir)
{
	/* nothing to be done here */
}

static void vb2_vmalloc_dmabuf_ops_release(struct dma_buf *dbuf)
{
	/* drop reference obtained in vb2_vmalloc_get_dmabuf */
	vb2_vmalloc_put(dbuf->priv);
}

static void *vb2_vmalloc_dmabuf_ops_vmap(struct dma_buf *dbuf)
{
	struct vb2_vmalloc_buf *buf = dbuf->priv;

	return buf->vaddr;
}

static int vb2_vmalloc_dmabuf_ops_mmap(struct dma_buf *dbuf,
	struct vm_area_struct *vma)
{
	return vb2_vmalloc_mmap(dbuf->priv, vma);
}

static const struct dma_buf_ops vb2_vmalloc_dmabuf_ops = {
	.attach = vb2_vmalloc_dmabuf_ops_attach,
	.detach = vb2_vmalloc_dmabuf_ops_detach,
	.map_dma_buf = vb2_vmalloc_dmabuf_ops_map,
	.unmap_dma_buf = vb2_vmalloc_dmabuf_ops_unmap,
	.vmap = vb2_vmalloc_dmabuf_ops_vmap,
	.mmap = vb2_vmalloc_dmabuf_ops_mmap,
	.release = vb2_vmalloc_dmabuf_ops_release,
};

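/*
 * Export an MMAP buffer as a dmabuf. The export holds an extra reference
 * on the vb2 buffer, dropped again from the dmabuf release op above.
 */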
static struct dma_buf *vb2_vmalloc_get_dmabuf(void *buf_priv, unsigned long flags)
{
	struct vb2_vmalloc_buf *buf = buf_priv;
	struct dma_buf *dbuf;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &vb2_vmalloc_dmabuf_ops;
	exp_info.size = buf->size;
	exp_info.flags = flags;
	exp_info.priv = buf;

	if (WARN_ON(!buf->vaddr))
		return NULL;

	dbuf = dma_buf_export(&exp_info);
	if (IS_ERR(dbuf))
		return NULL;

	/* dmabuf keeps reference to vb2 buffer */
	refcount_inc(&buf->refcount);

	return dbuf;
}
#endif /* CONFIG_HAS_DMA */


/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/

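/*
 * Importer side: attach_dmabuf only records the dmabuf; the actual
 * kernel mapping is created lazily here via dma_buf_vmap() and torn down
 * again in unmap_dmabuf.
 */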
static int vb2_vmalloc_map_dmabuf(void *mem_priv)
{
	struct vb2_vmalloc_buf *buf = mem_priv;

	buf->vaddr = dma_buf_vmap(buf->dbuf);

	return buf->vaddr ? 0 : -EFAULT;
}

static void vb2_vmalloc_unmap_dmabuf(void *mem_priv)
{
	struct vb2_vmalloc_buf *buf = mem_priv;

	dma_buf_vunmap(buf->dbuf, buf->vaddr);
	buf->vaddr = NULL;
}

static void vb2_vmalloc_detach_dmabuf(void *mem_priv)
{
	struct vb2_vmalloc_buf *buf = mem_priv;

	if (buf->vaddr)
		dma_buf_vunmap(buf->dbuf, buf->vaddr);

	kfree(buf);
}

static void *vb2_vmalloc_attach_dmabuf(struct device *dev, struct dma_buf *dbuf,
	unsigned long size, enum dma_data_direction dma_dir)
{
	struct vb2_vmalloc_buf *buf;

	if (dbuf->size < size)
		return ERR_PTR(-EFAULT);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dbuf = dbuf;
	buf->dma_dir = dma_dir;
	buf->size = size;

	return buf;
}

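/*
 * vb2_vmalloc_memops ties the helpers above into the vb2 core. Drivers
 * that access buffer memory with the CPU rather than with device DMA
 * (USB drivers, for example) select this allocator by pointing
 * vb2_queue->mem_ops at it.
 */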
const struct vb2_mem_ops vb2_vmalloc_memops = {
	.alloc		= vb2_vmalloc_alloc,
	.put		= vb2_vmalloc_put,
	.get_userptr	= vb2_vmalloc_get_userptr,
	.put_userptr	= vb2_vmalloc_put_userptr,
#ifdef CONFIG_HAS_DMA
	.get_dmabuf	= vb2_vmalloc_get_dmabuf,
#endif
	.map_dmabuf	= vb2_vmalloc_map_dmabuf,
	.unmap_dmabuf	= vb2_vmalloc_unmap_dmabuf,
	.attach_dmabuf	= vb2_vmalloc_attach_dmabuf,
	.detach_dmabuf	= vb2_vmalloc_detach_dmabuf,
	.vaddr		= vb2_vmalloc_vaddr,
	.mmap		= vb2_vmalloc_mmap,
	.num_users	= vb2_vmalloc_num_users,
};
EXPORT_SYMBOL_GPL(vb2_vmalloc_memops);
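
/*
 * A minimal usage sketch (not part of this file): a driver selecting
 * this allocator sets its queue up roughly as below. The names my_dev,
 * my_buffer and my_queue_ops are hypothetical.
 *
 *	struct vb2_queue *q = &my_dev->queue;
 *	int ret;
 *
 *	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
 *	q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ;
 *	q->buf_struct_size = sizeof(struct my_buffer);
 *	q->ops = &my_queue_ops;
 *	q->mem_ops = &vb2_vmalloc_memops;
 *	ret = vb2_queue_init(q);
 */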

MODULE_DESCRIPTION("vmalloc memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");