// SPDX-License-Identifier: GPL-2.0
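/*
 * DMA-BUF heaps helper code: shared buffer bookkeeping, kernel vmap
 * handling, and the dma_buf_ops plumbing used by the individual heap
 * drivers.
 */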
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <uapi/linux/dma-heap.h>

#include "heap-helpers.h"

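/*
 * Initialize the bookkeeping fields of a heap_helper_buffer and record
 * the heap-provided free() callback that runs when the backing dma-buf
 * is released.
 */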
void init_heap_helper_buffer(struct heap_helper_buffer *buffer,
                             void (*free)(struct heap_helper_buffer *))
{
        buffer->priv_virt = NULL;
        mutex_init(&buffer->lock);
        buffer->vmap_cnt = 0;
        buffer->vaddr = NULL;
        buffer->pagecount = 0;
        buffer->pages = NULL;
        INIT_LIST_HEAD(&buffer->attachments);
        buffer->free = free;
}

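/*
 * Export the buffer as a new dma-buf backed by heap_helper_ops; fd_flags
 * are passed through to the dma-buf core as the export flags.
 */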
struct dma_buf *heap_helper_export_dmabuf(struct heap_helper_buffer *buffer,
                                          int fd_flags)
{
        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

        exp_info.ops = &heap_helper_ops;
        exp_info.size = buffer->size;
        exp_info.flags = fd_flags;
        exp_info.priv = buffer;

        return dma_buf_export(&exp_info);
}

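/*
 * Usage sketch (hypothetical heap driver, not part of this file): a
 * heap's allocate op would typically fill in a heap_helper_buffer and
 * then export it, e.g.:
 *
 *      init_heap_helper_buffer(buffer, my_heap_free);
 *      ... populate buffer->pages, buffer->pagecount and buffer->size ...
 *      dmabuf = heap_helper_export_dmabuf(buffer, fd_flags);
 *      if (IS_ERR(dmabuf))
 *              return PTR_ERR(dmabuf);
 *      fd = dma_buf_fd(dmabuf, fd_flags);
 *
 * my_heap_free here is an assumed callback name, not a symbol defined by
 * these helpers.
 */

/* Map the buffer's pages into a contiguous kernel virtual address range. */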
static void *dma_heap_map_kernel(struct heap_helper_buffer *buffer)
{
        void *vaddr;

        vaddr = vmap(buffer->pages, buffer->pagecount, VM_MAP, PAGE_KERNEL);
        if (!vaddr)
                return ERR_PTR(-ENOMEM);

        return vaddr;
}

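/*
 * Final teardown, called when the last dma-buf reference is dropped: warn
 * if a kernel mapping is still live (unbalanced vmap/vunmap), then hand
 * the buffer back to the owning heap's free() callback.
 */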
static void dma_heap_buffer_destroy(struct heap_helper_buffer *buffer)
{
        if (buffer->vmap_cnt > 0) {
                WARN(1, "%s: buffer still mapped in the kernel\n", __func__);
                vunmap(buffer->vaddr);
        }

        buffer->free(buffer);
}

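/*
 * Take a reference on the kernel mapping, creating it on first use.
 * Callers must hold buffer->lock.
 */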
static void *dma_heap_buffer_vmap_get(struct heap_helper_buffer *buffer)
{
        void *vaddr;

        if (buffer->vmap_cnt) {
                buffer->vmap_cnt++;
                return buffer->vaddr;
        }
        vaddr = dma_heap_map_kernel(buffer);
        if (IS_ERR(vaddr))
                return vaddr;
        buffer->vaddr = vaddr;
        buffer->vmap_cnt++;
        return vaddr;
}

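/*
 * Drop a reference on the kernel mapping and tear it down when the count
 * reaches zero. Callers must hold buffer->lock.
 */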
static void dma_heap_buffer_vmap_put(struct heap_helper_buffer *buffer)
{
        if (!--buffer->vmap_cnt) {
                vunmap(buffer->vaddr);
                buffer->vaddr = NULL;
        }
}

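/* Per-device attachment state: a scatterlist view of the buffer's pages. */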
struct dma_heaps_attachment {
        struct device *dev;
        struct sg_table table;
        struct list_head list;
};

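/*
 * Build an sg_table covering the buffer's pages for the attaching device
 * and track the attachment so CPU-access syncs can reach every device.
 */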
static int dma_heap_attach(struct dma_buf *dmabuf,
                           struct dma_buf_attachment *attachment)
{
        struct dma_heaps_attachment *a;
        struct heap_helper_buffer *buffer = dmabuf->priv;
        int ret;

        a = kzalloc(sizeof(*a), GFP_KERNEL);
        if (!a)
                return -ENOMEM;

        ret = sg_alloc_table_from_pages(&a->table, buffer->pages,
                                        buffer->pagecount, 0,
                                        buffer->pagecount << PAGE_SHIFT,
                                        GFP_KERNEL);
        if (ret) {
                kfree(a);
                return ret;
        }

        a->dev = attachment->dev;
        INIT_LIST_HEAD(&a->list);

        attachment->priv = a;

        mutex_lock(&buffer->lock);
        list_add(&a->list, &buffer->attachments);
        mutex_unlock(&buffer->lock);

        return 0;
}

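/* Undo dma_heap_attach(): unlink the attachment and free its sg_table. */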
static void dma_heap_detach(struct dma_buf *dmabuf,
                            struct dma_buf_attachment *attachment)
{
        struct dma_heaps_attachment *a = attachment->priv;
        struct heap_helper_buffer *buffer = dmabuf->priv;

        mutex_lock(&buffer->lock);
        list_del(&a->list);
        mutex_unlock(&buffer->lock);

        sg_free_table(&a->table);
        kfree(a);
}

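/* Map the attachment's sg_table for DMA in the requested direction. */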
static
struct sg_table *dma_heap_map_dma_buf(struct dma_buf_attachment *attachment,
                                      enum dma_data_direction direction)
{
        struct dma_heaps_attachment *a = attachment->priv;
        struct sg_table *table = &a->table;
        int ret;

        ret = dma_map_sgtable(attachment->dev, table, direction, 0);
        if (ret)
                table = ERR_PTR(ret);
        return table;
}

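/* Unmap a table previously mapped by dma_heap_map_dma_buf(). */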
static void dma_heap_unmap_dma_buf(struct dma_buf_attachment *attachment,
                                   struct sg_table *table,
                                   enum dma_data_direction direction)
{
        dma_unmap_sgtable(attachment->dev, table, direction, 0);
}

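/*
 * mmap fault handler: return the backing page for the faulting offset,
 * taking a page reference for the mapping.
 */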
static vm_fault_t dma_heap_vm_fault(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;
        struct heap_helper_buffer *buffer = vma->vm_private_data;

        /* pgoff == pagecount is already one past the last page. */
        if (vmf->pgoff >= buffer->pagecount)
                return VM_FAULT_SIGBUS;

        vmf->page = buffer->pages[vmf->pgoff];
        get_page(vmf->page);

        return 0;
}

static const struct vm_operations_struct dma_heap_vm_ops = {
        .fault = dma_heap_vm_fault,
};

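/* Set up fault-based mmap; private (non-shared) mappings are rejected. */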
static int dma_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
        struct heap_helper_buffer *buffer = dmabuf->priv;

        if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)
                return -EINVAL;

        vma->vm_ops = &dma_heap_vm_ops;
        vma->vm_private_data = buffer;

        return 0;
}

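/* Called once the last reference to the dma-buf is gone. */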
static void dma_heap_dma_buf_release(struct dma_buf *dmabuf)
{
        struct heap_helper_buffer *buffer = dmabuf->priv;

        dma_heap_buffer_destroy(buffer);
}

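/*
 * Prepare for CPU access: invalidate the kernel vmap alias, if any, and
 * sync the pages for the CPU on every attached device.
 */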
static int dma_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
                                             enum dma_data_direction direction)
{
        struct heap_helper_buffer *buffer = dmabuf->priv;
        struct dma_heaps_attachment *a;

        mutex_lock(&buffer->lock);

        if (buffer->vmap_cnt)
                invalidate_kernel_vmap_range(buffer->vaddr, buffer->size);

        /*
         * Use the sgtable sync helper so orig_nents entries are synced,
         * matching the earlier dma_map_sgtable() call.
         */
        list_for_each_entry(a, &buffer->attachments, list)
                dma_sync_sgtable_for_cpu(a->dev, &a->table, direction);
        mutex_unlock(&buffer->lock);

        return 0;
}

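/*
 * Finish CPU access: flush the kernel vmap alias, if any, and sync the
 * pages back to every attached device.
 */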
static int dma_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
                                           enum dma_data_direction direction)
{
        struct heap_helper_buffer *buffer = dmabuf->priv;
        struct dma_heaps_attachment *a;

        mutex_lock(&buffer->lock);

        if (buffer->vmap_cnt)
                flush_kernel_vmap_range(buffer->vaddr, buffer->size);

        /* As in begin_cpu_access, sync the whole table via the helper. */
        list_for_each_entry(a, &buffer->attachments, list)
                dma_sync_sgtable_for_device(a->dev, &a->table, direction);
        mutex_unlock(&buffer->lock);

        return 0;
}

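/* dma-buf vmap hook: grab a counted kernel mapping under the buffer lock. */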
static void *dma_heap_dma_buf_vmap(struct dma_buf *dmabuf)
{
        struct heap_helper_buffer *buffer = dmabuf->priv;
        void *vaddr;

        mutex_lock(&buffer->lock);
        vaddr = dma_heap_buffer_vmap_get(buffer);
        mutex_unlock(&buffer->lock);

        return vaddr;
}

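/* dma-buf vunmap hook: drop the counted kernel mapping under the buffer lock. */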
static void dma_heap_dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
{
        struct heap_helper_buffer *buffer = dmabuf->priv;

        mutex_lock(&buffer->lock);
        dma_heap_buffer_vmap_put(buffer);
        mutex_unlock(&buffer->lock);
}

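/* dma_buf_ops shared by all heaps built on these helpers. */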
const struct dma_buf_ops heap_helper_ops = {
        .map_dma_buf = dma_heap_map_dma_buf,
        .unmap_dma_buf = dma_heap_unmap_dma_buf,
        .mmap = dma_heap_mmap,
        .release = dma_heap_dma_buf_release,
        .attach = dma_heap_attach,
        .detach = dma_heap_detach,
        .begin_cpu_access = dma_heap_dma_buf_begin_cpu_access,
        .end_cpu_access = dma_heap_dma_buf_end_cpu_access,
        .vmap = dma_heap_dma_buf_vmap,
        .vunmap = dma_heap_dma_buf_vunmap,
};