/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_DMA_MAPPING_H
#define _LINUX_DMA_MAPPING_H

#include <linux/sizes.h>
#include <linux/string.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/dma-debug.h>
#include <linux/dma-direction.h>
#include <linux/scatterlist.h>
#include <linux/bug.h>
#include <linux/mem_encrypt.h>

/**
 * List of possible attributes associated with a DMA mapping. The semantics
 * of each attribute should be defined in Documentation/DMA-attributes.txt.
 *
 * DMA_ATTR_WRITE_BARRIER: DMA to a memory region with this attribute
 * forces all pending DMA writes to complete.
 */
#define DMA_ATTR_WRITE_BARRIER		(1UL << 0)
/*
 * DMA_ATTR_WEAK_ORDERING: Specifies that reads and writes to the mapping
 * may be weakly ordered, that is that reads and writes may pass each other.
 */
#define DMA_ATTR_WEAK_ORDERING		(1UL << 1)
/*
 * DMA_ATTR_WRITE_COMBINE: Specifies that writes to the mapping may be
 * buffered to improve performance.
 */
#define DMA_ATTR_WRITE_COMBINE		(1UL << 2)
/*
 * DMA_ATTR_NON_CONSISTENT: Lets the platform choose to return either
 * consistent or non-consistent memory as it sees fit.
 */
#define DMA_ATTR_NON_CONSISTENT		(1UL << 3)
/*
 * DMA_ATTR_NO_KERNEL_MAPPING: Lets the platform avoid creating a kernel
 * virtual mapping for the allocated buffer.
 */
#define DMA_ATTR_NO_KERNEL_MAPPING	(1UL << 4)
/*
 * DMA_ATTR_SKIP_CPU_SYNC: Allows platform code to skip synchronization of
 * the CPU cache for the given buffer, assuming that it has already been
 * transferred to the 'device' domain.
 */
#define DMA_ATTR_SKIP_CPU_SYNC		(1UL << 5)
/*
 * DMA_ATTR_FORCE_CONTIGUOUS: Forces contiguous allocation of the buffer
 * in physical memory.
 */
#define DMA_ATTR_FORCE_CONTIGUOUS	(1UL << 6)
/*
 * DMA_ATTR_ALLOC_SINGLE_PAGES: This is a hint to the DMA-mapping subsystem
 * that it's probably not worth the time to try to allocate memory in a way
 * that gives better TLB efficiency.
 */
#define DMA_ATTR_ALLOC_SINGLE_PAGES	(1UL << 7)
/*
 * DMA_ATTR_NO_WARN: This tells the DMA-mapping subsystem to suppress
 * allocation failure reports (similarly to __GFP_NOWARN).
 */
#define DMA_ATTR_NO_WARN		(1UL << 8)

/*
 * DMA_ATTR_PRIVILEGED: used to indicate that the buffer is fully
 * accessible at an elevated privilege level (and ideally inaccessible or
 * at least read-only at lesser-privileged levels).
 */
#define DMA_ATTR_PRIVILEGED		(1UL << 9)

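/*
 * Editor's illustration (not part of the original header): attributes are
 * combined with bitwise OR and passed as the 'attrs' argument of the
 * *_attrs helpers declared below. A minimal sketch, assuming a driver
 * buffer 'buf' of 'len' bytes whose cache maintenance the driver does by
 * hand:
 *
 *	unsigned long attrs = DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_NO_WARN;
 *	dma_addr_t dma = dma_map_single_attrs(dev, buf, len,
 *					      DMA_TO_DEVICE, attrs);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 */
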
/*
 * A dma_addr_t can hold any valid DMA or bus address for the platform.
 * It can be given to a device to use as a DMA source or target. A CPU cannot
 * reference a dma_addr_t directly because there may be translation between
 * its physical address space and the bus address space.
 */
struct dma_map_ops {
	void *(*alloc)(struct device *dev, size_t size,
		       dma_addr_t *dma_handle, gfp_t gfp,
		       unsigned long attrs);
	void (*free)(struct device *dev, size_t size,
		     void *vaddr, dma_addr_t dma_handle,
		     unsigned long attrs);
	int (*mmap)(struct device *, struct vm_area_struct *,
		    void *, dma_addr_t, size_t,
		    unsigned long attrs);

	int (*get_sgtable)(struct device *dev, struct sg_table *sgt, void *,
			   dma_addr_t, size_t, unsigned long attrs);

	dma_addr_t (*map_page)(struct device *dev, struct page *page,
			       unsigned long offset, size_t size,
			       enum dma_data_direction dir,
			       unsigned long attrs);
	void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
			   size_t size, enum dma_data_direction dir,
			   unsigned long attrs);
	/*
	 * map_sg returns 0 on error and a value > 0 on success.
	 * It should never return a value < 0.
	 */
	int (*map_sg)(struct device *dev, struct scatterlist *sg,
		      int nents, enum dma_data_direction dir,
		      unsigned long attrs);
	void (*unmap_sg)(struct device *dev,
			 struct scatterlist *sg, int nents,
			 enum dma_data_direction dir,
			 unsigned long attrs);
	dma_addr_t (*map_resource)(struct device *dev, phys_addr_t phys_addr,
				   size_t size, enum dma_data_direction dir,
				   unsigned long attrs);
	void (*unmap_resource)(struct device *dev, dma_addr_t dma_handle,
			       size_t size, enum dma_data_direction dir,
			       unsigned long attrs);
	void (*sync_single_for_cpu)(struct device *dev,
				    dma_addr_t dma_handle, size_t size,
				    enum dma_data_direction dir);
	void (*sync_single_for_device)(struct device *dev,
				       dma_addr_t dma_handle, size_t size,
				       enum dma_data_direction dir);
	void (*sync_sg_for_cpu)(struct device *dev,
				struct scatterlist *sg, int nents,
				enum dma_data_direction dir);
	void (*sync_sg_for_device)(struct device *dev,
				   struct scatterlist *sg, int nents,
				   enum dma_data_direction dir);
	void (*cache_sync)(struct device *dev, void *vaddr, size_t size,
			   enum dma_data_direction direction);
	int (*dma_supported)(struct device *dev, u64 mask);
	u64 (*get_required_mask)(struct device *dev);
	size_t (*max_mapping_size)(struct device *dev);
	unsigned long (*get_merge_boundary)(struct device *dev);
};

#define DMA_MAPPING_ERROR		(~(dma_addr_t)0)

extern const struct dma_map_ops dma_virt_ops;
extern const struct dma_map_ops dma_dummy_ops;

#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))

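/*
 * Worked example (editor's note, not from the original source):
 *
 *	DMA_BIT_MASK(24) == 0x0000000000ffffffULL
 *	DMA_BIT_MASK(32) == 0x00000000ffffffffULL
 *	DMA_BIT_MASK(64) == 0xffffffffffffffffULL
 *
 * The n == 64 case is special-cased because shifting a 64-bit value by
 * 64 bits is undefined behaviour in C.
 */
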
#define DMA_MASK_NONE	0x0ULL

static inline int valid_dma_direction(int dma_direction)
{
	return ((dma_direction == DMA_BIDIRECTIONAL) ||
		(dma_direction == DMA_TO_DEVICE) ||
		(dma_direction == DMA_FROM_DEVICE));
}

#ifdef CONFIG_DMA_DECLARE_COHERENT
/*
 * These three functions are only for the DMA allocator.
 * Don't use them in device drivers.
 */
int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
				dma_addr_t *dma_handle, void **ret);
int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr);

int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
			       void *cpu_addr, size_t size, int *ret);

void *dma_alloc_from_global_coherent(ssize_t size, dma_addr_t *dma_handle);
int dma_release_from_global_coherent(int order, void *vaddr);
int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *cpu_addr,
				  size_t size, int *ret);

#else
#define dma_alloc_from_dev_coherent(dev, size, handle, ret) (0)
#define dma_release_from_dev_coherent(dev, order, vaddr) (0)
#define dma_mmap_from_dev_coherent(dev, vma, vaddr, order, ret) (0)

static inline void *dma_alloc_from_global_coherent(ssize_t size,
						   dma_addr_t *dma_handle)
{
	return NULL;
}

static inline int dma_release_from_global_coherent(int order, void *vaddr)
{
	return 0;
}

static inline int dma_mmap_from_global_coherent(struct vm_area_struct *vma,
						void *cpu_addr, size_t size,
						int *ret)
{
	return 0;
}
#endif /* CONFIG_DMA_DECLARE_COHERENT */

static inline bool dma_is_direct(const struct dma_map_ops *ops)
{
	return likely(!ops);
}

/*
 * All the dma_direct_* declarations are here just for the indirect call bypass,
 * and must not be used directly by drivers!
 */
dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs);
int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
		enum dma_data_direction dir, unsigned long attrs);
dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir, unsigned long attrs);

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_sync_single_for_device(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir);
void dma_direct_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir);
#else
static inline void dma_direct_sync_single_for_device(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
}
static inline void dma_direct_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
}
#endif

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs);
void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs);
void dma_direct_sync_single_for_cpu(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir);
void dma_direct_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir);
#else
static inline void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
}
static inline void dma_direct_unmap_sg(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir,
		unsigned long attrs)
{
}
static inline void dma_direct_sync_single_for_cpu(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
}
static inline void dma_direct_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
}
#endif

size_t dma_direct_max_mapping_size(struct device *dev);

#ifdef CONFIG_HAS_DMA
#include <asm/dma-mapping.h>

static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
	if (dev->dma_ops)
		return dev->dma_ops;
	return get_arch_dma_ops(dev->bus);
}

static inline void set_dma_ops(struct device *dev,
			       const struct dma_map_ops *dma_ops)
{
	dev->dma_ops = dma_ops;
}

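/*
 * A minimal sketch (editor's illustration, not from the original header) of
 * how bus or IOMMU code might install custom ops before a driver binds;
 * 'my_bus_dma_ops' and its callbacks are hypothetical:
 *
 *	static const struct dma_map_ops my_bus_dma_ops = {
 *		.map_page	= my_bus_map_page,
 *		.unmap_page	= my_bus_unmap_page,
 *		.map_sg		= my_bus_map_sg,
 *		.unmap_sg	= my_bus_unmap_sg,
 *	};
 *
 *	set_dma_ops(dev, &my_bus_dma_ops);
 *
 * Ordinary drivers never do this; they call the dma_map_*() wrappers below,
 * which dispatch through get_dma_ops() or the direct-mapping bypass.
 */
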
static inline dma_addr_t dma_map_page_attrs(struct device *dev,
		struct page *page, size_t offset, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	BUG_ON(!valid_dma_direction(dir));
	if (dma_is_direct(ops))
		addr = dma_direct_map_page(dev, page, offset, size, dir, attrs);
	else
		addr = ops->map_page(dev, page, offset, size, dir, attrs);
	debug_dma_map_page(dev, page, offset, size, dir, addr);

	return addr;
}

static inline void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_is_direct(ops))
		dma_direct_unmap_page(dev, addr, size, dir, attrs);
	else if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, attrs);
	debug_dma_unmap_page(dev, addr, size, dir);
}

/*
 * dma_map_sg_attrs returns 0 on error and > 0 on success.
 * It should never return a value < 0.
 */
static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
				   int nents, enum dma_data_direction dir,
				   unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	int ents;

	BUG_ON(!valid_dma_direction(dir));
	if (dma_is_direct(ops))
		ents = dma_direct_map_sg(dev, sg, nents, dir, attrs);
	else
		ents = ops->map_sg(dev, sg, nents, dir, attrs);
	BUG_ON(ents < 0);
	debug_dma_map_sg(dev, sg, nents, ents, dir);

	return ents;
}

static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
				      int nents, enum dma_data_direction dir,
				      unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	debug_dma_unmap_sg(dev, sg, nents, dir);
	if (dma_is_direct(ops))
		dma_direct_unmap_sg(dev, sg, nents, dir, attrs);
	else if (ops->unmap_sg)
		ops->unmap_sg(dev, sg, nents, dir, attrs);
}

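/*
 * A minimal scatter-gather sketch (editor's illustration, not part of the
 * original header); 'sgt' is an already populated struct sg_table and
 * program_hw_desc() is a hypothetical driver helper:
 *
 *	struct scatterlist *sg;
 *	int i, nents;
 *
 *	nents = dma_map_sg(dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
 *	if (nents == 0)
 *		return -EIO;
 *	for_each_sg(sgt->sgl, sg, nents, i)
 *		program_hw_desc(sg_dma_address(sg), sg_dma_len(sg));
 *	(device performs the transfer)
 *	dma_unmap_sg(dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
 *
 * The device must be programmed with the count returned by dma_map_sg(),
 * which may be smaller than sgt->nents, while dma_unmap_sg() takes the
 * original nents.
 */
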
static inline dma_addr_t dma_map_resource(struct device *dev,
					  phys_addr_t phys_addr,
					  size_t size,
					  enum dma_data_direction dir,
					  unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr = DMA_MAPPING_ERROR;

	BUG_ON(!valid_dma_direction(dir));

	/* Don't allow RAM to be mapped */
	if (WARN_ON_ONCE(pfn_valid(PHYS_PFN(phys_addr))))
		return DMA_MAPPING_ERROR;

	if (dma_is_direct(ops))
		addr = dma_direct_map_resource(dev, phys_addr, size, dir, attrs);
	else if (ops->map_resource)
		addr = ops->map_resource(dev, phys_addr, size, dir, attrs);

	debug_dma_map_resource(dev, phys_addr, size, dir, addr);
	return addr;
}

static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
				      size_t size, enum dma_data_direction dir,
				      unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (!dma_is_direct(ops) && ops->unmap_resource)
		ops->unmap_resource(dev, addr, size, dir, attrs);
	debug_dma_unmap_resource(dev, addr, size, dir);
}

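/*
 * Editor's illustration (not from the original source): dma_map_resource()
 * is for device MMIO ranges, e.g. handing a peripheral's FIFO register to a
 * DMA engine; 'fifo_phys' is a hypothetical physical address of that
 * register:
 *
 *	dma_addr_t dma = dma_map_resource(dev, fifo_phys, 4,
 *					  DMA_FROM_DEVICE, 0);
 *	if (dma_mapping_error(dev, dma))
 *		return -EIO;
 *	(use 'dma' when configuring the DMA engine)
 *	dma_unmap_resource(dev, dma, 4, DMA_FROM_DEVICE, 0);
 *
 * RAM pages are rejected by the pfn_valid() check above; normal memory goes
 * through dma_map_single()/dma_map_page() instead.
 */
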
static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
					   size_t size,
					   enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_is_direct(ops))
		dma_direct_sync_single_for_cpu(dev, addr, size, dir);
	else if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr, size, dir);
	debug_dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
					      dma_addr_t addr, size_t size,
					      enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_is_direct(ops))
		dma_direct_sync_single_for_device(dev, addr, size, dir);
	else if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr, size, dir);
	debug_dma_sync_single_for_device(dev, addr, size, dir);
}

static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
		    int nelems, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_is_direct(ops))
		dma_direct_sync_sg_for_cpu(dev, sg, nelems, dir);
	else if (ops->sync_sg_for_cpu)
		ops->sync_sg_for_cpu(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
		       int nelems, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_is_direct(ops))
		dma_direct_sync_sg_for_device(dev, sg, nelems, dir);
	else if (ops->sync_sg_for_device)
		ops->sync_sg_for_device(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
}

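/*
 * A minimal sketch (editor's illustration, not from the original header) of
 * when the sync helpers above are needed: a streaming buffer that stays
 * mapped across transfers must be handed to the CPU before it is touched
 * and back to the device afterwards. process_rx_data() is hypothetical;
 * 'dma' and 'buf' belong to an existing DMA_FROM_DEVICE mapping:
 *
 *	dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);
 *	process_rx_data(buf, len);
 *	dma_sync_single_for_device(dev, dma, len, DMA_FROM_DEVICE);
 */
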
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	debug_dma_mapping_error(dev, dma_addr);

	if (dma_addr == DMA_MAPPING_ERROR)
		return -ENOMEM;
	return 0;
}

void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t flag, unsigned long attrs);
void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_handle, unsigned long attrs);
void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs);
void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle);
void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		enum dma_data_direction dir);
int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
bool dma_can_mmap(struct device *dev);
int dma_supported(struct device *dev, u64 mask);
int dma_set_mask(struct device *dev, u64 mask);
int dma_set_coherent_mask(struct device *dev, u64 mask);
u64 dma_get_required_mask(struct device *dev);
size_t dma_max_mapping_size(struct device *dev);
unsigned long dma_get_merge_boundary(struct device *dev);
#else /* CONFIG_HAS_DMA */
static inline dma_addr_t dma_map_page_attrs(struct device *dev,
		struct page *page, size_t offset, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	return DMA_MAPPING_ERROR;
}
static inline void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
}
static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	return 0;
}
static inline void dma_unmap_sg_attrs(struct device *dev,
		struct scatterlist *sg, int nents, enum dma_data_direction dir,
		unsigned long attrs)
{
}
static inline dma_addr_t dma_map_resource(struct device *dev,
		phys_addr_t phys_addr, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	return DMA_MAPPING_ERROR;
}
static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
}
static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir)
{
}
static inline void dma_sync_single_for_device(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
}
static inline void dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sg, int nelems, enum dma_data_direction dir)
{
}
static inline void dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sg, int nelems, enum dma_data_direction dir)
{
}
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return -ENOMEM;
}
static inline void *dma_alloc_attrs(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs)
{
	return NULL;
}
static inline void dma_free_attrs(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs)
{
}
static inline void *dmam_alloc_attrs(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	return NULL;
}
static inline void dmam_free_coherent(struct device *dev, size_t size,
		void *vaddr, dma_addr_t dma_handle)
{
}
static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		enum dma_data_direction dir)
{
}
static inline int dma_get_sgtable_attrs(struct device *dev,
		struct sg_table *sgt, void *cpu_addr, dma_addr_t dma_addr,
		size_t size, unsigned long attrs)
{
	return -ENXIO;
}
static inline int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	return -ENXIO;
}
static inline bool dma_can_mmap(struct device *dev)
{
	return false;
}
static inline int dma_supported(struct device *dev, u64 mask)
{
	return 0;
}
static inline int dma_set_mask(struct device *dev, u64 mask)
{
	return -EIO;
}
static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	return -EIO;
}
static inline u64 dma_get_required_mask(struct device *dev)
{
	return 0;
}
static inline size_t dma_max_mapping_size(struct device *dev)
{
	return 0;
}
static inline unsigned long dma_get_merge_boundary(struct device *dev)
{
	return 0;
}
#endif /* CONFIG_HAS_DMA */

static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	debug_dma_map_single(dev, ptr, size);
	return dma_map_page_attrs(dev, virt_to_page(ptr), offset_in_page(ptr),
			size, dir, attrs);
}

static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	return dma_unmap_page_attrs(dev, addr, size, dir, attrs);
}

static inline void dma_sync_single_range_for_cpu(struct device *dev,
		dma_addr_t addr, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	return dma_sync_single_for_cpu(dev, addr + offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
		dma_addr_t addr, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	return dma_sync_single_for_device(dev, addr + offset, size, dir);
}

#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0)
#define dma_map_page(d, p, o, s, r) dma_map_page_attrs(d, p, o, s, r, 0)
#define dma_unmap_page(d, a, s, r) dma_unmap_page_attrs(d, a, s, r, 0)
#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)
#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)

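/*
 * A minimal streaming-DMA sketch (editor's illustration, not part of the
 * original header) built from the wrappers above, for a hypothetical
 * transmit path where 'skb_data'/'len' describe the buffer and
 * queue_tx_descriptor() is a driver-specific hand-off to the hardware:
 *
 *	dma_addr_t dma;
 *
 *	dma = dma_map_single(dev, skb_data, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	queue_tx_descriptor(dma, len);
 *	(later, once the hardware has finished with the buffer:)
 *	dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
 */
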
extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);

struct page **dma_common_find_pages(void *cpu_addr);
void *dma_common_contiguous_remap(struct page *page, size_t size,
			pgprot_t prot, const void *caller);

void *dma_common_pages_remap(struct page **pages, size_t size,
			pgprot_t prot, const void *caller);
void dma_common_free_remap(void *cpu_addr, size_t size);

bool dma_in_atomic_pool(void *start, size_t size);
void *dma_alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags);
bool dma_free_from_pool(void *start, size_t size);

int
dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, void *cpu_addr,
		dma_addr_t dma_addr, size_t size, unsigned long attrs);

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp)
{
	return dma_alloc_attrs(dev, size, dma_handle, gfp,
			(gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_handle)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_handle, 0);
}

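/*
 * A minimal coherent-allocation sketch (editor's illustration, not from the
 * original source): a descriptor ring shared with the device for its whole
 * lifetime. struct my_desc, RING_BYTES, regs and RING_BASE_LO are
 * hypothetical:
 *
 *	struct my_desc *ring;
 *	dma_addr_t ring_dma;
 *
 *	ring = dma_alloc_coherent(dev, RING_BYTES, &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	writel(lower_32_bits(ring_dma), regs + RING_BASE_LO);
 *	(no dma_sync_*() calls are needed for coherent memory)
 *	dma_free_coherent(dev, RING_BYTES, ring, ring_dma);
 */
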
static inline u64 dma_get_mask(struct device *dev)
{
	if (dev->dma_mask && *dev->dma_mask)
		return *dev->dma_mask;
	return DMA_BIT_MASK(32);
}

/*
 * Set both the DMA mask and the coherent DMA mask to the same thing.
 * Note that we don't check the return value from dma_set_coherent_mask()
 * as the DMA API guarantees that the coherent DMA mask can be set to
 * the same value as, or a smaller value than, the streaming DMA mask.
 */
static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
{
	int rc = dma_set_mask(dev, mask);

	if (rc == 0)
		dma_set_coherent_mask(dev, mask);
	return rc;
}

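/*
 * Typical probe-time usage (editor's illustration, not from the original
 * header): try the device's full addressing capability, then fall back to
 * 32 bits if the platform cannot satisfy it. The 48-bit limit is just an
 * example of a device-specific value:
 *
 *	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
 *	if (ret)
 *		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
 *	if (ret)
 *		return ret;
 */
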
/*
 * Similar to the above, except it deals with the case where the device
 * does not have dev->dma_mask appropriately set up.
 */
static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
{
	dev->dma_mask = &dev->coherent_dma_mask;
	return dma_set_mask_and_coherent(dev, mask);
}

/**
 * dma_addressing_limited - return if the device is addressing limited
 * @dev: device to check
 *
 * Return %true if the device's DMA mask is too small to address all memory in
 * the system, else %false. Lack of addressing bits is the prime reason for
 * bounce buffering, but might not be the only one.
 */
static inline bool dma_addressing_limited(struct device *dev)
{
	return min_not_zero(dma_get_mask(dev), dev->bus_dma_mask) <
			    dma_get_required_mask(dev);
}

#ifdef CONFIG_ARCH_HAS_SETUP_DMA_OPS
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
		const struct iommu_ops *iommu, bool coherent);
#else
static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
		u64 size, const struct iommu_ops *iommu, bool coherent)
{
}
#endif /* CONFIG_ARCH_HAS_SETUP_DMA_OPS */

#ifdef CONFIG_ARCH_HAS_TEARDOWN_DMA_OPS
void arch_teardown_dma_ops(struct device *dev);
#else
static inline void arch_teardown_dma_ops(struct device *dev)
{
}
#endif /* CONFIG_ARCH_HAS_TEARDOWN_DMA_OPS */

static inline unsigned int dma_get_max_seg_size(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->max_segment_size)
		return dev->dma_parms->max_segment_size;
	return SZ_64K;
}

static inline int dma_set_max_seg_size(struct device *dev, unsigned int size)
{
	if (dev->dma_parms) {
		dev->dma_parms->max_segment_size = size;
		return 0;
	}
	return -EIO;
}

static inline unsigned long dma_get_seg_boundary(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->segment_boundary_mask)
		return dev->dma_parms->segment_boundary_mask;
	return DMA_BIT_MASK(32);
}

static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
{
	if (dev->dma_parms) {
		dev->dma_parms->segment_boundary_mask = mask;
		return 0;
	}
	return -EIO;
}

static inline int dma_get_cache_alignment(void)
{
#ifdef ARCH_DMA_MINALIGN
	return ARCH_DMA_MINALIGN;
#endif
	return 1;
}

#ifdef CONFIG_DMA_DECLARE_COHERENT
int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
				dma_addr_t device_addr, size_t size);
#else
static inline int
dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
			    dma_addr_t device_addr, size_t size)
{
	return -ENOSYS;
}
#endif /* CONFIG_DMA_DECLARE_COHERENT */

static inline void *dmam_alloc_coherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp)
{
	return dmam_alloc_attrs(dev, size, dma_handle, gfp,
			(gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0);
}

static inline void *dma_alloc_wc(struct device *dev, size_t size,
				 dma_addr_t *dma_addr, gfp_t gfp)
{
	unsigned long attrs = DMA_ATTR_WRITE_COMBINE;

	if (gfp & __GFP_NOWARN)
		attrs |= DMA_ATTR_NO_WARN;

	return dma_alloc_attrs(dev, size, dma_addr, gfp, attrs);
}

static inline void dma_free_wc(struct device *dev, size_t size,
			       void *cpu_addr, dma_addr_t dma_addr)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_addr,
			      DMA_ATTR_WRITE_COMBINE);
}

static inline int dma_mmap_wc(struct device *dev,
			      struct vm_area_struct *vma,
			      void *cpu_addr, dma_addr_t dma_addr,
			      size_t size)
{
	return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size,
			      DMA_ATTR_WRITE_COMBINE);
}

#ifdef CONFIG_NEED_DMA_MAP_STATE
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)	dma_addr_t ADDR_NAME
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)		__u32 LEN_NAME
#define dma_unmap_addr(PTR, ADDR_NAME)		((PTR)->ADDR_NAME)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)	(((PTR)->ADDR_NAME) = (VAL))
#define dma_unmap_len(PTR, LEN_NAME)		((PTR)->LEN_NAME)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)	(((PTR)->LEN_NAME) = (VAL))
#else
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)
#define dma_unmap_addr(PTR, ADDR_NAME)		(0)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)	do { } while (0)
#define dma_unmap_len(PTR, LEN_NAME)		(0)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)	do { } while (0)
#endif

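/*
 * A minimal sketch (editor's illustration, not part of the original header)
 * of the unmap-state macros above: they let a driver remember unmap
 * information only when CONFIG_NEED_DMA_MAP_STATE is set, and compile to
 * nothing otherwise. 'struct my_tx_buf' and 'buf' are hypothetical:
 *
 *	struct my_tx_buf {
 *		DEFINE_DMA_UNMAP_ADDR(addr);
 *		DEFINE_DMA_UNMAP_LEN(len);
 *	};
 *
 *	dma_unmap_addr_set(buf, addr, dma);
 *	dma_unmap_len_set(buf, len, size);
 *	(later:)
 *	dma_unmap_single(dev, dma_unmap_addr(buf, addr),
 *			 dma_unmap_len(buf, len), DMA_TO_DEVICE);
 */
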
#endif /* _LINUX_DMA_MAPPING_H */