// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018-2020 Christoph Hellwig.
 *
 * DMA operations that map physical memory directly without using an IOMMU.
 */
#include <linux/memblock.h> /* for max_pfn */
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-map-ops.h>
#include <linux/scatterlist.h>
#include <linux/pfn.h>
#include <linux/vmalloc.h>
#include <linux/set_memory.h>
#include <linux/slab.h>
#include "direct.h"

/*
 * Most architectures use ZONE_DMA for the first 16 megabytes, but some use
 * it for entirely different regions. In that case the arch code needs to
 * override the variable below for dma-direct to work properly.
 */
unsigned int zone_dma_bits __ro_after_init = 24;

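/*
 * Translate a CPU physical address into a device address. When the device
 * is forced to use unencrypted memory the encryption bit must not be set
 * in the returned address, hence the phys_to_dma_unencrypted() variant.
 */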
static inline dma_addr_t phys_to_dma_direct(struct device *dev,
		phys_addr_t phys)
{
	if (force_dma_unencrypted(dev))
		return phys_to_dma_unencrypted(dev, phys);
	return phys_to_dma(dev, phys);
}

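/* Look up the struct page backing a dma-direct handle. */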
static inline struct page *dma_direct_to_page(struct device *dev,
		dma_addr_t dma_addr)
{
	return pfn_to_page(PHYS_PFN(dma_to_phys(dev, dma_addr)));
}

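/*
 * Return the smallest "all ones" mask that covers the highest DMA address
 * the device may see. For example, if the last page of memory translates
 * to a device address just below 4 GiB, the result is DMA_BIT_MASK(32).
 */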
u64 dma_direct_get_required_mask(struct device *dev)
{
	phys_addr_t phys = (phys_addr_t)(max_pfn - 1) << PAGE_SHIFT;
	u64 max_dma = phys_to_dma_direct(dev, phys);

	return (1ULL << (fls64(max_dma) - 1)) * 2 - 1;
}

static gfp_t dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask,
		u64 *phys_limit)
{
	u64 dma_limit = min_not_zero(dma_mask, dev->bus_dma_limit);

	/*
	 * Optimistically try the zone that the physical address mask falls
	 * into first. If that returns memory that isn't actually addressable
	 * we will fall back to the next lower zone and try again.
	 *
	 * Note that GFP_DMA32 and GFP_DMA are no-ops without the corresponding
	 * zones.
	 */
	*phys_limit = dma_to_phys(dev, dma_limit);
	if (*phys_limit <= DMA_BIT_MASK(zone_dma_bits))
		return GFP_DMA;
	if (*phys_limit <= DMA_BIT_MASK(32))
		return GFP_DMA32;
	return 0;
}

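/*
 * Check that memory at @phys is addressable by the device through both
 * its coherent DMA mask and its bus limit.
 */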
static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
{
	dma_addr_t dma_addr = phys_to_dma_direct(dev, phys);

	if (dma_addr == DMA_MAPPING_ERROR)
		return false;
	return dma_addr + size - 1 <=
		min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit);
}

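/*
 * Allocate pages that satisfy the device's addressing constraints: start
 * in the optimal zone per dma_direct_optimal_gfp_mask(), verify the
 * result with dma_coherent_ok(), and retry in progressively more
 * restrictive zones (ZONE_DMA32, then ZONE_DMA) if the check fails.
 */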
static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
		gfp_t gfp)
{
	int node = dev_to_node(dev);
	struct page *page = NULL;
	u64 phys_limit;

	WARN_ON_ONCE(!PAGE_ALIGNED(size));

	gfp |= dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
					   &phys_limit);
	page = dma_alloc_contiguous(dev, size, gfp);
	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
		dma_free_contiguous(dev, page, size);
		page = NULL;
	}
again:
	if (!page)
		page = alloc_pages_node(node, gfp, get_order(size));
	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
		dma_free_contiguous(dev, page, size);
		page = NULL;

		if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
		    phys_limit < DMA_BIT_MASK(64) &&
		    !(gfp & (GFP_DMA32 | GFP_DMA))) {
			gfp |= GFP_DMA32;
			goto again;
		}

		if (IS_ENABLED(CONFIG_ZONE_DMA) && !(gfp & GFP_DMA)) {
			gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
			goto again;
		}
	}

	return page;
}

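/*
 * Allocate from the pre-populated atomic pools instead of the page
 * allocator, for callers that cannot sleep while the buffer is remapped
 * or decrypted.
 */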
static void *dma_direct_alloc_from_pool(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp)
{
	struct page *page;
	u64 phys_mask;
	void *ret;

	gfp |= dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
					   &phys_mask);
	page = dma_alloc_from_pool(dev, size, &ret, gfp, dma_coherent_ok);
	if (!page)
		return NULL;
	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
	return ret;
}

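/*
 * Allocate a coherent buffer. Drivers do not call this directly: they go
 * through dma_alloc_coherent()/dma_alloc_attrs(), which dispatch here for
 * devices that use the direct mapping. The branches below handle, in
 * order: DMA_ATTR_NO_KERNEL_MAPPING (return the struct page as an opaque
 * cookie), architectures with their own non-coherent allocator, atomic
 * allocations from the coherent pools, remapped or highmem buffers, and
 * finally plain linear addresses, with memory decryption and uncached
 * aliases applied as needed.
 */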
void *dma_direct_alloc(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	struct page *page;
	void *ret;
	int err;

	size = PAGE_ALIGN(size);
	if (attrs & DMA_ATTR_NO_WARN)
		gfp |= __GFP_NOWARN;

	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
	    !force_dma_unencrypted(dev)) {
		page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO);
		if (!page)
			return NULL;
		/* remove any dirty cache lines on the kernel alias */
		if (!PageHighMem(page))
			arch_dma_prep_coherent(page, size);
		*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
		/* return the page pointer as the opaque cookie */
		return page;
	}

	if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
	    !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
	    !dev_is_dma_coherent(dev))
		return arch_dma_alloc(dev, size, dma_handle, gfp, attrs);

	/*
	 * Remapping or decrypting memory may block. If either is required and
	 * we can't block, allocate the memory from the atomic pools.
	 */
	if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
	    !gfpflags_allow_blocking(gfp) &&
	    (force_dma_unencrypted(dev) ||
	     (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) && !dev_is_dma_coherent(dev))))
		return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);

	/* we always manually zero the memory once we are done */
	page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO);
	if (!page)
		return NULL;

	if ((IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
	     !dev_is_dma_coherent(dev)) ||
	    (IS_ENABLED(CONFIG_DMA_REMAP) && PageHighMem(page))) {
		/* remove any dirty cache lines on the kernel alias */
		arch_dma_prep_coherent(page, size);

		/* create a coherent mapping */
		ret = dma_common_contiguous_remap(page, size,
				dma_pgprot(dev, PAGE_KERNEL, attrs),
				__builtin_return_address(0));
		if (!ret)
			goto out_free_pages;
		if (force_dma_unencrypted(dev)) {
			err = set_memory_decrypted((unsigned long)ret,
						   1 << get_order(size));
			if (err)
				goto out_free_pages;
		}
		memset(ret, 0, size);
		goto done;
	}

	if (PageHighMem(page)) {
		/*
		 * Depending on the cma= arguments and per-arch setup
		 * dma_alloc_contiguous could return highmem pages.
		 * Without remapping there is no way to return them here,
		 * so log an error and fail.
		 */
		dev_info(dev, "Rejecting highmem page from CMA.\n");
		goto out_free_pages;
	}

	ret = page_address(page);
	if (force_dma_unencrypted(dev)) {
		err = set_memory_decrypted((unsigned long)ret,
					   1 << get_order(size));
		if (err)
			goto out_free_pages;
	}

	memset(ret, 0, size);

	if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
	    !dev_is_dma_coherent(dev)) {
		arch_dma_prep_coherent(page, size);
		ret = arch_dma_set_uncached(ret, size);
		if (IS_ERR(ret))
			goto out_encrypt_pages;
	}
done:
	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
	return ret;

out_encrypt_pages:
	if (force_dma_unencrypted(dev)) {
		err = set_memory_encrypted((unsigned long)page_address(page),
					   1 << get_order(size));
		/* If memory cannot be re-encrypted, it must be leaked */
		if (err)
			return NULL;
	}
out_free_pages:
	dma_free_contiguous(dev, page, size);
	return NULL;
}

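/*
 * Undo dma_direct_alloc(): each early return below mirrors one of the
 * allocation paths, as cpu_addr may be a struct page cookie, an
 * arch-specific allocation, an atomic-pool buffer, a vmalloc() remap, or
 * a plain linear address.
 */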
void dma_direct_free(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs)
{
	unsigned int page_order = get_order(size);

	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
	    !force_dma_unencrypted(dev)) {
		/* cpu_addr is a struct page cookie, not a kernel address */
		dma_free_contiguous(dev, cpu_addr, size);
		return;
	}

	if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
	    !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
	    !dev_is_dma_coherent(dev)) {
		arch_dma_free(dev, size, cpu_addr, dma_addr, attrs);
		return;
	}

	/* If cpu_addr is not from an atomic pool, dma_free_from_pool() fails */
	if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
	    dma_free_from_pool(dev, cpu_addr, PAGE_ALIGN(size)))
		return;

	if (force_dma_unencrypted(dev))
		set_memory_encrypted((unsigned long)cpu_addr, 1 << page_order);

	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr))
		vunmap(cpu_addr);
	else if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_CLEAR_UNCACHED))
		arch_dma_clear_uncached(cpu_addr, size);

	dma_free_contiguous(dev, dma_direct_to_page(dev, dma_addr), size);
}

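/*
 * Allocate pages for the dma_alloc_pages() interface. Unlike
 * dma_direct_alloc() no kernel remapping or uncached alias is set up, but
 * atomic-pool and memory-encryption handling still apply.
 */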
struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
{
	struct page *page;
	void *ret;

	if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
	    force_dma_unencrypted(dev) && !gfpflags_allow_blocking(gfp))
		return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);

	page = __dma_direct_alloc_pages(dev, size, gfp);
	if (!page)
		return NULL;
	if (PageHighMem(page)) {
		/*
		 * Depending on the cma= arguments and per-arch setup
		 * dma_alloc_contiguous could return highmem pages.
		 * Without remapping there is no way to return them here,
		 * so log an error and fail.
		 */
		dev_info(dev, "Rejecting highmem page from CMA.\n");
		goto out_free_pages;
	}

	ret = page_address(page);
	if (force_dma_unencrypted(dev)) {
		if (set_memory_decrypted((unsigned long)ret,
					 1 << get_order(size)))
			goto out_free_pages;
	}
	memset(ret, 0, size);
	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
	return page;
out_free_pages:
	dma_free_contiguous(dev, page, size);
	return NULL;
}

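/* Free a buffer obtained from dma_direct_alloc_pages(). */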
void dma_direct_free_pages(struct device *dev, size_t size,
		struct page *page, dma_addr_t dma_addr,
		enum dma_data_direction dir)
{
	unsigned int page_order = get_order(size);
	void *vaddr = page_address(page);

	/* If cpu_addr is not from an atomic pool, dma_free_from_pool() fails */
	if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
	    dma_free_from_pool(dev, vaddr, size))
		return;

	if (force_dma_unencrypted(dev))
		set_memory_encrypted((unsigned long)vaddr, 1 << page_order);

	dma_free_contiguous(dev, page, size);
}

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
    defined(CONFIG_SWIOTLB)
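/*
 * Make CPU writes visible to the device: copy bounced entries out to the
 * swiotlb buffer first, then let the architecture write back or flush
 * caches for non-coherent devices.
 */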
void dma_direct_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		phys_addr_t paddr = dma_to_phys(dev, sg_dma_address(sg));

		if (unlikely(is_swiotlb_buffer(paddr)))
			swiotlb_tbl_sync_single(dev, paddr, sg->length,
					dir, SYNC_FOR_DEVICE);

		if (!dev_is_dma_coherent(dev))
			arch_sync_dma_for_device(paddr, sg->length, dir);
	}
}
#endif

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \
    defined(CONFIG_SWIOTLB)
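/*
 * Make device writes visible to the CPU, in the reverse order of the
 * for-device variant: invalidate caches first, then copy any bounced
 * entries back out of the swiotlb buffer.
 */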
void dma_direct_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		phys_addr_t paddr = dma_to_phys(dev, sg_dma_address(sg));

		if (!dev_is_dma_coherent(dev))
			arch_sync_dma_for_cpu(paddr, sg->length, dir);

		if (unlikely(is_swiotlb_buffer(paddr)))
			swiotlb_tbl_sync_single(dev, paddr, sg->length, dir,
					SYNC_FOR_CPU);

		if (dir == DMA_FROM_DEVICE)
			arch_dma_mark_clean(paddr, sg->length);
	}

	if (!dev_is_dma_coherent(dev))
		arch_sync_dma_for_cpu_all();
}

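/* Tear down a scatterlist mapping entry by entry. */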
void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i)
		dma_direct_unmap_page(dev, sg->dma_address, sg_dma_len(sg), dir,
				attrs);
}
#endif

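/*
 * Map each scatterlist entry individually. On failure everything mapped
 * so far is unmapped and 0 is returned, as the DMA API requires.
 */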
int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
		enum dma_data_direction dir, unsigned long attrs)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sgl, sg, nents, i) {
		sg->dma_address = dma_direct_map_page(dev, sg_page(sg),
				sg->offset, sg->length, dir, attrs);
		if (sg->dma_address == DMA_MAPPING_ERROR)
			goto out_unmap;
		sg_dma_len(sg) = sg->length;
	}

	return nents;

out_unmap:
	dma_direct_unmap_sg(dev, sgl, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
	return 0;
}

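/*
 * MMIO resources are mapped 1:1, with no encryption bit and no swiotlb
 * bouncing; only check that the address fits the device's mask and bus
 * limit.
 */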
dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	dma_addr_t dma_addr = paddr;

	if (unlikely(!dma_capable(dev, dma_addr, size, false))) {
		dev_err_once(dev,
			     "DMA addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
			     &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
		WARN_ON_ONCE(1);
		return DMA_MAPPING_ERROR;
	}

	return dma_addr;
}

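/*
 * A dma-direct buffer is physically contiguous, so it can always be
 * described by a single-entry scatterlist.
 */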
int dma_direct_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	struct page *page = dma_direct_to_page(dev, dma_addr);
	int ret;

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (!ret)
		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return ret;
}

bool dma_direct_can_mmap(struct device *dev)
{
	return dev_is_dma_coherent(dev) ||
		IS_ENABLED(CONFIG_DMA_NONCOHERENT_MMAP);
}

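/*
 * Map a coherent buffer into userspace: validate that the requested
 * offset and length fit within the buffer, then remap the backing pfns.
 */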
int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	unsigned long user_count = vma_pages(vma);
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn = PHYS_PFN(dma_to_phys(dev, dma_addr));
	int ret = -ENXIO;

	vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (vma->vm_pgoff >= count || user_count > count - vma->vm_pgoff)
		return -ENXIO;
	return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
			user_count << PAGE_SHIFT, vma->vm_page_prot);
}

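/*
 * Report whether a DMA mask is usable without an IOMMU. Masks of 32 bits
 * and up are always accepted; smaller masks must still reach the lower of
 * the end of memory and the ZONE_DMA limit.
 */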
int dma_direct_supported(struct device *dev, u64 mask)
{
	u64 min_mask = (max_pfn - 1) << PAGE_SHIFT;

	/*
	 * Because 32-bit DMA masks are so common we expect every architecture
	 * to be able to satisfy them - either by not supporting more physical
	 * memory, or by providing a ZONE_DMA32. If neither is the case, the
	 * architecture needs to use an IOMMU instead of the direct mapping.
	 */
	if (mask >= DMA_BIT_MASK(32))
		return 1;

	/*
	 * This check needs to be against the actual bit mask value, so use
	 * phys_to_dma_unencrypted() here so that the SME encryption mask isn't
	 * part of the check.
	 */
	if (IS_ENABLED(CONFIG_ZONE_DMA))
		min_mask = min_t(u64, min_mask, DMA_BIT_MASK(zone_dma_bits));
	return mask >= phys_to_dma_unencrypted(dev, min_mask);
}

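/* A single mapping is unbounded unless swiotlb may have to bounce it. */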
size_t dma_direct_max_mapping_size(struct device *dev)
{
	/* If SWIOTLB is active, use its maximum mapping size */
	if (is_swiotlb_active() &&
	    (dma_addressing_limited(dev) || swiotlb_force == SWIOTLB_FORCE))
		return swiotlb_max_mapping_size(dev);
	return SIZE_MAX;
}

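/*
 * Syncs may only be skipped for coherent devices whose addresses are not
 * bounced through swiotlb.
 */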
bool dma_direct_need_sync(struct device *dev, dma_addr_t dma_addr)
{
	return !dev_is_dma_coherent(dev) ||
		is_swiotlb_buffer(dma_to_phys(dev, dma_addr));
}

/**
 * dma_direct_set_offset - Assign scalar offset for a single DMA range.
 * @dev: device pointer; needed to "own" the allocated memory.
 * @cpu_start: beginning of memory region covered by this offset.
 * @dma_start: beginning of DMA/PCI region covered by this offset.
 * @size: size of the region.
 *
 * This is for the simple case of a uniform offset which cannot
 * be discovered by "dma-ranges".
 *
 * It returns -ENOMEM if out of memory, -EINVAL if a map
 * already exists, 0 otherwise.
 *
 * Note: any call to this from a driver is a bug. The mapping needs
 * to be described by the device tree or other firmware interfaces.
 */
int dma_direct_set_offset(struct device *dev, phys_addr_t cpu_start,
			 dma_addr_t dma_start, u64 size)
{
	struct bus_dma_region *map;
	u64 offset = (u64)cpu_start - (u64)dma_start;

	if (dev->dma_range_map) {
		dev_err(dev, "attempt to add DMA range to existing map\n");
		return -EINVAL;
	}

	if (!offset)
		return 0;

	map = kcalloc(2, sizeof(*map), GFP_KERNEL);
	if (!map)
		return -ENOMEM;
	map[0].cpu_start = cpu_start;
	map[0].dma_start = dma_start;
	map[0].offset = offset;
	map[0].size = size;
	dev->dma_range_map = map;
	return 0;
}
EXPORT_SYMBOL_GPL(dma_direct_set_offset);