/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Internals of the DMA direct mapping implementation.  Only for use by the
 * DMA mapping code and IOMMU drivers.
 */
#ifndef _LINUX_DMA_DIRECT_H
#define _LINUX_DMA_DIRECT_H 1

#include <linux/dma-mapping.h>
#include <linux/dma-map-ops.h>
#include <linux/memblock.h> /* for min_low_pfn */
#include <linux/mem_encrypt.h>
#include <linux/swiotlb.h>

extern unsigned int zone_dma_bits;

/*
 * Record the mapping of CPU physical to DMA addresses for a given region.
 */
struct bus_dma_region {
	phys_addr_t	cpu_start;
	dma_addr_t	dma_start;
	u64		size;
	u64		offset;
};

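/*
 * The dma_range_map is an array of these regions terminated by an entry
 * with size == 0.  Hypothetical example: a bridge mapping 1 GiB of CPU RAM
 * at physical 0x80000000 to bus address 0x0 would use a single region with
 * .cpu_start = 0x80000000, .dma_start = 0x0, .size = SZ_1G and
 * .offset = 0x80000000, so the lookup below returns paddr - 0x80000000 for
 * any physical address in that range.
 */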
static inline dma_addr_t translate_phys_to_dma(struct device *dev,
		phys_addr_t paddr)
{
	const struct bus_dma_region *m;

	for (m = dev->dma_range_map; m->size; m++)
		if (paddr >= m->cpu_start && paddr - m->cpu_start < m->size)
			return (dma_addr_t)paddr - m->offset;

	/* make sure dma_capable fails when no translation is available */
	return DMA_MAPPING_ERROR;
}

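/*
 * Reverse of translate_phys_to_dma(): find the region covering @dma_addr
 * and add its offset back.  Returns (phys_addr_t)-1 when no region matches.
 */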
static inline phys_addr_t translate_dma_to_phys(struct device *dev,
		dma_addr_t dma_addr)
{
	const struct bus_dma_region *m;

	for (m = dev->dma_range_map; m->size; m++)
		if (dma_addr >= m->dma_start && dma_addr - m->dma_start < m->size)
			return (phys_addr_t)dma_addr + m->offset;

	return (phys_addr_t)-1;
}

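/*
 * Architectures that select CONFIG_ARCH_HAS_PHYS_TO_DMA provide their own
 * phys_to_dma() and dma_to_phys() in <asm/dma-direct.h>; all others get the
 * generic dma_range_map based implementations below.
 */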
#ifdef CONFIG_ARCH_HAS_PHYS_TO_DMA
#include <asm/dma-direct.h>
#ifndef phys_to_dma_unencrypted
#define phys_to_dma_unencrypted		phys_to_dma
#endif
#else
static inline dma_addr_t phys_to_dma_unencrypted(struct device *dev,
		phys_addr_t paddr)
{
	if (dev->dma_range_map)
		return translate_phys_to_dma(dev, paddr);
	return paddr;
}

/*
 * If memory encryption is supported, phys_to_dma will set the memory encryption
 * bit in the DMA address, and dma_to_phys will clear it.
 * phys_to_dma_unencrypted is for use on special unencrypted memory like swiotlb
 * buffers.
 */
static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	return __sme_set(phys_to_dma_unencrypted(dev, paddr));
}

static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dma_addr)
{
	phys_addr_t paddr;

	if (dev->dma_range_map)
		paddr = translate_dma_to_phys(dev, dma_addr);
	else
		paddr = dma_addr;

	return __sme_clr(paddr);
}
#endif /* !CONFIG_ARCH_HAS_PHYS_TO_DMA */

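/*
 * force_dma_unencrypted() reports whether DMA buffers for @dev must be
 * mapped decrypted, e.g. in guests running with memory encryption such as
 * AMD SEV.
 */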
#ifdef CONFIG_ARCH_HAS_FORCE_DMA_UNENCRYPTED
bool force_dma_unencrypted(struct device *dev);
#else
static inline bool force_dma_unencrypted(struct device *dev)
{
	return false;
}
#endif /* CONFIG_ARCH_HAS_FORCE_DMA_UNENCRYPTED */

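/*
 * Check that the device can reach every byte of [addr, addr + size - 1],
 * honouring both its DMA mask and any bus_dma_limit.
 */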
static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size,
		bool is_ram)
{
	dma_addr_t end = addr + size - 1;

	if (addr == DMA_MAPPING_ERROR)
		return false;
	if (is_ram && !IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT) &&
	    min(addr, end) < phys_to_dma(dev, PFN_PHYS(min_low_pfn)))
		return false;

	return end <= min_not_zero(*dev->dma_mask, dev->bus_dma_limit);
}

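/* These entry points are implemented in kernel/dma/direct.c. */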
u64 dma_direct_get_required_mask(struct device *dev);
void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs);
void dma_direct_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_addr, unsigned long attrs);
struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp);
void dma_direct_free_pages(struct device *dev, size_t size,
		struct page *page, dma_addr_t dma_addr,
		enum dma_data_direction dir);
int dma_direct_supported(struct device *dev, u64 mask);
dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir, unsigned long attrs);

#endif /* _LINUX_DMA_DIRECT_H */