/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Internals of the DMA direct mapping implementation. Only for use by the
 * DMA mapping code and IOMMU drivers.
 */
#ifndef _LINUX_DMA_DIRECT_H
#define _LINUX_DMA_DIRECT_H 1

#include <linux/dma-mapping.h>
#include <linux/dma-map-ops.h>
#include <linux/memblock.h> /* for min_low_pfn */
#include <linux/mem_encrypt.h>
#include <linux/swiotlb.h>

/* Number of address bits covered by ZONE_DMA; 24 by default, overridden by arch code. */
extern unsigned int zone_dma_bits;

/*
 * Record the mapping of CPU physical to DMA addresses for a given region.
 */
struct bus_dma_region {
	phys_addr_t cpu_start;
	dma_addr_t dma_start;
	u64 size;
	u64 offset;
};

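/*
 * Illustrative sketch (not part of the kernel sources): a hypothetical bus
 * whose RAM at CPU physical address 0x80000000 appears to devices at bus
 * address 0x0 would be described by a map terminated with an all-zero entry:
 *
 *	static const struct bus_dma_region example_map[] = {
 *		{ .cpu_start = 0x80000000, .dma_start = 0x0,
 *		  .size = 0x40000000, .offset = 0x80000000 },
 *		{ }
 *	};
 *
 * offset is cpu_start - dma_start: translate_phys_to_dma() below subtracts it
 * (0x80001000 becomes 0x1000) and translate_dma_to_phys() adds it back.
 */
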
static inline dma_addr_t translate_phys_to_dma(struct device *dev,
		phys_addr_t paddr)
{
	const struct bus_dma_region *m;

	for (m = dev->dma_range_map; m->size; m++)
		if (paddr >= m->cpu_start && paddr - m->cpu_start < m->size)
			return (dma_addr_t)paddr - m->offset;

	/* make sure dma_capable fails when no translation is available */
	return DMA_MAPPING_ERROR;
}

static inline phys_addr_t translate_dma_to_phys(struct device *dev,
		dma_addr_t dma_addr)
{
	const struct bus_dma_region *m;

	for (m = dev->dma_range_map; m->size; m++)
		if (dma_addr >= m->dma_start && dma_addr - m->dma_start < m->size)
			return (phys_addr_t)dma_addr + m->offset;

	/* no translation found: report an invalid physical address */
	return (phys_addr_t)-1;
}

#ifdef CONFIG_ARCH_HAS_PHYS_TO_DMA
#include <asm/dma-direct.h>
#ifndef phys_to_dma_unencrypted
#define phys_to_dma_unencrypted phys_to_dma
#endif
#else
static inline dma_addr_t phys_to_dma_unencrypted(struct device *dev,
		phys_addr_t paddr)
{
	if (dev->dma_range_map)
		return translate_phys_to_dma(dev, paddr);
	return paddr;
}

/*
 * If memory encryption is supported, phys_to_dma will set the memory encryption
 * bit in the DMA address, and dma_to_phys will clear it.
 * phys_to_dma_unencrypted is for use on special unencrypted memory like swiotlb
 * buffers.
 */
static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	return __sme_set(phys_to_dma_unencrypted(dev, paddr));
}

static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dma_addr)
{
	phys_addr_t paddr;

	if (dev->dma_range_map)
		paddr = translate_dma_to_phys(dev, dma_addr);
	else
		paddr = dma_addr;

	return __sme_clr(paddr);
}
#endif /* !CONFIG_ARCH_HAS_PHYS_TO_DMA */
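
/*
 * Usage sketch (illustrative only): the direct-mapping code converts a CPU
 * physical address to a device-visible address and back with
 *
 *	dma_addr_t dma_addr = phys_to_dma(dev, page_to_phys(page));
 *	phys_addr_t paddr = dma_to_phys(dev, dma_addr);
 *
 * With memory encryption active, __sme_set() marks dma_addr as encrypted;
 * swiotlb bounce buffers are shared with the device unencrypted, so they are
 * mapped with phys_to_dma_unencrypted() instead.
 */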

#ifdef CONFIG_ARCH_HAS_FORCE_DMA_UNENCRYPTED
bool force_dma_unencrypted(struct device *dev);
#else
static inline bool force_dma_unencrypted(struct device *dev)
{
	return false;
}
#endif /* CONFIG_ARCH_HAS_FORCE_DMA_UNENCRYPTED */

static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size,
		bool is_ram)
{
	dma_addr_t end = addr + size - 1;

	if (addr == DMA_MAPPING_ERROR)
		return false;
	/*
	 * With a narrow dma_addr_t, addr + size may have wrapped around; a
	 * value below the DMA address of the first page of RAM gives it away.
	 */
	if (is_ram && !IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT) &&
	    min(addr, end) < phys_to_dma(dev, PFN_PHYS(min_low_pfn)))
		return false;

	return end <= min_not_zero(*dev->dma_mask, dev->bus_dma_limit);
}
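
/*
 * Example (illustrative only): with *dev->dma_mask == DMA_BIT_MASK(32) and no
 * bus_dma_limit, a one-page buffer at DMA address 0xfffff000 passes the check
 * (end == 0xffffffff) while one starting at 0xfffff001 does not.
 */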

u64 dma_direct_get_required_mask(struct device *dev);
void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs);
void dma_direct_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_addr, unsigned long attrs);
struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp);
void dma_direct_free_pages(struct device *dev, size_t size,
		struct page *page, dma_addr_t dma_addr,
		enum dma_data_direction dir);
int dma_direct_supported(struct device *dev, u64 mask);
dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir, unsigned long attrs);

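/*
 * Typical call pattern (illustrative only), as used by the core mapping code
 * when no IOMMU is in the way:
 *
 *	dma_addr_t dma_handle;
 *	void *cpu_addr = dma_direct_alloc(dev, size, &dma_handle,
 *					  GFP_KERNEL, 0);
 *	...
 *	dma_direct_free(dev, size, cpu_addr, dma_handle, 0);
 */
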
#endif /* _LINUX_DMA_DIRECT_H */