/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_DMA_DIRECT_H
#define _LINUX_DMA_DIRECT_H 1

#include <linux/dma-mapping.h>
#include <linux/mem_encrypt.h>

#ifdef CONFIG_ARCH_HAS_PHYS_TO_DMA
#include <asm/dma-direct.h>
#else
static inline dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	dma_addr_t dev_addr = (dma_addr_t)paddr;

	return dev_addr - ((dma_addr_t)dev->dma_pfn_offset << PAGE_SHIFT);
}

static inline phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t dev_addr)
{
	phys_addr_t paddr = (phys_addr_t)dev_addr;

	return paddr + ((phys_addr_t)dev->dma_pfn_offset << PAGE_SHIFT);
}
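
/*
 * Worked example (illustrative note, not part of the original header):
 * with a hypothetical dev->dma_pfn_offset of 0x80000 and a PAGE_SHIFT
 * of 12, __phys_to_dma() maps CPU physical address 0x90000000 to device
 * address 0x90000000 - (0x80000 << 12) = 0x10000000; __dma_to_phys()
 * adds the same offset back to recover the original physical address.
 */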

static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
	if (!dev->dma_mask)
		return false;

	return addr + size - 1 <=
		min_not_zero(*dev->dma_mask, dev->bus_dma_mask);
}
#endif /* !CONFIG_ARCH_HAS_PHYS_TO_DMA */
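
/*
 * Example (illustrative note, not part of the original header): for a
 * device with a 32-bit dma_mask and no bus_dma_mask set, dma_capable()
 * accepts a transfer only when addr + size - 1 <= 0xffffffff, i.e. the
 * last byte of the buffer is addressable by the device.
 * min_not_zero() picks the smaller of the two masks, ignoring a zero
 * bus_dma_mask.
 */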

#ifdef CONFIG_ARCH_HAS_FORCE_DMA_UNENCRYPTED
bool force_dma_unencrypted(struct device *dev);
#else
static inline bool force_dma_unencrypted(struct device *dev)
{
	return false;
}
#endif /* CONFIG_ARCH_HAS_FORCE_DMA_UNENCRYPTED */
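
/*
 * Context (illustrative note, not part of the original header): on
 * architectures that select CONFIG_ARCH_HAS_FORCE_DMA_UNENCRYPTED,
 * e.g. x86 running as an AMD SEV guest, the arch implementation can
 * return true so that DMA buffers are mapped decrypted for device
 * access.
 */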

/*
 * If memory encryption is supported, phys_to_dma will set the memory encryption
 * bit in the DMA address, and dma_to_phys will clear it. The raw __phys_to_dma
 * and __dma_to_phys versions should only be used on non-encrypted memory for
 * special occasions like DMA coherent buffers.
 */
static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	return __sme_set(__phys_to_dma(dev, paddr));
}

static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
{
	return __sme_clr(__dma_to_phys(dev, daddr));
}
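
/*
 * Example (illustrative note, not part of the original header): on an
 * AMD SME system where sme_me_mask holds the C-bit (e.g. bit 47),
 * __sme_set() ORs that bit into the address returned by __phys_to_dma()
 * and __sme_clr() masks it back out. On systems without memory
 * encryption, sme_me_mask is 0 and both helpers are no-ops.
 */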

u64 dma_direct_get_required_mask(struct device *dev);
void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs);
void dma_direct_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_addr, unsigned long attrs);
void *dma_direct_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs);
void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_addr, unsigned long attrs);
struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs);
void __dma_direct_free_pages(struct device *dev, size_t size, struct page *page);
int dma_direct_supported(struct device *dev, u64 mask);
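
/*
 * Usage sketch (illustrative note, not part of the original header;
 * drivers normally go through the dma_alloc_coherent()/
 * dma_free_coherent() wrappers, which may dispatch to these dma-direct
 * implementations when the device uses the direct mapping):
 *
 *	dma_addr_t dma_handle;
 *	void *cpu_addr = dma_direct_alloc(dev, PAGE_SIZE, &dma_handle,
 *					  GFP_KERNEL, 0);
 *	if (cpu_addr)
 *		dma_direct_free(dev, PAGE_SIZE, cpu_addr, dma_handle, 0);
 */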
#endif /* _LINUX_DMA_DIRECT_H */