/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_DMA_NONCOHERENT_H
#define _LINUX_DMA_NONCOHERENT_H 1

#include <linux/dma-mapping.h>
#include <asm/pgtable.h>

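/*
 * dev_is_dma_coherent - report whether a device is coherent with CPU caches.
 *
 * Architectures that select one of the ARCH_HAS_SYNC_DMA_FOR_* options
 * record per-device coherence in dev->dma_coherent; architectures with
 * their own <asm/dma-coherence.h> provide the helper themselves; everyone
 * else is assumed to be fully coherent.
 */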
#ifdef CONFIG_ARCH_HAS_DMA_COHERENCE_H
#include <asm/dma-coherence.h>
#elif defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
	defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
	defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
static inline bool dev_is_dma_coherent(struct device *dev)
{
	return dev->dma_coherent;
}
#else
static inline bool dev_is_dma_coherent(struct device *dev)
{
	return true;
}
#endif /* CONFIG_ARCH_HAS_DMA_COHERENCE_H */

/*
 * Check if an allocation needs to be marked uncached to be coherent.
 */
static __always_inline bool dma_alloc_need_uncached(struct device *dev,
		unsigned long attrs)
{
	if (dev_is_dma_coherent(dev))
		return false;
	if (attrs & DMA_ATTR_NO_KERNEL_MAPPING)
		return false;
	if (IS_ENABLED(CONFIG_DMA_NONCOHERENT_CACHE_SYNC) &&
	    (attrs & DMA_ATTR_NON_CONSISTENT))
		return false;
	return true;
}

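/*
 * Architecture-provided allocator hooks, typically called from the
 * dma-direct code when an architecture needs to hand out its own coherent
 * allocations instead of the generic remapped/uncached ones.
 */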
void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs);
void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_addr, unsigned long attrs);
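
/*
 * Translate a kernel virtual address returned by arch_dma_alloc() back to
 * a page frame number, so that the generic mmap()/get_sgtable() helpers
 * can operate on such allocations.
 */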
long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr,
		dma_addr_t dma_addr);

#ifdef CONFIG_MMU
/*
 * Page protection so that devices that can't snoop CPU caches can use the
 * memory coherently.  We default to pgprot_noncached which is usually used
 * for ioremap as a safe bet, but architectures can override this with less
 * strict semantics if possible.
 */
#ifndef pgprot_dmacoherent
#define pgprot_dmacoherent(prot)	pgprot_noncached(prot)
#endif

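/*
 * Compute the page protection used when a DMA allocation is remapped or
 * mmap()ed into userspace, typically downgrading to an uncached or
 * write-combining mapping as the device's coherence and the allocation
 * attributes require.
 */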
pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs);
#else
static inline pgprot_t dma_pgprot(struct device *dev, pgprot_t prot,
		unsigned long attrs)
{
	return prot;	/* no protection bits supported without page tables */
}
#endif /* CONFIG_MMU */

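/*
 * Cache maintenance hook behind dma_cache_sync() for memory obtained with
 * DMA_ATTR_NON_CONSISTENT; a no-op on architectures that don't need it.
 */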
#ifdef CONFIG_DMA_NONCOHERENT_CACHE_SYNC
void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		enum dma_data_direction direction);
#else
static inline void arch_dma_cache_sync(struct device *dev, void *vaddr,
		size_t size, enum dma_data_direction direction)
{
}
#endif /* CONFIG_DMA_NONCOHERENT_CACHE_SYNC */

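/*
 * Transfer ownership of a buffer from the CPU to the device before DMA
 * starts: typically a cache writeback (and, for DMA_FROM_DEVICE, an
 * invalidate) of the affected lines.
 */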
#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE
void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir);
#else
static inline void arch_sync_dma_for_device(struct device *dev,
		phys_addr_t paddr, size_t size, enum dma_data_direction dir)
{
}
#endif /* CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE */

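/*
 * Transfer ownership back from the device to the CPU after the DMA has
 * completed: typically a cache invalidate so the CPU sees the device's
 * writes.  A rough sketch (simplified; the real callers live in the
 * streaming map/unmap paths of kernel/dma/direct.c) of how the two hooks
 * pair up around a transfer:
 *
 *	arch_sync_dma_for_device(dev, paddr, size, dir);
 *	... device performs the DMA transfer ...
 *	arch_sync_dma_for_cpu(dev, paddr, size, dir);
 */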
#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir);
#else
static inline void arch_sync_dma_for_cpu(struct device *dev,
		phys_addr_t paddr, size_t size, enum dma_data_direction dir)
{
}
#endif /* CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU */

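/*
 * Global variant of arch_sync_dma_for_cpu() for architectures (e.g. some
 * MIPS platforms) that must flush a shared cache that is not maintained
 * per buffer address.
 */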
#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL
void arch_sync_dma_for_cpu_all(struct device *dev);
#else
static inline void arch_sync_dma_for_cpu_all(struct device *dev)
{
}
#endif /* CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL */

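/*
 * Prepare a page for use as a coherent allocation: usually a writeback and
 * invalidate of any cached aliases before the page is handed out uncached
 * or remapped.
 */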
#ifdef CONFIG_ARCH_HAS_DMA_PREP_COHERENT
void arch_dma_prep_coherent(struct page *page, size_t size);
#else
static inline void arch_dma_prep_coherent(struct page *page, size_t size)
{
}
#endif /* CONFIG_ARCH_HAS_DMA_PREP_COHERENT */

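/*
 * Convert between the cached and uncached kernel views of an address, for
 * architectures with a permanent uncached segment (e.g. MIPS KSEG1).
 */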
void *uncached_kernel_address(void *addr);
void *cached_kernel_address(void *addr);

#endif /* _LINUX_DMA_NONCOHERENT_H */