// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on linux/arch/arm/mm/dma-mapping.c
 *
 * Copyright (C) 2000-2004 Russell King
 */

#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
#include <linux/scatterlist.h>

#include <asm/cachetype.h>
#include <asm/cacheflush.h>
#include <asm/outercache.h>
#include <asm/cp15.h>

#include "dma.h"

/*
 * The generic direct mapping code is used if
 *  - MMU/MPU is off
 *  - cpu is v7m w/o cache support
 *  - device is coherent
 * otherwise arm_nommu_dma_ops is used.
 *
 * arm_nommu_dma_ops relies on consistent DMA memory (please refer to
 * [1] for how to declare such memory).
 *
 * [1] Documentation/devicetree/bindings/reserved-memory/reserved-memory.txt
 */
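
/*
 * For reference, such a consistent DMA region can be declared in the
 * device tree roughly as follows (a minimal sketch based on [1]; the
 * node name, addresses and sizes are illustrative, not taken from real
 * hardware):
 *
 *	reserved-memory {
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *		ranges;
 *
 *		coherent_dma: linux,dma@20000000 {
 *			compatible = "shared-dma-pool";
 *			reg = <0x20000000 0x100000>;
 *			no-map;
 *			linux,dma-default;
 *		};
 *	};
 *
 * "linux,dma-default" makes this pool the global coherent pool that
 * dma_alloc_from_global_coherent() draws from.
 */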

static void *arm_nommu_dma_alloc(struct device *dev, size_t size,
				 dma_addr_t *dma_handle, gfp_t gfp,
				 unsigned long attrs)
{
	void *ret = dma_alloc_from_global_coherent(dev, size, dma_handle);

	/*
	 * dma_alloc_from_global_coherent() may fail because:
	 *
	 * - no consistent DMA region has been defined, so we can't
	 *   continue.
	 * - there is no space left in the consistent DMA region, so we
	 *   can only fall back to the generic allocator if we were
	 *   told that consistency is not required.
	 */

	WARN_ON_ONCE(ret == NULL);
	return ret;
}

static void arm_nommu_dma_free(struct device *dev, size_t size,
			       void *cpu_addr, dma_addr_t dma_addr,
			       unsigned long attrs)
{
	int ret = dma_release_from_global_coherent(get_order(size), cpu_addr);

	WARN_ON_ONCE(ret == 0);
}

static int arm_nommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
			      void *cpu_addr, dma_addr_t dma_addr, size_t size,
			      unsigned long attrs)
{
	int ret;

	if (dma_mmap_from_global_coherent(vma, cpu_addr, size, &ret))
		return ret;
	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;
	return -ENXIO;
}
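/*
 * Without an MMU the CPU address of a buffer is also its DMA address, so
 * "mapping" a buffer reduces to the cache maintenance needed to hand it
 * over: clean (write back) lines before the device reads from memory,
 * invalidate lines before the device writes to it.
 */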
static void __dma_page_cpu_to_dev(phys_addr_t paddr, size_t size,
				  enum dma_data_direction dir)
{
	dmac_map_area(__va(paddr), size, dir);

	if (dir == DMA_FROM_DEVICE)
		outer_inv_range(paddr, paddr + size);
	else
		outer_clean_range(paddr, paddr + size);
}

static void __dma_page_dev_to_cpu(phys_addr_t paddr, size_t size,
				  enum dma_data_direction dir)
{
	if (dir != DMA_TO_DEVICE) {
		outer_inv_range(paddr, paddr + size);
		dmac_unmap_area(__va(paddr), size, dir);
	}
}

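/*
 * On nommu the DMA handle is simply the physical address of the page plus
 * the offset; map/unmap perform only the cache maintenance.
 */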
static dma_addr_t arm_nommu_dma_map_page(struct device *dev, struct page *page,
					 unsigned long offset, size_t size,
					 enum dma_data_direction dir,
					 unsigned long attrs)
{
	dma_addr_t handle = page_to_phys(page) + offset;

	__dma_page_cpu_to_dev(handle, size, dir);

	return handle;
}

static void arm_nommu_dma_unmap_page(struct device *dev, dma_addr_t handle,
				     size_t size, enum dma_data_direction dir,
				     unsigned long attrs)
{
	__dma_page_dev_to_cpu(handle, size, dir);
}
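/*
 * Scatterlists follow the same pattern: each segment's DMA address is its
 * physical address, with per-segment cache maintenance.
 */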
static int arm_nommu_dma_map_sg(struct device *dev, struct scatterlist *sgl,
				int nents, enum dma_data_direction dir,
				unsigned long attrs)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sgl, sg, nents, i) {
		sg_dma_address(sg) = sg_phys(sg);
		sg_dma_len(sg) = sg->length;
		__dma_page_cpu_to_dev(sg_dma_address(sg), sg_dma_len(sg), dir);
	}

	return nents;
}

static void arm_nommu_dma_unmap_sg(struct device *dev, struct scatterlist *sgl,
				   int nents, enum dma_data_direction dir,
				   unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i)
		__dma_page_dev_to_cpu(sg_dma_address(sg), sg_dma_len(sg), dir);
}

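/*
 * The sync hooks mirror map/unmap: sync_*_for_device redoes the
 * CPU-to-device maintenance, sync_*_for_cpu the device-to-CPU side.
 */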
static void arm_nommu_dma_sync_single_for_device(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	__dma_page_cpu_to_dev(handle, size, dir);
}

static void arm_nommu_dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	__dma_page_dev_to_cpu(handle, size, dir);
}

static void arm_nommu_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
					     int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i)
		__dma_page_cpu_to_dev(sg_dma_address(sg), sg_dma_len(sg), dir);
}

static void arm_nommu_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
					  int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i)
		__dma_page_dev_to_cpu(sg_dma_address(sg), sg_dma_len(sg), dir);
}

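/*
 * Plain page allocations can go straight to the generic dma-direct
 * helpers, since DMA addresses equal physical addresses here.
 */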
const struct dma_map_ops arm_nommu_dma_ops = {
	.alloc = arm_nommu_dma_alloc,
	.free = arm_nommu_dma_free,
	.alloc_pages = dma_direct_alloc_pages,
	.free_pages = dma_direct_free_pages,
	.mmap = arm_nommu_dma_mmap,
	.map_page = arm_nommu_dma_map_page,
	.unmap_page = arm_nommu_dma_unmap_page,
	.map_sg = arm_nommu_dma_map_sg,
	.unmap_sg = arm_nommu_dma_unmap_sg,
	.sync_single_for_device = arm_nommu_dma_sync_single_for_device,
	.sync_single_for_cpu = arm_nommu_dma_sync_single_for_cpu,
	.sync_sg_for_device = arm_nommu_dma_sync_sg_for_device,
	.sync_sg_for_cpu = arm_nommu_dma_sync_sg_for_cpu,
};
EXPORT_SYMBOL(arm_nommu_dma_ops);

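/*
 * Decide at device-setup time whether the device can be treated as
 * DMA-coherent. Coherent devices keep the default dma-direct ops;
 * only non-coherent ones get arm_nommu_dma_ops installed.
 */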
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
			const struct iommu_ops *iommu, bool coherent)
{
	if (IS_ENABLED(CONFIG_CPU_V7M)) {
		/*
		 * Cache support for v7m is optional, so the device can be
		 * treated as coherent if no cache has been detected. Note
		 * that it is not enough to check whether an MPU is in use,
		 * since in the absence of an MPU the default system memory
		 * map is used.
		 */
		dev->archdata.dma_coherent = (cacheid) ? coherent : true;
	} else {
		/*
		 * Assume coherent DMA in case MMU/MPU has not been set up.
		 */
		dev->archdata.dma_coherent = (get_cr() & CR_M) ? coherent : true;
	}

	if (!dev->archdata.dma_coherent)
		set_dma_ops(dev, &arm_nommu_dma_ops);
}