// SPDX-License-Identifier: GPL-2.0-only
#include <linux/cpu.h>
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/memblock.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>

#include <xen/xen.h>
#include <xen/interface/grant_table.h>
#include <xen/interface/memory.h>
#include <xen/page.h>
#include <xen/xen-ops.h>
#include <xen/swiotlb-xen.h>

#include <asm/cacheflush.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/interface.h>

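/*
 * Allocate pages for the swiotlb bounce buffer.  If any RAM starts below
 * the 32-bit boundary, restrict the allocation to ZONE_DMA32 (or ZONE_DMA
 * when ZONE_DMA32 is not configured) so the buffer remains addressable by
 * 32-bit-capable devices.
 */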
unsigned long xen_get_swiotlb_free_pages(unsigned int order)
{
	phys_addr_t base;
	gfp_t flags = __GFP_NOWARN|__GFP_KSWAPD_RECLAIM;
	u64 i;

	for_each_mem_range(i, &base, NULL) {
		if (base < (phys_addr_t)0xffffffff) {
			if (IS_ENABLED(CONFIG_ZONE_DMA32))
				flags |= __GFP_DMA32;
			else
				flags |= __GFP_DMA;
			break;
		}
	}
	return __get_free_pages(flags, order);
}

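/* Set by xen_mm_init() when Xen implements the GNTTABOP_cache_flush hypercall. */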
static bool hypercall_cflush = false;

/* buffers in highmem or foreign pages cannot cross page boundaries */
static void dma_cache_maint(struct device *dev, dma_addr_t handle,
			    size_t size, u32 op)
{
	struct gnttab_cache_flush cflush;

	cflush.offset = xen_offset_in_page(handle);
	cflush.op = op;
	handle &= XEN_PAGE_MASK;

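	/* Issue one GNTTABOP_cache_flush hypercall per Xen page in the range. */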
	do {
		cflush.a.dev_bus_addr = dma_to_phys(dev, handle);

		if (size + cflush.offset > XEN_PAGE_SIZE)
			cflush.length = XEN_PAGE_SIZE - cflush.offset;
		else
			cflush.length = size;

		HYPERVISOR_grant_table_op(GNTTABOP_cache_flush, &cflush, 1);

		cflush.offset = 0;
		handle += cflush.length;
		size -= cflush.length;
	} while (size);
}

/*
 * Dom0 is mapped 1:1, and while a Linux page can span multiple Xen pages,
 * it is not possible for it to contain a mix of local and foreign Xen
 * pages.  Calling pfn_valid on a foreign mfn will always return false, so
 * if pfn_valid returns true the page is local and we can use the native
 * dma-direct functions; otherwise we call the Xen specific version.
 */
void xen_dma_sync_for_cpu(struct device *dev, dma_addr_t handle,
			  size_t size, enum dma_data_direction dir)
{
	if (dir != DMA_TO_DEVICE)
		dma_cache_maint(dev, handle, size, GNTTAB_CACHE_INVAL);
}

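/*
 * Called before the device accesses the buffer: invalidate the CPU caches
 * when the device will only write (DMA_FROM_DEVICE), otherwise clean them
 * so the device observes up-to-date data.
 */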
void xen_dma_sync_for_device(struct device *dev, dma_addr_t handle,
			     size_t size, enum dma_data_direction dir)
{
	if (dir == DMA_FROM_DEVICE)
		dma_cache_maint(dev, handle, size, GNTTAB_CACHE_INVAL);
	else
		dma_cache_maint(dev, handle, size, GNTTAB_CACHE_CLEAN);
}

bool xen_arch_need_swiotlb(struct device *dev,
			   phys_addr_t phys,
			   dma_addr_t dev_addr)
{
	unsigned int xen_pfn = XEN_PFN_DOWN(phys);
	unsigned int bfn = XEN_PFN_DOWN(dma_to_phys(dev, dev_addr));

	/*
	 * The swiotlb buffer should be used if
	 *	- Xen doesn't have the cache flush hypercall
	 *	- The Linux page refers to foreign memory
	 *	- The device doesn't support coherent DMA requests
	 *
	 * The Linux page may span multiple Xen pages, although it's not
	 * possible to have a mix of local and foreign Xen pages.
	 * Furthermore, range_straddles_page_boundary is already checking
	 * if the buffer is physically contiguous in the host RAM.
	 *
	 * Therefore we only need to check the first Xen page to know if we
	 * require a bounce buffer because the device doesn't support coherent
	 * memory and we are not able to flush the cache.
	 */
	return (!hypercall_cflush && (xen_pfn != bfn) &&
		!dev_is_dma_coherent(dev));
}

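/*
 * Dom0 is mapped 1:1, so a physically contiguous buffer is already
 * contiguous in bus addresses; simply report the physical start address
 * as the DMA handle.
 */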
int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
				 unsigned int address_bits,
				 dma_addr_t *dma_handle)
{
	if (!xen_initial_domain())
		return -EINVAL;

	/* we assume that dom0 is mapped 1:1 for now */
	*dma_handle = pstart;
	return 0;
}

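/* xen_create_contiguous_region() did not remap anything, so there is nothing to undo. */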
void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order)
{
	return;
}

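/*
 * For dom0, set up the swiotlb-xen bounce buffer and probe whether Xen
 * implements GNTTABOP_cache_flush by issuing a zero-length flush: any
 * return value other than -ENOSYS means the hypercall is available.
 */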
static int __init xen_mm_init(void)
{
	struct gnttab_cache_flush cflush;

	if (!xen_initial_domain())
		return 0;
	xen_swiotlb_init(1, false);

	cflush.op = 0;
	cflush.a.dev_bus_addr = 0;
	cflush.offset = 0;
	cflush.length = 0;
	if (HYPERVISOR_grant_table_op(GNTTABOP_cache_flush, &cflush, 1) != -ENOSYS)
		hypercall_cflush = true;
	return 0;
}
arch_initcall(xen_mm_init);