// SPDX-License-Identifier: GPL-2.0-only
#include <linux/cpu.h>
#include <linux/dma-noncoherent.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/memblock.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>

#include <xen/xen.h>
#include <xen/interface/grant_table.h>
#include <xen/interface/memory.h>
#include <xen/page.h>
#include <xen/swiotlb-xen.h>

#include <asm/cacheflush.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/interface.h>
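
/*
 * Pick GFP flags for the swiotlb-xen bounce buffer: if any memblock
 * region starts below the 32-bit boundary, constrain the allocation to
 * ZONE_DMA32 (or ZONE_DMA when ZONE_DMA32 is not configured) so the
 * buffer remains addressable by 32-bit DMA masters.
 */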
unsigned long xen_get_swiotlb_free_pages(unsigned int order)
{
	struct memblock_region *reg;
	gfp_t flags = __GFP_NOWARN|__GFP_KSWAPD_RECLAIM;

	for_each_memblock(memory, reg) {
		if (reg->base < (phys_addr_t)0xffffffff) {
			if (IS_ENABLED(CONFIG_ZONE_DMA32))
				flags |= __GFP_DMA32;
			else
				flags |= __GFP_DMA;
			break;
		}
	}
	return __get_free_pages(flags, order);
}
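
/*
 * A minimal caller sketch (hypothetical, loosely modelled on how
 * swiotlb-xen obtains its bounce buffer): retry with a smaller order
 * when a large contiguous allocation fails.
 *
 *	void *start;
 *
 *	while (order > 0) {
 *		start = (void *)xen_get_swiotlb_free_pages(order);
 *		if (start)
 *			break;
 *		order--;
 *	}
 */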

/* Set by xen_mm_init() if Xen implements GNTTABOP_cache_flush. */
static bool hypercall_cflush = false;

/*
 * Buffers in highmem or foreign pages cannot cross page boundaries, and
 * a single GNTTABOP_cache_flush operation covers at most one Xen page,
 * so maintenance is issued in per-Xen-page chunks.
 */
static void dma_cache_maint(dma_addr_t handle, size_t size, u32 op)
{
	struct gnttab_cache_flush cflush;

	cflush.a.dev_bus_addr = handle & XEN_PAGE_MASK;
	cflush.offset = xen_offset_in_page(handle);
	cflush.op = op;

	do {
		if (size + cflush.offset > XEN_PAGE_SIZE)
			cflush.length = XEN_PAGE_SIZE - cflush.offset;
		else
			cflush.length = size;

		HYPERVISOR_grant_table_op(GNTTABOP_cache_flush, &cflush, 1);

		cflush.offset = 0;
		cflush.a.dev_bus_addr += cflush.length;
		size -= cflush.length;
	} while (size);
}
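
/*
 * Worked example with illustrative values: for a handle at offset 0x600
 * of a 4 KiB Xen page and size 0x200, the clamp does not trigger
 * (0x200 + 0x600 <= XEN_PAGE_SIZE), so a single hypercall is issued
 * with offset 0x600 and length 0x200.
 */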

/*
 * Dom0 is mapped 1:1, and while the Linux page can span multiple Xen
 * pages, it is not possible for it to contain a mix of local and foreign
 * Xen pages. Calling pfn_valid on a foreign mfn will always return
 * false, so if pfn_valid returns true the page is local and we can use
 * the native dma-direct functions, otherwise we call the Xen-specific
 * version.
 */
void xen_dma_sync_for_cpu(struct device *dev, dma_addr_t handle,
		phys_addr_t paddr, size_t size, enum dma_data_direction dir)
{
	if (pfn_valid(PFN_DOWN(handle)))
		arch_sync_dma_for_cpu(dev, paddr, size, dir);
	else if (dir != DMA_TO_DEVICE)
		dma_cache_maint(handle, size, GNTTAB_CACHE_INVAL);
}

void xen_dma_sync_for_device(struct device *dev, dma_addr_t handle,
		phys_addr_t paddr, size_t size, enum dma_data_direction dir)
{
	if (pfn_valid(PFN_DOWN(handle)))
		arch_sync_dma_for_device(dev, paddr, size, dir);
	else if (dir == DMA_FROM_DEVICE)
		dma_cache_maint(handle, size, GNTTAB_CACHE_INVAL);
	else
		dma_cache_maint(handle, size, GNTTAB_CACHE_CLEAN);
}
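
/*
 * Direction handling on the foreign-page path above, in brief: before
 * the device writes (DMA_FROM_DEVICE) the CPU cache is invalidated so
 * stale lines cannot be evicted over the incoming DMA data; before the
 * device reads (DMA_TO_DEVICE or DMA_BIDIRECTIONAL) dirty lines are
 * cleaned out to RAM; after the device has written, the CPU side
 * invalidates again before reading.
 */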

bool xen_arch_need_swiotlb(struct device *dev,
			   phys_addr_t phys,
			   dma_addr_t dev_addr)
{
	unsigned int xen_pfn = XEN_PFN_DOWN(phys);
	unsigned int bfn = XEN_PFN_DOWN(dev_addr);

	/*
	 * The swiotlb buffer should be used if
	 *	- Xen doesn't have the cache flush hypercall
	 *	- The Linux page refers to foreign memory
	 *	- The device doesn't support coherent DMA requests
	 *
	 * The Linux page may span multiple Xen pages, although it's not
	 * possible for it to contain a mix of local and foreign Xen pages.
	 * Furthermore, range_straddles_page_boundary() already checks
	 * whether the buffer is physically contiguous in host RAM.
	 *
	 * Therefore we only need to check the first Xen page to know if we
	 * require a bounce buffer because the device doesn't support coherent
	 * memory and we are not able to flush the cache.
	 */
	return (!hypercall_cflush && (xen_pfn != bfn) &&
		!dev_is_dma_coherent(dev));
}
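
/*
 * Note: xen_pfn != bfn is the foreign-page test. In dom0's 1:1 mapping a
 * local page's machine frame equals its pseudo-physical frame, so a
 * mismatch means the page belongs to another domain.
 */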

int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
				 unsigned int address_bits,
				 dma_addr_t *dma_handle)
{
	if (!xen_initial_domain())
		return -EINVAL;

	/* we assume that dom0 is mapped 1:1 for now */
	*dma_handle = pstart;
	return 0;
}
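
/*
 * No memory was exchanged with Xen above, so there is nothing for
 * xen_destroy_contiguous_region() below to undo. (Assumption: these two
 * hooks are invoked by swiotlb-xen when it sets up and tears down its
 * bounce buffer.)
 */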
void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order)
{
	return;
}

int __init xen_mm_init(void)
{
	struct gnttab_cache_flush cflush;

	if (!xen_initial_domain())
		return 0;
	xen_swiotlb_init(1, false);

	/*
	 * Probe for the GNTTABOP_cache_flush hypercall with a zeroed,
	 * no-op request: any return value other than -ENOSYS means Xen
	 * implements it.
	 */
	cflush.op = 0;
	cflush.a.dev_bus_addr = 0;
	cflush.offset = 0;
	cflush.length = 0;
	if (HYPERVISOR_grant_table_op(GNTTABOP_cache_flush, &cflush, 1) != -ENOSYS)
		hypercall_cflush = true;
	return 0;
}
arch_initcall(xen_mm_init);
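
/*
 * Note (assumption about initcall ordering): arch_initcall() runs before
 * device_initcall(), so hypercall_cflush and the Xen swiotlb are ready
 * before device drivers start creating DMA mappings.
 */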