Update Linux to v5.4.2

Sync arch/arm/mm/dma-mapping.c with upstream v5.4.2:

- Switch the license boilerplate to an SPDX-License-Identifier tag and
  drop the obsolete <linux/bootmem.h> include.
- Remove arm_dma_mapping_error() along with the deleted
  ->mapping_error() dma_map_ops callback, and use the generic
  DMA_MAPPING_ERROR sentinel in place of ARM_MAPPING_ERROR.
- Hook up dma_direct_map_resource() and dma_direct_get_required_mask()
  in arm_dma_ops and arm_coherent_dma_ops.
- Adapt to the dma_common_{contiguous,pages}_remap() and
  dma_common_free_remap() API changes (no more VM_* flags argument),
  look pages up via dma_common_find_pages(), and drop the local
  __dma_alloc_remap()/__dma_free_remap()/__iommu_alloc_remap() wrappers.
- Follow the apply_to_page_range() callback change in __dma_update_pte()
  and drop the oversized-mask check from __dma_supported().
- Use vm_map_pages() in the IOMMU mmap path instead of open-coding a
  vm_insert_page() loop.
- Return no dma_map_ops for CONFIG_ARM_LPAE so the generic
  dma-direct/swiotlb code handles bounce buffering, and provide the
  CONFIG_SWIOTLB arch hooks: arch_sync_dma_for_{device,cpu}(),
  arch_dma_coherent_to_pfn() and arch_dma_alloc()/arch_dma_free().
- Point Xen initial domains directly at xen_swiotlb_dma_ops.
- Reset dev->dma_ops in arch_teardown_dma_ops() so a re-probe starts
  from scratch.

Change-Id: Idf6911045d9d382da2cfe01b1edff026404ac8fd
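---
Reviewer note (informational, ignored by git-am): with
->mapping_error() gone from dma_map_ops, mapping failure is reported
in-band; map routines return the generic DMA_MAPPING_ERROR sentinel
(~(dma_addr_t)0) and callers test it with dma_mapping_error(). A
minimal sketch of the driver-side pattern this converts to; the
mydrv_map_one() name, buffer and error choice are illustrative, not
part of this patch:

    #include <linux/dma-mapping.h>

    /* Hypothetical caller, not from this patch. */
    static int mydrv_map_one(struct device *dev, void *buf, size_t len,
                             dma_addr_t *out)
    {
            dma_addr_t addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

            /*
             * Compares addr against DMA_MAPPING_ERROR; no
             * per-architecture ->mapping_error() hook is consulted.
             */
            if (dma_mapping_error(dev, addr))
                    return -ENOMEM;

            *out = addr;
            return 0;
    }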
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 1cb9c0f..7d042d5 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -1,15 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/arch/arm/mm/dma-mapping.c
*
* Copyright (C) 2000-2004 Russell King
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* DMA uncached mapping support.
*/
-#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/genalloc.h>
@@ -18,7 +14,9 @@
#include <linux/list.h>
#include <linux/init.h>
#include <linux/device.h>
+#include <linux/dma-direct.h>
#include <linux/dma-mapping.h>
+#include <linux/dma-noncoherent.h>
#include <linux/dma-contiguous.h>
#include <linux/highmem.h>
#include <linux/memblock.h>
@@ -38,6 +36,7 @@
#include <asm/mach/map.h>
#include <asm/system_info.h>
#include <asm/dma-contiguous.h>
+#include <xen/swiotlb-xen.h>
#include "dma.h"
#include "mm.h"
@@ -180,11 +179,6 @@
__dma_page_cpu_to_dev(page, offset, size, dir);
}
-static int arm_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
- return dma_addr == ARM_MAPPING_ERROR;
-}
-
const struct dma_map_ops arm_dma_ops = {
.alloc = arm_dma_alloc,
.free = arm_dma_free,
@@ -194,12 +188,13 @@
.unmap_page = arm_dma_unmap_page,
.map_sg = arm_dma_map_sg,
.unmap_sg = arm_dma_unmap_sg,
+ .map_resource = dma_direct_map_resource,
.sync_single_for_cpu = arm_dma_sync_single_for_cpu,
.sync_single_for_device = arm_dma_sync_single_for_device,
.sync_sg_for_cpu = arm_dma_sync_sg_for_cpu,
.sync_sg_for_device = arm_dma_sync_sg_for_device,
- .mapping_error = arm_dma_mapping_error,
.dma_supported = arm_dma_supported,
+ .get_required_mask = dma_direct_get_required_mask,
};
EXPORT_SYMBOL(arm_dma_ops);
@@ -218,32 +213,15 @@
.get_sgtable = arm_dma_get_sgtable,
.map_page = arm_coherent_dma_map_page,
.map_sg = arm_dma_map_sg,
- .mapping_error = arm_dma_mapping_error,
+ .map_resource = dma_direct_map_resource,
.dma_supported = arm_dma_supported,
+ .get_required_mask = dma_direct_get_required_mask,
};
EXPORT_SYMBOL(arm_coherent_dma_ops);
static int __dma_supported(struct device *dev, u64 mask, bool warn)
{
- unsigned long max_dma_pfn;
-
- /*
- * If the mask allows for more memory than we can address,
- * and we actually have that much memory, then we must
- * indicate that DMA to this device is not supported.
- */
- if (sizeof(mask) != sizeof(dma_addr_t) &&
- mask > (dma_addr_t)~0 &&
- dma_to_pfn(dev, ~0) < max_pfn - 1) {
- if (warn) {
- dev_warn(dev, "Coherent DMA mask %#llx is larger than dma_addr_t allows\n",
- mask);
- dev_warn(dev, "Driver did not use or check the return value from dma_set_coherent_mask()?\n");
- }
- return 0;
- }
-
- max_dma_pfn = min(max_pfn, arm_dma_pfn_limit);
+ unsigned long max_dma_pfn = min(max_pfn, arm_dma_pfn_limit);
/*
* Translate the device's DMA mask to a PFN limit. This
@@ -362,25 +340,6 @@
pgprot_t prot, struct page **ret_page,
const void *caller, bool want_vaddr);
-static void *
-__dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
- const void *caller)
-{
- /*
- * DMA allocation can be mapped to user space, so lets
- * set VM_USERMAP flags too.
- */
- return dma_common_contiguous_remap(page, size,
- VM_ARM_DMA_CONSISTENT | VM_USERMAP,
- prot, caller);
-}
-
-static void __dma_free_remap(void *cpu_addr, size_t size)
-{
- dma_common_free_remap(cpu_addr, size,
- VM_ARM_DMA_CONSISTENT | VM_USERMAP);
-}
-
#define DEFAULT_DMA_COHERENT_POOL_SIZE SZ_256K
static struct gen_pool *atomic_pool __ro_after_init;
@@ -502,8 +461,7 @@
}
}
-static int __dma_update_pte(pte_t *pte, pgtable_t token, unsigned long addr,
- void *data)
+static int __dma_update_pte(pte_t *pte, unsigned long addr, void *data)
{
struct page *page = virt_to_page(addr);
pgprot_t prot = *(pgprot_t *)data;
@@ -537,7 +495,7 @@
if (!want_vaddr)
goto out;
- ptr = __dma_alloc_remap(page, size, gfp, prot, caller);
+ ptr = dma_common_contiguous_remap(page, size, prot, caller);
if (!ptr) {
__dma_free_buffer(page, size);
return NULL;
@@ -604,7 +562,7 @@
goto out;
if (PageHighMem(page)) {
- ptr = __dma_alloc_remap(page, size, GFP_KERNEL, prot, caller);
+ ptr = dma_common_contiguous_remap(page, size, prot, caller);
if (!ptr) {
dma_release_from_contiguous(dev, page, count);
return NULL;
@@ -624,7 +582,7 @@
{
if (want_vaddr) {
if (PageHighMem(page))
- __dma_free_remap(cpu_addr, size);
+ dma_common_free_remap(cpu_addr, size);
else
__dma_remap(page, size, PAGE_KERNEL);
}
@@ -716,7 +674,7 @@
static void remap_allocator_free(struct arm_dma_free_args *args)
{
if (args->want_vaddr)
- __dma_free_remap(args->cpu_addr, args->size);
+ dma_common_free_remap(args->cpu_addr, args->size);
__dma_free_buffer(args->page, args->size);
}
@@ -775,7 +733,7 @@
gfp &= ~(__GFP_COMP);
args.gfp = gfp;
- *handle = ARM_MAPPING_ERROR;
+ *handle = DMA_MAPPING_ERROR;
allowblock = gfpflags_allow_blocking(gfp);
cma = allowblock ? dev_get_cma_area(dev) : false;
@@ -904,17 +862,6 @@
__arm_dma_free(dev, size, cpu_addr, handle, attrs, true);
}
-/*
- * The whole dma_get_sgtable() idea is fundamentally unsafe - it seems
- * that the intention is to allow exporting memory allocated via the
- * coherent DMA APIs through the dma_buf API, which only accepts a
- * scattertable. This presents a couple of problems:
- * 1. Not all memory allocated via the coherent DMA APIs is backed by
- * a struct page
- * 2. Passing coherent DMA memory into the streaming APIs is not allowed
- * as we will try to flush the memory through a different alias to that
- * actually being used (and the flushes are redundant.)
- */
int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
void *cpu_addr, dma_addr_t handle, size_t size,
unsigned long attrs)
@@ -1153,6 +1100,15 @@
static const struct dma_map_ops *arm_get_dma_map_ops(bool coherent)
{
+ /*
+ * When CONFIG_ARM_LPAE is set, physical address can extend above
+ * 32-bits, which then can't be addressed by devices that only support
+ * 32-bit DMA.
+ * Use the generic dma-direct / swiotlb ops code in that case, as that
+ * handles bounce buffering for us.
+ */
+ if (IS_ENABLED(CONFIG_ARM_LPAE))
+ return NULL;
return coherent ? &arm_coherent_dma_ops : &arm_dma_ops;
}
@@ -1218,7 +1174,7 @@
if (i == mapping->nr_bitmaps) {
if (extend_iommu_mapping(mapping)) {
spin_unlock_irqrestore(&mapping->lock, flags);
- return ARM_MAPPING_ERROR;
+ return DMA_MAPPING_ERROR;
}
start = bitmap_find_next_zero_area(mapping->bitmaps[i],
@@ -1226,7 +1182,7 @@
if (start > mapping->bits) {
spin_unlock_irqrestore(&mapping->lock, flags);
- return ARM_MAPPING_ERROR;
+ return DMA_MAPPING_ERROR;
}
bitmap_set(mapping->bitmaps[i], start, count);
@@ -1387,17 +1343,6 @@
}
/*
- * Create a CPU mapping for a specified pages
- */
-static void *
-__iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot,
- const void *caller)
-{
- return dma_common_pages_remap(pages, size,
- VM_ARM_DMA_CONSISTENT | VM_USERMAP, prot, caller);
-}
-
-/*
* Create a mapping in device IO address space for specified pages
*/
static dma_addr_t
@@ -1410,7 +1355,7 @@
int i;
dma_addr = __alloc_iova(mapping, size);
- if (dma_addr == ARM_MAPPING_ERROR)
+ if (dma_addr == DMA_MAPPING_ERROR)
return dma_addr;
iova = dma_addr;
@@ -1437,7 +1382,7 @@
fail:
iommu_unmap(mapping->domain, dma_addr, iova-dma_addr);
__free_iova(mapping, dma_addr, size);
- return ARM_MAPPING_ERROR;
+ return DMA_MAPPING_ERROR;
}
static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t size)
@@ -1469,18 +1414,13 @@
static struct page **__iommu_get_pages(void *cpu_addr, unsigned long attrs)
{
- struct vm_struct *area;
-
if (__in_atomic_pool(cpu_addr, PAGE_SIZE))
return __atomic_get_pages(cpu_addr);
if (attrs & DMA_ATTR_NO_KERNEL_MAPPING)
return cpu_addr;
- area = find_vm_area(cpu_addr);
- if (area && (area->flags & VM_ARM_DMA_CONSISTENT))
- return area->pages;
- return NULL;
+ return dma_common_find_pages(cpu_addr);
}
static void *__iommu_alloc_simple(struct device *dev, size_t size, gfp_t gfp,
@@ -1498,7 +1438,7 @@
return NULL;
*handle = __iommu_create_mapping(dev, &page, size, attrs);
- if (*handle == ARM_MAPPING_ERROR)
+ if (*handle == DMA_MAPPING_ERROR)
goto err_mapping;
return addr;
@@ -1526,7 +1466,7 @@
struct page **pages;
void *addr = NULL;
- *handle = ARM_MAPPING_ERROR;
+ *handle = DMA_MAPPING_ERROR;
size = PAGE_ALIGN(size);
if (coherent_flag == COHERENT || !gfpflags_allow_blocking(gfp))
@@ -1547,13 +1487,13 @@
return NULL;
*handle = __iommu_create_mapping(dev, pages, size, attrs);
- if (*handle == ARM_MAPPING_ERROR)
+ if (*handle == DMA_MAPPING_ERROR)
goto err_buffer;
if (attrs & DMA_ATTR_NO_KERNEL_MAPPING)
return pages;
- addr = __iommu_alloc_remap(pages, size, gfp, prot,
+ addr = dma_common_pages_remap(pages, size, prot,
__builtin_return_address(0));
if (!addr)
goto err_mapping;
@@ -1583,31 +1523,21 @@
void *cpu_addr, dma_addr_t dma_addr, size_t size,
unsigned long attrs)
{
- unsigned long uaddr = vma->vm_start;
- unsigned long usize = vma->vm_end - vma->vm_start;
struct page **pages = __iommu_get_pages(cpu_addr, attrs);
unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
- unsigned long off = vma->vm_pgoff;
+ int err;
if (!pages)
return -ENXIO;
- if (off >= nr_pages || (usize >> PAGE_SHIFT) > nr_pages - off)
+ if (vma->vm_pgoff >= nr_pages)
return -ENXIO;
- pages += off;
+ err = vm_map_pages(vma, pages, nr_pages);
+ if (err)
+ pr_err("Remapping memory failed: %d\n", err);
- do {
- int ret = vm_insert_page(vma, uaddr, *pages++);
- if (ret) {
- pr_err("Remapping memory failed: %d\n", ret);
- return ret;
- }
- uaddr += PAGE_SIZE;
- usize -= PAGE_SIZE;
- } while (usize > 0);
-
- return 0;
+ return err;
}
static int arm_iommu_mmap_attrs(struct device *dev,
struct vm_area_struct *vma, void *cpu_addr,
@@ -1646,10 +1576,8 @@
return;
}
- if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0) {
- dma_common_free_remap(cpu_addr, size,
- VM_ARM_DMA_CONSISTENT | VM_USERMAP);
- }
+ if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0)
+ dma_common_free_remap(cpu_addr, size);
__iommu_remove_mapping(dev, handle, size);
__iommu_free_buffer(dev, pages, size, attrs);
@@ -1697,10 +1625,10 @@
int prot;
size = PAGE_ALIGN(size);
- *handle = ARM_MAPPING_ERROR;
+ *handle = DMA_MAPPING_ERROR;
iova_base = iova = __alloc_iova(mapping, size);
- if (iova == ARM_MAPPING_ERROR)
+ if (iova == DMA_MAPPING_ERROR)
return -ENOMEM;
for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) {
@@ -1740,7 +1668,7 @@
for (i = 1; i < nents; i++) {
s = sg_next(s);
- s->dma_address = ARM_MAPPING_ERROR;
+ s->dma_address = DMA_MAPPING_ERROR;
s->dma_length = 0;
if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) {
@@ -1915,7 +1843,7 @@
int ret, prot, len = PAGE_ALIGN(size + offset);
dma_addr = __alloc_iova(mapping, len);
- if (dma_addr == ARM_MAPPING_ERROR)
+ if (dma_addr == DMA_MAPPING_ERROR)
return dma_addr;
prot = __dma_info_to_prot(dir, attrs);
@@ -1927,7 +1855,7 @@
return dma_addr + offset;
fail:
__free_iova(mapping, dma_addr, len);
- return ARM_MAPPING_ERROR;
+ return DMA_MAPPING_ERROR;
}
/**
@@ -2021,7 +1949,7 @@
size_t len = PAGE_ALIGN(size + offset);
dma_addr = __alloc_iova(mapping, len);
- if (dma_addr == ARM_MAPPING_ERROR)
+ if (dma_addr == DMA_MAPPING_ERROR)
return dma_addr;
prot = __dma_info_to_prot(dir, attrs) | IOMMU_MMIO;
@@ -2033,7 +1961,7 @@
return dma_addr + offset;
fail:
__free_iova(mapping, dma_addr, len);
- return ARM_MAPPING_ERROR;
+ return DMA_MAPPING_ERROR;
}
/**
@@ -2106,7 +2034,6 @@
.map_resource = arm_iommu_map_resource,
.unmap_resource = arm_iommu_unmap_resource,
- .mapping_error = arm_dma_mapping_error,
.dma_supported = arm_dma_supported,
};
@@ -2125,7 +2052,6 @@
.map_resource = arm_iommu_map_resource,
.unmap_resource = arm_iommu_unmap_resource,
- .mapping_error = arm_dma_mapping_error,
.dma_supported = arm_dma_supported,
};
@@ -2287,7 +2213,7 @@
* @dev: valid struct device pointer
*
* Detaches the provided device from a previously attached map.
- * This voids the dma operations (dma_map_ops pointer)
+ * This overwrites the dma_ops pointer with appropriate non-IOMMU ops.
*/
void arm_iommu_detach_device(struct device *dev)
{
@@ -2369,6 +2295,9 @@
const struct dma_map_ops *dma_ops;
dev->archdata.dma_coherent = coherent;
+#ifdef CONFIG_SWIOTLB
+ dev->dma_coherent = coherent;
+#endif
/*
* Don't override the dma_ops if they have already been set. Ideally
@@ -2386,10 +2315,8 @@
set_dma_ops(dev, dma_ops);
#ifdef CONFIG_XEN
- if (xen_initial_domain()) {
- dev->archdata.dev_dma_ops = dev->dma_ops;
- dev->dma_ops = xen_dma_ops;
- }
+ if (xen_initial_domain())
+ dev->dma_ops = &xen_swiotlb_dma_ops;
#endif
dev->archdata.dma_ops_setup = true;
}
@@ -2400,4 +2327,42 @@
return;
arm_teardown_iommu_dma_ops(dev);
+ /* Let arch_setup_dma_ops() start again from scratch upon re-probe */
+ set_dma_ops(dev, NULL);
}
+
+#ifdef CONFIG_SWIOTLB
+void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
+ size_t size, enum dma_data_direction dir)
+{
+ __dma_page_cpu_to_dev(phys_to_page(paddr), paddr & (PAGE_SIZE - 1),
+ size, dir);
+}
+
+void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
+ size_t size, enum dma_data_direction dir)
+{
+ __dma_page_dev_to_cpu(phys_to_page(paddr), paddr & (PAGE_SIZE - 1),
+ size, dir);
+}
+
+long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr,
+ dma_addr_t dma_addr)
+{
+ return dma_to_pfn(dev, dma_addr);
+}
+
+void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
+ gfp_t gfp, unsigned long attrs)
+{
+ return __dma_alloc(dev, size, dma_handle, gfp,
+ __get_dma_pgprot(attrs, PAGE_KERNEL), false,
+ attrs, __builtin_return_address(0));
+}
+
+void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
+ dma_addr_t dma_handle, unsigned long attrs)
+{
+ __arm_dma_free(dev, size, cpu_addr, dma_handle, attrs, false);
+}
+#endif /* CONFIG_SWIOTLB */
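---
Reviewer note (informational): once arm_get_dma_map_ops() returns NULL
on LPAE, mapping goes through the generic dma-direct/swiotlb path,
which invokes the arch_sync_dma_for_{device,cpu}() hooks added above
around each transfer on non-coherent devices. A simplified paraphrase
of that flow under stated assumptions (not verbatim kernel code;
swiotlb bounce buffering and error handling elided):

    /* Sketch of the generic direct-mapping path calling the arch hook. */
    static dma_addr_t sketch_direct_map_page(struct device *dev,
                    struct page *page, unsigned long offset, size_t size,
                    enum dma_data_direction dir)
    {
            phys_addr_t phys = page_to_phys(page) + offset;

            /* Non-coherent device: clean/invalidate CPU caches first. */
            if (!dev_is_dma_coherent(dev))
                    arch_sync_dma_for_device(dev, phys, size, dir);

            return phys_to_dma(dev, phys);
    }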