Update Linux to v5.10.109

Sourced from [1]

[1] https://cdn.kernel.org/pub/linux/kernel/v5.x/linux-5.10.109.tar.xz

Change-Id: I19bca9fc6762d4e63bcf3e4cba88bbe560d9c76c
Signed-off-by: Olivier Deprez <olivier.deprez@arm.com>
diff --git a/arch/microblaze/mm/consistent.c b/arch/microblaze/mm/consistent.c
index 8c5f0c3..81dffe4 100644
--- a/arch/microblaze/mm/consistent.c
+++ b/arch/microblaze/mm/consistent.c
@@ -11,7 +11,7 @@
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/init.h>
-#include <linux/dma-noncoherent.h>
+#include <linux/dma-map-ops.h>
#include <asm/cpuinfo.h>
#include <asm/cacheflush.h>
@@ -40,7 +40,7 @@
#define UNCACHED_SHADOW_MASK 0
#endif /* CONFIG_XILINX_UNCACHED_SHADOW */
-void *uncached_kernel_address(void *ptr)
+void *arch_dma_set_uncached(void *ptr, size_t size)
{
unsigned long addr = (unsigned long)ptr;
@@ -49,11 +49,4 @@
pr_warn("ERROR: Your cache coherent area is CACHED!!!\n");
return (void *)addr;
}
-
-void *cached_kernel_address(void *ptr)
-{
- unsigned long addr = (unsigned long)ptr;
-
- return (void *)(addr & ~UNCACHED_SHADOW_MASK);
-}
#endif /* CONFIG_MMU */
diff --git a/arch/microblaze/mm/fault.c b/arch/microblaze/mm/fault.c
index e6a810b..b3fed2c 100644
--- a/arch/microblaze/mm/fault.c
+++ b/arch/microblaze/mm/fault.c
@@ -28,9 +28,9 @@
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
+#include <linux/perf_event.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <linux/mmu_context.h>
#include <linux/uaccess.h>
@@ -91,7 +91,7 @@
int code = SEGV_MAPERR;
int is_write = error_code & ESR_S;
vm_fault_t fault;
- unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
+ unsigned int flags = FAULT_FLAG_DEFAULT;
regs->ear = address;
regs->esr = error_code;
@@ -122,10 +122,12 @@
if (user_mode(regs))
flags |= FAULT_FLAG_USER;
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
+
/* When running in the kernel we expect faults to occur only to
* addresses in user space. All other faults represent errors in the
* kernel and should generate an OOPS. Unfortunately, in the case of an
- * erroneous fault occurring in a code path which already holds mmap_sem
+ * erroneous fault occurring in a code path which already holds mmap_lock
* we will deadlock attempting to validate the fault against the
* address space. Luckily the kernel only validly references user
* space from well defined areas of code, which are listed in the
@@ -137,12 +139,12 @@
* source. If this is invalid we can skip the address space check,
* thus avoiding the deadlock.
*/
- if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
+ if (unlikely(!mmap_read_trylock(mm))) {
if (kernel_mode(regs) && !search_exception_tables(regs->pc))
goto bad_area_nosemaphore;
retry:
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
}
vma = find_vma(mm, address);
@@ -215,9 +217,9 @@
* make sure we exit gracefully rather than endlessly redo
* the fault.
*/
- fault = handle_mm_fault(vma, address, flags);
+ fault = handle_mm_fault(vma, address, flags, regs);
- if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+ if (fault_signal_pending(fault, regs))
return;
if (unlikely(fault & VM_FAULT_ERROR)) {
@@ -231,16 +233,11 @@
}
if (flags & FAULT_FLAG_ALLOW_RETRY) {
- if (unlikely(fault & VM_FAULT_MAJOR))
- current->maj_flt++;
- else
- current->min_flt++;
if (fault & VM_FAULT_RETRY) {
- flags &= ~FAULT_FLAG_ALLOW_RETRY;
flags |= FAULT_FLAG_TRIED;
/*
- * No need to up_read(&mm->mmap_sem) as we would
+ * No need to mmap_read_unlock(mm) as we would
* have already released it in __lock_page_or_retry
* in mm/filemap.c.
*/
@@ -249,7 +246,7 @@
}
}
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
/*
* keep track of tlb+htab misses that are good addrs but
@@ -260,7 +257,7 @@
return;
bad_area:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
bad_area_nosemaphore:
pte_errors++;
@@ -279,7 +276,7 @@
* us unable to handle the page fault gracefully.
*/
out_of_memory:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
if (!user_mode(regs))
bad_page_fault(regs, address, SIGKILL);
else
@@ -287,7 +284,7 @@
return;
do_sigbus:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
if (user_mode(regs)) {
force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address);
return;
diff --git a/arch/microblaze/mm/highmem.c b/arch/microblaze/mm/highmem.c
index d7569f7..92e0890 100644
--- a/arch/microblaze/mm/highmem.c
+++ b/arch/microblaze/mm/highmem.c
@@ -32,18 +32,12 @@
*/
#include <asm/tlbflush.h>
-void *kmap_atomic_prot(struct page *page, pgprot_t prot)
+void *kmap_atomic_high_prot(struct page *page, pgprot_t prot)
{
unsigned long vaddr;
int idx, type;
- preempt_disable();
- pagefault_disable();
- if (!PageHighMem(page))
- return page_address(page);
-
-
type = kmap_atomic_idx_push();
idx = type + KM_TYPE_NR*smp_processor_id();
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
@@ -55,19 +49,16 @@
return (void *) vaddr;
}
-EXPORT_SYMBOL(kmap_atomic_prot);
+EXPORT_SYMBOL(kmap_atomic_high_prot);
-void __kunmap_atomic(void *kvaddr)
+void kunmap_atomic_high(void *kvaddr)
{
unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
int type;
unsigned int idx;
- if (vaddr < __fix_to_virt(FIX_KMAP_END)) {
- pagefault_enable();
- preempt_enable();
+ if (vaddr < __fix_to_virt(FIX_KMAP_END))
return;
- }
type = kmap_atomic_idx();
@@ -83,7 +74,5 @@
local_flush_tlb_page(NULL, vaddr);
kmap_atomic_idx_pop();
- pagefault_enable();
- preempt_enable();
}
-EXPORT_SYMBOL(__kunmap_atomic);
+EXPORT_SYMBOL(kunmap_atomic_high);
diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c
index a015a95..45da639 100644
--- a/arch/microblaze/mm/init.c
+++ b/arch/microblaze/mm/init.c
@@ -7,6 +7,7 @@
* for more details.
*/
+#include <linux/dma-map-ops.h>
#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/kernel.h>
@@ -45,17 +46,12 @@
EXPORT_SYMBOL(memory_size);
unsigned long lowmem_size;
+EXPORT_SYMBOL(min_low_pfn);
+EXPORT_SYMBOL(max_low_pfn);
+
#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
EXPORT_SYMBOL(kmap_pte);
-pgprot_t kmap_prot;
-EXPORT_SYMBOL(kmap_prot);
-
-static inline pte_t *virt_to_kpte(unsigned long vaddr)
-{
- return pte_offset_kernel(pmd_offset(pgd_offset_k(vaddr),
- vaddr), vaddr);
-}
static void __init highmem_init(void)
{
@@ -64,7 +60,6 @@
pkmap_page_table = virt_to_kpte(PKMAP_BASE);
kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
- kmap_prot = PAGE_KERNEL;
}
static void highmem_setup(void)
@@ -108,20 +103,20 @@
#endif
/* We don't have holes in memory map */
- free_area_init_nodes(zones_size);
+ free_area_init(zones_size);
}
void __init setup_memory(void)
{
- struct memblock_region *reg;
-
#ifndef CONFIG_MMU
u32 kernel_align_start, kernel_align_size;
+ phys_addr_t start, end;
+ u64 i;
/* Find main memory where is the kernel */
- for_each_memblock(memory, reg) {
- memory_start = (u32)reg->base;
- lowmem_size = reg->size;
+ for_each_mem_range(i, &start, &end) {
+ memory_start = start;
+ lowmem_size = end - start;
if ((memory_start <= (u32)_text) &&
((u32)_text <= (memory_start + lowmem_size - 1))) {
memory_size = lowmem_size;
@@ -169,20 +164,6 @@
pr_info("%s: max_low_pfn: %#lx\n", __func__, max_low_pfn);
pr_info("%s: max_pfn: %#lx\n", __func__, max_pfn);
- /* Add active regions with valid PFNs */
- for_each_memblock(memory, reg) {
- unsigned long start_pfn, end_pfn;
-
- start_pfn = memblock_region_memory_base_pfn(reg);
- end_pfn = memblock_region_memory_end_pfn(reg);
- memblock_set_node(start_pfn << PAGE_SHIFT,
- (end_pfn - start_pfn) << PAGE_SHIFT,
- &memblock.memory, 0);
- }
-
- /* XXX need to clip this if using highmem? */
- sparse_memory_present_with_active_regions(0);
-
paging_init();
}
@@ -197,18 +178,6 @@
#endif
mem_init_print_info(NULL);
-#ifdef CONFIG_MMU
- pr_info("Kernel virtual memory layout:\n");
- pr_info(" * 0x%08lx..0x%08lx : fixmap\n", FIXADDR_START, FIXADDR_TOP);
-#ifdef CONFIG_HIGHMEM
- pr_info(" * 0x%08lx..0x%08lx : highmem PTEs\n",
- PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP));
-#endif /* CONFIG_HIGHMEM */
- pr_info(" * 0x%08lx..0x%08lx : early ioremap\n",
- ioremap_bot, ioremap_base);
- pr_info(" * 0x%08lx..0x%08lx : vmalloc & ioremap\n",
- (unsigned long)VMALLOC_START, VMALLOC_END);
-#endif
mem_init_done = 1;
}
@@ -342,6 +311,11 @@
/* This will also cause that unflatten device tree will be allocated
* inside 768MB limit */
memblock_set_current_limit(memory_start + lowmem_size - 1);
+
+ parse_early_param();
+
+ /* CMA initialization */
+ dma_contiguous_reserve(memory_start + lowmem_size - 1);
}
/* This is only called until mem_init is done. */
diff --git a/arch/microblaze/mm/pgtable.c b/arch/microblaze/mm/pgtable.c
index 010bb9c..38ccb90 100644
--- a/arch/microblaze/mm/pgtable.c
+++ b/arch/microblaze/mm/pgtable.c
@@ -32,8 +32,8 @@
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/mm_types.h>
+#include <linux/pgtable.h>
-#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <linux/io.h>
#include <asm/mmu.h>
@@ -134,11 +134,16 @@
int map_page(unsigned long va, phys_addr_t pa, int flags)
{
+ p4d_t *p4d;
+ pud_t *pud;
pmd_t *pd;
pte_t *pg;
int err = -ENOMEM;
+
/* Use upper 10 bits of VA to index the first level map */
- pd = pmd_offset(pgd_offset_k(va), va);
+ p4d = p4d_offset(pgd_offset_k(va), va);
+ pud = pud_offset(p4d, va);
+ pd = pmd_offset(pud, va);
/* Use middle 10 bits of VA to index the second-level map */
pg = pte_alloc_kernel(pd, va); /* from powerpc - pgtable.c */
/* pg = pte_alloc_kernel(&init_mm, pd, va); */
@@ -188,13 +193,17 @@
static int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep)
{
pgd_t *pgd;
+ p4d_t *p4d;
+ pud_t *pud;
pmd_t *pmd;
pte_t *pte;
int retval = 0;
pgd = pgd_offset(mm, addr & PAGE_MASK);
if (pgd) {
- pmd = pmd_offset(pgd, addr & PAGE_MASK);
+ p4d = p4d_offset(pgd, addr & PAGE_MASK);
+ pud = pud_offset(p4d, addr & PAGE_MASK);
+ pmd = pmd_offset(pud, addr & PAGE_MASK);
if (pmd_present(*pmd)) {
pte = pte_offset_kernel(pmd, addr & PAGE_MASK);
if (pte) {