Update Linux to v5.4.2

Sync arch/xtensa/mm with the v5.4.2 sources: add SPDX license
identifiers, replace the removed bootmem interfaces with memblock (with
explicit allocation-failure checks), drop the task argument from
force_sig_fault() callers, account low memory to ZONE_NORMAL instead of
ZONE_DMA, run early_memtest() over low memory, rename the KASAN
zero-shadow symbols to kasan_early_shadow_*, switch misc.S to the
abi_entry_default/abi_ret_default macros, and drop the local
free_initmem()/free_initrd_mem() in favour of the generic versions.

Change-Id: Idf6911045d9d382da2cfe01b1edff026404ac8fd
diff --git a/arch/xtensa/mm/Makefile b/arch/xtensa/mm/Makefile
index 734888a..f7fb08a 100644
--- a/arch/xtensa/mm/Makefile
+++ b/arch/xtensa/mm/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
 #
 # Makefile for the Linux/Xtensa-specific parts of the memory manager.
 #
diff --git a/arch/xtensa/mm/cache.c b/arch/xtensa/mm/cache.c
index 9220dcd..b27359e 100644
--- a/arch/xtensa/mm/cache.c
+++ b/arch/xtensa/mm/cache.c
@@ -21,7 +21,7 @@
 #include <linux/string.h>
 #include <linux/types.h>
 #include <linux/ptrace.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
 #include <linux/swap.h>
 #include <linux/pagemap.h>
 
diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c
index 2ab0e0d..f81b147 100644
--- a/arch/xtensa/mm/fault.c
+++ b/arch/xtensa/mm/fault.c
@@ -157,7 +157,7 @@
 	if (user_mode(regs)) {
 		current->thread.bad_vaddr = address;
 		current->thread.error_code = is_write;
-		force_sig_fault(SIGSEGV, code, (void *) address, current);
+		force_sig_fault(SIGSEGV, code, (void *) address);
 		return;
 	}
 	bad_page_fault(regs, address, SIGSEGV);
@@ -182,7 +182,7 @@
 	 * or user mode.
 	 */
 	current->thread.bad_vaddr = address;
-	force_sig_fault(SIGBUS, BUS_ADRERR, (void *) address, current);
+	force_sig_fault(SIGBUS, BUS_ADRERR, (void *) address);
 
 	/* Kernel mode? Handle exceptions or die */
 	if (!user_mode(regs))
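
Note (illustration, not part of the patch): force_sig_fault() lost its
struct task_struct argument and now always signals current. A minimal
sketch of how a caller adapts; the helper name is made up, and only
force_sig_fault(), SIGSEGV and thread.bad_vaddr come from the hunks above.

	#include <linux/sched/signal.h>	/* force_sig_fault() */

	static void report_user_segv(unsigned long address, int code)
	{
		current->thread.bad_vaddr = address;
		/* old: force_sig_fault(SIGSEGV, code, (void *)address, current); */
		force_sig_fault(SIGSEGV, code, (void *)address);
	}
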
diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c
index 34aead7..d898ed6 100644
--- a/arch/xtensa/mm/init.c
+++ b/arch/xtensa/mm/init.c
@@ -18,7 +18,7 @@
 
 #include <linux/kernel.h>
 #include <linux/errno.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
 #include <linux/gfp.h>
 #include <linux/highmem.h>
 #include <linux/swap.h>
@@ -45,10 +45,7 @@
 	 * If PHYS_OFFSET is zero reserve page at address 0:
 	 * successfull allocations should never return NULL.
 	 */
-	if (PHYS_OFFSET)
-		memblock_reserve(0, PHYS_OFFSET);
-	else
-		memblock_reserve(0, 1);
+	memblock_reserve(0, PHYS_OFFSET ? PHYS_OFFSET : 1);
 
 	early_init_fdt_scan_reserved_mem();
 
@@ -60,6 +57,9 @@
 	max_pfn = PFN_DOWN(memblock_end_of_DRAM());
 	max_low_pfn = min(max_pfn, MAX_LOW_PFN);
 
+	early_memtest((phys_addr_t)min_low_pfn << PAGE_SHIFT,
+		      (phys_addr_t)max_low_pfn << PAGE_SHIFT);
+
 	memblock_set_current_limit(PFN_PHYS(max_low_pfn));
 	dma_contiguous_reserve(PFN_PHYS(max_low_pfn));
 
@@ -71,7 +71,7 @@
 {
 	/* All pages are DMA-able, so we put them all in the DMA zone. */
 	unsigned long zones_size[MAX_NR_ZONES] = {
-		[ZONE_DMA] = max_low_pfn - ARCH_PFN_OFFSET,
+		[ZONE_NORMAL] = max_low_pfn - ARCH_PFN_OFFSET,
 #ifdef CONFIG_HIGHMEM
 		[ZONE_HIGHMEM] = max_pfn - max_low_pfn,
 #endif
@@ -152,7 +152,7 @@
 	max_mapnr = max_pfn - ARCH_PFN_OFFSET;
 	high_memory = (void *)__va(max_low_pfn << PAGE_SHIFT);
 
-	free_all_bootmem();
+	memblock_free_all();
 
 	mem_init_print_info(NULL);
 	pr_info("virtual kernel memory layout:\n"
@@ -203,21 +203,6 @@
 		(unsigned long)(__bss_stop - __bss_start) >> 10);
 }
 
-#ifdef CONFIG_BLK_DEV_INITRD
-extern int initrd_is_mapped;
-
-void free_initrd_mem(unsigned long start, unsigned long end)
-{
-	if (initrd_is_mapped)
-		free_reserved_area((void *)start, (void *)end, -1, "initrd");
-}
-#endif
-
-void free_initmem(void)
-{
-	free_initmem_default(-1);
-}
-
 static void __init parse_memmap_one(char *p)
 {
 	char *oldp;
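
Note (illustration, not part of the patch): with bootmem gone, early
reservations and the final hand-off to the buddy allocator go through
memblock, and low memory can be scrubbed with early_memtest(). A minimal
sketch under the same symbols used above (PHYS_OFFSET, min_low_pfn,
max_low_pfn, which come from the arch headers); the example_* function
names are illustrative only.

	#include <linux/memblock.h>

	void __init example_bootmem_init(void)
	{
		/* Keep page 0 (or the sub-PHYS_OFFSET range) out of the allocator. */
		memblock_reserve(0, PHYS_OFFSET ? PHYS_OFFSET : 1);

		/* Optional RAM test over low memory, run when memtest= is given. */
		early_memtest((phys_addr_t)min_low_pfn << PAGE_SHIFT,
			      (phys_addr_t)max_low_pfn << PAGE_SHIFT);

		memblock_set_current_limit(PFN_PHYS(max_low_pfn));
	}

	void __init example_mem_init(void)
	{
		/* free_all_bootmem() is gone; memblock releases the pages itself. */
		memblock_free_all();
	}
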
diff --git a/arch/xtensa/mm/ioremap.c b/arch/xtensa/mm/ioremap.c
index d89c3c5..9ea3f21 100644
--- a/arch/xtensa/mm/ioremap.c
+++ b/arch/xtensa/mm/ioremap.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * ioremap implementation.
  *
  * Copyright (C) 2015 Cadence Design Systems Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
  */
 
 #include <linux/io.h>
diff --git a/arch/xtensa/mm/kasan_init.c b/arch/xtensa/mm/kasan_init.c
index 6b532b6..af71525 100644
--- a/arch/xtensa/mm/kasan_init.c
+++ b/arch/xtensa/mm/kasan_init.c
@@ -8,11 +8,10 @@
  * Copyright (C) 2017 Cadence Design Systems Inc.
  */
 
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
 #include <linux/init_task.h>
 #include <linux/kasan.h>
 #include <linux/kernel.h>
-#include <linux/memblock.h>
 #include <asm/initialize_mmu.h>
 #include <asm/tlbflush.h>
 #include <asm/traps.h>
@@ -25,12 +24,13 @@
 	int i;
 
 	for (i = 0; i < PTRS_PER_PTE; ++i)
-		set_pte(kasan_zero_pte + i,
-			mk_pte(virt_to_page(kasan_zero_page), PAGE_KERNEL));
+		set_pte(kasan_early_shadow_pte + i,
+			mk_pte(virt_to_page(kasan_early_shadow_page),
+				PAGE_KERNEL));
 
 	for (vaddr = 0; vaddr < KASAN_SHADOW_SIZE; vaddr += PMD_SIZE, ++pmd) {
 		BUG_ON(!pmd_none(*pmd));
-		set_pmd(pmd, __pmd((unsigned long)kasan_zero_pte));
+		set_pmd(pmd, __pmd((unsigned long)kasan_early_shadow_pte));
 	}
 	early_trap_init();
 }
@@ -43,7 +43,11 @@
 	unsigned long vaddr = (unsigned long)start;
 	pgd_t *pgd = pgd_offset_k(vaddr);
 	pmd_t *pmd = pmd_offset(pgd, vaddr);
-	pte_t *pte = memblock_virt_alloc(n_pages * sizeof(pte_t), PAGE_SIZE);
+	pte_t *pte = memblock_alloc(n_pages * sizeof(pte_t), PAGE_SIZE);
+
+	if (!pte)
+		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+		      __func__, n_pages * sizeof(pte_t), PAGE_SIZE);
 
 	pr_debug("%s: %p - %p\n", __func__, start, end);
 
@@ -52,8 +56,10 @@
 
 		for (k = 0; k < PTRS_PER_PTE; ++k, ++j) {
 			phys_addr_t phys =
-				memblock_alloc_base(PAGE_SIZE, PAGE_SIZE,
-						    MEMBLOCK_ALLOC_ANYWHERE);
+				memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
+
+			if (!phys)
+				panic("Failed to allocate page table page\n");
 
 			set_pte(pte + j, pfn_pte(PHYS_PFN(phys), PAGE_KERNEL));
 		}
@@ -81,13 +87,16 @@
 	populate(kasan_mem_to_shadow((void *)VMALLOC_START),
 		 kasan_mem_to_shadow((void *)XCHAL_KSEG_BYPASS_VADDR));
 
-	/* Write protect kasan_zero_page and zero-initialize it again. */
+	/*
+	 * Write protect kasan_early_shadow_page and zero-initialize it again.
+	 */
 	for (i = 0; i < PTRS_PER_PTE; ++i)
-		set_pte(kasan_zero_pte + i,
-			mk_pte(virt_to_page(kasan_zero_page), PAGE_KERNEL_RO));
+		set_pte(kasan_early_shadow_pte + i,
+			mk_pte(virt_to_page(kasan_early_shadow_page),
+				PAGE_KERNEL_RO));
 
 	local_flush_tlb_all();
-	memset(kasan_zero_page, 0, PAGE_SIZE);
+	memset(kasan_early_shadow_page, 0, PAGE_SIZE);
 
 	/* At this point kasan is fully initialized. Enable error messages. */
 	current->kasan_depth = 0;
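
Note (illustration, not part of the patch): the kasan_zero_* symbols are
now kasan_early_shadow_*; every PTE of the early shadow still points at
one shared zero page. A minimal sketch of that mapping step with an
illustrative function name; the symbols and pte helpers are the ones
used in the hunks above.

	#include <linux/kasan.h>	/* kasan_early_shadow_{page,pte} */
	#include <linux/mm.h>		/* virt_to_page() */
	#include <asm/pgtable.h>

	static void __init example_map_early_shadow(void)
	{
		int i;

		/* Point every early-shadow PTE at the single shared zero page. */
		for (i = 0; i < PTRS_PER_PTE; ++i)
			set_pte(kasan_early_shadow_pte + i,
				mk_pte(virt_to_page(kasan_early_shadow_page),
				       PAGE_KERNEL));
	}

Once the real shadow is populated, the same loop runs again with
PAGE_KERNEL_RO and the page is zeroed, as in the final hunk above.
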
diff --git a/arch/xtensa/mm/misc.S b/arch/xtensa/mm/misc.S
index 11a01c3..6aa036c 100644
--- a/arch/xtensa/mm/misc.S
+++ b/arch/xtensa/mm/misc.S
@@ -30,7 +30,7 @@
 
 ENTRY(clear_page)
 
-	entry	a1, 16
+	abi_entry_default
 
 	movi	a3, 0
 	__loopi	a2, a7, PAGE_SIZE, 32
@@ -44,7 +44,7 @@
 	s32i	a3, a2, 28
 	__endla	a2, a7, 32
 
-	retw
+	abi_ret_default
 
 ENDPROC(clear_page)
 
@@ -57,7 +57,7 @@
 
 ENTRY(copy_page)
 
-	entry	a1, 16
+	abi_entry_default
 
 	__loopi a2, a4, PAGE_SIZE, 32
 
@@ -86,7 +86,7 @@
 
 	__endl  a2, a4
 
-	retw
+	abi_ret_default
 
 ENDPROC(copy_page)
 
@@ -116,7 +116,7 @@
 
 ENTRY(clear_page_alias)
 
-	entry	a1, 32
+	abi_entry_default
 
 	/* Skip setting up a temporary DTLB if not aliased low page. */
 
@@ -144,14 +144,14 @@
 	__endla	a2, a7, 32
 
 	bnez	a6, 1f
-	retw
+	abi_ret_default
 
 	/* We need to invalidate the temporary idtlb entry, if any. */
 
 1:	idtlb	a4
 	dsync
 
-	retw
+	abi_ret_default
 
 ENDPROC(clear_page_alias)
 
@@ -164,7 +164,7 @@
 
 ENTRY(copy_page_alias)
 
-	entry	a1, 32
+	abi_entry_default
 
 	/* Skip setting up a temporary DTLB for destination if not aliased. */
 
@@ -221,19 +221,19 @@
 
 	bnez	a6, 1f
 	bnez	a7, 2f
-	retw
+	abi_ret_default
 
 1:	addi	a2, a2, -PAGE_SIZE
 	idtlb	a2
 	dsync
 	bnez	a7, 2f
-	retw
+	abi_ret_default
 
 2:	addi	a3, a3, -PAGE_SIZE+1
 	idtlb	a3
 	dsync
 
-	retw
+	abi_ret_default
 
 ENDPROC(copy_page_alias)
 
@@ -248,7 +248,7 @@
 
 ENTRY(__flush_invalidate_dcache_page_alias)
 
-	entry	sp, 16
+	abi_entry_default
 
 	movi	a7, 0			# required for exception handler
 	addi	a6, a3, (PAGE_KERNEL | _PAGE_HW_WRITE)
@@ -261,7 +261,7 @@
 	idtlb	a4
 	dsync
 
-	retw
+	abi_ret_default
 
 ENDPROC(__flush_invalidate_dcache_page_alias)
 
@@ -272,7 +272,7 @@
 
 ENTRY(__invalidate_dcache_page_alias)
 
-	entry	sp, 16
+	abi_entry_default
 
 	movi	a7, 0			# required for exception handler
 	addi	a6, a3, (PAGE_KERNEL | _PAGE_HW_WRITE)
@@ -285,7 +285,7 @@
 	idtlb	a4
 	dsync
 
-	retw
+	abi_ret_default
 
 ENDPROC(__invalidate_dcache_page_alias)
 #endif
@@ -296,7 +296,7 @@
 	
 ENTRY(__invalidate_icache_page_alias)
 
-	entry	sp, 16
+	abi_entry_default
 
 	addi	a6, a3, (PAGE_KERNEL_EXEC | _PAGE_HW_WRITE)
 	mov	a4, a2
@@ -307,7 +307,7 @@
 
 	iitlb	a4
 	isync
-	retw
+	abi_ret_default
 
 ENDPROC(__invalidate_icache_page_alias)
 
@@ -325,12 +325,12 @@
 
 ENTRY(__invalidate_icache_page)
 
-	entry	sp, 16
+	abi_entry_default
 
 	___invalidate_icache_page a2 a3
 	isync
 
-	retw
+	abi_ret_default
 
 ENDPROC(__invalidate_icache_page)
 
@@ -340,12 +340,12 @@
 
 ENTRY(__invalidate_dcache_page)
 
-	entry	sp, 16
+	abi_entry_default
 
 	___invalidate_dcache_page a2 a3
 	dsync
 
-	retw
+	abi_ret_default
 
 ENDPROC(__invalidate_dcache_page)
 
@@ -355,12 +355,12 @@
 
 ENTRY(__flush_invalidate_dcache_page)
 
-	entry	sp, 16
+	abi_entry_default
 
 	___flush_invalidate_dcache_page a2 a3
 
 	dsync
-	retw
+	abi_ret_default
 
 ENDPROC(__flush_invalidate_dcache_page)
 
@@ -370,12 +370,12 @@
 
 ENTRY(__flush_dcache_page)
 
-	entry	sp, 16
+	abi_entry_default
 
 	___flush_dcache_page a2 a3
 
 	dsync
-	retw
+	abi_ret_default
 
 ENDPROC(__flush_dcache_page)
 
@@ -385,12 +385,12 @@
 
 ENTRY(__invalidate_icache_range)
 
-	entry	sp, 16
+	abi_entry_default
 
 	___invalidate_icache_range a2 a3 a4
 	isync
 
-	retw
+	abi_ret_default
 
 ENDPROC(__invalidate_icache_range)
 
@@ -400,12 +400,12 @@
 
 ENTRY(__flush_invalidate_dcache_range)
 
-	entry	sp, 16
+	abi_entry_default
 
 	___flush_invalidate_dcache_range a2 a3 a4
 	dsync
 
-	retw
+	abi_ret_default
 
 ENDPROC(__flush_invalidate_dcache_range)
 
@@ -415,12 +415,12 @@
 
 ENTRY(__flush_dcache_range)
 
-	entry	sp, 16
+	abi_entry_default
 
 	___flush_dcache_range a2 a3 a4
 	dsync
 
-	retw
+	abi_ret_default
 
 ENDPROC(__flush_dcache_range)
 
@@ -430,11 +430,11 @@
 
 ENTRY(__invalidate_dcache_range)
 
-	entry	sp, 16
+	abi_entry_default
 
 	___invalidate_dcache_range a2 a3 a4
 
-	retw
+	abi_ret_default
 
 ENDPROC(__invalidate_dcache_range)
 
@@ -444,12 +444,12 @@
 
 ENTRY(__invalidate_icache_all)
 
-	entry	sp, 16
+	abi_entry_default
 
 	___invalidate_icache_all a2 a3
 	isync
 
-	retw
+	abi_ret_default
 
 ENDPROC(__invalidate_icache_all)
 
@@ -459,12 +459,12 @@
 
 ENTRY(__flush_invalidate_dcache_all)
 
-	entry	sp, 16
+	abi_entry_default
 
 	___flush_invalidate_dcache_all a2 a3
 	dsync
 
-	retw
+	abi_ret_default
 
 ENDPROC(__flush_invalidate_dcache_all)
 
@@ -474,11 +474,11 @@
 
 ENTRY(__invalidate_dcache_all)
 
-	entry	sp, 16
+	abi_entry_default
 
 	___invalidate_dcache_all a2 a3
 	dsync
 
-	retw
+	abi_ret_default
 
 ENDPROC(__invalidate_dcache_all)
diff --git a/arch/xtensa/mm/mmu.c b/arch/xtensa/mm/mmu.c
index 9d1ecfc..03678c4 100644
--- a/arch/xtensa/mm/mmu.c
+++ b/arch/xtensa/mm/mmu.c
@@ -4,7 +4,7 @@
  *
  * Extracted from init.c
  */
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
 #include <linux/percpu.h>
 #include <linux/init.h>
 #include <linux/string.h>
@@ -31,7 +31,10 @@
 	pr_debug("%s: vaddr: 0x%08lx, n_pages: %ld\n",
 		 __func__, vaddr, n_pages);
 
-	pte = alloc_bootmem_low_pages(n_pages * sizeof(pte_t));
+	pte = memblock_alloc_low(n_pages * sizeof(pte_t), PAGE_SIZE);
+	if (!pte)
+		panic("%s: Failed to allocate %lu bytes align=%lx\n",
+		      __func__, n_pages * sizeof(pte_t), PAGE_SIZE);
 
 	for (i = 0; i < n_pages; ++i)
 		pte_clear(NULL, 0, pte + i);
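
Note (illustration, not part of the patch): memblock_alloc() and
memblock_alloc_low() return NULL on failure instead of panicking, so
early page-table code now checks the result itself, as the kasan_init.c
and mmu.c hunks do. A minimal sketch with an illustrative caller name:

	#include <linux/kernel.h>	/* panic() */
	#include <linux/memblock.h>
	#include <asm/pgtable.h>	/* pte_t */

	static pte_t * __init example_alloc_early_ptes(unsigned long n_pages)
	{
		/* Allocate from low memory so the table is reachable through
		 * the kernel's direct mapping during early boot. */
		pte_t *pte = memblock_alloc_low(n_pages * sizeof(pte_t), PAGE_SIZE);

		if (!pte)
			panic("%s: Failed to allocate %lu bytes align=%lx\n",
			      __func__, n_pages * sizeof(pte_t), PAGE_SIZE);
		return pte;
	}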