Update Linux to v5.4.2

Change-Id: Idf6911045d9d382da2cfe01b1edff026404ac8fd
diff --git a/arch/s390/mm/Makefile b/arch/s390/mm/Makefile
index 33fe418..3175413 100644
--- a/arch/s390/mm/Makefile
+++ b/arch/s390/mm/Makefile
@@ -4,10 +4,12 @@
 #
 
 obj-y		:= init.o fault.o extmem.o mmap.o vmem.o maccess.o
-obj-y		+= page-states.o gup.o pageattr.o mem_detect.o
-obj-y		+= pgtable.o pgalloc.o
+obj-y		+= page-states.o pageattr.o pgtable.o pgalloc.o
 
 obj-$(CONFIG_CMM)		+= cmm.o
 obj-$(CONFIG_HUGETLB_PAGE)	+= hugetlbpage.o
 obj-$(CONFIG_S390_PTDUMP)	+= dump_pagetables.o
 obj-$(CONFIG_PGSTE)		+= gmap.o
+
+KASAN_SANITIZE_kasan_init.o	:= n
+obj-$(CONFIG_KASAN)		+= kasan_init.o
diff --git a/arch/s390/mm/cmm.c b/arch/s390/mm/cmm.c
index 510a182..a51c892 100644
--- a/arch/s390/mm/cmm.c
+++ b/arch/s390/mm/cmm.c
@@ -298,16 +298,16 @@
 	}
 
 	if (write) {
-		len = *lenp;
-		if (copy_from_user(buf, buffer,
-				   len > sizeof(buf) ? sizeof(buf) : len))
+		len = min(*lenp, sizeof(buf));
+		if (copy_from_user(buf, buffer, len))
 			return -EFAULT;
-		buf[sizeof(buf) - 1] = '\0';
+		buf[len - 1] = '\0';
 		cmm_skip_blanks(buf, &p);
 		nr = simple_strtoul(p, &p, 0);
 		cmm_skip_blanks(p, &p);
 		seconds = simple_strtoul(p, &p, 0);
 		cmm_set_timeout(nr, seconds);
+		*ppos += *lenp;
 	} else {
 		len = sprintf(buf, "%ld %ld\n",
 			      cmm_timeout_pages, cmm_timeout_seconds);
@@ -315,9 +315,9 @@
 			len = *lenp;
 		if (copy_to_user(buffer, buf, len))
 			return -EFAULT;
+		*lenp = len;
+		*ppos += len;
 	}
-	*lenp = len;
-	*ppos += len;
 	return 0;
 }
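
The handler change above clamps the user-supplied length with min() before copy_from_user(), terminates the string at the clamped length instead of at the end of the buffer, and only advances *ppos on the path that actually consumed data. A minimal user-space sketch of the same clamp-and-terminate pattern follows; copy_bounded() and the MIN macro are illustrative stand-ins, not kernel interfaces.

#include <stdio.h>
#include <string.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

/*
 * Copy at most bufsize bytes and NUL-terminate at the clamped length,
 * mirroring the len = min(*lenp, sizeof(buf)) change in the hunk above.
 */
static size_t copy_bounded(char *buf, size_t bufsize, const char *src,
			   size_t srclen)
{
	size_t len = MIN(srclen, bufsize);

	if (!len)
		return 0;
	memcpy(buf, src, len);
	buf[len - 1] = '\0';	/* terminate inside the copied region */
	return len;
}

int main(void)
{
	char buf[8];
	const char input[] = "128 30";
	size_t len = copy_bounded(buf, sizeof(buf), input, sizeof(input));

	printf("copied %zu bytes: \"%s\"\n", len, buf);
	return 0;
}
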
 
diff --git a/arch/s390/mm/dump_pagetables.c b/arch/s390/mm/dump_pagetables.c
index 7cdea2e..5d67b81 100644
--- a/arch/s390/mm/dump_pagetables.c
+++ b/arch/s390/mm/dump_pagetables.c
@@ -3,6 +3,8 @@
 #include <linux/debugfs.h>
 #include <linux/sched.h>
 #include <linux/mm.h>
+#include <linux/kasan.h>
+#include <asm/kasan.h>
 #include <asm/sections.h>
 #include <asm/pgtable.h>
 
@@ -17,18 +19,26 @@
 	IDENTITY_NR = 0,
 	KERNEL_START_NR,
 	KERNEL_END_NR,
+#ifdef CONFIG_KASAN
+	KASAN_SHADOW_START_NR,
+	KASAN_SHADOW_END_NR,
+#endif
 	VMEMMAP_NR,
 	VMALLOC_NR,
 	MODULES_NR,
 };
 
 static struct addr_marker address_markers[] = {
-	[IDENTITY_NR]	  = {0, "Identity Mapping"},
-	[KERNEL_START_NR] = {(unsigned long)_stext, "Kernel Image Start"},
-	[KERNEL_END_NR]	  = {(unsigned long)_end, "Kernel Image End"},
-	[VMEMMAP_NR]	  = {0, "vmemmap Area"},
-	[VMALLOC_NR]	  = {0, "vmalloc Area"},
-	[MODULES_NR]	  = {0, "Modules Area"},
+	[IDENTITY_NR]		= {0, "Identity Mapping"},
+	[KERNEL_START_NR]	= {(unsigned long)_stext, "Kernel Image Start"},
+	[KERNEL_END_NR]		= {(unsigned long)_end, "Kernel Image End"},
+#ifdef CONFIG_KASAN
+	[KASAN_SHADOW_START_NR]	= {KASAN_SHADOW_START, "Kasan Shadow Start"},
+	[KASAN_SHADOW_END_NR]	= {KASAN_SHADOW_END, "Kasan Shadow End"},
+#endif
+	[VMEMMAP_NR]		= {0, "vmemmap Area"},
+	[VMALLOC_NR]		= {0, "vmalloc Area"},
+	[MODULES_NR]		= {0, "Modules Area"},
 	{ -1, NULL }
 };
 
@@ -80,7 +90,7 @@
 	} else if (prot != cur || level != st->level ||
 		   st->current_address >= st->marker[1].start_address) {
 		/* Print the actual finished series */
-		seq_printf(m, "0x%0*lx-0x%0*lx",
+		seq_printf(m, "0x%0*lx-0x%0*lx ",
 			   width, st->start_address,
 			   width, st->current_address);
 		delta = (st->current_address - st->start_address) >> 10;
@@ -90,7 +100,7 @@
 		}
 		seq_printf(m, "%9lu%c ", delta, *unit);
 		print_prot(m, st->current_prot, st->level);
-		if (st->current_address >= st->marker[1].start_address) {
+		while (st->current_address >= st->marker[1].start_address) {
 			st->marker++;
 			seq_printf(m, "---[ %s ]---\n", st->marker->name);
 		}
@@ -100,6 +110,18 @@
 	}
 }
 
+#ifdef CONFIG_KASAN
+static void note_kasan_early_shadow_page(struct seq_file *m,
+						struct pg_state *st)
+{
+	unsigned int prot;
+
+	prot = pte_val(*kasan_early_shadow_pte) &
+		(_PAGE_PROTECT | _PAGE_INVALID | _PAGE_NOEXEC);
+	note_page(m, st, prot, 4);
+}
+#endif
+
 /*
  * The actual page table walker functions. In order to keep the
  * implementation of print_prot() short, we only check and pass
@@ -132,9 +154,16 @@
 	pmd_t *pmd;
 	int i;
 
-	for (i = 0; i < PTRS_PER_PMD && addr < max_addr; i++) {
+#ifdef CONFIG_KASAN
+	if ((pud_val(*pud) & PAGE_MASK) == __pa(kasan_early_shadow_pmd)) {
+		note_kasan_early_shadow_page(m, st);
+		return;
+	}
+#endif
+
+	pmd = pmd_offset(pud, addr);
+	for (i = 0; i < PTRS_PER_PMD && addr < max_addr; i++, pmd++) {
 		st->current_address = addr;
-		pmd = pmd_offset(pud, addr);
 		if (!pmd_none(*pmd)) {
 			if (pmd_large(*pmd)) {
 				prot = pmd_val(*pmd) &
@@ -156,9 +185,16 @@
 	pud_t *pud;
 	int i;
 
-	for (i = 0; i < PTRS_PER_PUD && addr < max_addr; i++) {
+#ifdef CONFIG_KASAN
+	if ((p4d_val(*p4d) & PAGE_MASK) == __pa(kasan_early_shadow_pud)) {
+		note_kasan_early_shadow_page(m, st);
+		return;
+	}
+#endif
+
+	pud = pud_offset(p4d, addr);
+	for (i = 0; i < PTRS_PER_PUD && addr < max_addr; i++, pud++) {
 		st->current_address = addr;
-		pud = pud_offset(p4d, addr);
 		if (!pud_none(*pud))
 			if (pud_large(*pud)) {
 				prot = pud_val(*pud) &
@@ -179,9 +215,16 @@
 	p4d_t *p4d;
 	int i;
 
-	for (i = 0; i < PTRS_PER_P4D && addr < max_addr; i++) {
+#ifdef CONFIG_KASAN
+	if ((pgd_val(*pgd) & PAGE_MASK) == __pa(kasan_early_shadow_p4d)) {
+		note_kasan_early_shadow_page(m, st);
+		return;
+	}
+#endif
+
+	p4d = p4d_offset(pgd, addr);
+	for (i = 0; i < PTRS_PER_P4D && addr < max_addr; i++, p4d++) {
 		st->current_address = addr;
-		p4d = p4d_offset(pgd, addr);
 		if (!p4d_none(*p4d))
 			walk_pud_level(m, st, p4d, addr);
 		else
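
One consequence of the new KASAN shadow markers above is that markers can sit close together, so a single large mapping may step past more than one of them at once; that is why the marker advance changed from if to while. A small stand-alone sketch of that catch-up loop, with made-up addresses:

#include <stdio.h>

struct addr_marker {
	unsigned long start_address;
	const char *name;
};

static struct addr_marker markers[] = {
	{ 0x0,      "Identity Mapping" },
	{ 0x100000, "Kernel Image Start" },
	{ 0x800000, "Kernel Image End" },
	{ ~0UL,     NULL },
};

int main(void)
{
	const struct addr_marker *m = markers;
	unsigned long current_address = 0x900000;	/* jumped past two markers */

	/* an 'if' would only advance one marker; the 'while' catches up */
	while (current_address >= m[1].start_address) {
		m++;
		printf("---[ %s ]---\n", m->name);
	}
	return 0;
}
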
diff --git a/arch/s390/mm/extmem.c b/arch/s390/mm/extmem.c
index 84111a4..fd0dae9 100644
--- a/arch/s390/mm/extmem.c
+++ b/arch/s390/mm/extmem.c
@@ -16,9 +16,10 @@
 #include <linux/list.h>
 #include <linux/slab.h>
 #include <linux/export.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
 #include <linux/ctype.h>
 #include <linux/ioport.h>
+#include <linux/refcount.h>
 #include <asm/diag.h>
 #include <asm/page.h>
 #include <asm/pgtable.h>
@@ -28,12 +29,7 @@
 #include <asm/cpcmd.h>
 #include <asm/setup.h>
 
-#define DCSS_LOADSHR    0x00
-#define DCSS_LOADNSR    0x04
 #define DCSS_PURGESEG   0x08
-#define DCSS_FINDSEG    0x0c
-#define DCSS_LOADNOLY   0x10
-#define DCSS_SEGEXT     0x18
 #define DCSS_LOADSHRX	0x20
 #define DCSS_LOADNSRX	0x24
 #define DCSS_FINDSEGX	0x2c
@@ -53,20 +49,6 @@
 	struct qrange range[6];
 };
 
-struct qrange_old {
-	unsigned int start; /* last byte type */
-	unsigned int end;   /* last byte reserved */
-};
-
-/* output area format for the Diag x'64' old subcode x'18' */
-struct qout64_old {
-	int segstart;
-	int segend;
-	int segcnt;
-	int segrcnt;
-	struct qrange_old range[6];
-};
-
 struct qin64 {
 	char qopcode;
 	char rsrv1[3];
@@ -83,7 +65,7 @@
 	char res_name[16];
 	unsigned long start_addr;
 	unsigned long end;
-	atomic_t ref_count;
+	refcount_t ref_count;
 	int do_nonshared;
 	unsigned int vm_segtype;
 	struct qrange range[6];
@@ -95,52 +77,10 @@
 static LIST_HEAD(dcss_list);
 static char *segtype_string[] = { "SW", "EW", "SR", "ER", "SN", "EN", "SC",
 					"EW/EN-MIXED" };
-static int loadshr_scode, loadnsr_scode;
-static int segext_scode, purgeseg_scode;
-static int scode_set;
-
-/* set correct Diag x'64' subcodes. */
-static int
-dcss_set_subcodes(void)
-{
-	char *name = kmalloc(8, GFP_KERNEL | GFP_DMA);
-	unsigned long rx, ry;
-	int rc;
-
-	if (name == NULL)
-		return -ENOMEM;
-
-	rx = (unsigned long) name;
-	ry = DCSS_FINDSEGX;
-
-	strcpy(name, "dummy");
-	diag_stat_inc(DIAG_STAT_X064);
-	asm volatile(
-		"	diag	%0,%1,0x64\n"
-		"0:	ipm	%2\n"
-		"	srl	%2,28\n"
-		"	j	2f\n"
-		"1:	la	%2,3\n"
-		"2:\n"
-		EX_TABLE(0b, 1b)
-		: "+d" (rx), "+d" (ry), "=d" (rc) : : "cc", "memory");
-
-	kfree(name);
-	/* Diag x'64' new subcodes are supported, set to new subcodes */
-	if (rc != 3) {
-		loadshr_scode = DCSS_LOADSHRX;
-		loadnsr_scode = DCSS_LOADNSRX;
-		purgeseg_scode = DCSS_PURGESEG;
-		segext_scode = DCSS_SEGEXTX;
-		return 0;
-	}
-	/* Diag x'64' new subcodes are not supported, set to old subcodes */
-	loadshr_scode = DCSS_LOADNOLY;
-	loadnsr_scode = DCSS_LOADNSR;
-	purgeseg_scode = DCSS_PURGESEG;
-	segext_scode = DCSS_SEGEXT;
-	return 0;
-}
+static int loadshr_scode = DCSS_LOADSHRX;
+static int loadnsr_scode = DCSS_LOADNSRX;
+static int purgeseg_scode = DCSS_PURGESEG;
+static int segext_scode = DCSS_SEGEXTX;
 
 /*
  * Create the 8 bytes, ebcdic VM segment name from
@@ -196,32 +136,15 @@
 	unsigned long rx, ry;
 	int rc;
 
-	if (scode_set == 0) {
-		rc = dcss_set_subcodes();
-		if (rc < 0)
-			return rc;
-		scode_set = 1;
-	}
 	rx = (unsigned long) parameter;
 	ry = (unsigned long) *func;
 
-	/* 64-bit Diag x'64' new subcode, keep in 64-bit addressing mode */
 	diag_stat_inc(DIAG_STAT_X064);
-	if (*func > DCSS_SEGEXT)
-		asm volatile(
-			"	diag	%0,%1,0x64\n"
-			"	ipm	%2\n"
-			"	srl	%2,28\n"
-			: "+d" (rx), "+d" (ry), "=d" (rc) : : "cc");
-	/* 31-bit Diag x'64' old subcode, switch to 31-bit addressing mode */
-	else
-		asm volatile(
-			"	sam31\n"
-			"	diag	%0,%1,0x64\n"
-			"	sam64\n"
-			"	ipm	%2\n"
-			"	srl	%2,28\n"
-			: "+d" (rx), "+d" (ry), "=d" (rc) : : "cc");
+	asm volatile(
+		"	diag	%0,%1,0x64\n"
+		"	ipm	%2\n"
+		"	srl	%2,28\n"
+		: "+d" (rx), "+d" (ry), "=d" (rc) : : "cc");
 	*ret1 = rx;
 	*ret2 = ry;
 	return rc;
@@ -271,31 +194,6 @@
 		goto out_free;
 	}
 
-	/* Only old format of output area of Diagnose x'64' is supported,
-	   copy data for the new format. */
-	if (segext_scode == DCSS_SEGEXT) {
-		struct qout64_old *qout_old;
-		qout_old = kzalloc(sizeof(*qout_old), GFP_KERNEL | GFP_DMA);
-		if (qout_old == NULL) {
-			rc = -ENOMEM;
-			goto out_free;
-		}
-		memcpy(qout_old, qout, sizeof(struct qout64_old));
-		qout->segstart = (unsigned long) qout_old->segstart;
-		qout->segend = (unsigned long) qout_old->segend;
-		qout->segcnt = qout_old->segcnt;
-		qout->segrcnt = qout_old->segrcnt;
-
-		if (qout->segcnt > 6)
-			qout->segrcnt = 6;
-		for (i = 0; i < qout->segrcnt; i++) {
-			qout->range[i].start =
-				(unsigned long) qout_old->range[i].start;
-			qout->range[i].end =
-				(unsigned long) qout_old->range[i].end;
-		}
-		kfree(qout_old);
-	}
 	if (qout->segcnt > 6) {
 		rc = -EOPNOTSUPP;
 		goto out_free;
@@ -410,11 +308,9 @@
 	if (rc < 0)
 		goto out_free;
 
-	if (loadshr_scode == DCSS_LOADSHRX) {
-		if (segment_overlaps_others(seg)) {
-			rc = -EBUSY;
-			goto out_free;
-		}
+	if (segment_overlaps_others(seg)) {
+		rc = -EBUSY;
+		goto out_free;
 	}
 
 	rc = vmem_add_mapping(seg->start_addr, seg->end - seg->start_addr + 1);
@@ -467,16 +363,16 @@
 	seg->start_addr = start_addr;
 	seg->end = end_addr;
 	seg->do_nonshared = do_nonshared;
-	atomic_set(&seg->ref_count, 1);
+	refcount_set(&seg->ref_count, 1);
 	list_add(&seg->list, &dcss_list);
 	*addr = seg->start_addr;
 	*end  = seg->end;
 	if (do_nonshared)
-		pr_info("DCSS %s of range %p to %p and type %s loaded as "
+		pr_info("DCSS %s of range %px to %px and type %s loaded as "
 			"exclusive-writable\n", name, (void*) seg->start_addr,
 			(void*) seg->end, segtype_string[seg->vm_segtype]);
 	else {
-		pr_info("DCSS %s of range %p to %p and type %s loaded in "
+		pr_info("DCSS %s of range %px to %px and type %s loaded in "
 			"shared access mode\n", name, (void*) seg->start_addr,
 			(void*) seg->end, segtype_string[seg->vm_segtype]);
 	}
@@ -527,7 +423,7 @@
 		rc = __segment_load (name, do_nonshared, addr, end);
 	else {
 		if (do_nonshared == seg->do_nonshared) {
-			atomic_inc(&seg->ref_count);
+			refcount_inc(&seg->ref_count);
 			*addr = seg->start_addr;
 			*end  = seg->end;
 			rc    = seg->vm_segtype;
@@ -573,7 +469,7 @@
 		rc = 0;
 		goto out_unlock;
 	}
-	if (atomic_read (&seg->ref_count) != 1) {
+	if (refcount_read(&seg->ref_count) != 1) {
 		pr_warn("DCSS %s is in use and cannot be reloaded\n", name);
 		rc = -EAGAIN;
 		goto out_unlock;
@@ -649,7 +545,7 @@
 		pr_err("Unloading unknown DCSS %s failed\n", name);
 		goto out_unlock;
 	}
-	if (atomic_dec_return(&seg->ref_count) != 0)
+	if (!refcount_dec_and_test(&seg->ref_count))
 		goto out_unlock;
 	release_resource(seg->res);
 	kfree(seg->res);
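
The extmem.c changes convert the DCSS ref_count from atomic_t to refcount_t, so the unload path tests refcount_dec_and_test() rather than comparing atomic_dec_return() with zero. A rough user-space analogue of that release-on-last-reference contract, using C11 atomics instead of the kernel's refcount API:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct segment {
	atomic_int ref_count;
	char name[16];
};

/*
 * Free the object only when the caller dropped the last reference,
 * the same contract refcount_dec_and_test() provides in the hunk above.
 */
static void segment_put(struct segment *seg)
{
	if (atomic_fetch_sub(&seg->ref_count, 1) == 1) {
		printf("releasing %s\n", seg->name);
		free(seg);
	}
}

int main(void)
{
	struct segment *seg = calloc(1, sizeof(*seg));

	if (!seg)
		return 1;
	snprintf(seg->name, sizeof(seg->name), "DCSS1");
	atomic_init(&seg->ref_count, 2);	/* two users hold the segment */
	segment_put(seg);	/* still referenced, nothing happens */
	segment_put(seg);	/* last reference, object is freed */
	return 0;
}
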
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 72af23b..7b0bb47 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -67,49 +67,10 @@
 }
 early_initcall(fault_init);
 
-static inline int notify_page_fault(struct pt_regs *regs)
-{
-	int ret = 0;
-
-	/* kprobe_running() needs smp_processor_id() */
-	if (kprobes_built_in() && !user_mode(regs)) {
-		preempt_disable();
-		if (kprobe_running() && kprobe_fault_handler(regs, 14))
-			ret = 1;
-		preempt_enable();
-	}
-	return ret;
-}
-
-
-/*
- * Unlock any spinlocks which will prevent us from getting the
- * message out.
- */
-void bust_spinlocks(int yes)
-{
-	if (yes) {
-		oops_in_progress = 1;
-	} else {
-		int loglevel_save = console_loglevel;
-		console_unblank();
-		oops_in_progress = 0;
-		/*
-		 * OK, the message is on the console.  Now we call printk()
-		 * without oops_in_progress set so that printk will give klogd
-		 * a poke.  Hold onto your hats...
-		 */
-		console_loglevel = 15;
-		printk(" ");
-		console_loglevel = loglevel_save;
-	}
-}
-
 /*
  * Find out which address space caused the exception.
- * Access register mode is impossible, ignore space == 3.
  */
-static inline enum fault_type get_fault_type(struct pt_regs *regs)
+static enum fault_type get_fault_type(struct pt_regs *regs)
 {
 	unsigned long trans_exc_code;
 
@@ -132,6 +93,10 @@
 		}
 		return VDSO_FAULT;
 	}
+	if (trans_exc_code == 1) {
+		/* access register mode, not used in the kernel */
+		return USER_FAULT;
+	}
 	/* home space exception -> access via kernel ASCE */
 	return KERNEL_FAULT;
 }
@@ -235,6 +200,8 @@
 		asce = S390_lowcore.kernel_asce;
 		pr_cont("kernel ");
 		break;
+	default:
+		unreachable();
 	}
 	pr_cont("ASCE.\n");
 	dump_pagetable(asce, regs->int_parm_long & __FAIL_ADDR_MASK);
@@ -267,8 +234,19 @@
 {
 	report_user_fault(regs, SIGSEGV, 1);
 	force_sig_fault(SIGSEGV, si_code,
-			(void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK),
-			current);
+			(void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK));
+}
+
+const struct exception_table_entry *s390_search_extables(unsigned long addr)
+{
+	const struct exception_table_entry *fixup;
+
+	fixup = search_extable(__start_dma_ex_table,
+			       __stop_dma_ex_table - __start_dma_ex_table,
+			       addr);
+	if (!fixup)
+		fixup = search_exception_tables(addr);
+	return fixup;
 }
 
 static noinline void do_no_context(struct pt_regs *regs)
@@ -276,7 +254,7 @@
 	const struct exception_table_entry *fixup;
 
 	/* Are we prepared to handle this kernel fault?  */
-	fixup = search_exception_tables(regs->psw.addr);
+	fixup = s390_search_extables(regs->psw.addr);
 	if (fixup) {
 		regs->psw.addr = extable_fixup(fixup);
 		return;
@@ -317,8 +295,7 @@
 	 * or user mode.
 	 */
 	force_sig_fault(SIGBUS, BUS_ADRERR,
-			(void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK),
-			current);
+			(void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK));
 }
 
 static noinline int signal_return(struct pt_regs *regs)
@@ -350,6 +327,7 @@
 	case VM_FAULT_BADACCESS:
 		if (access == VM_EXEC && signal_return(regs) == 0)
 			break;
+		/* fallthrough */
 	case VM_FAULT_BADMAP:
 		/* Bad memory access. Check if it is kernel or user space. */
 		if (user_mode(regs)) {
@@ -359,7 +337,9 @@
 			do_sigsegv(regs, si_code);
 			break;
 		}
+		/* fallthrough */
 	case VM_FAULT_BADCONTEXT:
+		/* fallthrough */
 	case VM_FAULT_PFAULT:
 		do_no_context(regs);
 		break;
@@ -421,7 +401,7 @@
 	 */
 	clear_pt_regs_flag(regs, PIF_PER_TRAP);
 
-	if (notify_page_fault(regs))
+	if (kprobe_page_fault(regs, 14))
 		return 0;
 
 	mm = tsk->mm;
@@ -636,17 +616,19 @@
 	u64 reserved;
 } __attribute__ ((packed, aligned(8)));
 
+static struct pfault_refbk pfault_init_refbk = {
+	.refdiagc = 0x258,
+	.reffcode = 0,
+	.refdwlen = 5,
+	.refversn = 2,
+	.refgaddr = __LC_LPP,
+	.refselmk = 1ULL << 48,
+	.refcmpmk = 1ULL << 48,
+	.reserved = __PF_RES_FIELD
+};
+
 int pfault_init(void)
 {
-	struct pfault_refbk refbk = {
-		.refdiagc = 0x258,
-		.reffcode = 0,
-		.refdwlen = 5,
-		.refversn = 2,
-		.refgaddr = __LC_LPP,
-		.refselmk = 1ULL << 48,
-		.refcmpmk = 1ULL << 48,
-		.reserved = __PF_RES_FIELD };
         int rc;
 
 	if (pfault_disable)
@@ -658,18 +640,20 @@
 		"1:	la	%0,8\n"
 		"2:\n"
 		EX_TABLE(0b,1b)
-		: "=d" (rc) : "a" (&refbk), "m" (refbk) : "cc");
+		: "=d" (rc)
+		: "a" (&pfault_init_refbk), "m" (pfault_init_refbk) : "cc");
         return rc;
 }
 
+static struct pfault_refbk pfault_fini_refbk = {
+	.refdiagc = 0x258,
+	.reffcode = 1,
+	.refdwlen = 5,
+	.refversn = 2,
+};
+
 void pfault_fini(void)
 {
-	struct pfault_refbk refbk = {
-		.refdiagc = 0x258,
-		.reffcode = 1,
-		.refdwlen = 5,
-		.refversn = 2,
-	};
 
 	if (pfault_disable)
 		return;
@@ -678,7 +662,7 @@
 		"	diag	%0,0,0x258\n"
 		"0:	nopr	%%r7\n"
 		EX_TABLE(0b,0b)
-		: : "a" (&refbk), "m" (refbk) : "cc");
+		: : "a" (&pfault_fini_refbk), "m" (pfault_fini_refbk) : "cc");
 }
 
 static DEFINE_SPINLOCK(pfault_lock);
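
The new s390_search_extables() consults the DMA exception table first and only falls back to the generic search_exception_tables(). Reduced to its shape, the lookup is a two-stage search with a fallback; the sketch below uses hypothetical table names and a linear scan purely for illustration:

#include <stddef.h>
#include <stdio.h>

struct ex_entry {
	unsigned long insn;	/* faulting instruction address */
	unsigned long fixup;	/* where to resume */
};

static const struct ex_entry dma_table[] = { { 0x1000, 0x1100 } };
static const struct ex_entry main_table[] = { { 0x2000, 0x2200 } };

static const struct ex_entry *search_table(const struct ex_entry *tbl,
					   size_t n, unsigned long addr)
{
	for (size_t i = 0; i < n; i++)
		if (tbl[i].insn == addr)
			return &tbl[i];
	return NULL;
}

/* prefer the special-purpose table, fall back to the common one */
static const struct ex_entry *search_extables(unsigned long addr)
{
	const struct ex_entry *fixup;

	fixup = search_table(dma_table, 1, addr);
	if (!fixup)
		fixup = search_table(main_table, 1, addr);
	return fixup;
}

int main(void)
{
	const struct ex_entry *e = search_extables(0x2000);

	printf("fixup at %#lx\n", e ? e->fixup : 0UL);
	return 0;
}
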
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index 911c7de..edcdca9 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -9,7 +9,7 @@
  */
 
 #include <linux/kernel.h>
-#include <linux/mm.h>
+#include <linux/pagewalk.h>
 #include <linux/swap.h>
 #include <linux/smp.h>
 #include <linux/spinlock.h>
@@ -67,7 +67,7 @@
 	INIT_RADIX_TREE(&gmap->host_to_rmap, GFP_ATOMIC);
 	spin_lock_init(&gmap->guest_table_lock);
 	spin_lock_init(&gmap->shadow_lock);
-	atomic_set(&gmap->ref_count, 1);
+	refcount_set(&gmap->ref_count, 1);
 	page = alloc_pages(GFP_KERNEL, CRST_ALLOC_ORDER);
 	if (!page)
 		goto out_free;
@@ -214,7 +214,7 @@
  */
 struct gmap *gmap_get(struct gmap *gmap)
 {
-	atomic_inc(&gmap->ref_count);
+	refcount_inc(&gmap->ref_count);
 	return gmap;
 }
 EXPORT_SYMBOL_GPL(gmap_get);
@@ -227,7 +227,7 @@
  */
 void gmap_put(struct gmap *gmap)
 {
-	if (atomic_dec_return(&gmap->ref_count) == 0)
+	if (refcount_dec_and_test(&gmap->ref_count))
 		gmap_free(gmap);
 }
 EXPORT_SYMBOL_GPL(gmap_put);
@@ -907,10 +907,16 @@
 	pmd_t *pmdp;
 
 	BUG_ON(gmap_is_shadow(gmap));
-	spin_lock(&gmap->guest_table_lock);
 	pmdp = (pmd_t *) gmap_table_walk(gmap, gaddr, 1);
+	if (!pmdp)
+		return NULL;
 
-	if (!pmdp || pmd_none(*pmdp)) {
+	/* without huge pages, there is no need to take the table lock */
+	if (!gmap->mm->context.allow_gmap_hpage_1m)
+		return pmd_none(*pmdp) ? NULL : pmdp;
+
+	spin_lock(&gmap->guest_table_lock);
+	if (pmd_none(*pmdp)) {
 		spin_unlock(&gmap->guest_table_lock);
 		return NULL;
 	}
@@ -1588,7 +1594,7 @@
 			continue;
 		if (!sg->initialized)
 			return ERR_PTR(-EAGAIN);
-		atomic_inc(&sg->ref_count);
+		refcount_inc(&sg->ref_count);
 		return sg;
 	}
 	return NULL;
@@ -1676,7 +1682,7 @@
 			}
 		}
 	}
-	atomic_set(&new->ref_count, 2);
+	refcount_set(&new->ref_count, 2);
 	list_add(&new->list, &parent->children);
 	if (asce & _ASCE_REAL_SPACE) {
 		/* nothing to protect, return right away */
@@ -2418,8 +2424,8 @@
  * This function is assumed to be called with the guest_table_lock
  * held.
  */
-bool gmap_test_and_clear_dirty_pmd(struct gmap *gmap, pmd_t *pmdp,
-				   unsigned long gaddr)
+static bool gmap_test_and_clear_dirty_pmd(struct gmap *gmap, pmd_t *pmdp,
+					  unsigned long gaddr)
 {
 	if (pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)
 		return false;
@@ -2515,13 +2521,9 @@
 	return 0;
 }
 
-static inline void zap_zero_pages(struct mm_struct *mm)
-{
-	struct mm_walk walk = { .pmd_entry = __zap_zero_pages };
-
-	walk.mm = mm;
-	walk_page_range(0, TASK_SIZE, &walk);
-}
+static const struct mm_walk_ops zap_zero_walk_ops = {
+	.pmd_entry	= __zap_zero_pages,
+};
 
 /*
  * switch on pgstes for its userspace process (for kvm)
@@ -2540,7 +2542,7 @@
 	mm->context.has_pgste = 1;
 	/* split thp mappings and disable thp for future mappings */
 	thp_split_mm(mm);
-	zap_zero_pages(mm);
+	walk_page_range(mm, 0, TASK_SIZE, &zap_zero_walk_ops, NULL);
 	up_write(&mm->mmap_sem);
 	return 0;
 }
@@ -2583,12 +2585,13 @@
 	return 0;
 }
 
+static const struct mm_walk_ops enable_skey_walk_ops = {
+	.hugetlb_entry		= __s390_enable_skey_hugetlb,
+	.pte_entry		= __s390_enable_skey_pte,
+};
+
 int s390_enable_skey(void)
 {
-	struct mm_walk walk = {
-		.hugetlb_entry = __s390_enable_skey_hugetlb,
-		.pte_entry = __s390_enable_skey_pte,
-	};
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma;
 	int rc = 0;
@@ -2608,8 +2611,7 @@
 	}
 	mm->def_flags &= ~VM_MERGEABLE;
 
-	walk.mm = mm;
-	walk_page_range(0, TASK_SIZE, &walk);
+	walk_page_range(mm, 0, TASK_SIZE, &enable_skey_walk_ops, NULL);
 
 out_up:
 	up_write(&mm->mmap_sem);
@@ -2627,13 +2629,14 @@
 	return 0;
 }
 
+static const struct mm_walk_ops reset_cmma_walk_ops = {
+	.pte_entry		= __s390_reset_cmma,
+};
+
 void s390_reset_cmma(struct mm_struct *mm)
 {
-	struct mm_walk walk = { .pte_entry = __s390_reset_cmma };
-
 	down_write(&mm->mmap_sem);
-	walk.mm = mm;
-	walk_page_range(0, TASK_SIZE, &walk);
+	walk_page_range(mm, 0, TASK_SIZE, &reset_cmma_walk_ops, NULL);
 	up_write(&mm->mmap_sem);
 }
 EXPORT_SYMBOL_GPL(s390_reset_cmma);
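
The three page-table walks above now describe their callbacks in const struct mm_walk_ops tables and pass the mm plus a private pointer straight to walk_page_range(), instead of filling an on-stack struct mm_walk. A simplified user-space sketch of that callback-table shape; walk_range() and walk_ops are invented here, not the kernel interface:

#include <stdio.h>

struct walk_ops {
	/* callback invoked for every entry the walker visits */
	int (*entry)(unsigned long addr, void *private);
};

/* generic walker: iterates a range and hands each step to the ops table */
static void walk_range(unsigned long start, unsigned long end,
		       const struct walk_ops *ops, void *private)
{
	for (unsigned long addr = start; addr < end; addr += 0x1000)
		if (ops->entry && ops->entry(addr, private))
			break;
}

static int count_entry(unsigned long addr, void *private)
{
	(void)addr;
	(*(int *)private)++;
	return 0;
}

/* callbacks live in a const table, as with zap_zero_walk_ops above */
static const struct walk_ops count_walk_ops = {
	.entry = count_entry,
};

int main(void)
{
	int n = 0;

	walk_range(0, 0x4000, &count_walk_ops, &n);
	printf("visited %d entries\n", n);
	return 0;
}
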
diff --git a/arch/s390/mm/gup.c b/arch/s390/mm/gup.c
deleted file mode 100644
index 2809d11..0000000
--- a/arch/s390/mm/gup.c
+++ /dev/null
@@ -1,300 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- *  Lockless get_user_pages_fast for s390
- *
- *  Copyright IBM Corp. 2010
- *  Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
- */
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/hugetlb.h>
-#include <linux/vmstat.h>
-#include <linux/pagemap.h>
-#include <linux/rwsem.h>
-#include <asm/pgtable.h>
-
-/*
- * The performance critical leaf functions are made noinline otherwise gcc
- * inlines everything into a single function which results in too much
- * register pressure.
- */
-static inline int gup_pte_range(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
-		unsigned long end, int write, struct page **pages, int *nr)
-{
-	struct page *head, *page;
-	unsigned long mask;
-	pte_t *ptep, pte;
-
-	mask = (write ? _PAGE_PROTECT : 0) | _PAGE_INVALID | _PAGE_SPECIAL;
-
-	ptep = ((pte_t *) pmd_deref(pmd)) + pte_index(addr);
-	do {
-		pte = *ptep;
-		barrier();
-		/* Similar to the PMD case, NUMA hinting must take slow path */
-		if (pte_protnone(pte))
-			return 0;
-		if ((pte_val(pte) & mask) != 0)
-			return 0;
-		VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
-		page = pte_page(pte);
-		head = compound_head(page);
-		if (!page_cache_get_speculative(head))
-			return 0;
-		if (unlikely(pte_val(pte) != pte_val(*ptep))) {
-			put_page(head);
-			return 0;
-		}
-		VM_BUG_ON_PAGE(compound_head(page) != head, page);
-		pages[*nr] = page;
-		(*nr)++;
-
-	} while (ptep++, addr += PAGE_SIZE, addr != end);
-
-	return 1;
-}
-
-static inline int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
-		unsigned long end, int write, struct page **pages, int *nr)
-{
-	struct page *head, *page;
-	unsigned long mask;
-	int refs;
-
-	mask = (write ? _SEGMENT_ENTRY_PROTECT : 0) | _SEGMENT_ENTRY_INVALID;
-	if ((pmd_val(pmd) & mask) != 0)
-		return 0;
-	VM_BUG_ON(!pfn_valid(pmd_val(pmd) >> PAGE_SHIFT));
-
-	refs = 0;
-	head = pmd_page(pmd);
-	page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
-	do {
-		VM_BUG_ON(compound_head(page) != head);
-		pages[*nr] = page;
-		(*nr)++;
-		page++;
-		refs++;
-	} while (addr += PAGE_SIZE, addr != end);
-
-	if (!page_cache_add_speculative(head, refs)) {
-		*nr -= refs;
-		return 0;
-	}
-
-	if (unlikely(pmd_val(pmd) != pmd_val(*pmdp))) {
-		*nr -= refs;
-		while (refs--)
-			put_page(head);
-		return 0;
-	}
-
-	return 1;
-}
-
-
-static inline int gup_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr,
-		unsigned long end, int write, struct page **pages, int *nr)
-{
-	unsigned long next;
-	pmd_t *pmdp, pmd;
-
-	pmdp = (pmd_t *) pudp;
-	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
-		pmdp = (pmd_t *) pud_deref(pud);
-	pmdp += pmd_index(addr);
-	do {
-		pmd = *pmdp;
-		barrier();
-		next = pmd_addr_end(addr, end);
-		if (pmd_none(pmd))
-			return 0;
-		if (unlikely(pmd_large(pmd))) {
-			/*
-			 * NUMA hinting faults need to be handled in the GUP
-			 * slowpath for accounting purposes and so that they
-			 * can be serialised against THP migration.
-			 */
-			if (pmd_protnone(pmd))
-				return 0;
-			if (!gup_huge_pmd(pmdp, pmd, addr, next,
-					  write, pages, nr))
-				return 0;
-		} else if (!gup_pte_range(pmdp, pmd, addr, next,
-					  write, pages, nr))
-			return 0;
-	} while (pmdp++, addr = next, addr != end);
-
-	return 1;
-}
-
-static int gup_huge_pud(pud_t *pudp, pud_t pud, unsigned long addr,
-		unsigned long end, int write, struct page **pages, int *nr)
-{
-	struct page *head, *page;
-	unsigned long mask;
-	int refs;
-
-	mask = (write ? _REGION_ENTRY_PROTECT : 0) | _REGION_ENTRY_INVALID;
-	if ((pud_val(pud) & mask) != 0)
-		return 0;
-	VM_BUG_ON(!pfn_valid(pud_pfn(pud)));
-
-	refs = 0;
-	head = pud_page(pud);
-	page = head + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
-	do {
-		VM_BUG_ON_PAGE(compound_head(page) != head, page);
-		pages[*nr] = page;
-		(*nr)++;
-		page++;
-		refs++;
-	} while (addr += PAGE_SIZE, addr != end);
-
-	if (!page_cache_add_speculative(head, refs)) {
-		*nr -= refs;
-		return 0;
-	}
-
-	if (unlikely(pud_val(pud) != pud_val(*pudp))) {
-		*nr -= refs;
-		while (refs--)
-			put_page(head);
-		return 0;
-	}
-
-	return 1;
-}
-
-static inline int gup_pud_range(p4d_t *p4dp, p4d_t p4d, unsigned long addr,
-		unsigned long end, int write, struct page **pages, int *nr)
-{
-	unsigned long next;
-	pud_t *pudp, pud;
-
-	pudp = (pud_t *) p4dp;
-	if ((p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
-		pudp = (pud_t *) p4d_deref(p4d);
-	pudp += pud_index(addr);
-	do {
-		pud = *pudp;
-		barrier();
-		next = pud_addr_end(addr, end);
-		if (pud_none(pud))
-			return 0;
-		if (unlikely(pud_large(pud))) {
-			if (!gup_huge_pud(pudp, pud, addr, next, write, pages,
-					  nr))
-				return 0;
-		} else if (!gup_pmd_range(pudp, pud, addr, next, write, pages,
-					  nr))
-			return 0;
-	} while (pudp++, addr = next, addr != end);
-
-	return 1;
-}
-
-static inline int gup_p4d_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr,
-		unsigned long end, int write, struct page **pages, int *nr)
-{
-	unsigned long next;
-	p4d_t *p4dp, p4d;
-
-	p4dp = (p4d_t *) pgdp;
-	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R1)
-		p4dp = (p4d_t *) pgd_deref(pgd);
-	p4dp += p4d_index(addr);
-	do {
-		p4d = *p4dp;
-		barrier();
-		next = p4d_addr_end(addr, end);
-		if (p4d_none(p4d))
-			return 0;
-		if (!gup_pud_range(p4dp, p4d, addr, next, write, pages, nr))
-			return 0;
-	} while (p4dp++, addr = next, addr != end);
-
-	return 1;
-}
-
-/*
- * Like get_user_pages_fast() except its IRQ-safe in that it won't fall
- * back to the regular GUP.
- * Note a difference with get_user_pages_fast: this always returns the
- * number of pages pinned, 0 if no pages were pinned.
- */
-int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
-			  struct page **pages)
-{
-	struct mm_struct *mm = current->mm;
-	unsigned long addr, len, end;
-	unsigned long next, flags;
-	pgd_t *pgdp, pgd;
-	int nr = 0;
-
-	start &= PAGE_MASK;
-	addr = start;
-	len = (unsigned long) nr_pages << PAGE_SHIFT;
-	end = start + len;
-	if ((end <= start) || (end > mm->context.asce_limit))
-		return 0;
-	/*
-	 * local_irq_save() doesn't prevent pagetable teardown, but does
-	 * prevent the pagetables from being freed on s390.
-	 *
-	 * So long as we atomically load page table pointers versus teardown,
-	 * we can follow the address down to the the page and take a ref on it.
-	 */
-	local_irq_save(flags);
-	pgdp = pgd_offset(mm, addr);
-	do {
-		pgd = *pgdp;
-		barrier();
-		next = pgd_addr_end(addr, end);
-		if (pgd_none(pgd))
-			break;
-		if (!gup_p4d_range(pgdp, pgd, addr, next, write, pages, &nr))
-			break;
-	} while (pgdp++, addr = next, addr != end);
-	local_irq_restore(flags);
-
-	return nr;
-}
-
-/**
- * get_user_pages_fast() - pin user pages in memory
- * @start:	starting user address
- * @nr_pages:	number of pages from start to pin
- * @write:	whether pages will be written to
- * @pages:	array that receives pointers to the pages pinned.
- *		Should be at least nr_pages long.
- *
- * Attempt to pin user pages in memory without taking mm->mmap_sem.
- * If not successful, it will fall back to taking the lock and
- * calling get_user_pages().
- *
- * Returns number of pages pinned. This may be fewer than the number
- * requested. If nr_pages is 0 or negative, returns 0. If no pages
- * were pinned, returns -errno.
- */
-int get_user_pages_fast(unsigned long start, int nr_pages, int write,
-			struct page **pages)
-{
-	int nr, ret;
-
-	might_sleep();
-	start &= PAGE_MASK;
-	nr = __get_user_pages_fast(start, nr_pages, write, pages);
-	if (nr == nr_pages)
-		return nr;
-
-	/* Try to get the remaining pages with get_user_pages */
-	start += nr << PAGE_SHIFT;
-	pages += nr;
-	ret = get_user_pages_unlocked(start, nr_pages - nr, pages,
-				      write ? FOLL_WRITE : 0);
-	/* Have to be a bit careful with return values */
-	if (nr > 0)
-		ret = (ret < 0) ? nr : ret + nr;
-	return ret;
-}
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 3fa3e53..a124f19 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -18,10 +18,11 @@
 #include <linux/mman.h>
 #include <linux/mm.h>
 #include <linux/swap.h>
+#include <linux/swiotlb.h>
 #include <linux/smp.h>
 #include <linux/init.h>
 #include <linux/pagemap.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
 #include <linux/memory.h>
 #include <linux/pfn.h>
 #include <linux/poison.h>
@@ -29,7 +30,7 @@
 #include <linux/export.h>
 #include <linux/cma.h>
 #include <linux/gfp.h>
-#include <linux/memblock.h>
+#include <linux/dma-direct.h>
 #include <asm/processor.h>
 #include <linux/uaccess.h>
 #include <asm/pgtable.h>
@@ -42,6 +43,9 @@
 #include <asm/ctl_reg.h>
 #include <asm/sclp.h>
 #include <asm/set_memory.h>
+#include <asm/kasan.h>
+#include <asm/dma-mapping.h>
+#include <asm/uv.h>
 
 pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(.bss..swapper_pg_dir);
 
@@ -49,6 +53,8 @@
 EXPORT_SYMBOL(empty_zero_page);
 EXPORT_SYMBOL(zero_page_mask);
 
+bool initmem_freed;
+
 static void __init setup_zero_pages(void)
 {
 	unsigned int order;
@@ -59,7 +65,7 @@
 	order = 7;
 
 	/* Limit number of empty zero pages for small memory sizes */
-	while (order > 2 && (totalram_pages >> 10) < (1UL << order))
+	while (order > 2 && (totalram_pages() >> 10) < (1UL << order))
 		order--;
 
 	empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
@@ -98,8 +104,9 @@
 	S390_lowcore.user_asce = S390_lowcore.kernel_asce;
 	crst_table_init((unsigned long *) init_mm.pgd, pgd_type);
 	vmem_map_init();
+	kasan_copy_shadow(init_mm.pgd);
 
-        /* enable virtual mapping in kernel mode */
+	/* enable virtual mapping in kernel mode */
 	__ctl_load(S390_lowcore.kernel_asce, 1, 1);
 	__ctl_load(S390_lowcore.kernel_asce, 7, 7);
 	__ctl_load(S390_lowcore.kernel_asce, 13, 13);
@@ -107,6 +114,7 @@
 	psw_bits(psw).dat = 1;
 	psw_bits(psw).as = PSW_BITS_AS_HOME;
 	__load_psw_mask(psw.mask);
+	kasan_free_early_identity();
 
 	sparse_memory_present_with_active_regions(MAX_NUMNODES);
 	sparse_init();
@@ -124,6 +132,47 @@
 	pr_info("Write protected read-only-after-init data: %luk\n", size >> 10);
 }
 
+int set_memory_encrypted(unsigned long addr, int numpages)
+{
+	int i;
+
+	/* make specified pages unshared, (swiotlb, dma_free) */
+	for (i = 0; i < numpages; ++i) {
+		uv_remove_shared(addr);
+		addr += PAGE_SIZE;
+	}
+	return 0;
+}
+
+int set_memory_decrypted(unsigned long addr, int numpages)
+{
+	int i;
+	/* make specified pages shared (swiotlb, dma_alloc) */
+	for (i = 0; i < numpages; ++i) {
+		uv_set_shared(addr);
+		addr += PAGE_SIZE;
+	}
+	return 0;
+}
+
+/* are we a protected virtualization guest? */
+bool force_dma_unencrypted(struct device *dev)
+{
+	return is_prot_virt_guest();
+}
+
+/* protected virtualization */
+static void pv_init(void)
+{
+	if (!is_prot_virt_guest())
+		return;
+
+	/* make sure bounce buffers are shared */
+	swiotlb_init(1);
+	swiotlb_update_mem_attributes();
+	swiotlb_force = SWIOTLB_FORCE;
+}
+
 void __init mem_init(void)
 {
 	cpumask_set_cpu(0, &init_mm.context.cpu_attach_mask);
@@ -132,11 +181,13 @@
 	set_max_mapnr(max_low_pfn);
         high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
 
+	pv_init();
+
 	/* Setup guest page hinting */
 	cmma_init();
 
 	/* this will put all low memory onto the freelists */
-	free_all_bootmem();
+	memblock_free_all();
 	setup_zero_pages();	/* Setup zeroed pages. */
 
 	cmma_init_nodat();
@@ -146,20 +197,13 @@
 
 void free_initmem(void)
 {
+	initmem_freed = true;
 	__set_memory((unsigned long)_sinittext,
 		     (unsigned long)(_einittext - _sinittext) >> PAGE_SHIFT,
 		     SET_MEMORY_RW | SET_MEMORY_NX);
 	free_initmem_default(POISON_FREE_INITMEM);
 }
 
-#ifdef CONFIG_BLK_DEV_INITRD
-void __init free_initrd_mem(unsigned long start, unsigned long end)
-{
-	free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM,
-			   "initrd");
-}
-#endif
-
 unsigned long memory_block_size_bytes(void)
 {
 	/*
@@ -222,32 +266,35 @@
 
 #endif /* CONFIG_CMA */
 
-int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
-		bool want_memblock)
+int arch_add_memory(int nid, u64 start, u64 size,
+		struct mhp_restrictions *restrictions)
 {
 	unsigned long start_pfn = PFN_DOWN(start);
 	unsigned long size_pages = PFN_DOWN(size);
 	int rc;
 
+	if (WARN_ON_ONCE(restrictions->altmap))
+		return -EINVAL;
+
 	rc = vmem_add_mapping(start, size);
 	if (rc)
 		return rc;
 
-	rc = __add_pages(nid, start_pfn, size_pages, altmap, want_memblock);
+	rc = __add_pages(nid, start_pfn, size_pages, restrictions);
 	if (rc)
 		vmem_remove_mapping(start, size);
 	return rc;
 }
 
-#ifdef CONFIG_MEMORY_HOTREMOVE
-int arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
+void arch_remove_memory(int nid, u64 start, u64 size,
+			struct vmem_altmap *altmap)
 {
-	/*
-	 * There is no hardware or firmware interface which could trigger a
-	 * hot memory remove on s390. So there is nothing that needs to be
-	 * implemented.
-	 */
-	return -EBUSY;
+	unsigned long start_pfn = start >> PAGE_SHIFT;
+	unsigned long nr_pages = size >> PAGE_SHIFT;
+	struct zone *zone;
+
+	zone = page_zone(pfn_to_page(start_pfn));
+	__remove_pages(zone, start_pfn, nr_pages, altmap);
+	vmem_remove_mapping(start, size);
 }
-#endif
 #endif /* CONFIG_MEMORY_HOTPLUG */
diff --git a/arch/s390/mm/kasan_init.c b/arch/s390/mm/kasan_init.c
new file mode 100644
index 0000000..460f255
--- /dev/null
+++ b/arch/s390/mm/kasan_init.c
@@ -0,0 +1,382 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/kasan.h>
+#include <linux/sched/task.h>
+#include <linux/memblock.h>
+#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
+#include <asm/kasan.h>
+#include <asm/mem_detect.h>
+#include <asm/processor.h>
+#include <asm/sclp.h>
+#include <asm/facility.h>
+#include <asm/sections.h>
+#include <asm/setup.h>
+
+static unsigned long segment_pos __initdata;
+static unsigned long segment_low __initdata;
+static unsigned long pgalloc_pos __initdata;
+static unsigned long pgalloc_low __initdata;
+static unsigned long pgalloc_freeable __initdata;
+static bool has_edat __initdata;
+static bool has_nx __initdata;
+
+#define __sha(x) ((unsigned long)kasan_mem_to_shadow((void *)x))
+
+static pgd_t early_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);
+
+static void __init kasan_early_panic(const char *reason)
+{
+	sclp_early_printk("The Linux kernel failed to boot with the KernelAddressSanitizer:\n");
+	sclp_early_printk(reason);
+	disabled_wait();
+}
+
+static void * __init kasan_early_alloc_segment(void)
+{
+	segment_pos -= _SEGMENT_SIZE;
+
+	if (segment_pos < segment_low)
+		kasan_early_panic("out of memory during initialisation\n");
+
+	return (void *)segment_pos;
+}
+
+static void * __init kasan_early_alloc_pages(unsigned int order)
+{
+	pgalloc_pos -= (PAGE_SIZE << order);
+
+	if (pgalloc_pos < pgalloc_low)
+		kasan_early_panic("out of memory during initialisation\n");
+
+	return (void *)pgalloc_pos;
+}
+
+static void * __init kasan_early_crst_alloc(unsigned long val)
+{
+	unsigned long *table;
+
+	table = kasan_early_alloc_pages(CRST_ALLOC_ORDER);
+	if (table)
+		crst_table_init(table, val);
+	return table;
+}
+
+static pte_t * __init kasan_early_pte_alloc(void)
+{
+	static void *pte_leftover;
+	pte_t *pte;
+
+	BUILD_BUG_ON(_PAGE_TABLE_SIZE * 2 != PAGE_SIZE);
+
+	if (!pte_leftover) {
+		pte_leftover = kasan_early_alloc_pages(0);
+		pte = pte_leftover + _PAGE_TABLE_SIZE;
+	} else {
+		pte = pte_leftover;
+		pte_leftover = NULL;
+	}
+	memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE);
+	return pte;
+}
+
+enum populate_mode {
+	POPULATE_ONE2ONE,
+	POPULATE_MAP,
+	POPULATE_ZERO_SHADOW
+};
+static void __init kasan_early_vmemmap_populate(unsigned long address,
+						unsigned long end,
+						enum populate_mode mode)
+{
+	unsigned long pgt_prot_zero, pgt_prot, sgt_prot;
+	pgd_t *pg_dir;
+	p4d_t *p4_dir;
+	pud_t *pu_dir;
+	pmd_t *pm_dir;
+	pte_t *pt_dir;
+
+	pgt_prot_zero = pgprot_val(PAGE_KERNEL_RO);
+	if (!has_nx)
+		pgt_prot_zero &= ~_PAGE_NOEXEC;
+	pgt_prot = pgprot_val(PAGE_KERNEL_EXEC);
+	sgt_prot = pgprot_val(SEGMENT_KERNEL_EXEC);
+
+	while (address < end) {
+		pg_dir = pgd_offset_k(address);
+		if (pgd_none(*pg_dir)) {
+			if (mode == POPULATE_ZERO_SHADOW &&
+			    IS_ALIGNED(address, PGDIR_SIZE) &&
+			    end - address >= PGDIR_SIZE) {
+				pgd_populate(&init_mm, pg_dir,
+						kasan_early_shadow_p4d);
+				address = (address + PGDIR_SIZE) & PGDIR_MASK;
+				continue;
+			}
+			p4_dir = kasan_early_crst_alloc(_REGION2_ENTRY_EMPTY);
+			pgd_populate(&init_mm, pg_dir, p4_dir);
+		}
+
+		p4_dir = p4d_offset(pg_dir, address);
+		if (p4d_none(*p4_dir)) {
+			if (mode == POPULATE_ZERO_SHADOW &&
+			    IS_ALIGNED(address, P4D_SIZE) &&
+			    end - address >= P4D_SIZE) {
+				p4d_populate(&init_mm, p4_dir,
+						kasan_early_shadow_pud);
+				address = (address + P4D_SIZE) & P4D_MASK;
+				continue;
+			}
+			pu_dir = kasan_early_crst_alloc(_REGION3_ENTRY_EMPTY);
+			p4d_populate(&init_mm, p4_dir, pu_dir);
+		}
+
+		pu_dir = pud_offset(p4_dir, address);
+		if (pud_none(*pu_dir)) {
+			if (mode == POPULATE_ZERO_SHADOW &&
+			    IS_ALIGNED(address, PUD_SIZE) &&
+			    end - address >= PUD_SIZE) {
+				pud_populate(&init_mm, pu_dir,
+						kasan_early_shadow_pmd);
+				address = (address + PUD_SIZE) & PUD_MASK;
+				continue;
+			}
+			pm_dir = kasan_early_crst_alloc(_SEGMENT_ENTRY_EMPTY);
+			pud_populate(&init_mm, pu_dir, pm_dir);
+		}
+
+		pm_dir = pmd_offset(pu_dir, address);
+		if (pmd_none(*pm_dir)) {
+			if (mode == POPULATE_ZERO_SHADOW &&
+			    IS_ALIGNED(address, PMD_SIZE) &&
+			    end - address >= PMD_SIZE) {
+				pmd_populate(&init_mm, pm_dir,
+						kasan_early_shadow_pte);
+				address = (address + PMD_SIZE) & PMD_MASK;
+				continue;
+			}
+			/* the first megabyte of 1:1 is mapped with 4k pages */
+			if (has_edat && address && end - address >= PMD_SIZE &&
+			    mode != POPULATE_ZERO_SHADOW) {
+				void *page;
+
+				if (mode == POPULATE_ONE2ONE) {
+					page = (void *)address;
+				} else {
+					page = kasan_early_alloc_segment();
+					memset(page, 0, _SEGMENT_SIZE);
+				}
+				pmd_val(*pm_dir) = __pa(page) | sgt_prot;
+				address = (address + PMD_SIZE) & PMD_MASK;
+				continue;
+			}
+
+			pt_dir = kasan_early_pte_alloc();
+			pmd_populate(&init_mm, pm_dir, pt_dir);
+		} else if (pmd_large(*pm_dir)) {
+			address = (address + PMD_SIZE) & PMD_MASK;
+			continue;
+		}
+
+		pt_dir = pte_offset_kernel(pm_dir, address);
+		if (pte_none(*pt_dir)) {
+			void *page;
+
+			switch (mode) {
+			case POPULATE_ONE2ONE:
+				page = (void *)address;
+				pte_val(*pt_dir) = __pa(page) | pgt_prot;
+				break;
+			case POPULATE_MAP:
+				page = kasan_early_alloc_pages(0);
+				memset(page, 0, PAGE_SIZE);
+				pte_val(*pt_dir) = __pa(page) | pgt_prot;
+				break;
+			case POPULATE_ZERO_SHADOW:
+				page = kasan_early_shadow_page;
+				pte_val(*pt_dir) = __pa(page) | pgt_prot_zero;
+				break;
+			}
+		}
+		address += PAGE_SIZE;
+	}
+}
+
+static void __init kasan_set_pgd(pgd_t *pgd, unsigned long asce_type)
+{
+	unsigned long asce_bits;
+
+	asce_bits = asce_type | _ASCE_TABLE_LENGTH;
+	S390_lowcore.kernel_asce = (__pa(pgd) & PAGE_MASK) | asce_bits;
+	S390_lowcore.user_asce = S390_lowcore.kernel_asce;
+
+	__ctl_load(S390_lowcore.kernel_asce, 1, 1);
+	__ctl_load(S390_lowcore.kernel_asce, 7, 7);
+	__ctl_load(S390_lowcore.kernel_asce, 13, 13);
+}
+
+static void __init kasan_enable_dat(void)
+{
+	psw_t psw;
+
+	psw.mask = __extract_psw();
+	psw_bits(psw).dat = 1;
+	psw_bits(psw).as = PSW_BITS_AS_HOME;
+	__load_psw_mask(psw.mask);
+}
+
+static void __init kasan_early_detect_facilities(void)
+{
+	if (test_facility(8)) {
+		has_edat = true;
+		__ctl_set_bit(0, 23);
+	}
+	if (!noexec_disabled && test_facility(130)) {
+		has_nx = true;
+		__ctl_set_bit(0, 20);
+	}
+}
+
+void __init kasan_early_init(void)
+{
+	unsigned long untracked_mem_end;
+	unsigned long shadow_alloc_size;
+	unsigned long initrd_end;
+	unsigned long asce_type;
+	unsigned long memsize;
+	unsigned long vmax;
+	unsigned long pgt_prot = pgprot_val(PAGE_KERNEL_RO);
+	pte_t pte_z;
+	pmd_t pmd_z = __pmd(__pa(kasan_early_shadow_pte) | _SEGMENT_ENTRY);
+	pud_t pud_z = __pud(__pa(kasan_early_shadow_pmd) | _REGION3_ENTRY);
+	p4d_t p4d_z = __p4d(__pa(kasan_early_shadow_pud) | _REGION2_ENTRY);
+
+	kasan_early_detect_facilities();
+	if (!has_nx)
+		pgt_prot &= ~_PAGE_NOEXEC;
+	pte_z = __pte(__pa(kasan_early_shadow_page) | pgt_prot);
+
+	memsize = get_mem_detect_end();
+	if (!memsize)
+		kasan_early_panic("cannot detect physical memory size\n");
+	/* respect mem= cmdline parameter */
+	if (memory_end_set && memsize > memory_end)
+		memsize = memory_end;
+	if (IS_ENABLED(CONFIG_CRASH_DUMP) && OLDMEM_BASE)
+		memsize = min(memsize, OLDMEM_SIZE);
+	memsize = min(memsize, KASAN_SHADOW_START);
+
+	if (IS_ENABLED(CONFIG_KASAN_S390_4_LEVEL_PAGING)) {
+		/* 4 level paging */
+		BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, P4D_SIZE));
+		BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, P4D_SIZE));
+		crst_table_init((unsigned long *)early_pg_dir,
+				_REGION2_ENTRY_EMPTY);
+		untracked_mem_end = vmax = _REGION1_SIZE;
+		asce_type = _ASCE_TYPE_REGION2;
+	} else {
+		/* 3 level paging */
+		BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, PUD_SIZE));
+		BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, PUD_SIZE));
+		crst_table_init((unsigned long *)early_pg_dir,
+				_REGION3_ENTRY_EMPTY);
+		untracked_mem_end = vmax = _REGION2_SIZE;
+		asce_type = _ASCE_TYPE_REGION3;
+	}
+
+	/* init kasan zero shadow */
+	crst_table_init((unsigned long *)kasan_early_shadow_p4d,
+				p4d_val(p4d_z));
+	crst_table_init((unsigned long *)kasan_early_shadow_pud,
+				pud_val(pud_z));
+	crst_table_init((unsigned long *)kasan_early_shadow_pmd,
+				pmd_val(pmd_z));
+	memset64((u64 *)kasan_early_shadow_pte, pte_val(pte_z), PTRS_PER_PTE);
+
+	shadow_alloc_size = memsize >> KASAN_SHADOW_SCALE_SHIFT;
+	pgalloc_low = round_up((unsigned long)_end, _SEGMENT_SIZE);
+	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD)) {
+		initrd_end =
+		    round_up(INITRD_START + INITRD_SIZE, _SEGMENT_SIZE);
+		pgalloc_low = max(pgalloc_low, initrd_end);
+	}
+
+	if (pgalloc_low + shadow_alloc_size > memsize)
+		kasan_early_panic("out of memory during initialisation\n");
+
+	if (has_edat) {
+		segment_pos = round_down(memsize, _SEGMENT_SIZE);
+		segment_low = segment_pos - shadow_alloc_size;
+		pgalloc_pos = segment_low;
+	} else {
+		pgalloc_pos = memsize;
+	}
+	init_mm.pgd = early_pg_dir;
+	/*
+	 * Current memory layout:
+	 * +- 0 -------------+	 +- shadow start -+
+	 * | 1:1 ram mapping |	/| 1/8 ram	  |
+	 * +- end of ram ----+ / +----------------+
+	 * | ... gap ...     |/  |	kasan	  |
+	 * +- shadow start --+	 |	zero	  |
+	 * | 1/8 addr space  |	 |	page	  |
+	 * +- shadow end    -+	 |	mapping	  |
+	 * | ... gap ...     |\  |    (untracked) |
+	 * +- modules vaddr -+ \ +----------------+
+	 * | 2Gb	     |	\|	unmapped  | allocated per module
+	 * +-----------------+	 +- shadow end ---+
+	 */
+	/* populate kasan shadow (for identity mapping and zero page mapping) */
+	kasan_early_vmemmap_populate(__sha(0), __sha(memsize), POPULATE_MAP);
+	if (IS_ENABLED(CONFIG_MODULES))
+		untracked_mem_end = vmax - MODULES_LEN;
+	kasan_early_vmemmap_populate(__sha(max_physmem_end),
+				     __sha(untracked_mem_end),
+				     POPULATE_ZERO_SHADOW);
+	/* memory allocated for identity mapping structs will be freed later */
+	pgalloc_freeable = pgalloc_pos;
+	/* populate identity mapping */
+	kasan_early_vmemmap_populate(0, memsize, POPULATE_ONE2ONE);
+	kasan_set_pgd(early_pg_dir, asce_type);
+	kasan_enable_dat();
+	/* enable kasan */
+	init_task.kasan_depth = 0;
+	memblock_reserve(pgalloc_pos, memsize - pgalloc_pos);
+	sclp_early_printk("KernelAddressSanitizer initialized\n");
+}
+
+void __init kasan_copy_shadow(pgd_t *pg_dir)
+{
+	/*
+	 * At this point we are still running on the early page tables, early_pg_dir,
+	 * while swapper_pg_dir has just been initialized with identity mapping.
+	 * Carry over shadow memory region from early_pg_dir to swapper_pg_dir.
+	 */
+
+	pgd_t *pg_dir_src;
+	pgd_t *pg_dir_dst;
+	p4d_t *p4_dir_src;
+	p4d_t *p4_dir_dst;
+	pud_t *pu_dir_src;
+	pud_t *pu_dir_dst;
+
+	pg_dir_src = pgd_offset_raw(early_pg_dir, KASAN_SHADOW_START);
+	pg_dir_dst = pgd_offset_raw(pg_dir, KASAN_SHADOW_START);
+	p4_dir_src = p4d_offset(pg_dir_src, KASAN_SHADOW_START);
+	p4_dir_dst = p4d_offset(pg_dir_dst, KASAN_SHADOW_START);
+	if (!p4d_folded(*p4_dir_src)) {
+		/* 4 level paging */
+		memcpy(p4_dir_dst, p4_dir_src,
+		       (KASAN_SHADOW_SIZE >> P4D_SHIFT) * sizeof(p4d_t));
+		return;
+	}
+	/* 3 level paging */
+	pu_dir_src = pud_offset(p4_dir_src, KASAN_SHADOW_START);
+	pu_dir_dst = pud_offset(p4_dir_dst, KASAN_SHADOW_START);
+	memcpy(pu_dir_dst, pu_dir_src,
+	       (KASAN_SHADOW_SIZE >> PUD_SHIFT) * sizeof(pud_t));
+}
+
+void __init kasan_free_early_identity(void)
+{
+	memblock_free(pgalloc_pos, pgalloc_freeable - pgalloc_pos);
+}
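
kasan_early_init() sizes the shadow as memsize >> KASAN_SHADOW_SCALE_SHIFT, and the __sha() macro maps an address through kasan_mem_to_shadow(), so one shadow byte covers eight bytes of mapped memory, the "1/8 ram" band in the layout comment above. The arithmetic in isolation, with an offset value chosen purely for illustration:

#include <stdio.h>

#define KASAN_SHADOW_SCALE_SHIFT 3		/* 1 shadow byte per 8 bytes */
#define KASAN_SHADOW_OFFSET 0x30000000000UL	/* illustrative value only */

/* shadow address for a given memory address, as kasan_mem_to_shadow() does */
static unsigned long mem_to_shadow(unsigned long addr)
{
	return (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET;
}

int main(void)
{
	unsigned long memsize = 1UL << 30;	/* 1 GiB of tracked memory */

	printf("shadow bytes needed: %lu\n", memsize >> KASAN_SHADOW_SCALE_SHIFT);
	printf("shadow of 0x1000:    %#lx\n", mem_to_shadow(0x1000));
	return 0;
}
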
diff --git a/arch/s390/mm/maccess.c b/arch/s390/mm/maccess.c
index 7be0647..1864a8b 100644
--- a/arch/s390/mm/maccess.c
+++ b/arch/s390/mm/maccess.c
@@ -16,6 +16,7 @@
 #include <linux/cpu.h>
 #include <asm/ctl_reg.h>
 #include <asm/io.h>
+#include <asm/stacktrace.h>
 
 static notrace long s390_kernel_write_odd(void *dst, const void *src, size_t size)
 {
@@ -51,21 +52,22 @@
  * Therefore we have a read-modify-write sequence: the function reads eight
  * bytes from destination at an eight byte boundary, modifies the bytes
  * requested and writes the result back in a loop.
- *
- * Note: this means that this function may not be called concurrently on
- *	 several cpus with overlapping words, since this may potentially
- *	 cause data corruption.
  */
+static DEFINE_SPINLOCK(s390_kernel_write_lock);
+
 void notrace s390_kernel_write(void *dst, const void *src, size_t size)
 {
+	unsigned long flags;
 	long copied;
 
+	spin_lock_irqsave(&s390_kernel_write_lock, flags);
 	while (size) {
 		copied = s390_kernel_write_odd(dst, src, size);
 		dst += copied;
 		src += copied;
 		size -= copied;
 	}
+	spin_unlock_irqrestore(&s390_kernel_write_lock, flags);
 }
 
 static int __memcpy_real(void *dest, void *src, size_t count)
@@ -89,10 +91,8 @@
 	return rc;
 }
 
-/*
- * Copy memory in real mode (kernel to kernel)
- */
-int memcpy_real(void *dest, void *src, size_t count)
+static unsigned long _memcpy_real(unsigned long dest, unsigned long src,
+				  unsigned long count)
 {
 	int irqs_disabled, rc;
 	unsigned long flags;
@@ -103,7 +103,7 @@
 	irqs_disabled = arch_irqs_disabled_flags(flags);
 	if (!irqs_disabled)
 		trace_hardirqs_off();
-	rc = __memcpy_real(dest, src, count);
+	rc = __memcpy_real((void *) dest, (void *) src, (size_t) count);
 	if (!irqs_disabled)
 		trace_hardirqs_on();
 	__arch_local_irq_ssm(flags);
@@ -111,6 +111,23 @@
 }
 
 /*
+ * Copy memory in real mode (kernel to kernel)
+ */
+int memcpy_real(void *dest, void *src, size_t count)
+{
+	if (S390_lowcore.nodat_stack != 0)
+		return CALL_ON_STACK(_memcpy_real, S390_lowcore.nodat_stack,
+				     3, dest, src, count);
+	/*
+	 * This is a really early memcpy_real call, the stacks are
+	 * not set up yet. Just call _memcpy_real on the early boot
+	 * stack
+	 */
+	return _memcpy_real((unsigned long) dest,(unsigned long) src,
+			    (unsigned long) count);
+}
+
+/*
  * Copy memory in absolute mode (kernel to kernel)
  */
 void memcpy_absolute(void *dest, void *src, size_t count)
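
s390_kernel_write() updates memory as an aligned read-modify-write of eight-byte words, so two unserialized writers hitting the same word could lose each other's bytes; the new spinlock serializes the whole loop, which is why the old warning comment could be dropped. A user-space sketch of the same idea, with a pthread mutex standing in for the spinlock and a plain array as the destination:

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static pthread_mutex_t write_lock = PTHREAD_MUTEX_INITIALIZER;
static uint64_t target[2];	/* destination words, 8-byte aligned */

/* modify 'size' bytes at byte offset 'off' under the lock, word by word */
static void serialized_write(size_t off, const void *src, size_t size)
{
	pthread_mutex_lock(&write_lock);
	while (size) {
		size_t word = off / sizeof(uint64_t);
		size_t in_word = off % sizeof(uint64_t);
		size_t chunk = sizeof(uint64_t) - in_word;

		if (chunk > size)
			chunk = size;
		/* read-modify-write of one aligned word */
		uint64_t tmp = target[word];
		memcpy((char *)&tmp + in_word, src, chunk);
		target[word] = tmp;
		off += chunk;
		src = (const char *)src + chunk;
		size -= chunk;
	}
	pthread_mutex_unlock(&write_lock);
}

int main(void)
{
	serialized_write(6, "ABCD", 4);	/* spans the two words */
	printf("%#llx %#llx\n", (unsigned long long)target[0],
	       (unsigned long long)target[1]);
	return 0;
}
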
diff --git a/arch/s390/mm/mem_detect.c b/arch/s390/mm/mem_detect.c
deleted file mode 100644
index 21f6c82..0000000
--- a/arch/s390/mm/mem_detect.c
+++ /dev/null
@@ -1,62 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright IBM Corp. 2008, 2009
- *
- * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
- */
-
-#include <linux/kernel.h>
-#include <linux/memblock.h>
-#include <linux/init.h>
-#include <linux/debugfs.h>
-#include <linux/seq_file.h>
-#include <asm/ipl.h>
-#include <asm/sclp.h>
-#include <asm/setup.h>
-
-#define CHUNK_READ_WRITE 0
-#define CHUNK_READ_ONLY  1
-
-static inline void memblock_physmem_add(phys_addr_t start, phys_addr_t size)
-{
-	memblock_dbg("memblock_physmem_add: [%#016llx-%#016llx]\n",
-		     start, start + size - 1);
-	memblock_add_range(&memblock.memory, start, size, 0, 0);
-	memblock_add_range(&memblock.physmem, start, size, 0, 0);
-}
-
-void __init detect_memory_memblock(void)
-{
-	unsigned long memsize, rnmax, rzm, addr, size;
-	int type;
-
-	rzm = sclp.rzm;
-	rnmax = sclp.rnmax;
-	memsize = rzm * rnmax;
-	if (!rzm)
-		rzm = 1UL << 17;
-	max_physmem_end = memsize;
-	addr = 0;
-	/* keep memblock lists close to the kernel */
-	memblock_set_bottom_up(true);
-	do {
-		size = 0;
-		/* assume lowcore is writable */
-		type = addr ? tprot(addr) : CHUNK_READ_WRITE;
-		do {
-			size += rzm;
-			if (max_physmem_end && addr + size >= max_physmem_end)
-				break;
-		} while (type == tprot(addr + size));
-		if (type == CHUNK_READ_WRITE || type == CHUNK_READ_ONLY) {
-			if (max_physmem_end && (addr + size > max_physmem_end))
-				size = max_physmem_end - addr;
-			memblock_physmem_add(addr, size);
-		}
-		addr += size;
-	} while (addr < max_physmem_end);
-	memblock_set_bottom_up(false);
-	if (!max_physmem_end)
-		max_physmem_end = memblock_end_of_DRAM();
-	memblock_dump_all();
-}
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
index 0a7627c..cbc718b 100644
--- a/arch/s390/mm/mmap.c
+++ b/arch/s390/mm/mmap.c
@@ -24,19 +24,9 @@
 {
 	if (!(current->flags & PF_RANDOMIZE))
 		return 0;
-	if (current->personality & ADDR_NO_RANDOMIZE)
-		return 0;
 	return STACK_RND_MASK << PAGE_SHIFT;
 }
 
-/*
- * Top of mmap area (just below the process stack).
- *
- * Leave at least a ~32 MB hole.
- */
-#define MIN_GAP (32*1024*1024)
-#define MAX_GAP (STACK_TOP/6*5)
-
 static inline int mmap_is_legacy(struct rlimit *rlim_stack)
 {
 	if (current->personality & ADDR_COMPAT_LAYOUT)
@@ -60,13 +50,26 @@
 				      struct rlimit *rlim_stack)
 {
 	unsigned long gap = rlim_stack->rlim_cur;
+	unsigned long pad = stack_maxrandom_size() + stack_guard_gap;
+	unsigned long gap_min, gap_max;
 
-	if (gap < MIN_GAP)
-		gap = MIN_GAP;
-	else if (gap > MAX_GAP)
-		gap = MAX_GAP;
-	gap &= PAGE_MASK;
-	return STACK_TOP - stack_maxrandom_size() - rnd - gap;
+	/* Values close to RLIM_INFINITY can overflow. */
+	if (gap + pad > gap)
+		gap += pad;
+
+	/*
+	 * Top of mmap area (just below the process stack).
+	 * Leave at least a ~32 MB hole.
+	 */
+	gap_min = 32 * 1024 * 1024UL;
+	gap_max = (STACK_TOP / 6) * 5;
+
+	if (gap < gap_min)
+		gap = gap_min;
+	else if (gap > gap_max)
+		gap = gap_max;
+
+	return PAGE_ALIGN(STACK_TOP - gap - rnd);
 }
 
 unsigned long
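
mmap_base() above folds the old MIN_GAP/MAX_GAP limits into the function, adds the stack padding up front, and skips that addition when it would wrap for RLIMIT_STACK values near RLIM_INFINITY. The arithmetic on its own, with an illustrative STACK_TOP rather than the real s390 value:

#include <stdio.h>

#define STACK_TOP	(1UL << 42)	/* illustrative, not the s390 value */
#define PAGE_MASK	(~0xfffUL)
#define PAGE_ALIGN(x)	(((x) + 0xfffUL) & PAGE_MASK)

static unsigned long mmap_base(unsigned long rlim_stack, unsigned long pad,
			       unsigned long rnd)
{
	unsigned long gap = rlim_stack;
	unsigned long gap_min = 32 * 1024 * 1024UL;	/* ~32 MB hole */
	unsigned long gap_max = (STACK_TOP / 6) * 5;

	/* values close to RLIM_INFINITY would overflow, skip the pad then */
	if (gap + pad > gap)
		gap += pad;

	if (gap < gap_min)
		gap = gap_min;
	else if (gap > gap_max)
		gap = gap_max;

	return PAGE_ALIGN(STACK_TOP - gap - rnd);
}

int main(void)
{
	printf("base: %#lx\n", mmap_base(8UL << 20, 1UL << 20, 0));
	printf("base with huge rlimit: %#lx\n", mmap_base(~0UL, 1UL << 20, 0));
	return 0;
}
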
diff --git a/arch/s390/mm/page-states.c b/arch/s390/mm/page-states.c
index dc3cede..fc14189 100644
--- a/arch/s390/mm/page-states.c
+++ b/arch/s390/mm/page-states.c
@@ -21,17 +21,11 @@
 
 static int __init cmma(char *str)
 {
-	char *parm;
+	bool enabled;
 
-	parm = strstrip(str);
-	if (strcmp(parm, "yes") == 0 || strcmp(parm, "on") == 0) {
-		cmma_flag = 1;
-		return 1;
-	}
-	cmma_flag = 0;
-	if (strcmp(parm, "no") == 0 || strcmp(parm, "off") == 0)
-		return 1;
-	return 0;
+	if (!kstrtobool(str, &enabled))
+		cmma_flag = enabled;
+	return 1;
 }
 __setup("cmma=", cmma);
 
diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c
index 814f265..3dd253f 100644
--- a/arch/s390/mm/pgalloc.c
+++ b/arch/s390/mm/pgalloc.c
@@ -17,8 +17,6 @@
 
 #ifdef CONFIG_PGSTE
 
-static int page_table_allocate_pgste_min = 0;
-static int page_table_allocate_pgste_max = 1;
 int page_table_allocate_pgste = 0;
 EXPORT_SYMBOL(page_table_allocate_pgste);
 
@@ -29,8 +27,8 @@
 		.maxlen		= sizeof(int),
 		.mode		= S_IRUGO | S_IWUSR,
 		.proc_handler	= proc_dointvec_minmax,
-		.extra1		= &page_table_allocate_pgste_min,
-		.extra2		= &page_table_allocate_pgste_max,
+		.extra1		= SYSCTL_ZERO,
+		.extra2		= SYSCTL_ONE,
 	},
 	{ }
 };
@@ -131,6 +129,7 @@
 	}
 
 	pgd = mm->pgd;
+	mm_dec_nr_pmds(mm);
 	mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
 	mm->context.asce_limit = _REGION3_SIZE;
 	mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
@@ -211,7 +210,7 @@
 	page = alloc_page(GFP_KERNEL);
 	if (!page)
 		return NULL;
-	if (!pgtable_page_ctor(page)) {
+	if (!pgtable_pte_page_ctor(page)) {
 		__free_page(page);
 		return NULL;
 	}
@@ -257,7 +256,7 @@
 		atomic_xor_bits(&page->_refcount, 3U << 24);
 	}
 
-	pgtable_page_dtor(page);
+	pgtable_pte_page_dtor(page);
 	__free_page(page);
 }
 
@@ -289,7 +288,7 @@
 	tlb_remove_table(tlb, table);
 }
 
-static void __tlb_remove_table(void *_table)
+void __tlb_remove_table(void *_table)
 {
 	unsigned int mask = (unsigned long) _table & 3;
 	void *table = (void *)((unsigned long) _table ^ mask);
@@ -309,73 +308,12 @@
 	case 3:		/* 4K page table with pgstes */
 		if (mask & 3)
 			atomic_xor_bits(&page->_refcount, 3 << 24);
-		pgtable_page_dtor(page);
+		pgtable_pte_page_dtor(page);
 		__free_page(page);
 		break;
 	}
 }
 
-static void tlb_remove_table_smp_sync(void *arg)
-{
-	/* Simply deliver the interrupt */
-}
-
-static void tlb_remove_table_one(void *table)
-{
-	/*
-	 * This isn't an RCU grace period and hence the page-tables cannot be
-	 * assumed to be actually RCU-freed.
-	 *
-	 * It is however sufficient for software page-table walkers that rely
-	 * on IRQ disabling. See the comment near struct mmu_table_batch.
-	 */
-	smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
-	__tlb_remove_table(table);
-}
-
-static void tlb_remove_table_rcu(struct rcu_head *head)
-{
-	struct mmu_table_batch *batch;
-	int i;
-
-	batch = container_of(head, struct mmu_table_batch, rcu);
-
-	for (i = 0; i < batch->nr; i++)
-		__tlb_remove_table(batch->tables[i]);
-
-	free_page((unsigned long)batch);
-}
-
-void tlb_table_flush(struct mmu_gather *tlb)
-{
-	struct mmu_table_batch **batch = &tlb->batch;
-
-	if (*batch) {
-		call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
-		*batch = NULL;
-	}
-}
-
-void tlb_remove_table(struct mmu_gather *tlb, void *table)
-{
-	struct mmu_table_batch **batch = &tlb->batch;
-
-	tlb->mm->context.flush_mm = 1;
-	if (*batch == NULL) {
-		*batch = (struct mmu_table_batch *)
-			__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
-		if (*batch == NULL) {
-			__tlb_flush_mm_lazy(tlb->mm);
-			tlb_remove_table_one(table);
-			return;
-		}
-		(*batch)->nr = 0;
-	}
-	(*batch)->tables[(*batch)->nr++] = table;
-	if ((*batch)->nr == MAX_TABLE_BATCH)
-		tlb_flush_mmu(tlb);
-}
-
 /*
  * Base infrastructure required to generate basic asces, region, segment,
  * and page tables that do not make use of enhanced features like EDAT1.
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index f2cc7da..9ebd012 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -301,12 +301,13 @@
 }
 EXPORT_SYMBOL(ptep_xchg_lazy);
 
-pte_t ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr,
+pte_t ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr,
 			     pte_t *ptep)
 {
 	pgste_t pgste;
 	pte_t old;
 	int nodat;
+	struct mm_struct *mm = vma->vm_mm;
 
 	preempt_disable();
 	pgste = ptep_xchg_start(mm, addr, ptep);
@@ -318,12 +319,12 @@
 	}
 	return old;
 }
-EXPORT_SYMBOL(ptep_modify_prot_start);
 
-void ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
-			     pte_t *ptep, pte_t pte)
+void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr,
+			     pte_t *ptep, pte_t old_pte, pte_t pte)
 {
 	pgste_t pgste;
+	struct mm_struct *mm = vma->vm_mm;
 
 	if (!MACHINE_HAS_NX)
 		pte_val(pte) &= ~_PAGE_NOEXEC;
@@ -337,7 +338,6 @@
 	}
 	preempt_enable();
 }
-EXPORT_SYMBOL(ptep_modify_prot_commit);
 
 static inline void pmdp_idte_local(struct mm_struct *mm,
 				   unsigned long addr, pmd_t *pmdp)
@@ -410,6 +410,7 @@
 	return old;
 }
 
+#ifdef CONFIG_PGSTE
 static pmd_t *pmd_alloc_map(struct mm_struct *mm, unsigned long addr)
 {
 	pgd_t *pgd;
@@ -427,6 +428,7 @@
 	pmd = pmd_alloc(mm, pud, addr);
 	return pmd;
 }
+#endif
 
 pmd_t pmdp_xchg_direct(struct mm_struct *mm, unsigned long addr,
 		       pmd_t *pmdp, pmd_t new)
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index db55561..b403fa1 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -4,14 +4,13 @@
  *    Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
  */
 
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
 #include <linux/pfn.h>
 #include <linux/mm.h>
 #include <linux/init.h>
 #include <linux/list.h>
 #include <linux/hugetlb.h>
 #include <linux/slab.h>
-#include <linux/memblock.h>
 #include <asm/cacheflush.h>
 #include <asm/pgalloc.h>
 #include <asm/pgtable.h>
@@ -36,7 +35,7 @@
 
 	if (slab_is_available())
 		return (void *)__get_free_pages(GFP_KERNEL, order);
-	return (void *) memblock_alloc(size, size);
+	return (void *) memblock_phys_alloc(size, size);
 }
 
 void *vmem_crst_alloc(unsigned long val)
@@ -57,7 +56,7 @@
 	if (slab_is_available())
 		pte = (pte_t *) page_table_alloc(&init_mm);
 	else
-		pte = (pte_t *) memblock_alloc(size, size);
+		pte = (pte_t *) memblock_phys_alloc(size, size);
 	if (!pte)
 		return NULL;
 	memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE);
@@ -414,6 +413,8 @@
 	__set_memory((unsigned long)_sinittext,
 		     (unsigned long)(_einittext - _sinittext) >> PAGE_SHIFT,
 		     SET_MEMORY_RO | SET_MEMORY_X);
+	__set_memory(__stext_dma, (__etext_dma - __stext_dma) >> PAGE_SHIFT,
+		     SET_MEMORY_RO | SET_MEMORY_X);
 	pr_info("Write protected kernel read-only data: %luk\n",
 		(unsigned long)(__end_rodata - _stext) >> 10);
 }