Update Linux to v5.4.2

Change-Id: Idf6911045d9d382da2cfe01b1edff026404ac8fd
diff --git a/arch/s390/boot/.gitignore b/arch/s390/boot/.gitignore
index 017d591..16ff906 100644
--- a/arch/s390/boot/.gitignore
+++ b/arch/s390/boot/.gitignore
@@ -1,2 +1,3 @@
 image
 bzImage
+section_cmp.*
diff --git a/arch/s390/boot/Makefile b/arch/s390/boot/Makefile
index 9e6668e..e2c47d3 100644
--- a/arch/s390/boot/Makefile
+++ b/arch/s390/boot/Makefile
@@ -6,42 +6,67 @@
 KCOV_INSTRUMENT := n
 GCOV_PROFILE := n
 UBSAN_SANITIZE := n
+KASAN_SANITIZE := n
 
 KBUILD_AFLAGS := $(KBUILD_AFLAGS_DECOMPRESSOR)
 KBUILD_CFLAGS := $(KBUILD_CFLAGS_DECOMPRESSOR)
 
 #
-# Use -march=z900 for als.c to be able to print an error
+# Use minimum architecture for als.c to be able to print an error
 # message if the kernel is started on a machine which is too old
 #
-ifneq ($(CC_FLAGS_MARCH),-march=z900)
+ifndef CONFIG_CC_IS_CLANG
+CC_FLAGS_MARCH_MINIMUM := -march=z900
+else
+CC_FLAGS_MARCH_MINIMUM := -march=z10
+endif
+
+ifneq ($(CC_FLAGS_MARCH),$(CC_FLAGS_MARCH_MINIMUM))
 AFLAGS_REMOVE_head.o		+= $(CC_FLAGS_MARCH)
-AFLAGS_head.o			+= -march=z900
+AFLAGS_head.o			+= $(CC_FLAGS_MARCH_MINIMUM)
 AFLAGS_REMOVE_mem.o		+= $(CC_FLAGS_MARCH)
-AFLAGS_mem.o			+= -march=z900
+AFLAGS_mem.o			+= $(CC_FLAGS_MARCH_MINIMUM)
 CFLAGS_REMOVE_als.o		+= $(CC_FLAGS_MARCH)
-CFLAGS_als.o			+= -march=z900
+CFLAGS_als.o			+= $(CC_FLAGS_MARCH_MINIMUM)
 CFLAGS_REMOVE_sclp_early_core.o	+= $(CC_FLAGS_MARCH)
-CFLAGS_sclp_early_core.o	+= -march=z900
+CFLAGS_sclp_early_core.o	+= $(CC_FLAGS_MARCH_MINIMUM)
 endif
 
 CFLAGS_sclp_early_core.o += -I$(srctree)/drivers/s390/char
 
-obj-y	:= head.o als.o ebcdic.o sclp_early_core.o mem.o
-targets	:= bzImage startup.a $(obj-y)
+obj-y	:= head.o als.o startup.o mem_detect.o ipl_parm.o ipl_report.o
+obj-y	+= string.o ebcdic.o sclp_early_core.o mem.o ipl_vmparm.o cmdline.o
+obj-y	+= version.o pgm_check_info.o ctype.o text_dma.o
+obj-$(CONFIG_PROTECTED_VIRTUALIZATION_GUEST)	+= uv.o
+obj-$(CONFIG_RELOCATABLE)	+= machine_kexec_reloc.o
+obj-$(CONFIG_RANDOMIZE_BASE)	+= kaslr.o
+targets	:= bzImage startup.a section_cmp.boot.data section_cmp.boot.preserved.data $(obj-y)
 subdir-	:= compressed
 
 OBJECTS := $(addprefix $(obj)/,$(obj-y))
 
-$(obj)/bzImage: $(obj)/compressed/vmlinux FORCE
+quiet_cmd_section_cmp = SECTCMP $*
+define cmd_section_cmp
+	s1=`$(OBJDUMP) -t -j "$*" "$<" | sort | \
+		sed -n "/0000000000000000/! s/.*\s$*\s\+//p" | sha256sum`; \
+	s2=`$(OBJDUMP) -t -j "$*" "$(word 2,$^)" | sort | \
+		sed -n "/0000000000000000/! s/.*\s$*\s\+//p" | sha256sum`; \
+	if [ "$$s1" != "$$s2" ]; then \
+		echo "error: section $* differs between $< and $(word 2,$^)" >&2; \
+		exit 1; \
+	fi; \
+	touch $@
+endef
+
+$(obj)/bzImage: $(obj)/compressed/vmlinux $(obj)/section_cmp.boot.data $(obj)/section_cmp.boot.preserved.data FORCE
 	$(call if_changed,objcopy)
 
+$(obj)/section_cmp%: vmlinux $(obj)/compressed/vmlinux FORCE
+	$(call if_changed,section_cmp)
+
 $(obj)/compressed/vmlinux: $(obj)/startup.a FORCE
 	$(Q)$(MAKE) $(build)=$(obj)/compressed $@
 
-quiet_cmd_ar = AR      $@
-      cmd_ar = rm -f $@; $(AR) rcsTP$(KBUILD_ARFLAGS) $@ $(filter $(OBJECTS), $^)
-
 $(obj)/startup.a: $(OBJECTS) FORCE
 	$(call if_changed,ar)
 
@@ -49,6 +74,6 @@
 	sh -x  $(srctree)/$(obj)/install.sh $(KERNELRELEASE) $(obj)/bzImage \
 	      System.map "$(INSTALL_PATH)"
 
-chkbss := $(OBJECTS)
-chkbss-target := $(obj)/startup.a
+chkbss := $(obj-y)
+chkbss-target := startup.a
 include $(srctree)/arch/s390/scripts/Makefile.chkbss
diff --git a/arch/s390/boot/als.c b/arch/s390/boot/als.c
index d592e0d..ff6801d 100644
--- a/arch/s390/boot/als.c
+++ b/arch/s390/boot/als.c
@@ -7,6 +7,7 @@
 #include <asm/facility.h>
 #include <asm/lowcore.h>
 #include <asm/sclp.h>
+#include "boot.h"
 
 /*
  * The code within this file will be called very early. It may _not_
@@ -58,7 +59,7 @@
 	*str = '\0';
 }
 
-static void print_missing_facilities(void)
+void print_missing_facilities(void)
 {
 	static char als_str[80] = "Missing facilities: ";
 	unsigned long val;
@@ -90,7 +91,6 @@
 	}
 	strcat(als_str, "\n");
 	sclp_early_printk(als_str);
-	sclp_early_printk("See Principles of Operations for facility bits\n");
 }
 
 static void facility_mismatch(void)
@@ -98,27 +98,15 @@
 	sclp_early_printk("The Linux kernel requires more recent processor hardware\n");
 	print_machine_type();
 	print_missing_facilities();
-	disabled_wait(0x8badcccc);
+	sclp_early_printk("See Principles of Operations for facility bits\n");
+	disabled_wait();
 }
 
 void verify_facilities(void)
 {
 	int i;
 
-	for (i = 0; i < ARRAY_SIZE(S390_lowcore.stfle_fac_list); i++)
-		S390_lowcore.stfle_fac_list[i] = 0;
-	asm volatile(
-		"	stfl	0(0)\n"
-		: "=m" (S390_lowcore.stfl_fac_list));
-	S390_lowcore.stfle_fac_list[0] = (u64)S390_lowcore.stfl_fac_list << 32;
-	if (S390_lowcore.stfl_fac_list & 0x01000000) {
-		register unsigned long reg0 asm("0") = ARRAY_SIZE(als) - 1;
-
-		asm volatile(".insn s,0xb2b00000,0(%1)" /* stfle */
-			     : "+d" (reg0)
-			     : "a" (&S390_lowcore.stfle_fac_list)
-			     : "memory", "cc");
-	}
+	__stfle(S390_lowcore.stfle_fac_list, ARRAY_SIZE(S390_lowcore.stfle_fac_list));
 	for (i = 0; i < ARRAY_SIZE(als); i++) {
 		if ((S390_lowcore.stfle_fac_list[i] & als[i]) != als[i])
 			facility_mismatch();
diff --git a/arch/s390/boot/boot.h b/arch/s390/boot/boot.h
new file mode 100644
index 0000000..2ea603f
--- /dev/null
+++ b/arch/s390/boot/boot.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef BOOT_BOOT_H
+#define BOOT_BOOT_H
+
+void startup_kernel(void);
+void detect_memory(void);
+void store_ipl_parmblock(void);
+void setup_boot_command_line(void);
+void parse_boot_command_line(void);
+void setup_memory_end(void);
+void verify_facilities(void);
+void print_missing_facilities(void);
+void print_pgm_check_info(void);
+unsigned long get_random_base(unsigned long safe_addr);
+
+extern int kaslr_enabled;
+extern const char kernel_version[];
+
+unsigned long read_ipl_report(unsigned long safe_offset);
+
+#endif /* BOOT_BOOT_H */
diff --git a/arch/s390/boot/cmdline.c b/arch/s390/boot/cmdline.c
new file mode 100644
index 0000000..73d826c
--- /dev/null
+++ b/arch/s390/boot/cmdline.c
@@ -0,0 +1,2 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "../../../lib/cmdline.c"
diff --git a/arch/s390/boot/compressed/.gitignore b/arch/s390/boot/compressed/.gitignore
index 45aeb4f..e72fcd7 100644
--- a/arch/s390/boot/compressed/.gitignore
+++ b/arch/s390/boot/compressed/.gitignore
@@ -1,5 +1,2 @@
-sizes.h
 vmlinux
 vmlinux.lds
-vmlinux.scr.lds
-vmlinux.bin.full
diff --git a/arch/s390/boot/compressed/Makefile b/arch/s390/boot/compressed/Makefile
index b375c6c..fa529c5 100644
--- a/arch/s390/boot/compressed/Makefile
+++ b/arch/s390/boot/compressed/Makefile
@@ -8,14 +8,16 @@
 KCOV_INSTRUMENT := n
 GCOV_PROFILE := n
 UBSAN_SANITIZE := n
+KASAN_SANITIZE := n
 
-obj-y	:= $(if $(CONFIG_KERNEL_UNCOMPRESSED),,head.o misc.o) piggy.o
+obj-y	:= $(if $(CONFIG_KERNEL_UNCOMPRESSED),,decompressor.o) piggy.o info.o
 targets	:= vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2
 targets += vmlinux.bin.xz vmlinux.bin.lzma vmlinux.bin.lzo vmlinux.bin.lz4
-targets += vmlinux.scr.lds $(obj-y) $(if $(CONFIG_KERNEL_UNCOMPRESSED),,sizes.h)
+targets += info.bin $(obj-y)
 
 KBUILD_AFLAGS := $(KBUILD_AFLAGS_DECOMPRESSOR)
 KBUILD_CFLAGS := $(KBUILD_CFLAGS_DECOMPRESSOR)
+OBJCOPYFLAGS :=
 
 OBJECTS := $(addprefix $(obj)/,$(obj-y))
 
@@ -23,23 +25,16 @@
 $(obj)/vmlinux: $(obj)/vmlinux.lds $(objtree)/arch/s390/boot/startup.a $(OBJECTS) FORCE
 	$(call if_changed,ld)
 
-# extract required uncompressed vmlinux symbols and adjust them to reflect offsets inside vmlinux.bin
-sed-sizes := -e 's/^\([0-9a-fA-F]*\) . \(__bss_start\|_end\)$$/\#define SZ\2 (0x\1 - 0x100000)/p'
+OBJCOPYFLAGS_info.bin := -O binary --only-section=.vmlinux.info --set-section-flags .vmlinux.info=load
+$(obj)/info.bin: vmlinux FORCE
+	$(call if_changed,objcopy)
 
-quiet_cmd_sizes = GEN     $@
-      cmd_sizes = $(NM) $< | sed -n $(sed-sizes) > $@
+OBJCOPYFLAGS_info.o := -I binary -O elf64-s390 -B s390:64-bit --rename-section .data=.vmlinux.info
+$(obj)/info.o: $(obj)/info.bin FORCE
+	$(call if_changed,objcopy)
 
-$(obj)/sizes.h: vmlinux
-	$(call if_changed,sizes)
-
-AFLAGS_head.o += -I$(objtree)/$(obj)
-$(obj)/head.o: $(obj)/sizes.h
-
-CFLAGS_misc.o += -I$(objtree)/$(obj)
-$(obj)/misc.o: $(obj)/sizes.h
-
-OBJCOPYFLAGS_vmlinux.bin :=  -R .comment -S
-$(obj)/vmlinux.bin: vmlinux
+OBJCOPYFLAGS_vmlinux.bin := -O binary --remove-section=.comment --remove-section=.vmlinux.info -S
+$(obj)/vmlinux.bin: vmlinux FORCE
 	$(call if_changed,objcopy)
 
 vmlinux.bin.all-y := $(obj)/vmlinux.bin
@@ -64,10 +59,10 @@
 $(obj)/vmlinux.bin.xz: $(vmlinux.bin.all-y) FORCE
 	$(call if_changed,xzkern)
 
-LDFLAGS_piggy.o := -r --format binary --oformat $(LD_BFD) -T
-$(obj)/piggy.o: $(obj)/vmlinux.scr.lds $(obj)/vmlinux.bin$(suffix-y)
-	$(call if_changed,ld)
+OBJCOPYFLAGS_piggy.o := -I binary -O elf64-s390 -B s390:64-bit --rename-section .data=.vmlinux.bin.compressed
+$(obj)/piggy.o: $(obj)/vmlinux.bin$(suffix-y) FORCE
+	$(call if_changed,objcopy)
 
-chkbss := $(filter-out $(obj)/misc.o $(obj)/piggy.o,$(OBJECTS))
-chkbss-target := $(obj)/vmlinux.bin
+chkbss := $(filter-out piggy.o info.o, $(obj-y))
+chkbss-target := vmlinux.bin
 include $(srctree)/arch/s390/scripts/Makefile.chkbss
diff --git a/arch/s390/boot/compressed/decompressor.c b/arch/s390/boot/compressed/decompressor.c
new file mode 100644
index 0000000..4504663
--- /dev/null
+++ b/arch/s390/boot/compressed/decompressor.c
@@ -0,0 +1,85 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Definitions and wrapper functions for kernel decompressor
+ *
+ * Copyright IBM Corp. 2010
+ *
+ * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <asm/page.h>
+#include "decompressor.h"
+
+/*
+ * gzip declarations
+ */
+#define STATIC static
+#define STATIC_RW_DATA static __section(.data)
+
+#undef memset
+#undef memcpy
+#undef memmove
+#define memmove memmove
+#define memzero(s, n) memset((s), 0, (n))
+
+/* Symbols defined by linker scripts */
+extern char _end[];
+extern unsigned char _compressed_start[];
+extern unsigned char _compressed_end[];
+
+#ifdef CONFIG_HAVE_KERNEL_BZIP2
+#define HEAP_SIZE	0x400000
+#else
+#define HEAP_SIZE	0x10000
+#endif
+
+static unsigned long free_mem_ptr = (unsigned long) _end;
+static unsigned long free_mem_end_ptr = (unsigned long) _end + HEAP_SIZE;
+
+#ifdef CONFIG_KERNEL_GZIP
+#include "../../../../lib/decompress_inflate.c"
+#endif
+
+#ifdef CONFIG_KERNEL_BZIP2
+#include "../../../../lib/decompress_bunzip2.c"
+#endif
+
+#ifdef CONFIG_KERNEL_LZ4
+#include "../../../../lib/decompress_unlz4.c"
+#endif
+
+#ifdef CONFIG_KERNEL_LZMA
+#include "../../../../lib/decompress_unlzma.c"
+#endif
+
+#ifdef CONFIG_KERNEL_LZO
+#include "../../../../lib/decompress_unlzo.c"
+#endif
+
+#ifdef CONFIG_KERNEL_XZ
+#include "../../../../lib/decompress_unxz.c"
+#endif
+
+#define decompress_offset ALIGN((unsigned long)_end + HEAP_SIZE, PAGE_SIZE)
+
+unsigned long mem_safe_offset(void)
+{
+	/*
+	 * due to 4MB HEAP_SIZE for bzip2
+	 * 'decompress_offset + vmlinux.image_size' could be larger than
+	 * kernel at final position + its .bss, so take the larger of the two
+	 */
+	return max(decompress_offset + vmlinux.image_size,
+		   vmlinux.default_lma + vmlinux.image_size + vmlinux.bss_size);
+}
+
+void *decompress_kernel(void)
+{
+	void *output = (void *)decompress_offset;
+
+	__decompress(_compressed_start, _compressed_end - _compressed_start,
+		     NULL, NULL, output, 0, NULL, error);
+	return output;
+}
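
The mem_safe_offset() calculation above picks the higher of two candidates, so that data
placed at or above the returned offset is overwritten neither by the decompression buffer
nor by the decompressed kernel and its cleared .bss. A worked example with hypothetical
numbers (bzip2, so HEAP_SIZE = 4 MB):

	decompress_offset   = ALIGN(_end + HEAP_SIZE, PAGE_SIZE)

	_end                = 0x0090000   (end of the decompressor image)
	decompress_offset   = 0x0490000
	vmlinux.image_size  = 0x0c00000
	vmlinux.default_lma = 0x0100000
	vmlinux.bss_size    = 0x0200000

	candidate 1 = decompress_offset + image_size        = 0x1090000
	candidate 2 = default_lma + image_size + bss_size   = 0x0f00000
	mem_safe_offset()   = max(candidate 1, candidate 2) = 0x1090000
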
diff --git a/arch/s390/boot/compressed/decompressor.h b/arch/s390/boot/compressed/decompressor.h
new file mode 100644
index 0000000..c15eb71
--- /dev/null
+++ b/arch/s390/boot/compressed/decompressor.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef BOOT_COMPRESSED_DECOMPRESSOR_H
+#define BOOT_COMPRESSED_DECOMPRESSOR_H
+
+#ifdef CONFIG_KERNEL_UNCOMPRESSED
+static inline void *decompress_kernel(void) {}
+#else
+void *decompress_kernel(void);
+#endif
+unsigned long mem_safe_offset(void);
+void error(char *m);
+
+struct vmlinux_info {
+	unsigned long default_lma;
+	void (*entry)(void);
+	unsigned long image_size;	/* does not include .bss */
+	unsigned long bss_size;		/* uncompressed image .bss size */
+	unsigned long bootdata_off;
+	unsigned long bootdata_size;
+	unsigned long bootdata_preserved_off;
+	unsigned long bootdata_preserved_size;
+	unsigned long dynsym_start;
+	unsigned long rela_dyn_start;
+	unsigned long rela_dyn_end;
+};
+
+extern char _vmlinux_info[];
+#define vmlinux (*(struct vmlinux_info *)_vmlinux_info)
+
+#endif /* BOOT_COMPRESSED_DECOMPRESSOR_H */
diff --git a/arch/s390/boot/compressed/head.S b/arch/s390/boot/compressed/head.S
deleted file mode 100644
index df8dbbc..0000000
--- a/arch/s390/boot/compressed/head.S
+++ /dev/null
@@ -1,52 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Startup glue code to uncompress the kernel
- *
- * Copyright IBM Corp. 2010
- *
- *   Author(s):	Martin Schwidefsky <schwidefsky@de.ibm.com>
- */
-
-#include <linux/init.h>
-#include <linux/linkage.h>
-#include <asm/asm-offsets.h>
-#include <asm/thread_info.h>
-#include <asm/page.h>
-#include "sizes.h"
-
-__HEAD
-ENTRY(startup_decompressor)
-	basr	%r13,0			# get base
-.LPG1:
-	# setup stack
-	lg	%r15,.Lstack-.LPG1(%r13)
-	aghi	%r15,-160
-	brasl	%r14,decompress_kernel
-	# Set up registers for memory mover. We move the decompressed image to
-	# 0x100000, where startup_continue of the decompressed image is supposed
-	# to be.
-	lgr	%r4,%r2
-	lg	%r2,.Loffset-.LPG1(%r13)
-	lg	%r3,.Lmvsize-.LPG1(%r13)
-	lgr	%r5,%r3
-	# Move the memory mover someplace safe so it doesn't overwrite itself.
-	la	%r1,0x200
-	mvc	0(mover_end-mover,%r1),mover-.LPG1(%r13)
-	# When the memory mover is done we pass control to
-	# arch/s390/kernel/head64.S:startup_continue which lives at 0x100000 in
-	# the decompressed image.
-	lgr	%r6,%r2
-	br	%r1
-mover:
-	mvcle	%r2,%r4,0
-	jo	mover
-	br	%r6
-mover_end:
-
-	.align	8
-.Lstack:
-	.quad	0x8000 + (1<<(PAGE_SHIFT+THREAD_SIZE_ORDER))
-.Loffset:
-	.quad	0x100000
-.Lmvsize:
-	.quad	SZ__bss_start
diff --git a/arch/s390/boot/compressed/misc.c b/arch/s390/boot/compressed/misc.c
deleted file mode 100644
index f66ad73..0000000
--- a/arch/s390/boot/compressed/misc.c
+++ /dev/null
@@ -1,116 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Definitions and wrapper functions for kernel decompressor
- *
- * Copyright IBM Corp. 2010
- *
- * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
- */
-
-#include <linux/uaccess.h>
-#include <asm/page.h>
-#include <asm/sclp.h>
-#include <asm/ipl.h>
-#include "sizes.h"
-
-/*
- * gzip declarations
- */
-#define STATIC static
-
-#undef memset
-#undef memcpy
-#undef memmove
-#define memmove memmove
-#define memzero(s, n) memset((s), 0, (n))
-
-/* Symbols defined by linker scripts */
-extern char input_data[];
-extern int input_len;
-extern char _end[];
-extern char _bss[], _ebss[];
-
-static void error(char *m);
-
-static unsigned long free_mem_ptr;
-static unsigned long free_mem_end_ptr;
-
-#ifdef CONFIG_HAVE_KERNEL_BZIP2
-#define HEAP_SIZE	0x400000
-#else
-#define HEAP_SIZE	0x10000
-#endif
-
-#ifdef CONFIG_KERNEL_GZIP
-#include "../../../../lib/decompress_inflate.c"
-#endif
-
-#ifdef CONFIG_KERNEL_BZIP2
-#include "../../../../lib/decompress_bunzip2.c"
-#endif
-
-#ifdef CONFIG_KERNEL_LZ4
-#include "../../../../lib/decompress_unlz4.c"
-#endif
-
-#ifdef CONFIG_KERNEL_LZMA
-#include "../../../../lib/decompress_unlzma.c"
-#endif
-
-#ifdef CONFIG_KERNEL_LZO
-#include "../../../../lib/decompress_unlzo.c"
-#endif
-
-#ifdef CONFIG_KERNEL_XZ
-#include "../../../../lib/decompress_unxz.c"
-#endif
-
-static int puts(const char *s)
-{
-	sclp_early_printk(s);
-	return 0;
-}
-
-static void error(char *x)
-{
-	unsigned long long psw = 0x000a0000deadbeefULL;
-
-	puts("\n\n");
-	puts(x);
-	puts("\n\n -- System halted");
-
-	asm volatile("lpsw %0" : : "Q" (psw));
-}
-
-unsigned long decompress_kernel(void)
-{
-	void *output, *kernel_end;
-
-	output = (void *) ALIGN((unsigned long) _end + HEAP_SIZE, PAGE_SIZE);
-	kernel_end = output + SZ__bss_start;
-
-#ifdef CONFIG_BLK_DEV_INITRD
-	/*
-	 * Move the initrd right behind the end of the decompressed
-	 * kernel image. This also prevents initrd corruption caused by
-	 * bss clearing since kernel_end will always be located behind the
-	 * current bss section..
-	 */
-	if (INITRD_START && INITRD_SIZE && kernel_end > (void *) INITRD_START) {
-		memmove(kernel_end, (void *) INITRD_START, INITRD_SIZE);
-		INITRD_START = (unsigned long) kernel_end;
-	}
-#endif
-
-	/*
-	 * Clear bss section. free_mem_ptr and free_mem_end_ptr need to be
-	 * initialized afterwards since they reside in bss.
-	 */
-	memset(_bss, 0, _ebss - _bss);
-	free_mem_ptr = (unsigned long) _end;
-	free_mem_end_ptr = free_mem_ptr + HEAP_SIZE;
-
-	__decompress(input_data, input_len, NULL, NULL, output, 0, NULL, error);
-	return (unsigned long) output;
-}
-
diff --git a/arch/s390/boot/compressed/vmlinux.lds.S b/arch/s390/boot/compressed/vmlinux.lds.S
index b16ac8b..44561b2 100644
--- a/arch/s390/boot/compressed/vmlinux.lds.S
+++ b/arch/s390/boot/compressed/vmlinux.lds.S
@@ -1,5 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 #include <asm-generic/vmlinux.lds.h>
+#include <asm/vmlinux.lds.h>
 
 OUTPUT_FORMAT("elf64-s390", "elf64-s390", "elf64-s390")
 OUTPUT_ARCH(s390:64-bit)
@@ -8,9 +9,6 @@
 
 SECTIONS
 {
-	/* Be careful parts of head_64.S assume startup_32 is at
-	 * address 0.
-	 */
 	. = 0;
 	.head.text : {
 		_head = . ;
@@ -26,7 +24,7 @@
 	.rodata : {
 		_rodata = . ;
 		*(.rodata)	 /* read-only data */
-		*(EXCLUDE_FILE (*piggy.o) .rodata.compressed)
+		*(.rodata.*)
 		_erodata = . ;
 	}
 	.data :	{
@@ -35,14 +33,53 @@
 		*(.data.*)
 		_edata = . ;
 	}
-	startup_continue = 0x100000;
+	/*
+	 * .dma section for code, data, ex_table that need to stay below 2 GB,
+	 * even when the kernel is relocated above 2 GB.
+	 */
+	. = ALIGN(PAGE_SIZE);
+	_sdma = .;
+	.dma.text : {
+		_stext_dma = .;
+		*(.dma.text)
+		. = ALIGN(PAGE_SIZE);
+		_etext_dma = .;
+	}
+	. = ALIGN(16);
+	.dma.ex_table : {
+		_start_dma_ex_table = .;
+		KEEP(*(.dma.ex_table))
+		_stop_dma_ex_table = .;
+	}
+	.dma.data : { *(.dma.data) }
+	. = ALIGN(PAGE_SIZE);
+	_edma = .;
+
+	BOOT_DATA
+	BOOT_DATA_PRESERVED
+
+	/*
+	 * uncompressed image info used by the decompressor; it should match
+	 * struct vmlinux_info. It comes from the .vmlinux.info section of
+	 * the uncompressed vmlinux in the form of info.o.
+	 */
+	. = ALIGN(8);
+	.vmlinux.info : {
+		_vmlinux_info = .;
+		*(.vmlinux.info)
+	}
+
 #ifdef CONFIG_KERNEL_UNCOMPRESSED
 	. = 0x100000;
 #else
 	. = ALIGN(8);
 #endif
 	.rodata.compressed : {
-		*(.rodata.compressed)
+		_compressed_start = .;
+		*(.vmlinux.bin.compressed)
+		_compressed_end = .;
+		FILL(0xff);
+		. = ALIGN(4096);
 	}
 	. = ALIGN(256);
 	.bss : {
diff --git a/arch/s390/boot/compressed/vmlinux.scr.lds.S b/arch/s390/boot/compressed/vmlinux.scr.lds.S
deleted file mode 100644
index ff01d18..0000000
--- a/arch/s390/boot/compressed/vmlinux.scr.lds.S
+++ /dev/null
@@ -1,15 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-SECTIONS
-{
-  .rodata.compressed : {
-#ifndef CONFIG_KERNEL_UNCOMPRESSED
-	input_len = .;
-	LONG(input_data_end - input_data) input_data = .;
-#endif
-	*(.data)
-#ifndef CONFIG_KERNEL_UNCOMPRESSED
-	output_len = . - 4;
-	input_data_end = .;
-#endif
-	}
-}
diff --git a/arch/s390/boot/ctype.c b/arch/s390/boot/ctype.c
new file mode 100644
index 0000000..2495810
--- /dev/null
+++ b/arch/s390/boot/ctype.c
@@ -0,0 +1,2 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "../../../lib/ctype.c"
diff --git a/arch/s390/boot/head.S b/arch/s390/boot/head.S
index f721913..4b86a8d 100644
--- a/arch/s390/boot/head.S
+++ b/arch/s390/boot/head.S
@@ -60,6 +60,11 @@
 	.long	0x02000690,0x60000050
 	.long	0x020006e0,0x20000050
 
+	.org	__LC_RST_NEW_PSW		# 0x1a0
+	.quad	0,iplstart
+	.org	__LC_PGM_NEW_PSW		# 0x1d0
+	.quad	0x0000000180000000,startup_pgm_check_handler
+
 	.org	0x200
 
 #
@@ -302,39 +307,97 @@
 	xc	0x300(256),0x300
 	xc	0xe00(256),0xe00
 	xc	0xf00(256),0xf00
-	lctlg	%c0,%c15,0x200(%r0)	# initialize control registers
+	lctlg	%c0,%c15,.Lctl-.LPG0(%r13)	# load control registers
 	stcke	__LC_BOOT_CLOCK
 	mvc	__LC_LAST_UPDATE_CLOCK(8),__LC_BOOT_CLOCK+1
 	spt	6f-.LPG0(%r13)
 	mvc	__LC_LAST_UPDATE_TIMER(8),6f-.LPG0(%r13)
 	l	%r15,.Lstack-.LPG0(%r13)
-	ahi	%r15,-STACK_FRAME_OVERHEAD
 	brasl	%r14,verify_facilities
-#ifdef CONFIG_KERNEL_UNCOMPRESSED
-	jg	startup_continue
-#else
-	jg	startup_decompressor
-#endif
+	brasl	%r14,startup_kernel
 
 .Lstack:
-	.long	0x8000 + (1<<(PAGE_SHIFT+THREAD_SIZE_ORDER))
+	.long	0x8000 + (1<<(PAGE_SHIFT+BOOT_STACK_ORDER)) - STACK_FRAME_OVERHEAD
 	.align	8
 6:	.long	0x7fffffff,0xffffffff
 
+.Lctl:	.quad	0x04040000		# cr0: AFP registers & secondary space
+	.quad	0			# cr1: primary space segment table
+	.quad	.Lduct			# cr2: dispatchable unit control table
+	.quad	0			# cr3: instruction authorization
+	.quad	0xffff			# cr4: instruction authorization
+	.quad	.Lduct			# cr5: primary-aste origin
+	.quad	0			# cr6:	I/O interrupts
+	.quad	0			# cr7:	secondary space segment table
+	.quad	0			# cr8:	access registers translation
+	.quad	0			# cr9:	tracing off
+	.quad	0			# cr10: tracing off
+	.quad	0			# cr11: tracing off
+	.quad	0			# cr12: tracing off
+	.quad	0			# cr13: home space segment table
+	.quad	0xc0000000		# cr14: machine check handling off
+	.quad	.Llinkage_stack		# cr15: linkage stack operations
+
+	.section .dma.data,"aw",@progbits
+.Lduct: .long	0,.Laste,.Laste,0,.Lduald,0,0,0
+	.long	0,0,0,0,0,0,0,0
+.Llinkage_stack:
+	.long	0,0,0x89000000,0,0,0,0x8a000000,0
+	.align 64
+.Laste:	.quad	0,0xffffffffffffffff,0,0,0,0,0,0
+	.align	128
+.Lduald:.rept	8
+	.long	0x80000000,0,0,0	# invalid access-list entries
+	.endr
+	.previous
+
 #include "head_kdump.S"
 
 #
+# This program check handler is active immediately after kernel start
+# and until early_pgm_check_handler is set in kernel/early.c.
+# It saves the general/control registers and PSW in the save area,
+# prints the program check information and loads a disabled wait PSW
+# with the faulting address.
+#
+ENTRY(startup_pgm_check_handler)
+	stmg	%r0,%r15,__LC_SAVE_AREA_SYNC
+	la	%r1,4095
+	stctg	%c0,%c15,__LC_CREGS_SAVE_AREA-4095(%r1)
+	mvc	__LC_GPREGS_SAVE_AREA-4095(128,%r1),__LC_SAVE_AREA_SYNC
+	mvc	__LC_PSW_SAVE_AREA-4095(16,%r1),__LC_PGM_OLD_PSW
+	mvc	__LC_RETURN_PSW(16),__LC_PGM_OLD_PSW
+	ni	__LC_RETURN_PSW,0xfc	# remove IO and EX bits
+	ni	__LC_RETURN_PSW+1,0xfb	# remove MCHK bit
+	oi	__LC_RETURN_PSW+1,0x2	# set wait state bit
+	larl	%r2,.Lold_psw_disabled_wait
+	stg	%r2,__LC_PGM_NEW_PSW+8
+	l	%r15,.Ldump_info_stack-.Lold_psw_disabled_wait(%r2)
+	brasl	%r14,print_pgm_check_info
+.Lold_psw_disabled_wait:
+	la	%r1,4095
+	lmg	%r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)
+	lpswe	__LC_RETURN_PSW		# disabled wait
+.Ldump_info_stack:
+	.long	0x5000 + PAGE_SIZE - STACK_FRAME_OVERHEAD
+ENDPROC(startup_pgm_check_handler)
+
+#
 # params at 10400 (setup.h)
+# Must be kept in sync with struct parmarea in setup.h
 #
 	.org	PARMAREA
-	.long	0,0			# IPL_DEVICE
-	.long	0,0			# INITRD_START
-	.long	0,0			# INITRD_SIZE
-	.long	0,0			# OLDMEM_BASE
-	.long	0,0			# OLDMEM_SIZE
+	.quad	0			# IPL_DEVICE
+	.quad	0			# INITRD_START
+	.quad	0			# INITRD_SIZE
+	.quad	0			# OLDMEM_BASE
+	.quad	0			# OLDMEM_SIZE
+	.quad	kernel_version		# points to kernel version string
 
 	.org	COMMAND_LINE
 	.byte	"root=/dev/ram0 ro"
 	.byte	0
 
-	.org	0x11000
+	.org	EARLY_SCCB_OFFSET
+	.fill	4096
+
+	.org	HEAD_END
diff --git a/arch/s390/boot/ipl_parm.c b/arch/s390/boot/ipl_parm.c
new file mode 100644
index 0000000..24ef67e
--- /dev/null
+++ b/arch/s390/boot/ipl_parm.c
@@ -0,0 +1,259 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/ctype.h>
+#include <asm/ebcdic.h>
+#include <asm/sclp.h>
+#include <asm/sections.h>
+#include <asm/boot_data.h>
+#include <asm/facility.h>
+#include <asm/pgtable.h>
+#include <asm/uv.h>
+#include "boot.h"
+
+char __bootdata(early_command_line)[COMMAND_LINE_SIZE];
+struct ipl_parameter_block __bootdata_preserved(ipl_block);
+int __bootdata_preserved(ipl_block_valid);
+
+unsigned long __bootdata(vmalloc_size) = VMALLOC_DEFAULT_SIZE;
+unsigned long __bootdata(memory_end);
+int __bootdata(memory_end_set);
+int __bootdata(noexec_disabled);
+
+int kaslr_enabled __section(.data);
+
+static inline int __diag308(unsigned long subcode, void *addr)
+{
+	register unsigned long _addr asm("0") = (unsigned long)addr;
+	register unsigned long _rc asm("1") = 0;
+	unsigned long reg1, reg2;
+	psw_t old = S390_lowcore.program_new_psw;
+
+	asm volatile(
+		"	epsw	%0,%1\n"
+		"	st	%0,%[psw_pgm]\n"
+		"	st	%1,%[psw_pgm]+4\n"
+		"	larl	%0,1f\n"
+		"	stg	%0,%[psw_pgm]+8\n"
+		"	diag	%[addr],%[subcode],0x308\n"
+		"1:	nopr	%%r7\n"
+		: "=&d" (reg1), "=&a" (reg2),
+		  [psw_pgm] "=Q" (S390_lowcore.program_new_psw),
+		  [addr] "+d" (_addr), "+d" (_rc)
+		: [subcode] "d" (subcode)
+		: "cc", "memory");
+	S390_lowcore.program_new_psw = old;
+	return _rc;
+}
+
+void store_ipl_parmblock(void)
+{
+	int rc;
+
+	rc = __diag308(DIAG308_STORE, &ipl_block);
+	if (rc == DIAG308_RC_OK &&
+	    ipl_block.hdr.version <= IPL_MAX_SUPPORTED_VERSION)
+		ipl_block_valid = 1;
+}
+
+static size_t scpdata_length(const u8 *buf, size_t count)
+{
+	while (count) {
+		if (buf[count - 1] != '\0' && buf[count - 1] != ' ')
+			break;
+		count--;
+	}
+	return count;
+}
+
+static size_t ipl_block_get_ascii_scpdata(char *dest, size_t size,
+					  const struct ipl_parameter_block *ipb)
+{
+	size_t count;
+	size_t i;
+	int has_lowercase;
+
+	count = min(size - 1, scpdata_length(ipb->fcp.scp_data,
+					     ipb->fcp.scp_data_len));
+	if (!count)
+		goto out;
+
+	has_lowercase = 0;
+	for (i = 0; i < count; i++) {
+		if (!isascii(ipb->fcp.scp_data[i])) {
+			count = 0;
+			goto out;
+		}
+		if (!has_lowercase && islower(ipb->fcp.scp_data[i]))
+			has_lowercase = 1;
+	}
+
+	if (has_lowercase)
+		memcpy(dest, ipb->fcp.scp_data, count);
+	else
+		for (i = 0; i < count; i++)
+			dest[i] = tolower(ipb->fcp.scp_data[i]);
+out:
+	dest[count] = '\0';
+	return count;
+}
+
+static void append_ipl_block_parm(void)
+{
+	char *parm, *delim;
+	size_t len, rc = 0;
+
+	len = strlen(early_command_line);
+
+	delim = early_command_line + len;    /* '\0' character position */
+	parm = early_command_line + len + 1; /* append right after '\0' */
+
+	switch (ipl_block.pb0_hdr.pbt) {
+	case IPL_PBT_CCW:
+		rc = ipl_block_get_ascii_vmparm(
+			parm, COMMAND_LINE_SIZE - len - 1, &ipl_block);
+		break;
+	case IPL_PBT_FCP:
+		rc = ipl_block_get_ascii_scpdata(
+			parm, COMMAND_LINE_SIZE - len - 1, &ipl_block);
+		break;
+	}
+	if (rc) {
+		if (*parm == '=')
+			memmove(early_command_line, parm + 1, rc);
+		else
+			*delim = ' '; /* replace '\0' with space */
+	}
+}
+
+static inline int has_ebcdic_char(const char *str)
+{
+	int i;
+
+	for (i = 0; str[i]; i++)
+		if (str[i] & 0x80)
+			return 1;
+	return 0;
+}
+
+void setup_boot_command_line(void)
+{
+	COMMAND_LINE[ARCH_COMMAND_LINE_SIZE - 1] = 0;
+	/* convert arch command line to ascii if necessary */
+	if (has_ebcdic_char(COMMAND_LINE))
+		EBCASC(COMMAND_LINE, ARCH_COMMAND_LINE_SIZE);
+	/* copy arch command line */
+	strcpy(early_command_line, strim(COMMAND_LINE));
+
+	/* append IPL PARM data to the boot command line */
+	if (!is_prot_virt_guest() && ipl_block_valid)
+		append_ipl_block_parm();
+}
+
+static void modify_facility(unsigned long nr, bool clear)
+{
+	if (clear)
+		__clear_facility(nr, S390_lowcore.stfle_fac_list);
+	else
+		__set_facility(nr, S390_lowcore.stfle_fac_list);
+}
+
+static void check_cleared_facilities(void)
+{
+	unsigned long als[] = { FACILITIES_ALS };
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(als); i++) {
+		if ((S390_lowcore.stfle_fac_list[i] & als[i]) != als[i]) {
+			sclp_early_printk("Warning: The Linux kernel requires facilities cleared via command line option\n");
+			print_missing_facilities();
+			break;
+		}
+	}
+}
+
+static void modify_fac_list(char *str)
+{
+	unsigned long val, endval;
+	char *endp;
+	bool clear;
+
+	while (*str) {
+		clear = false;
+		if (*str == '!') {
+			clear = true;
+			str++;
+		}
+		val = simple_strtoull(str, &endp, 0);
+		if (str == endp)
+			break;
+		str = endp;
+		if (*str == '-') {
+			str++;
+			endval = simple_strtoull(str, &endp, 0);
+			if (str == endp)
+				break;
+			str = endp;
+			while (val <= endval) {
+				modify_facility(val, clear);
+				val++;
+			}
+		} else {
+			modify_facility(val, clear);
+		}
+		if (*str != ',')
+			break;
+		str++;
+	}
+	check_cleared_facilities();
+}
+
+static char command_line_buf[COMMAND_LINE_SIZE] __section(.data);
+void parse_boot_command_line(void)
+{
+	char *param, *val;
+	bool enabled;
+	char *args;
+	int rc;
+
+	kaslr_enabled = IS_ENABLED(CONFIG_RANDOMIZE_BASE);
+	args = strcpy(command_line_buf, early_command_line);
+	while (*args) {
+		args = next_arg(args, &param, &val);
+
+		if (!strcmp(param, "mem") && val) {
+			memory_end = round_down(memparse(val, NULL), PAGE_SIZE);
+			memory_end_set = 1;
+		}
+
+		if (!strcmp(param, "vmalloc") && val)
+			vmalloc_size = round_up(memparse(val, NULL), PAGE_SIZE);
+
+		if (!strcmp(param, "noexec")) {
+			rc = kstrtobool(val, &enabled);
+			if (!rc && !enabled)
+				noexec_disabled = 1;
+		}
+
+		if (!strcmp(param, "facilities") && val)
+			modify_fac_list(val);
+
+		if (!strcmp(param, "nokaslr"))
+			kaslr_enabled = 0;
+	}
+}
+
+void setup_memory_end(void)
+{
+#ifdef CONFIG_CRASH_DUMP
+	if (OLDMEM_BASE) {
+		kaslr_enabled = 0;
+	} else if (ipl_block_valid &&
+		   ipl_block.pb0_hdr.pbt == IPL_PBT_FCP &&
+		   ipl_block.fcp.opt == IPL_PB0_FCP_OPT_DUMP) {
+		kaslr_enabled = 0;
+		if (!sclp_early_get_hsa_size(&memory_end) && memory_end)
+			memory_end_set = 1;
+	}
+#endif
+}
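
parse_boot_command_line() above only recognizes the handful of options that must be known
before the kernel is decompressed and relocated. As a usage illustration (the option names
come from the code above, the values are hypothetical), an early command line of

	mem=3G vmalloc=512M noexec=off facilities=!130 nokaslr

would set memory_end to 3 GB rounded down to a page boundary (and set memory_end_set),
round vmalloc_size up to 512 MB, set noexec_disabled, clear facility bit 130 in the stfle
facility list (warning if a required facility gets cleared), and disable KASLR by forcing
kaslr_enabled to 0.
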
diff --git a/arch/s390/boot/ipl_report.c b/arch/s390/boot/ipl_report.c
new file mode 100644
index 0000000..0b49655
--- /dev/null
+++ b/arch/s390/boot/ipl_report.c
@@ -0,0 +1,165 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/init.h>
+#include <linux/ctype.h>
+#include <asm/ebcdic.h>
+#include <asm/sclp.h>
+#include <asm/sections.h>
+#include <asm/boot_data.h>
+#include <uapi/asm/ipl.h>
+#include "boot.h"
+
+int __bootdata_preserved(ipl_secure_flag);
+
+unsigned long __bootdata_preserved(ipl_cert_list_addr);
+unsigned long __bootdata_preserved(ipl_cert_list_size);
+
+unsigned long __bootdata(early_ipl_comp_list_addr);
+unsigned long __bootdata(early_ipl_comp_list_size);
+
+#define for_each_rb_entry(entry, rb) \
+	for (entry = rb->entries; \
+	     (void *) entry + sizeof(*entry) <= (void *) rb + rb->len; \
+	     entry++)
+
+static inline bool intersects(unsigned long addr0, unsigned long size0,
+			      unsigned long addr1, unsigned long size1)
+{
+	return addr0 + size0 > addr1 && addr1 + size1 > addr0;
+}
+
+static unsigned long find_bootdata_space(struct ipl_rb_components *comps,
+					 struct ipl_rb_certificates *certs,
+					 unsigned long safe_addr)
+{
+	struct ipl_rb_certificate_entry *cert;
+	struct ipl_rb_component_entry *comp;
+	size_t size;
+
+	/*
+	 * Find the length for the IPL report boot data
+	 */
+	early_ipl_comp_list_size = 0;
+	for_each_rb_entry(comp, comps)
+		early_ipl_comp_list_size += sizeof(*comp);
+	ipl_cert_list_size = 0;
+	for_each_rb_entry(cert, certs)
+		ipl_cert_list_size += sizeof(unsigned int) + cert->len;
+	size = ipl_cert_list_size + early_ipl_comp_list_size;
+
+	/*
+	 * Start from safe_addr to find a free memory area large
+	 * enough for the IPL report boot data. This area is used
+	 * for ipl_cert_list_addr/ipl_cert_list_size and
+	 * early_ipl_comp_list_addr/early_ipl_comp_list_size. It must
+	 * not overlap with any component or any certificate.
+	 */
+repeat:
+	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && INITRD_START && INITRD_SIZE &&
+	    intersects(INITRD_START, INITRD_SIZE, safe_addr, size))
+		safe_addr = INITRD_START + INITRD_SIZE;
+	for_each_rb_entry(comp, comps)
+		if (intersects(safe_addr, size, comp->addr, comp->len)) {
+			safe_addr = comp->addr + comp->len;
+			goto repeat;
+		}
+	for_each_rb_entry(cert, certs)
+		if (intersects(safe_addr, size, cert->addr, cert->len)) {
+			safe_addr = cert->addr + cert->len;
+			goto repeat;
+		}
+	early_ipl_comp_list_addr = safe_addr;
+	ipl_cert_list_addr = safe_addr + early_ipl_comp_list_size;
+
+	return safe_addr + size;
+}
+
+static void copy_components_bootdata(struct ipl_rb_components *comps)
+{
+	struct ipl_rb_component_entry *comp, *ptr;
+
+	ptr = (struct ipl_rb_component_entry *) early_ipl_comp_list_addr;
+	for_each_rb_entry(comp, comps)
+		memcpy(ptr++, comp, sizeof(*ptr));
+}
+
+static void copy_certificates_bootdata(struct ipl_rb_certificates *certs)
+{
+	struct ipl_rb_certificate_entry *cert;
+	void *ptr;
+
+	ptr = (void *) ipl_cert_list_addr;
+	for_each_rb_entry(cert, certs) {
+		*(unsigned int *) ptr = cert->len;
+		ptr += sizeof(unsigned int);
+		memcpy(ptr, (void *) cert->addr, cert->len);
+		ptr += cert->len;
+	}
+}
+
+unsigned long read_ipl_report(unsigned long safe_addr)
+{
+	struct ipl_rb_certificates *certs;
+	struct ipl_rb_components *comps;
+	struct ipl_pl_hdr *pl_hdr;
+	struct ipl_rl_hdr *rl_hdr;
+	struct ipl_rb_hdr *rb_hdr;
+	unsigned long tmp;
+	void *rl_end;
+
+	/*
+	 * Check if there is an IPL report by looking at the copy
+	 * of the IPL parameter information block.
+	 */
+	if (!ipl_block_valid ||
+	    !(ipl_block.hdr.flags & IPL_PL_FLAG_IPLSR))
+		return safe_addr;
+	ipl_secure_flag = !!(ipl_block.hdr.flags & IPL_PL_FLAG_SIPL);
+	/*
+	 * There is an IPL report, to find it load the pointer to the
+	 * IPL parameter information block from lowcore and skip past
+	 * the IPL parameter list, then align the address to a double
+	 * word boundary.
+	 */
+	tmp = (unsigned long) S390_lowcore.ipl_parmblock_ptr;
+	pl_hdr = (struct ipl_pl_hdr *) tmp;
+	tmp = (tmp + pl_hdr->len + 7) & -8UL;
+	rl_hdr = (struct ipl_rl_hdr *) tmp;
+	/* Walk through the IPL report blocks in the IPL Report list */
+	certs = NULL;
+	comps = NULL;
+	rl_end = (void *) rl_hdr + rl_hdr->len;
+	rb_hdr = (void *) rl_hdr + sizeof(*rl_hdr);
+	while ((void *) rb_hdr + sizeof(*rb_hdr) < rl_end &&
+	       (void *) rb_hdr + rb_hdr->len <= rl_end) {
+
+		switch (rb_hdr->rbt) {
+		case IPL_RBT_CERTIFICATES:
+			certs = (struct ipl_rb_certificates *) rb_hdr;
+			break;
+		case IPL_RBT_COMPONENTS:
+			comps = (struct ipl_rb_components *) rb_hdr;
+			break;
+		default:
+			break;
+		}
+
+		rb_hdr = (void *) rb_hdr + rb_hdr->len;
+	}
+
+	/*
+	 * With either the component list or the certificate list
+	 * missing, the kernel will stay ignorant of secure IPL.
+	 */
+	if (!comps || !certs)
+		return safe_addr;
+
+	/*
+	 * Copy component and certificate list to a safe area
+	 * where the decompressed kernel can find them.
+	 */
+	safe_addr = find_bootdata_space(comps, certs, safe_addr);
+	copy_components_bootdata(comps);
+	copy_certificates_bootdata(certs);
+
+	return safe_addr;
+}
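
find_bootdata_space() above keeps bumping safe_addr until the candidate area no longer
overlaps the initrd, any IPL component or any certificate. The overlap test is the usual
check for two half-open ranges; a minimal standalone sketch of it (not part of the patch):

	#include <assert.h>
	#include <stdbool.h>

	/* same test as intersects() above */
	static bool intersects(unsigned long addr0, unsigned long size0,
			       unsigned long addr1, unsigned long size1)
	{
		return addr0 + size0 > addr1 && addr1 + size1 > addr0;
	}

	int main(void)
	{
		assert(intersects(0x10000, 0x1000, 0x10800, 0x1000));	/* overlapping */
		assert(!intersects(0x10000, 0x1000, 0x11000, 0x1000));	/* adjacent    */
		assert(!intersects(0x20000, 0x1000, 0x10000, 0x1000));	/* disjoint    */
		return 0;
	}
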
diff --git a/arch/s390/boot/ipl_vmparm.c b/arch/s390/boot/ipl_vmparm.c
new file mode 100644
index 0000000..8dacd5f
--- /dev/null
+++ b/arch/s390/boot/ipl_vmparm.c
@@ -0,0 +1,2 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "../kernel/ipl_vmparm.c"
diff --git a/arch/s390/boot/kaslr.c b/arch/s390/boot/kaslr.c
new file mode 100644
index 0000000..5d12352
--- /dev/null
+++ b/arch/s390/boot/kaslr.c
@@ -0,0 +1,170 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright IBM Corp. 2019
+ */
+#include <asm/mem_detect.h>
+#include <asm/pgtable.h>
+#include <asm/cpacf.h>
+#include <asm/timex.h>
+#include <asm/sclp.h>
+#include "compressed/decompressor.h"
+#include "boot.h"
+
+#define PRNG_MODE_TDES	 1
+#define PRNG_MODE_SHA512 2
+#define PRNG_MODE_TRNG	 3
+
+struct prno_parm {
+	u32 res;
+	u32 reseed_counter;
+	u64 stream_bytes;
+	u8  V[112];
+	u8  C[112];
+};
+
+struct prng_parm {
+	u8  parm_block[32];
+	u32 reseed_counter;
+	u64 byte_counter;
+};
+
+static int check_prng(void)
+{
+	if (!cpacf_query_func(CPACF_KMC, CPACF_KMC_PRNG)) {
+		sclp_early_printk("KASLR disabled: CPU has no PRNG\n");
+		return 0;
+	}
+	if (cpacf_query_func(CPACF_PRNO, CPACF_PRNO_TRNG))
+		return PRNG_MODE_TRNG;
+	if (cpacf_query_func(CPACF_PRNO, CPACF_PRNO_SHA512_DRNG_GEN))
+		return PRNG_MODE_SHA512;
+	else
+		return PRNG_MODE_TDES;
+}
+
+static unsigned long get_random(unsigned long limit)
+{
+	struct prng_parm prng = {
+		/* initial parameter block for tdes mode, copied from libica */
+		.parm_block = {
+			0x0F, 0x2B, 0x8E, 0x63, 0x8C, 0x8E, 0xD2, 0x52,
+			0x64, 0xB7, 0xA0, 0x7B, 0x75, 0x28, 0xB8, 0xF4,
+			0x75, 0x5F, 0xD2, 0xA6, 0x8D, 0x97, 0x11, 0xFF,
+			0x49, 0xD8, 0x23, 0xF3, 0x7E, 0x21, 0xEC, 0xA0
+		},
+	};
+	unsigned long seed, random;
+	struct prno_parm prno;
+	__u64 entropy[4];
+	int mode, i;
+
+	mode = check_prng();
+	seed = get_tod_clock_fast();
+	switch (mode) {
+	case PRNG_MODE_TRNG:
+		cpacf_trng(NULL, 0, (u8 *) &random, sizeof(random));
+		break;
+	case PRNG_MODE_SHA512:
+		cpacf_prno(CPACF_PRNO_SHA512_DRNG_SEED, &prno, NULL, 0,
+			   (u8 *) &seed, sizeof(seed));
+		cpacf_prno(CPACF_PRNO_SHA512_DRNG_GEN, &prno, (u8 *) &random,
+			   sizeof(random), NULL, 0);
+		break;
+	case PRNG_MODE_TDES:
+		/* add entropy */
+		*(unsigned long *) prng.parm_block ^= seed;
+		for (i = 0; i < 16; i++) {
+			cpacf_kmc(CPACF_KMC_PRNG, prng.parm_block,
+				  (char *) entropy, (char *) entropy,
+				  sizeof(entropy));
+			memcpy(prng.parm_block, entropy, sizeof(entropy));
+		}
+		random = seed;
+		cpacf_kmc(CPACF_KMC_PRNG, prng.parm_block, (u8 *) &random,
+			  (u8 *) &random, sizeof(random));
+		break;
+	default:
+		random = 0;
+	}
+	return random % limit;
+}
+
+unsigned long get_random_base(unsigned long safe_addr)
+{
+	unsigned long memory_limit = memory_end_set ? memory_end : 0;
+	unsigned long base, start, end, kernel_size;
+	unsigned long block_sum, offset;
+	unsigned long kasan_needs;
+	int i;
+
+	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && INITRD_START && INITRD_SIZE) {
+		if (safe_addr < INITRD_START + INITRD_SIZE)
+			safe_addr = INITRD_START + INITRD_SIZE;
+	}
+	safe_addr = ALIGN(safe_addr, THREAD_SIZE);
+
+	if ((IS_ENABLED(CONFIG_KASAN))) {
+		/*
+		 * Estimate the memory requirements of kasan, which will be
+		 * reserved at the very end of available physical memory. The
+		 * estimate accounts for kasan needing 1/8 of available
+		 * physical memory for shadow memory, plus page tables for the
+		 * whole memory + shadow memory region (1 + 1/8). To keep the
+		 * page table estimate simple, take double the combined pte
+		 * size.
+		 */
+		memory_limit = get_mem_detect_end();
+		if (memory_end_set && memory_limit > memory_end)
+			memory_limit = memory_end;
+
+		/* for shadow memory */
+		kasan_needs = memory_limit / 8;
+		/* for paging structures */
+		kasan_needs += (memory_limit + kasan_needs) / PAGE_SIZE /
+			       _PAGE_ENTRIES * _PAGE_TABLE_SIZE * 2;
+		memory_limit -= kasan_needs;
+	}
+
+	kernel_size = vmlinux.image_size + vmlinux.bss_size;
+	block_sum = 0;
+	for_each_mem_detect_block(i, &start, &end) {
+		if (memory_limit) {
+			if (start >= memory_limit)
+				break;
+			if (end > memory_limit)
+				end = memory_limit;
+		}
+		if (end - start < kernel_size)
+			continue;
+		block_sum += end - start - kernel_size;
+	}
+	if (!block_sum) {
+		sclp_early_printk("KASLR disabled: not enough memory\n");
+		return 0;
+	}
+
+	base = get_random(block_sum);
+	if (base == 0)
+		return 0;
+	if (base < safe_addr)
+		base = safe_addr;
+	block_sum = offset = 0;
+	for_each_mem_detect_block(i, &start, &end) {
+		if (memory_limit) {
+			if (start >= memory_limit)
+				break;
+			if (end > memory_limit)
+				end = memory_limit;
+		}
+		if (end - start < kernel_size)
+			continue;
+		block_sum += end - start - kernel_size;
+		if (base <= block_sum) {
+			base = start + base - offset;
+			base = ALIGN_DOWN(base, THREAD_SIZE);
+			break;
+		}
+		offset = block_sum;
+	}
+	return base;
+}
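
get_random_base() above works in two passes: the first pass adds up the number of usable
load offsets over all memory blocks (each sufficiently large block contributes
end - start - kernel_size), a random value inside that sum is drawn, and the second pass
maps the value back to a physical address. A simplified, standalone sketch of that second
pass (memory_limit, safe_addr and the THREAD_SIZE alignment are left out; block addresses
and sizes are hypothetical):

	#include <stdio.h>

	struct block { unsigned long start, end; };

	static unsigned long map_offset(const struct block *blocks, int n,
					unsigned long kernel_size, unsigned long base)
	{
		unsigned long block_sum = 0, offset = 0;
		int i;

		for (i = 0; i < n; i++) {
			if (blocks[i].end - blocks[i].start < kernel_size)
				continue;
			block_sum += blocks[i].end - blocks[i].start - kernel_size;
			if (base <= block_sum)
				return blocks[i].start + base - offset;
			offset = block_sum;
		}
		return 0;
	}

	int main(void)
	{
		const struct block blocks[] = {
			{ 0x00000000, 0x10000000 },	/* 256 MB */
			{ 0x20000000, 0x30000000 },	/* 256 MB */
		};
		unsigned long kernel_size = 0x1000000;	/* 16 MB */

		/* lands in the first block: prints 800000 */
		printf("%lx\n", map_offset(blocks, 2, kernel_size, 0x800000));
		/* exceeds the first block's 240 MB of candidates: prints 21000000 */
		printf("%lx\n", map_offset(blocks, 2, kernel_size, 0x10000000));
		return 0;
	}
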
diff --git a/arch/s390/boot/machine_kexec_reloc.c b/arch/s390/boot/machine_kexec_reloc.c
new file mode 100644
index 0000000..b7a5d0f
--- /dev/null
+++ b/arch/s390/boot/machine_kexec_reloc.c
@@ -0,0 +1,2 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "../kernel/machine_kexec_reloc.c"
diff --git a/arch/s390/boot/mem_detect.c b/arch/s390/boot/mem_detect.c
new file mode 100644
index 0000000..62e7c13
--- /dev/null
+++ b/arch/s390/boot/mem_detect.c
@@ -0,0 +1,175 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <asm/sclp.h>
+#include <asm/sections.h>
+#include <asm/mem_detect.h>
+#include <asm/sparsemem.h>
+#include "compressed/decompressor.h"
+#include "boot.h"
+
+unsigned long __bootdata(max_physmem_end);
+struct mem_detect_info __bootdata(mem_detect);
+
+/* up to 256 storage elements, 1020 subincrements each */
+#define ENTRIES_EXTENDED_MAX						       \
+	(256 * (1020 / 2) * sizeof(struct mem_detect_block))
+
+/*
+ * To avoid corrupting old kernel memory during dump, find lowest memory
+ * chunk possible either right after the kernel end (decompressed kernel) or
+ * after initrd (if it is present and there is no hole between the kernel end
+ * and initrd)
+ */
+static void *mem_detect_alloc_extended(void)
+{
+	unsigned long offset = ALIGN(mem_safe_offset(), sizeof(u64));
+
+	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && INITRD_START && INITRD_SIZE &&
+	    INITRD_START < offset + ENTRIES_EXTENDED_MAX)
+		offset = ALIGN(INITRD_START + INITRD_SIZE, sizeof(u64));
+
+	return (void *)offset;
+}
+
+static struct mem_detect_block *__get_mem_detect_block_ptr(u32 n)
+{
+	if (n < MEM_INLINED_ENTRIES)
+		return &mem_detect.entries[n];
+	if (unlikely(!mem_detect.entries_extended))
+		mem_detect.entries_extended = mem_detect_alloc_extended();
+	return &mem_detect.entries_extended[n - MEM_INLINED_ENTRIES];
+}
+
+/*
+ * Sequential calls to add_mem_detect_block() with adjacent memory areas
+ * are merged together into a single memory block.
+ */
+void add_mem_detect_block(u64 start, u64 end)
+{
+	struct mem_detect_block *block;
+
+	if (mem_detect.count) {
+		block = __get_mem_detect_block_ptr(mem_detect.count - 1);
+		if (block->end == start) {
+			block->end = end;
+			return;
+		}
+	}
+
+	block = __get_mem_detect_block_ptr(mem_detect.count);
+	block->start = start;
+	block->end = end;
+	mem_detect.count++;
+}
+
+static int __diag260(unsigned long rx1, unsigned long rx2)
+{
+	register unsigned long _rx1 asm("2") = rx1;
+	register unsigned long _rx2 asm("3") = rx2;
+	register unsigned long _ry asm("4") = 0x10; /* storage configuration */
+	int rc = -1;				    /* fail */
+	unsigned long reg1, reg2;
+	psw_t old = S390_lowcore.program_new_psw;
+
+	asm volatile(
+		"	epsw	%0,%1\n"
+		"	st	%0,%[psw_pgm]\n"
+		"	st	%1,%[psw_pgm]+4\n"
+		"	larl	%0,1f\n"
+		"	stg	%0,%[psw_pgm]+8\n"
+		"	diag	%[rx],%[ry],0x260\n"
+		"	ipm	%[rc]\n"
+		"	srl	%[rc],28\n"
+		"1:\n"
+		: "=&d" (reg1), "=&a" (reg2),
+		  [psw_pgm] "=Q" (S390_lowcore.program_new_psw),
+		  [rc] "+&d" (rc), [ry] "+d" (_ry)
+		: [rx] "d" (_rx1), "d" (_rx2)
+		: "cc", "memory");
+	S390_lowcore.program_new_psw = old;
+	return rc == 0 ? _ry : -1;
+}
+
+static int diag260(void)
+{
+	int rc, i;
+
+	struct {
+		unsigned long start;
+		unsigned long end;
+	} storage_extents[8] __aligned(16); /* VM supports up to 8 extents */
+
+	memset(storage_extents, 0, sizeof(storage_extents));
+	rc = __diag260((unsigned long)storage_extents, sizeof(storage_extents));
+	if (rc == -1)
+		return -1;
+
+	for (i = 0; i < min_t(int, rc, ARRAY_SIZE(storage_extents)); i++)
+		add_mem_detect_block(storage_extents[i].start, storage_extents[i].end + 1);
+	return 0;
+}
+
+static int tprot(unsigned long addr)
+{
+	unsigned long pgm_addr;
+	int rc = -EFAULT;
+	psw_t old = S390_lowcore.program_new_psw;
+
+	S390_lowcore.program_new_psw.mask = __extract_psw();
+	asm volatile(
+		"	larl	%[pgm_addr],1f\n"
+		"	stg	%[pgm_addr],%[psw_pgm_addr]\n"
+		"	tprot	0(%[addr]),0\n"
+		"	ipm	%[rc]\n"
+		"	srl	%[rc],28\n"
+		"1:\n"
+		: [pgm_addr] "=&d"(pgm_addr),
+		  [psw_pgm_addr] "=Q"(S390_lowcore.program_new_psw.addr),
+		  [rc] "+&d"(rc)
+		: [addr] "a"(addr)
+		: "cc", "memory");
+	S390_lowcore.program_new_psw = old;
+	return rc;
+}
+
+static void search_mem_end(void)
+{
+	unsigned long range = 1 << (MAX_PHYSMEM_BITS - 20); /* in 1MB blocks */
+	unsigned long offset = 0;
+	unsigned long pivot;
+
+	while (range > 1) {
+		range >>= 1;
+		pivot = offset + range;
+		if (!tprot(pivot << 20))
+			offset = pivot;
+	}
+
+	add_mem_detect_block(0, (offset + 1) << 20);
+}
+
+void detect_memory(void)
+{
+	sclp_early_get_memsize(&max_physmem_end);
+
+	if (!sclp_early_read_storage_info()) {
+		mem_detect.info_source = MEM_DETECT_SCLP_STOR_INFO;
+		return;
+	}
+
+	if (!diag260()) {
+		mem_detect.info_source = MEM_DETECT_DIAG260;
+		return;
+	}
+
+	if (max_physmem_end) {
+		add_mem_detect_block(0, max_physmem_end);
+		mem_detect.info_source = MEM_DETECT_SCLP_READ_INFO;
+		return;
+	}
+
+	search_mem_end();
+	mem_detect.info_source = MEM_DETECT_BIN_SEARCH;
+	max_physmem_end = get_mem_detect_end();
+}
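
As a last resort, search_mem_end() above finds the end of memory by binary search: it
probes 1 MB aligned addresses with tprot, which fails once the address is beyond the
configured storage, and keeps the highest address that was still accessible. A standalone
sketch of the same search with a fake tprot and a hypothetical memory size
(MAX_PHYSMEM_BITS shrunk to 32 for the example):

	#include <stdio.h>

	static unsigned long mem_size_mb = 1537;	/* pretend 1537 MB are online */

	/* stand-in for tprot(): 0 if the address is accessible */
	static int fake_tprot(unsigned long addr)
	{
		return (addr >> 20) < mem_size_mb ? 0 : -1;
	}

	int main(void)
	{
		unsigned long range = 1UL << (32 - 20);	/* in 1 MB blocks */
		unsigned long offset = 0, pivot;

		while (range > 1) {
			range >>= 1;
			pivot = offset + range;
			if (!fake_tprot(pivot << 20))
				offset = pivot;
		}
		printf("detected end: %lu MB\n", offset + 1);	/* prints 1537 */
		return 0;
	}
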
diff --git a/arch/s390/boot/pgm_check_info.c b/arch/s390/boot/pgm_check_info.c
new file mode 100644
index 0000000..83b5b79
--- /dev/null
+++ b/arch/s390/boot/pgm_check_info.c
@@ -0,0 +1,90 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <asm/lowcore.h>
+#include <asm/sclp.h>
+#include "boot.h"
+
+const char hex_asc[] = "0123456789abcdef";
+
+#define add_val_as_hex(dst, val)					       \
+	__add_val_as_hex(dst, (const unsigned char *)&val, sizeof(val))
+
+static char *__add_val_as_hex(char *dst, const unsigned char *src, size_t count)
+{
+	while (count--)
+		dst = hex_byte_pack(dst, *src++);
+	return dst;
+}
+
+static char *add_str(char *dst, char *src)
+{
+	strcpy(dst, src);
+	return dst + strlen(dst);
+}
+
+void print_pgm_check_info(void)
+{
+	struct psw_bits *psw = &psw_bits(S390_lowcore.psw_save_area);
+	unsigned short ilc = S390_lowcore.pgm_ilc >> 1;
+	char buf[256];
+	int row, col;
+	char *p;
+
+	add_str(buf, "Linux version ");
+	strlcat(buf, kernel_version, sizeof(buf));
+	sclp_early_printk(buf);
+
+	p = add_str(buf, "Kernel fault: interruption code ");
+	p = add_val_as_hex(buf + strlen(buf), S390_lowcore.pgm_code);
+	p = add_str(p, " ilc:");
+	*p++ = hex_asc_lo(ilc);
+	add_str(p, "\n");
+	sclp_early_printk(buf);
+
+	p = add_str(buf, "PSW : ");
+	p = add_val_as_hex(p, S390_lowcore.psw_save_area.mask);
+	p = add_str(p, " ");
+	p = add_val_as_hex(p, S390_lowcore.psw_save_area.addr);
+	add_str(p, "\n");
+	sclp_early_printk(buf);
+
+	p = add_str(buf, "      R:");
+	*p++ = hex_asc_lo(psw->per);
+	p = add_str(p, " T:");
+	*p++ = hex_asc_lo(psw->dat);
+	p = add_str(p, " IO:");
+	*p++ = hex_asc_lo(psw->io);
+	p = add_str(p, " EX:");
+	*p++ = hex_asc_lo(psw->ext);
+	p = add_str(p, " Key:");
+	*p++ = hex_asc_lo(psw->key);
+	p = add_str(p, " M:");
+	*p++ = hex_asc_lo(psw->mcheck);
+	p = add_str(p, " W:");
+	*p++ = hex_asc_lo(psw->wait);
+	p = add_str(p, " P:");
+	*p++ = hex_asc_lo(psw->pstate);
+	p = add_str(p, " AS:");
+	*p++ = hex_asc_lo(psw->as);
+	p = add_str(p, " CC:");
+	*p++ = hex_asc_lo(psw->cc);
+	p = add_str(p, " PM:");
+	*p++ = hex_asc_lo(psw->pm);
+	p = add_str(p, " RI:");
+	*p++ = hex_asc_lo(psw->ri);
+	p = add_str(p, " EA:");
+	*p++ = hex_asc_lo(psw->eaba);
+	add_str(p, "\n");
+	sclp_early_printk(buf);
+
+	for (row = 0; row < 4; row++) {
+		p = add_str(buf, row == 0 ? "GPRS:" : "     ");
+		for (col = 0; col < 4; col++) {
+			p = add_str(p, " ");
+			p = add_val_as_hex(p, S390_lowcore.gpregs_save_area[row * 4 + col]);
+		}
+		add_str(p, "\n");
+		sclp_early_printk(buf);
+	}
+}
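
print_pgm_check_info() above builds its output by hand rather than relying on
printk-style formatting, which is not available this early: add_val_as_hex() walks the
value byte by byte from its lowest address, which on big-endian s390 prints the most
significant byte first. A small standalone illustration of that formatting (the PSW mask
value is hypothetical):

	#include <stdio.h>

	static const char hex_asc[] = "0123456789abcdef";

	static char *hex_byte_pack(char *buf, unsigned char byte)
	{
		*buf++ = hex_asc[byte >> 4];
		*buf++ = hex_asc[byte & 0x0f];
		return buf;
	}

	/* same byte-wise walk as __add_val_as_hex() above */
	static char *add_val_as_hex(char *dst, const unsigned char *src, size_t count)
	{
		while (count--)
			dst = hex_byte_pack(dst, *src++);
		return dst;
	}

	int main(void)
	{
		unsigned char psw_mask[8] = { 0x00, 0x00, 0x00, 0x01,
					      0x80, 0x00, 0x00, 0x00 };
		char buf[17];

		*add_val_as_hex(buf, psw_mask, sizeof(psw_mask)) = '\0';
		puts(buf);	/* 0000000180000000 */
		return 0;
	}
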
diff --git a/arch/s390/boot/startup.c b/arch/s390/boot/startup.c
new file mode 100644
index 0000000..5367950
--- /dev/null
+++ b/arch/s390/boot/startup.c
@@ -0,0 +1,178 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/string.h>
+#include <linux/elf.h>
+#include <asm/sections.h>
+#include <asm/setup.h>
+#include <asm/kexec.h>
+#include <asm/sclp.h>
+#include <asm/diag.h>
+#include <asm/uv.h>
+#include "compressed/decompressor.h"
+#include "boot.h"
+
+extern char __boot_data_start[], __boot_data_end[];
+extern char __boot_data_preserved_start[], __boot_data_preserved_end[];
+unsigned long __bootdata_preserved(__kaslr_offset);
+
+/*
+ * Some code and data needs to stay below 2 GB, even when the kernel would be
+ * relocated above 2 GB, because it has to use 31 bit addresses.
+ * Such code and data is part of the .dma section, and its location is passed
+ * over to the decompressed / relocated kernel via the .boot.preserved.data
+ * section.
+ */
+extern char _sdma[], _edma[];
+extern char _stext_dma[], _etext_dma[];
+extern struct exception_table_entry _start_dma_ex_table[];
+extern struct exception_table_entry _stop_dma_ex_table[];
+unsigned long __bootdata_preserved(__sdma) = __pa(&_sdma);
+unsigned long __bootdata_preserved(__edma) = __pa(&_edma);
+unsigned long __bootdata_preserved(__stext_dma) = __pa(&_stext_dma);
+unsigned long __bootdata_preserved(__etext_dma) = __pa(&_etext_dma);
+struct exception_table_entry *
+	__bootdata_preserved(__start_dma_ex_table) = _start_dma_ex_table;
+struct exception_table_entry *
+	__bootdata_preserved(__stop_dma_ex_table) = _stop_dma_ex_table;
+
+int _diag210_dma(struct diag210 *addr);
+int _diag26c_dma(void *req, void *resp, enum diag26c_sc subcode);
+int _diag14_dma(unsigned long rx, unsigned long ry1, unsigned long subcode);
+void _diag0c_dma(struct hypfs_diag0c_entry *entry);
+void _diag308_reset_dma(void);
+struct diag_ops __bootdata_preserved(diag_dma_ops) = {
+	.diag210 = _diag210_dma,
+	.diag26c = _diag26c_dma,
+	.diag14 = _diag14_dma,
+	.diag0c = _diag0c_dma,
+	.diag308_reset = _diag308_reset_dma
+};
+static struct diag210 _diag210_tmp_dma __section(".dma.data");
+struct diag210 *__bootdata_preserved(__diag210_tmp_dma) = &_diag210_tmp_dma;
+void _swsusp_reset_dma(void);
+unsigned long __bootdata_preserved(__swsusp_reset_dma) = __pa(_swsusp_reset_dma);
+
+void error(char *x)
+{
+	sclp_early_printk("\n\n");
+	sclp_early_printk(x);
+	sclp_early_printk("\n\n -- System halted");
+
+	disabled_wait();
+}
+
+#ifdef CONFIG_KERNEL_UNCOMPRESSED
+unsigned long mem_safe_offset(void)
+{
+	return vmlinux.default_lma + vmlinux.image_size + vmlinux.bss_size;
+}
+#endif
+
+static void rescue_initrd(unsigned long addr)
+{
+	if (!IS_ENABLED(CONFIG_BLK_DEV_INITRD))
+		return;
+	if (!INITRD_START || !INITRD_SIZE)
+		return;
+	if (addr <= INITRD_START)
+		return;
+	memmove((void *)addr, (void *)INITRD_START, INITRD_SIZE);
+	INITRD_START = addr;
+}
+
+static void copy_bootdata(void)
+{
+	if (__boot_data_end - __boot_data_start != vmlinux.bootdata_size)
+		error(".boot.data section size mismatch");
+	memcpy((void *)vmlinux.bootdata_off, __boot_data_start, vmlinux.bootdata_size);
+	if (__boot_data_preserved_end - __boot_data_preserved_start != vmlinux.bootdata_preserved_size)
+		error(".boot.preserved.data section size mismatch");
+	memcpy((void *)vmlinux.bootdata_preserved_off, __boot_data_preserved_start, vmlinux.bootdata_preserved_size);
+}
+
+static void handle_relocs(unsigned long offset)
+{
+	Elf64_Rela *rela_start, *rela_end, *rela;
+	int r_type, r_sym, rc;
+	Elf64_Addr loc, val;
+	Elf64_Sym *dynsym;
+
+	rela_start = (Elf64_Rela *) vmlinux.rela_dyn_start;
+	rela_end = (Elf64_Rela *) vmlinux.rela_dyn_end;
+	dynsym = (Elf64_Sym *) vmlinux.dynsym_start;
+	for (rela = rela_start; rela < rela_end; rela++) {
+		loc = rela->r_offset + offset;
+		val = rela->r_addend;
+		r_sym = ELF64_R_SYM(rela->r_info);
+		if (r_sym) {
+			if (dynsym[r_sym].st_shndx != SHN_UNDEF)
+				val += dynsym[r_sym].st_value + offset;
+		} else {
+			/*
+			 * 0 == undefined symbol table index (STN_UNDEF),
+			 * used for R_390_RELATIVE, only add KASLR offset
+			 */
+			val += offset;
+		}
+		r_type = ELF64_R_TYPE(rela->r_info);
+		rc = arch_kexec_do_relocs(r_type, (void *) loc, val, 0);
+		if (rc)
+			error("Unknown relocation type");
+	}
+}
+
+static void clear_bss_section(void)
+{
+	memset((void *)vmlinux.default_lma + vmlinux.image_size, 0, vmlinux.bss_size);
+}
+
+void startup_kernel(void)
+{
+	unsigned long random_lma;
+	unsigned long safe_addr;
+	void *img;
+
+	store_ipl_parmblock();
+	safe_addr = mem_safe_offset();
+	safe_addr = read_ipl_report(safe_addr);
+	uv_query_info();
+	rescue_initrd(safe_addr);
+	sclp_early_read_info();
+	setup_boot_command_line();
+	parse_boot_command_line();
+	setup_memory_end();
+	detect_memory();
+
+	random_lma = __kaslr_offset = 0;
+	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && kaslr_enabled) {
+		random_lma = get_random_base(safe_addr);
+		if (random_lma) {
+			__kaslr_offset = random_lma - vmlinux.default_lma;
+			img = (void *)vmlinux.default_lma;
+			vmlinux.default_lma += __kaslr_offset;
+			vmlinux.entry += __kaslr_offset;
+			vmlinux.bootdata_off += __kaslr_offset;
+			vmlinux.bootdata_preserved_off += __kaslr_offset;
+			vmlinux.rela_dyn_start += __kaslr_offset;
+			vmlinux.rela_dyn_end += __kaslr_offset;
+			vmlinux.dynsym_start += __kaslr_offset;
+		}
+	}
+
+	if (!IS_ENABLED(CONFIG_KERNEL_UNCOMPRESSED)) {
+		img = decompress_kernel();
+		memmove((void *)vmlinux.default_lma, img, vmlinux.image_size);
+	} else if (__kaslr_offset)
+		memcpy((void *)vmlinux.default_lma, img, vmlinux.image_size);
+
+	clear_bss_section();
+	copy_bootdata();
+	if (IS_ENABLED(CONFIG_RELOCATABLE))
+		handle_relocs(__kaslr_offset);
+
+	if (__kaslr_offset) {
+		/* Clear non-relocated kernel */
+		if (IS_ENABLED(CONFIG_KERNEL_UNCOMPRESSED))
+			memset(img, 0, vmlinux.image_size);
+	}
+	vmlinux.entry();
+}
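
startup_kernel() above applies KASLR by shifting every load-time address recorded in
struct vmlinux_info by the same offset and then letting handle_relocs() patch the
relocation entries. With hypothetical numbers:

	vmlinux.default_lma = 0x0000000000100000   (link-time load address)
	random_lma          = 0x0000000003450000   (KASLR pick)
	__kaslr_offset      = random_lma - default_lma = 0x3350000

For an R_390_RELATIVE entry the fixup in handle_relocs() then amounts to

	*(u64 *)(r_offset + __kaslr_offset) = r_addend + __kaslr_offset;

i.e. both the patched location and the stored value are moved by the same offset.
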
diff --git a/arch/s390/boot/string.c b/arch/s390/boot/string.c
new file mode 100644
index 0000000..b11e810
--- /dev/null
+++ b/arch/s390/boot/string.c
@@ -0,0 +1,139 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/ctype.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#undef CONFIG_KASAN
+#include "../lib/string.c"
+
+int strncmp(const char *cs, const char *ct, size_t count)
+{
+	unsigned char c1, c2;
+
+	while (count) {
+		c1 = *cs++;
+		c2 = *ct++;
+		if (c1 != c2)
+			return c1 < c2 ? -1 : 1;
+		if (!c1)
+			break;
+		count--;
+	}
+	return 0;
+}
+
+char *skip_spaces(const char *str)
+{
+	while (isspace(*str))
+		++str;
+	return (char *)str;
+}
+
+char *strim(char *s)
+{
+	size_t size;
+	char *end;
+
+	size = strlen(s);
+	if (!size)
+		return s;
+
+	end = s + size - 1;
+	while (end >= s && isspace(*end))
+		end--;
+	*(end + 1) = '\0';
+
+	return skip_spaces(s);
+}
+
+/* Works only for digits and letters, but small and fast */
+#define TOLOWER(x) ((x) | 0x20)
+
+static unsigned int simple_guess_base(const char *cp)
+{
+	if (cp[0] == '0') {
+		if (TOLOWER(cp[1]) == 'x' && isxdigit(cp[2]))
+			return 16;
+		else
+			return 8;
+	} else {
+		return 10;
+	}
+}
+
+/**
+ * simple_strtoull - convert a string to an unsigned long long
+ * @cp: The start of the string
+ * @endp: A pointer to the end of the parsed string will be placed here
+ * @base: The number base to use
+ */
+
+unsigned long long simple_strtoull(const char *cp, char **endp,
+				   unsigned int base)
+{
+	unsigned long long result = 0;
+
+	if (!base)
+		base = simple_guess_base(cp);
+
+	if (base == 16 && cp[0] == '0' && TOLOWER(cp[1]) == 'x')
+		cp += 2;
+
+	while (isxdigit(*cp)) {
+		unsigned int value;
+
+		value = isdigit(*cp) ? *cp - '0' : TOLOWER(*cp) - 'a' + 10;
+		if (value >= base)
+			break;
+		result = result * base + value;
+		cp++;
+	}
+	if (endp)
+		*endp = (char *)cp;
+
+	return result;
+}
+
+long simple_strtol(const char *cp, char **endp, unsigned int base)
+{
+	if (*cp == '-')
+		return -simple_strtoull(cp + 1, endp, base);
+
+	return simple_strtoull(cp, endp, base);
+}
+
+int kstrtobool(const char *s, bool *res)
+{
+	if (!s)
+		return -EINVAL;
+
+	switch (s[0]) {
+	case 'y':
+	case 'Y':
+	case '1':
+		*res = true;
+		return 0;
+	case 'n':
+	case 'N':
+	case '0':
+		*res = false;
+		return 0;
+	case 'o':
+	case 'O':
+		switch (s[1]) {
+		case 'n':
+		case 'N':
+			*res = true;
+			return 0;
+		case 'f':
+		case 'F':
+			*res = false;
+			return 0;
+		default:
+			break;
+		}
+	default:
+		break;
+	}
+
+	return -EINVAL;
+}
diff --git a/arch/s390/boot/text_dma.S b/arch/s390/boot/text_dma.S
new file mode 100644
index 0000000..9715715
--- /dev/null
+++ b/arch/s390/boot/text_dma.S
@@ -0,0 +1,184 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Code that needs to run below 2 GB.
+ *
+ * Copyright IBM Corp. 2019
+ */
+
+#include <linux/linkage.h>
+#include <asm/errno.h>
+#include <asm/sigp.h>
+
+#ifdef CC_USING_EXPOLINE
+	.pushsection .dma.text.__s390_indirect_jump_r14,"axG"
+__dma__s390_indirect_jump_r14:
+	larl	%r1,0f
+	ex	0,0(%r1)
+	j	.
+0:	br	%r14
+	.popsection
+#endif
+
+	.section .dma.text,"ax"
+/*
+ * Simplified version of an expoline thunk. The normal thunks cannot be used here,
+ * because they might be more than 2 GB away and not reachable by a relative
+ * branch. No comdat, exrl, etc. optimizations are used here, because this only
+ * affects a few functions that are not performance-relevant.
+ */
+	.macro BR_EX_DMA_r14
+#ifdef CC_USING_EXPOLINE
+	jg	__dma__s390_indirect_jump_r14
+#else
+	br	%r14
+#endif
+	.endm
+
+/*
+ * int _diag14_dma(unsigned long rx, unsigned long ry1, unsigned long subcode)
+ */
+ENTRY(_diag14_dma)
+	lgr	%r1,%r2
+	lgr	%r2,%r3
+	lgr	%r3,%r4
+	lhi	%r5,-EIO
+	sam31
+	diag	%r1,%r2,0x14
+.Ldiag14_ex:
+	ipm	%r5
+	srl	%r5,28
+.Ldiag14_fault:
+	sam64
+	lgfr	%r2,%r5
+	BR_EX_DMA_r14
+	EX_TABLE_DMA(.Ldiag14_ex, .Ldiag14_fault)
+ENDPROC(_diag14_dma)
+
+/*
+ * int _diag210_dma(struct diag210 *addr)
+ */
+ENTRY(_diag210_dma)
+	lgr	%r1,%r2
+	lhi	%r2,-1
+	sam31
+	diag	%r1,%r0,0x210
+.Ldiag210_ex:
+	ipm	%r2
+	srl	%r2,28
+.Ldiag210_fault:
+	sam64
+	lgfr	%r2,%r2
+	BR_EX_DMA_r14
+	EX_TABLE_DMA(.Ldiag210_ex, .Ldiag210_fault)
+ENDPROC(_diag210_dma)
+
+/*
+ * int _diag26c_dma(void *req, void *resp, enum diag26c_sc subcode)
+ */
+ENTRY(_diag26c_dma)
+	lghi	%r5,-EOPNOTSUPP
+	sam31
+	diag	%r2,%r4,0x26c
+.Ldiag26c_ex:
+	sam64
+	lgfr	%r2,%r5
+	BR_EX_DMA_r14
+	EX_TABLE_DMA(.Ldiag26c_ex, .Ldiag26c_ex)
+ENDPROC(_diag26c_dma)
+
+/*
+ * void _diag0c_dma(struct hypfs_diag0c_entry *entry)
+ */
+ENTRY(_diag0c_dma)
+	sam31
+	diag	%r2,%r2,0x0c
+	sam64
+	BR_EX_DMA_r14
+ENDPROC(_diag0c_dma)
+
+/*
+ * void _swsusp_reset_dma(void)
+ */
+ENTRY(_swsusp_reset_dma)
+	larl	%r1,restart_entry
+	larl	%r2,.Lrestart_diag308_psw
+	og	%r1,0(%r2)
+	stg	%r1,0(%r0)
+	lghi	%r0,0
+	diag	%r0,%r0,0x308
+restart_entry:
+	lhi	%r1,1
+	sigp	%r1,%r0,SIGP_SET_ARCHITECTURE
+	sam64
+	BR_EX_DMA_r14
+ENDPROC(_swsusp_reset_dma)
+
+/*
+ * void _diag308_reset_dma(void)
+ *
+ * Calls diag 308 subcode 1 and continues execution
+ */
+ENTRY(_diag308_reset_dma)
+	larl	%r4,.Lctlregs		# Save control registers
+	stctg	%c0,%c15,0(%r4)
+	lg	%r2,0(%r4)		# Disable lowcore protection
+	nilh	%r2,0xefff
+	larl	%r4,.Lctlreg0
+	stg	%r2,0(%r4)
+	lctlg	%c0,%c0,0(%r4)
+	larl	%r4,.Lfpctl		# Floating point control register
+	stfpc	0(%r4)
+	larl	%r4,.Lprefix		# Save prefix register
+	stpx	0(%r4)
+	larl	%r4,.Lprefix_zero	# Set prefix register to 0
+	spx	0(%r4)
+	larl	%r4,.Lcontinue_psw	# Save PSW flags
+	epsw	%r2,%r3
+	stm	%r2,%r3,0(%r4)
+	larl	%r4,restart_part2	# Setup restart PSW at absolute 0
+	larl	%r3,.Lrestart_diag308_psw
+	og	%r4,0(%r3)		# Save PSW
+	lghi	%r3,0
+	sturg	%r4,%r3			# Use sturg, because of large pages
+	lghi	%r1,1
+	lghi	%r0,0
+	diag	%r0,%r1,0x308
+restart_part2:
+	lhi	%r0,0			# Load r0 with zero
+	lhi	%r1,2			# Use mode 2 = ESAME (dump)
+	sigp	%r1,%r0,SIGP_SET_ARCHITECTURE	# Switch to ESAME mode
+	sam64				# Switch to 64 bit addressing mode
+	larl	%r4,.Lctlregs		# Restore control registers
+	lctlg	%c0,%c15,0(%r4)
+	larl	%r4,.Lfpctl		# Restore floating point ctl register
+	lfpc	0(%r4)
+	larl	%r4,.Lprefix		# Restore prefix register
+	spx	0(%r4)
+	larl	%r4,.Lcontinue_psw	# Restore PSW flags
+	lpswe	0(%r4)
+.Lcontinue:
+	BR_EX_DMA_r14
+ENDPROC(_diag308_reset_dma)
+
+	.section .dma.data,"aw",@progbits
+.align	8
+.Lrestart_diag308_psw:
+	.long	0x00080000,0x80000000
+
+.align 8
+.Lcontinue_psw:
+	.quad	0,.Lcontinue
+
+.align 8
+.Lctlreg0:
+	.quad	0
+.Lctlregs:
+	.rept	16
+	.quad	0
+	.endr
+.Lfpctl:
+	.long	0
+.Lprefix:
+	.long	0
+.Lprefix_zero:
+	.long	0
diff --git a/arch/s390/boot/uv.c b/arch/s390/boot/uv.c
new file mode 100644
index 0000000..ed007f4
--- /dev/null
+++ b/arch/s390/boot/uv.c
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <asm/uv.h>
+#include <asm/facility.h>
+#include <asm/sections.h>
+
+int __bootdata_preserved(prot_virt_guest);
+
+void uv_query_info(void)
+{
+	struct uv_cb_qui uvcb = {
+		.header.cmd = UVC_CMD_QUI,
+		.header.len = sizeof(uvcb)
+	};
+
+	if (!test_facility(158))
+		return;
+
+	if (uv_call(0, (uint64_t)&uvcb))
+		return;
+
+	if (test_bit_inv(BIT_UVC_CMD_SET_SHARED_ACCESS, (unsigned long *)uvcb.inst_calls_list) &&
+	    test_bit_inv(BIT_UVC_CMD_REMOVE_SHARED_ACCESS, (unsigned long *)uvcb.inst_calls_list))
+		prot_virt_guest = 1;
+}
diff --git a/arch/s390/boot/version.c b/arch/s390/boot/version.c
new file mode 100644
index 0000000..d32e58b
--- /dev/null
+++ b/arch/s390/boot/version.c
@@ -0,0 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <generated/utsrelease.h>
+#include <generated/compile.h>
+#include "boot.h"
+
+const char kernel_version[] = UTS_RELEASE
+	" (" LINUX_COMPILE_BY "@" LINUX_COMPILE_HOST ") " UTS_VERSION;