Update Linux to v5.4.2

For arch/s390/mm/dump_pagetables.c this picks up the KASAN additions
to the debugfs page table dump: the marker table gains Kasan Shadow
Start/End entries, the walkers short-circuit once they hit the shared
KASAN early shadow tables, the pmd/pud/p4d offset is computed once
before each loop and incremented instead of being recomputed per
iteration, and note_page() advances over markers with a loop so that
every marker crossed by a single mapped range gets its
"---[ ... ]---" header (see the sketch after this message). A missing
space between the address range and the size column in the dump
output is also fixed.

Change-Id: Idf6911045d9d382da2cfe01b1edff026404ac8fd
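---

A minimal userspace sketch (illustrative only, not part of the patch)
of the "if" -> "while" marker fix in note_page(): when one mapped
range ends past several markers at once, the loop emits one
"---[ ... ]---" header per crossed marker, where the old "if" emitted
only the first. The marker names and addresses below are made up for
the example.

#include <stdio.h>

struct addr_marker {
	unsigned long start_address;
	const char *name;
};

/* Hypothetical layout: several markers fall inside one mapped range. */
static struct addr_marker markers[] = {
	{ 0x0000, "Identity Mapping" },
	{ 0x1000, "Kasan Shadow Start" },
	{ 0x2000, "Kasan Shadow End" },
	{ 0x3000, "vmalloc Area" },
	{ (unsigned long)-1, NULL },	/* sentinel, never printed */
};

int main(void)
{
	const struct addr_marker *marker = &markers[0];
	/* One printed range ends here, past three marker boundaries. */
	unsigned long current_address = 0x3000;

	/*
	 * Catch up on every marker the range stepped over; with "if"
	 * in place of "while", only "Kasan Shadow Start" would print.
	 */
	while (current_address >= marker[1].start_address) {
		marker++;
		printf("---[ %s ]---\n", marker->name);
	}
	return 0;
}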
diff --git a/arch/s390/mm/dump_pagetables.c b/arch/s390/mm/dump_pagetables.c
index 7cdea2e..5d67b81 100644
--- a/arch/s390/mm/dump_pagetables.c
+++ b/arch/s390/mm/dump_pagetables.c
@@ -3,6 +3,8 @@
#include <linux/debugfs.h>
#include <linux/sched.h>
#include <linux/mm.h>
+#include <linux/kasan.h>
+#include <asm/kasan.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
 
@@ -17,18 +19,26 @@
IDENTITY_NR = 0,
KERNEL_START_NR,
KERNEL_END_NR,
+#ifdef CONFIG_KASAN
+ KASAN_SHADOW_START_NR,
+ KASAN_SHADOW_END_NR,
+#endif
VMEMMAP_NR,
VMALLOC_NR,
MODULES_NR,
};
 
static struct addr_marker address_markers[] = {
- [IDENTITY_NR] = {0, "Identity Mapping"},
- [KERNEL_START_NR] = {(unsigned long)_stext, "Kernel Image Start"},
- [KERNEL_END_NR] = {(unsigned long)_end, "Kernel Image End"},
- [VMEMMAP_NR] = {0, "vmemmap Area"},
- [VMALLOC_NR] = {0, "vmalloc Area"},
- [MODULES_NR] = {0, "Modules Area"},
+ [IDENTITY_NR] = {0, "Identity Mapping"},
+ [KERNEL_START_NR] = {(unsigned long)_stext, "Kernel Image Start"},
+ [KERNEL_END_NR] = {(unsigned long)_end, "Kernel Image End"},
+#ifdef CONFIG_KASAN
+ [KASAN_SHADOW_START_NR] = {KASAN_SHADOW_START, "Kasan Shadow Start"},
+ [KASAN_SHADOW_END_NR] = {KASAN_SHADOW_END, "Kasan Shadow End"},
+#endif
+ [VMEMMAP_NR] = {0, "vmemmap Area"},
+ [VMALLOC_NR] = {0, "vmalloc Area"},
+ [MODULES_NR] = {0, "Modules Area"},
{ -1, NULL }
};
 
@@ -80,7 +90,7 @@
} else if (prot != cur || level != st->level ||
st->current_address >= st->marker[1].start_address) {
/* Print the actual finished series */
- seq_printf(m, "0x%0*lx-0x%0*lx",
+ seq_printf(m, "0x%0*lx-0x%0*lx ",
width, st->start_address,
width, st->current_address);
delta = (st->current_address - st->start_address) >> 10;
@@ -90,7 +100,7 @@
}
seq_printf(m, "%9lu%c ", delta, *unit);
print_prot(m, st->current_prot, st->level);
- if (st->current_address >= st->marker[1].start_address) {
+ while (st->current_address >= st->marker[1].start_address) {
st->marker++;
seq_printf(m, "---[ %s ]---\n", st->marker->name);
}
@@ -100,6 +110,18 @@
}
}
 
+#ifdef CONFIG_KASAN
+static void note_kasan_early_shadow_page(struct seq_file *m,
+ struct pg_state *st)
+{
+ unsigned int prot;
+
+ prot = pte_val(*kasan_early_shadow_pte) &
+ (_PAGE_PROTECT | _PAGE_INVALID | _PAGE_NOEXEC);
+ note_page(m, st, prot, 4);
+}
+#endif
+
/*
* The actual page table walker functions. In order to keep the
* implementation of print_prot() short, we only check and pass
@@ -132,9 +154,16 @@
pmd_t *pmd;
int i;
 
- for (i = 0; i < PTRS_PER_PMD && addr < max_addr; i++) {
+#ifdef CONFIG_KASAN
+ if ((pud_val(*pud) & PAGE_MASK) == __pa(kasan_early_shadow_pmd)) {
+ note_kasan_early_shadow_page(m, st);
+ return;
+ }
+#endif
+
+ pmd = pmd_offset(pud, addr);
+ for (i = 0; i < PTRS_PER_PMD && addr < max_addr; i++, pmd++) {
st->current_address = addr;
- pmd = pmd_offset(pud, addr);
if (!pmd_none(*pmd)) {
if (pmd_large(*pmd)) {
prot = pmd_val(*pmd) &
@@ -156,9 +185,16 @@
pud_t *pud;
int i;
 
- for (i = 0; i < PTRS_PER_PUD && addr < max_addr; i++) {
+#ifdef CONFIG_KASAN
+ if ((p4d_val(*p4d) & PAGE_MASK) == __pa(kasan_early_shadow_pud)) {
+ note_kasan_early_shadow_page(m, st);
+ return;
+ }
+#endif
+
+ pud = pud_offset(p4d, addr);
+ for (i = 0; i < PTRS_PER_PUD && addr < max_addr; i++, pud++) {
st->current_address = addr;
- pud = pud_offset(p4d, addr);
if (!pud_none(*pud))
if (pud_large(*pud)) {
prot = pud_val(*pud) &
@@ -179,9 +215,16 @@
p4d_t *p4d;
int i;
 
- for (i = 0; i < PTRS_PER_P4D && addr < max_addr; i++) {
+#ifdef CONFIG_KASAN
+ if ((pgd_val(*pgd) & PAGE_MASK) == __pa(kasan_early_shadow_p4d)) {
+ note_kasan_early_shadow_page(m, st);
+ return;
+ }
+#endif
+
+ p4d = p4d_offset(pgd, addr);
+ for (i = 0; i < PTRS_PER_P4D && addr < max_addr; i++, p4d++) {
st->current_address = addr;
- p4d = p4d_offset(pgd, addr);
if (!p4d_none(*p4d))
walk_pud_level(m, st, p4d, addr);
else