// SPDX-License-Identifier: GPL-2.0-only
/*
 * Debug helper to dump the current kernel pagetables of the system
 * so that we can see what the various memory ranges are set to.
 *
 * Derived from x86 implementation:
 * (C) Copyright 2008 Intel Corporation
 *
 * Author: Arjan van de Ven <arjan@linux.intel.com>
 */
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/seq_file.h>

#include <asm/domain.h>
#include <asm/fixmap.h>
#include <asm/memory.h>
#include <asm/pgtable.h>
#include <asm/ptdump.h>

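/*
 * Markers bounding the named regions in the dump output. The vmalloc
 * entry is a placeholder: its start address is patched in at init time
 * by ptdump_initialize(), since VMALLOC_START is not a compile-time
 * constant on ARM.
 */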
static struct addr_marker address_markers[] = {
	{ MODULES_VADDR,		"Modules" },
	{ PAGE_OFFSET,			"Kernel Mapping" },
	{ 0,				"vmalloc() Area" },
	{ VMALLOC_END,			"vmalloc() End" },
	{ FIXADDR_START,		"Fixmap Area" },
	{ VECTORS_BASE,			"Vectors" },
	{ VECTORS_BASE + PAGE_SIZE * 2,	"Vectors End" },
	{ -1,				NULL },
};

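/*
 * Output helpers that tolerate a NULL seq_file: ptdump_check_wx() walks
 * the tables with no output target, so printing is skipped when m is
 * NULL.
 */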
#define pt_dump_seq_printf(m, fmt, args...) \
({ \
	if (m) \
		seq_printf(m, fmt, ##args); \
})

#define pt_dump_seq_puts(m, fmt) \
({ \
	if (m) \
		seq_printf(m, fmt); \
})

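/*
 * Walker state carried between note_page() calls: contiguous ranges
 * with identical protection bits, level and domain are coalesced into a
 * single output line spanning start_address to the current address.
 */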
struct pg_state {
	struct seq_file *seq;
	const struct addr_marker *marker;
	unsigned long start_address;
	unsigned level;
	u64 current_prot;
	bool check_wx;
	unsigned long wx_pages;
	const char *current_domain;
};

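/*
 * One decoded descriptor field: when (prot & mask) == val, "set" is
 * printed, otherwise "clear" (if non-NULL). ro_bit and nx_bit flag the
 * entries consulted by the W+X check.
 */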
struct prot_bits {
	u64		mask;
	u64		val;
	const char	*set;
	const char	*clear;
	bool		ro_bit;
	bool		nx_bit;
};

static const struct prot_bits pte_bits[] = {
	{
		.mask	= L_PTE_USER,
		.val	= L_PTE_USER,
		.set	= "USR",
		.clear	= "   ",
	}, {
		.mask	= L_PTE_RDONLY,
		.val	= L_PTE_RDONLY,
		.set	= "ro",
		.clear	= "RW",
		.ro_bit	= true,
	}, {
		.mask	= L_PTE_XN,
		.val	= L_PTE_XN,
		.set	= "NX",
		.clear	= "x ",
		.nx_bit	= true,
	}, {
		.mask	= L_PTE_SHARED,
		.val	= L_PTE_SHARED,
		.set	= "SHD",
		.clear	= "   ",
	}, {
		.mask	= L_PTE_MT_MASK,
		.val	= L_PTE_MT_UNCACHED,
		.set	= "SO/UNCACHED",
	}, {
		.mask	= L_PTE_MT_MASK,
		.val	= L_PTE_MT_BUFFERABLE,
		.set	= "MEM/BUFFERABLE/WC",
	}, {
		.mask	= L_PTE_MT_MASK,
		.val	= L_PTE_MT_WRITETHROUGH,
		.set	= "MEM/CACHED/WT",
	}, {
		.mask	= L_PTE_MT_MASK,
		.val	= L_PTE_MT_WRITEBACK,
		.set	= "MEM/CACHED/WBRA",
#ifndef CONFIG_ARM_LPAE
	}, {
		.mask	= L_PTE_MT_MASK,
		.val	= L_PTE_MT_MINICACHE,
		.set	= "MEM/MINICACHE",
#endif
	}, {
		.mask	= L_PTE_MT_MASK,
		.val	= L_PTE_MT_WRITEALLOC,
		.set	= "MEM/CACHED/WBWA",
	}, {
		.mask	= L_PTE_MT_MASK,
		.val	= L_PTE_MT_DEV_SHARED,
		.set	= "DEV/SHARED",
#ifndef CONFIG_ARM_LPAE
	}, {
		.mask	= L_PTE_MT_MASK,
		.val	= L_PTE_MT_DEV_NONSHARED,
		.set	= "DEV/NONSHARED",
#endif
	}, {
		.mask	= L_PTE_MT_MASK,
		.val	= L_PTE_MT_DEV_WC,
		.set	= "DEV/WC",
	}, {
		.mask	= L_PTE_MT_MASK,
		.val	= L_PTE_MT_DEV_CACHED,
		.set	= "DEV/CACHED",
	},
};

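/*
 * Section (PMD-level block) permission decoding differs by page table
 * format: LPAE uses the AP2/RDONLY bits, ARMv6+ classic tables use the
 * APX/AP encoding, and ARMv4/v5 only have the coarser AP bits.
 */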
static const struct prot_bits section_bits[] = {
#ifdef CONFIG_ARM_LPAE
	{
		.mask	= PMD_SECT_USER,
		.val	= PMD_SECT_USER,
		.set	= "USR",
	}, {
		.mask	= L_PMD_SECT_RDONLY | PMD_SECT_AP2,
		.val	= L_PMD_SECT_RDONLY | PMD_SECT_AP2,
		.set	= "ro",
		.clear	= "RW",
		.ro_bit	= true,
#elif __LINUX_ARM_ARCH__ >= 6
	{
		.mask	= PMD_SECT_APX | PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
		.val	= PMD_SECT_APX | PMD_SECT_AP_WRITE,
		.set	= "    ro",
		.ro_bit	= true,
	}, {
		.mask	= PMD_SECT_APX | PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
		.val	= PMD_SECT_AP_WRITE,
		.set	= "    RW",
	}, {
		.mask	= PMD_SECT_APX | PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
		.val	= PMD_SECT_AP_READ,
		.set	= "USR ro",
	}, {
		.mask	= PMD_SECT_APX | PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
		.val	= PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
		.set	= "USR RW",
#else /* ARMv4/ARMv5 */
	/* These are approximate */
	{
		.mask	= PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
		.val	= 0,
		.set	= "    ro",
		.ro_bit	= true,
	}, {
		.mask	= PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
		.val	= PMD_SECT_AP_WRITE,
		.set	= "    RW",
	}, {
		.mask	= PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
		.val	= PMD_SECT_AP_READ,
		.set	= "USR ro",
	}, {
		.mask	= PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
		.val	= PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
		.set	= "USR RW",
#endif
	}, {
		.mask	= PMD_SECT_XN,
		.val	= PMD_SECT_XN,
		.set	= "NX",
		.clear	= "x ",
		.nx_bit	= true,
	}, {
		.mask	= PMD_SECT_S,
		.val	= PMD_SECT_S,
		.set	= "SHD",
		.clear	= "   ",
	},
};

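/*
 * Per-level decode tables, indexed by walk depth (index 0 is unused;
 * 1 = pgd, 2 = pud, 3 = pmd/section, 4 = pte). mask, ro_bit and nx_bit
 * are derived from the bits[] tables by ptdump_initialize().
 */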
struct pg_level {
	const struct prot_bits *bits;
	size_t num;
	u64 mask;
	const struct prot_bits *ro_bit;
	const struct prot_bits *nx_bit;
};

static struct pg_level pg_level[] = {
	{
	}, { /* pgd */
	}, { /* pud */
	}, { /* pmd */
		.bits	= section_bits,
		.num	= ARRAY_SIZE(section_bits),
	}, { /* pte */
		.bits	= pte_bits,
		.num	= ARRAY_SIZE(pte_bits),
	},
};

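/* Print the human-readable attribute string for the current range. */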
static void dump_prot(struct pg_state *st, const struct prot_bits *bits, size_t num)
{
	unsigned i;

	for (i = 0; i < num; i++, bits++) {
		const char *s;

		if ((st->current_prot & bits->mask) == bits->val)
			s = bits->set;
		else
			s = bits->clear;

		if (s)
			pt_dump_seq_printf(st->seq, " %s", s);
	}
}

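/*
 * Count and warn about ranges that are both writable and executable.
 * A range passes if it is read-only or non-executable; anything else
 * is a W+X mapping and is reported once.
 */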
static void note_prot_wx(struct pg_state *st, unsigned long addr)
{
	if (!st->check_wx)
		return;
	if ((st->current_prot & pg_level[st->level].ro_bit->mask) ==
				pg_level[st->level].ro_bit->val)
		return;
	if ((st->current_prot & pg_level[st->level].nx_bit->mask) ==
				pg_level[st->level].nx_bit->val)
		return;

	WARN_ONCE(1, "arm/mm: Found insecure W+X mapping at address %pS\n",
		  (void *)st->start_address);

	st->wx_pages += (addr - st->start_address) / PAGE_SIZE;
}

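/*
 * Called for every entry the walk visits. Emits an output line whenever
 * the protection bits, level or domain change, or a region marker
 * boundary is crossed; otherwise the current range is simply extended.
 * The range size is printed using the largest unit that divides it.
 */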
static void note_page(struct pg_state *st, unsigned long addr,
		      unsigned int level, u64 val, const char *domain)
{
	static const char units[] = "KMGTPE";
	u64 prot = val & pg_level[level].mask;

	if (!st->level) {
		st->level = level;
		st->current_prot = prot;
		st->current_domain = domain;
		pt_dump_seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
	} else if (prot != st->current_prot || level != st->level ||
		   domain != st->current_domain ||
		   addr >= st->marker[1].start_address) {
		const char *unit = units;
		unsigned long delta;

		if (st->current_prot) {
			note_prot_wx(st, addr);
			pt_dump_seq_printf(st->seq, "0x%08lx-0x%08lx   ",
					   st->start_address, addr);

			delta = (addr - st->start_address) >> 10;
			while (!(delta & 1023) && unit[1]) {
				delta >>= 10;
				unit++;
			}
			pt_dump_seq_printf(st->seq, "%9lu%c", delta, *unit);
			if (st->current_domain)
				pt_dump_seq_printf(st->seq, " %s",
						   st->current_domain);
			if (pg_level[st->level].bits)
				dump_prot(st, pg_level[st->level].bits, pg_level[st->level].num);
			pt_dump_seq_printf(st->seq, "\n");
		}

		if (addr >= st->marker[1].start_address) {
			st->marker++;
			pt_dump_seq_printf(st->seq, "---[ %s ]---\n",
					   st->marker->name);
		}
		st->start_address = addr;
		st->current_prot = prot;
		st->current_domain = domain;
		st->level = level;
	}
}

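/* Dump every entry of the PTE table mapped by this pmd. */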
static void walk_pte(struct pg_state *st, pmd_t *pmd, unsigned long start,
		     const char *domain)
{
	pte_t *pte = pte_offset_kernel(pmd, 0);
	unsigned long addr;
	unsigned i;

	for (i = 0; i < PTRS_PER_PTE; i++, pte++) {
		addr = start + i * PAGE_SIZE;
		note_page(st, addr, 4, pte_val(*pte), domain);
	}
}

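/*
 * Classic (non-LPAE) page tables tag first-level entries with a domain;
 * LPAE has no domains, so NULL is returned and the column is omitted.
 */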
static const char *get_domain_name(pmd_t *pmd)
{
#ifndef CONFIG_ARM_LPAE
	switch (pmd_val(*pmd) & PMD_DOMAIN_MASK) {
	case PMD_DOMAIN(DOMAIN_KERNEL):
		return "KERNEL ";
	case PMD_DOMAIN(DOMAIN_USER):
		return "USER   ";
	case PMD_DOMAIN(DOMAIN_IO):
		return "IO     ";
	case PMD_DOMAIN(DOMAIN_VECTORS):
		return "VECTORS";
	default:
		return "unknown";
	}
#endif
	return NULL;
}

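/*
 * Walk the PMD entries, descending into PTE tables where present. When
 * SECTION_SIZE < PMD_SIZE (classic ARM, where one Linux pmd covers two
 * hardware sections), a section-mapped second half is reported as its
 * own entry.
 */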
static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start)
{
	pmd_t *pmd = pmd_offset(pud, 0);
	unsigned long addr;
	unsigned i;
	const char *domain;

	for (i = 0; i < PTRS_PER_PMD; i++, pmd++) {
		addr = start + i * PMD_SIZE;
		domain = get_domain_name(pmd);
		if (pmd_none(*pmd) || pmd_large(*pmd) || !pmd_present(*pmd))
			note_page(st, addr, 3, pmd_val(*pmd), domain);
		else
			walk_pte(st, pmd, addr, domain);

		if (SECTION_SIZE < PMD_SIZE && pmd_large(pmd[1])) {
			addr += SECTION_SIZE;
			pmd++;
			domain = get_domain_name(pmd);
			note_page(st, addr, 3, pmd_val(*pmd), domain);
		}
	}
}

static void walk_pud(struct pg_state *st, pgd_t *pgd, unsigned long start)
{
	pud_t *pud = pud_offset(pgd, 0);
	unsigned long addr;
	unsigned i;

	for (i = 0; i < PTRS_PER_PUD; i++, pud++) {
		addr = start + i * PUD_SIZE;
		if (!pud_none(*pud)) {
			walk_pmd(st, pud, addr);
		} else {
			note_page(st, addr, 2, pud_val(*pud), NULL);
		}
	}
}

static void walk_pgd(struct pg_state *st, struct mm_struct *mm,
		     unsigned long start)
{
	pgd_t *pgd = pgd_offset(mm, 0UL);
	unsigned i;
	unsigned long addr;

	for (i = 0; i < PTRS_PER_PGD; i++, pgd++) {
		addr = start + i * PGDIR_SIZE;
		if (!pgd_none(*pgd)) {
			walk_pud(st, pgd, addr);
		} else {
			note_page(st, addr, 1, pgd_val(*pgd), NULL);
		}
	}
}

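/*
 * Entry point used by the debugfs file: walk the given mm from
 * base_addr, then flush the final coalesced range with a terminating
 * level-0 note_page() call.
 */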
void ptdump_walk_pgd(struct seq_file *m, struct ptdump_info *info)
{
	struct pg_state st = {
		.seq = m,
		.marker = info->markers,
		.check_wx = false,
	};

	walk_pgd(&st, info->mm, info->base_addr);
	note_page(&st, 0, 0, 0, NULL);
}

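/*
 * Precompute the per-level decode masks, locate the ro/nx entries, and
 * patch in the runtime value of VMALLOC_START.
 */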
static void ptdump_initialize(void)
{
	unsigned i, j;

	for (i = 0; i < ARRAY_SIZE(pg_level); i++)
		if (pg_level[i].bits)
			for (j = 0; j < pg_level[i].num; j++) {
				pg_level[i].mask |= pg_level[i].bits[j].mask;
				if (pg_level[i].bits[j].ro_bit)
					pg_level[i].ro_bit = &pg_level[i].bits[j];
				if (pg_level[i].bits[j].nx_bit)
					pg_level[i].nx_bit = &pg_level[i].bits[j];
			}

	address_markers[2].start_address = VMALLOC_START;
}

static struct ptdump_info kernel_ptdump_info = {
	.mm = &init_mm,
	.markers = address_markers,
	.base_addr = 0,
};

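/*
 * Walk init_mm with output suppressed (seq == NULL) purely to count
 * W+X pages; a single anonymous marker pair covers the whole address
 * space.
 */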
void ptdump_check_wx(void)
{
	struct pg_state st = {
		.seq = NULL,
		.marker = (struct addr_marker[]) {
			{ 0, NULL},
			{ -1, NULL},
		},
		.check_wx = true,
	};

	walk_pgd(&st, &init_mm, 0);
	note_page(&st, 0, 0, 0, NULL);
	if (st.wx_pages)
		pr_warn("Checked W+X mappings: FAILED, %lu W+X pages found\n",
			st.wx_pages);
	else
		pr_info("Checked W+X mappings: passed, no W+X pages found\n");
}

static int ptdump_init(void)
{
	ptdump_initialize();
	ptdump_debugfs_register(&kernel_ptdump_info, "kernel_page_tables");
	return 0;
}
__initcall(ptdump_init);