// SPDX-License-Identifier: GPL-2.0
#include <linux/kasan.h>
#include <linux/sched/task.h>
#include <linux/memblock.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/kasan.h>
#include <asm/mem_detect.h>
#include <asm/processor.h>
#include <asm/sclp.h>
#include <asm/facility.h>
#include <asm/sections.h>
#include <asm/setup.h>

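/*
 * Downward-growing bump allocators used before memblock is usable:
 * segment_pos/segment_low hand out _SEGMENT_SIZE chunks that back the
 * shadow with large pages when EDAT is available, while
 * pgalloc_pos/pgalloc_low hand out ordinary pages for page tables and
 * 4k shadow pages. pgalloc_freeable records the allocation cursor just
 * before the identity mapping is populated, so that the page tables
 * built for it can be handed back to memblock later.
 */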
static unsigned long segment_pos __initdata;
static unsigned long segment_low __initdata;
static unsigned long pgalloc_pos __initdata;
static unsigned long pgalloc_low __initdata;
static unsigned long pgalloc_freeable __initdata;
static bool has_edat __initdata;
static bool has_nx __initdata;

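/*
 * Translate an address to the address of its KASAN shadow byte;
 * every 8 bytes of memory are covered by one shadow byte.
 */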
#define __sha(x) ((unsigned long)kasan_mem_to_shadow((void *)x))

static pgd_t early_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);

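/*
 * Early boot failure: report the reason through the SCLP console and
 * enter disabled wait, since recovery is impossible at this point.
 */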
static void __init kasan_early_panic(const char *reason)
{
	sclp_early_printk("The Linux kernel failed to boot with the KernelAddressSanitizer:\n");
	sclp_early_printk(reason);
	disabled_wait();
}

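/*
 * Hand out one _SEGMENT_SIZE chunk, growing downwards from segment_pos
 * towards segment_low. Used to back shadow memory with large pages.
 */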
static void * __init kasan_early_alloc_segment(void)
{
	segment_pos -= _SEGMENT_SIZE;

	if (segment_pos < segment_low)
		kasan_early_panic("out of memory during initialisation\n");

	return (void *)segment_pos;
}

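/*
 * Hand out 2^order pages, growing downwards from pgalloc_pos towards
 * pgalloc_low.
 */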
static void * __init kasan_early_alloc_pages(unsigned int order)
{
	pgalloc_pos -= (PAGE_SIZE << order);

	if (pgalloc_pos < pgalloc_low)
		kasan_early_panic("out of memory during initialisation\n");

	return (void *)pgalloc_pos;
}

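/*
 * Allocate a region/segment table (CRST) and initialize all of its
 * entries to val.
 */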
static void * __init kasan_early_crst_alloc(unsigned long val)
{
	unsigned long *table;

	table = kasan_early_alloc_pages(CRST_ALLOC_ORDER);
	if (table)
		crst_table_init(table, val);
	return table;
}

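/*
 * Page tables are only half a page on s390, so allocate a full page
 * and hand out its two halves on consecutive calls; pte_leftover
 * caches the unused half in between.
 */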
static pte_t * __init kasan_early_pte_alloc(void)
{
	static void *pte_leftover;
	pte_t *pte;

	BUILD_BUG_ON(_PAGE_TABLE_SIZE * 2 != PAGE_SIZE);

	if (!pte_leftover) {
		pte_leftover = kasan_early_alloc_pages(0);
		pte = pte_leftover + _PAGE_TABLE_SIZE;
	} else {
		pte = pte_leftover;
		pte_leftover = NULL;
	}
	memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE);
	return pte;
}

enum populate_mode {
	POPULATE_ONE2ONE,
	POPULATE_MAP,
	POPULATE_ZERO_SHADOW
};
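
/*
 * Walk the page tables from address to end and populate them according
 * to mode: POPULATE_ONE2ONE maps each virtual address to the identical
 * physical address, POPULATE_MAP backs the range with freshly
 * allocated zeroed memory, and POPULATE_ZERO_SHADOW maps the range
 * read-only to the shared kasan zero shadow page, reusing the shared
 * zero tables for whole regions wherever alignment and size allow.
 */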
static void __init kasan_early_vmemmap_populate(unsigned long address,
						unsigned long end,
						enum populate_mode mode)
{
	unsigned long pgt_prot_zero, pgt_prot, sgt_prot;
	pgd_t *pg_dir;
	p4d_t *p4_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;

	pgt_prot_zero = pgprot_val(PAGE_KERNEL_RO);
	if (!has_nx)
		pgt_prot_zero &= ~_PAGE_NOEXEC;
	pgt_prot = pgprot_val(PAGE_KERNEL_EXEC);
	sgt_prot = pgprot_val(SEGMENT_KERNEL_EXEC);

	while (address < end) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			if (mode == POPULATE_ZERO_SHADOW &&
			    IS_ALIGNED(address, PGDIR_SIZE) &&
			    end - address >= PGDIR_SIZE) {
				pgd_populate(&init_mm, pg_dir,
						kasan_early_shadow_p4d);
				address = (address + PGDIR_SIZE) & PGDIR_MASK;
				continue;
			}
			p4_dir = kasan_early_crst_alloc(_REGION2_ENTRY_EMPTY);
			pgd_populate(&init_mm, pg_dir, p4_dir);
		}

		p4_dir = p4d_offset(pg_dir, address);
		if (p4d_none(*p4_dir)) {
			if (mode == POPULATE_ZERO_SHADOW &&
			    IS_ALIGNED(address, P4D_SIZE) &&
			    end - address >= P4D_SIZE) {
				p4d_populate(&init_mm, p4_dir,
						kasan_early_shadow_pud);
				address = (address + P4D_SIZE) & P4D_MASK;
				continue;
			}
			pu_dir = kasan_early_crst_alloc(_REGION3_ENTRY_EMPTY);
			p4d_populate(&init_mm, p4_dir, pu_dir);
		}

		pu_dir = pud_offset(p4_dir, address);
		if (pud_none(*pu_dir)) {
			if (mode == POPULATE_ZERO_SHADOW &&
			    IS_ALIGNED(address, PUD_SIZE) &&
			    end - address >= PUD_SIZE) {
				pud_populate(&init_mm, pu_dir,
						kasan_early_shadow_pmd);
				address = (address + PUD_SIZE) & PUD_MASK;
				continue;
			}
			pm_dir = kasan_early_crst_alloc(_SEGMENT_ENTRY_EMPTY);
			pud_populate(&init_mm, pu_dir, pm_dir);
		}

		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir)) {
			if (mode == POPULATE_ZERO_SHADOW &&
			    IS_ALIGNED(address, PMD_SIZE) &&
			    end - address >= PMD_SIZE) {
				pmd_populate(&init_mm, pm_dir,
						kasan_early_shadow_pte);
				address = (address + PMD_SIZE) & PMD_MASK;
				continue;
			}
			/* the first megabyte of 1:1 is mapped with 4k pages */
			if (has_edat && address && end - address >= PMD_SIZE &&
			    mode != POPULATE_ZERO_SHADOW) {
				void *page;

				if (mode == POPULATE_ONE2ONE) {
					page = (void *)address;
				} else {
					page = kasan_early_alloc_segment();
					memset(page, 0, _SEGMENT_SIZE);
				}
				pmd_val(*pm_dir) = __pa(page) | sgt_prot;
				address = (address + PMD_SIZE) & PMD_MASK;
				continue;
			}

			pt_dir = kasan_early_pte_alloc();
			pmd_populate(&init_mm, pm_dir, pt_dir);
		} else if (pmd_large(*pm_dir)) {
			address = (address + PMD_SIZE) & PMD_MASK;
			continue;
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		if (pte_none(*pt_dir)) {
			void *page;

			switch (mode) {
			case POPULATE_ONE2ONE:
				page = (void *)address;
				pte_val(*pt_dir) = __pa(page) | pgt_prot;
				break;
			case POPULATE_MAP:
				page = kasan_early_alloc_pages(0);
				memset(page, 0, PAGE_SIZE);
				pte_val(*pt_dir) = __pa(page) | pgt_prot;
				break;
			case POPULATE_ZERO_SHADOW:
				page = kasan_early_shadow_page;
				pte_val(*pt_dir) = __pa(page) | pgt_prot_zero;
				break;
			}
		}
		address += PAGE_SIZE;
	}
}

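/*
 * Load the new ASCE into the primary (cr1), secondary (cr7) and home
 * (cr13) address space control registers.
 */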
static void __init kasan_set_pgd(pgd_t *pgd, unsigned long asce_type)
{
	unsigned long asce_bits;

	asce_bits = asce_type | _ASCE_TABLE_LENGTH;
	S390_lowcore.kernel_asce = (__pa(pgd) & PAGE_MASK) | asce_bits;
	S390_lowcore.user_asce = S390_lowcore.kernel_asce;

	__ctl_load(S390_lowcore.kernel_asce, 1, 1);
	__ctl_load(S390_lowcore.kernel_asce, 7, 7);
	__ctl_load(S390_lowcore.kernel_asce, 13, 13);
}

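/* Turn on DAT in the current PSW and switch to the home address space. */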
static void __init kasan_enable_dat(void)
{
	psw_t psw;

	psw.mask = __extract_psw();
	psw_bits(psw).dat = 1;
	psw_bits(psw).as = PSW_BITS_AS_HOME;
	__load_psw_mask(psw.mask);
}

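/*
 * Facility 8 is EDAT1 (large page support); cr0 bit 23 enables
 * enhanced DAT. Facility 130 is instruction-execution protection;
 * cr0 bit 20 enables it, unless noexec= disabled it on the command
 * line.
 */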
static void __init kasan_early_detect_facilities(void)
{
	if (test_facility(8)) {
		has_edat = true;
		__ctl_set_bit(0, 23);
	}
	if (!noexec_disabled && test_facility(130)) {
		has_nx = true;
		__ctl_set_bit(0, 20);
	}
}

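/*
 * Main early KASAN setup: detect the memory size, build early page
 * tables containing both an identity mapping of all detected memory
 * and the corresponding shadow, switch to them with DAT enabled, and
 * finally arm KASAN by resetting init_task.kasan_depth.
 */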
void __init kasan_early_init(void)
{
	unsigned long untracked_mem_end;
	unsigned long shadow_alloc_size;
	unsigned long initrd_end;
	unsigned long asce_type;
	unsigned long memsize;
	unsigned long vmax;
	unsigned long pgt_prot = pgprot_val(PAGE_KERNEL_RO);
	pte_t pte_z;
	pmd_t pmd_z = __pmd(__pa(kasan_early_shadow_pte) | _SEGMENT_ENTRY);
	pud_t pud_z = __pud(__pa(kasan_early_shadow_pmd) | _REGION3_ENTRY);
	p4d_t p4d_z = __p4d(__pa(kasan_early_shadow_pud) | _REGION2_ENTRY);

	kasan_early_detect_facilities();
	if (!has_nx)
		pgt_prot &= ~_PAGE_NOEXEC;
	pte_z = __pte(__pa(kasan_early_shadow_page) | pgt_prot);

	memsize = get_mem_detect_end();
	if (!memsize)
		kasan_early_panic("cannot detect physical memory size\n");
	/* respect mem= cmdline parameter */
	if (memory_end_set && memsize > memory_end)
		memsize = memory_end;
	if (IS_ENABLED(CONFIG_CRASH_DUMP) && OLDMEM_BASE)
		memsize = min(memsize, OLDMEM_SIZE);
	memsize = min(memsize, KASAN_SHADOW_START);

	if (IS_ENABLED(CONFIG_KASAN_S390_4_LEVEL_PAGING)) {
		/* 4 level paging */
		BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, P4D_SIZE));
		BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, P4D_SIZE));
		crst_table_init((unsigned long *)early_pg_dir,
				_REGION2_ENTRY_EMPTY);
		untracked_mem_end = vmax = _REGION1_SIZE;
		asce_type = _ASCE_TYPE_REGION2;
	} else {
		/* 3 level paging */
		BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, PUD_SIZE));
		BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, PUD_SIZE));
		crst_table_init((unsigned long *)early_pg_dir,
				_REGION3_ENTRY_EMPTY);
		untracked_mem_end = vmax = _REGION2_SIZE;
		asce_type = _ASCE_TYPE_REGION3;
	}

	/* init kasan zero shadow */
	crst_table_init((unsigned long *)kasan_early_shadow_p4d,
			p4d_val(p4d_z));
	crst_table_init((unsigned long *)kasan_early_shadow_pud,
			pud_val(pud_z));
	crst_table_init((unsigned long *)kasan_early_shadow_pmd,
			pmd_val(pmd_z));
	memset64((u64 *)kasan_early_shadow_pte, pte_val(pte_z), PTRS_PER_PTE);

	shadow_alloc_size = memsize >> KASAN_SHADOW_SCALE_SHIFT;
	pgalloc_low = round_up((unsigned long)_end, _SEGMENT_SIZE);
	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD)) {
		initrd_end =
		    round_up(INITRD_START + INITRD_SIZE, _SEGMENT_SIZE);
		pgalloc_low = max(pgalloc_low, initrd_end);
	}

	if (pgalloc_low + shadow_alloc_size > memsize)
		kasan_early_panic("out of memory during initialisation\n");

	if (has_edat) {
		segment_pos = round_down(memsize, _SEGMENT_SIZE);
		segment_low = segment_pos - shadow_alloc_size;
		pgalloc_pos = segment_low;
	} else {
		pgalloc_pos = memsize;
	}
	init_mm.pgd = early_pg_dir;
	/*
	 * Current memory layout:
	 * +- 0 -------------+   +- shadow start -+
	 * | 1:1 ram mapping |  /| 1/8 ram        |
	 * +- end of ram ----+ / +----------------+
	 * | ... gap ...     |/  |      kasan     |
	 * +- shadow start --+   |      zero      |
	 * | 1/8 addr space  |   |      page      |
	 * +- shadow end ----+   |     mapping    |
	 * | ... gap ...     |\  |   (untracked)  |
	 * +- modules vaddr -+ \ +----------------+
	 * | 2Gb             |  \|     unmapped   | allocated per module
	 * +-----------------+   +- shadow end ---+
	 */
	/* populate kasan shadow (for identity mapping and zero page mapping) */
	kasan_early_vmemmap_populate(__sha(0), __sha(memsize), POPULATE_MAP);
	if (IS_ENABLED(CONFIG_MODULES))
		untracked_mem_end = vmax - MODULES_LEN;
	kasan_early_vmemmap_populate(__sha(max_physmem_end),
				     __sha(untracked_mem_end),
				     POPULATE_ZERO_SHADOW);
	/* memory allocated for identity mapping structs will be freed later */
	pgalloc_freeable = pgalloc_pos;
	/* populate identity mapping */
	kasan_early_vmemmap_populate(0, memsize, POPULATE_ONE2ONE);
	kasan_set_pgd(early_pg_dir, asce_type);
	kasan_enable_dat();
	/* enable kasan */
	init_task.kasan_depth = 0;
	memblock_reserve(pgalloc_pos, memsize - pgalloc_pos);
	sclp_early_printk("KernelAddressSanitizer initialized\n");
}

void __init kasan_copy_shadow(pgd_t *pg_dir)
{
	/*
	 * At this point we are still running on the early page tables set up
	 * in early_pg_dir, while swapper_pg_dir has just been initialized
	 * with the identity mapping. Carry over the shadow memory region
	 * from early_pg_dir to swapper_pg_dir.
	 */

	pgd_t *pg_dir_src;
	pgd_t *pg_dir_dst;
	p4d_t *p4_dir_src;
	p4d_t *p4_dir_dst;
	pud_t *pu_dir_src;
	pud_t *pu_dir_dst;

	pg_dir_src = pgd_offset_raw(early_pg_dir, KASAN_SHADOW_START);
	pg_dir_dst = pgd_offset_raw(pg_dir, KASAN_SHADOW_START);
	p4_dir_src = p4d_offset(pg_dir_src, KASAN_SHADOW_START);
	p4_dir_dst = p4d_offset(pg_dir_dst, KASAN_SHADOW_START);
	if (!p4d_folded(*p4_dir_src)) {
		/* 4 level paging */
		memcpy(p4_dir_dst, p4_dir_src,
		       (KASAN_SHADOW_SIZE >> P4D_SHIFT) * sizeof(p4d_t));
		return;
	}
	/* 3 level paging */
	pu_dir_src = pud_offset(p4_dir_src, KASAN_SHADOW_START);
	pu_dir_dst = pud_offset(p4_dir_dst, KASAN_SHADOW_START);
	memcpy(pu_dir_dst, pu_dir_src,
	       (KASAN_SHADOW_SIZE >> PUD_SHIFT) * sizeof(pud_t));
}

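/*
 * Give the memory that only backed the early identity mapping page
 * tables (everything allocated below pgalloc_freeable) back to
 * memblock; by the time this is called the final page tables have
 * taken over.
 */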
void __init kasan_free_early_identity(void)
{
	memblock_free(pgalloc_pos, pgalloc_freeable - pgalloc_pos);
}