// SPDX-License-Identifier: GPL-2.0
#include <linux/kasan.h>
#include <linux/sched/task.h>
#include <linux/memblock.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/kasan.h>
#include <asm/mem_detect.h>
#include <asm/processor.h>
#include <asm/sclp.h>
#include <asm/facility.h>
#include <asm/sections.h>
#include <asm/setup.h>

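/*
 * Simple bump-down allocators used before memblock is available:
 * segment_pos/pgalloc_pos start at the top of their areas and move
 * towards segment_low/pgalloc_low with every allocation.
 * pgalloc_freeable records the allocator position right before the
 * identity mapping is populated, so that those page tables can be
 * handed back later by kasan_free_early_identity().
 * has_edat/has_nx cache the facility state detected in
 * kasan_early_detect_facilities().
 */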
static unsigned long segment_pos __initdata;
static unsigned long segment_low __initdata;
static unsigned long pgalloc_pos __initdata;
static unsigned long pgalloc_low __initdata;
static unsigned long pgalloc_freeable __initdata;
static bool has_edat __initdata;
static bool has_nx __initdata;

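/* Shadow address that covers kernel address x */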
#define __sha(x) ((unsigned long)kasan_mem_to_shadow((void *)x))

static pgd_t early_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);

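/*
 * Report a fatal condition via the SCLP early console (the only output
 * available this early) and stop the CPU in a disabled wait.
 */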
static void __init kasan_early_panic(const char *reason)
{
	sclp_early_printk("The Linux kernel failed to boot with the KernelAddressSanitizer:\n");
	sclp_early_printk(reason);
	disabled_wait();
}

static void * __init kasan_early_alloc_segment(void)
{
	segment_pos -= _SEGMENT_SIZE;

	if (segment_pos < segment_low)
		kasan_early_panic("out of memory during initialisation\n");

	return (void *)segment_pos;
}

static void * __init kasan_early_alloc_pages(unsigned int order)
{
	pgalloc_pos -= (PAGE_SIZE << order);

	if (pgalloc_pos < pgalloc_low)
		kasan_early_panic("out of memory during initialisation\n");

	return (void *)pgalloc_pos;
}

static void * __init kasan_early_crst_alloc(unsigned long val)
{
	unsigned long *table;

	table = kasan_early_alloc_pages(CRST_ALLOC_ORDER);
	if (table)
		crst_table_init(table, val);
	return table;
}

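/*
 * s390 page tables are only half a page in size, so a full page is
 * allocated and split in two; the unused half is kept in pte_leftover
 * for the next call.
 */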
static pte_t * __init kasan_early_pte_alloc(void)
{
	static void *pte_leftover;
	pte_t *pte;

	BUILD_BUG_ON(_PAGE_TABLE_SIZE * 2 != PAGE_SIZE);

	if (!pte_leftover) {
		pte_leftover = kasan_early_alloc_pages(0);
		pte = pte_leftover + _PAGE_TABLE_SIZE;
	} else {
		pte = pte_leftover;
		pte_leftover = NULL;
	}
	memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE);
	return pte;
}

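/*
 * Shadow population modes:
 * POPULATE_ONE2ONE     - identity map the range (virtual == physical)
 * POPULATE_MAP         - back the range with freshly allocated, zeroed
 *                        pages (real shadow memory)
 * POPULATE_ZERO_SHADOW - map the range to the shared, read-only
 *                        kasan_early_shadow_page (untracked memory)
 */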
enum populate_mode {
	POPULATE_ONE2ONE,
	POPULATE_MAP,
	POPULATE_ZERO_SHADOW
};
static void __init kasan_early_vmemmap_populate(unsigned long address,
						unsigned long end,
						enum populate_mode mode)
{
	unsigned long pgt_prot_zero, pgt_prot, sgt_prot;
	pgd_t *pg_dir;
	p4d_t *p4_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;

	pgt_prot_zero = pgprot_val(PAGE_KERNEL_RO);
	if (!has_nx)
		pgt_prot_zero &= ~_PAGE_NOEXEC;
	pgt_prot = pgprot_val(PAGE_KERNEL_EXEC);
	sgt_prot = pgprot_val(SEGMENT_KERNEL_EXEC);

	/*
	 * The first 1MB of 1:1 mapping is mapped with 4KB pages
	 */
	while (address < end) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			if (mode == POPULATE_ZERO_SHADOW &&
			    IS_ALIGNED(address, PGDIR_SIZE) &&
			    end - address >= PGDIR_SIZE) {
				pgd_populate(&init_mm, pg_dir,
					     kasan_early_shadow_p4d);
				address = (address + PGDIR_SIZE) & PGDIR_MASK;
				continue;
			}
			p4_dir = kasan_early_crst_alloc(_REGION2_ENTRY_EMPTY);
			pgd_populate(&init_mm, pg_dir, p4_dir);
		}

		p4_dir = p4d_offset(pg_dir, address);
		if (p4d_none(*p4_dir)) {
			if (mode == POPULATE_ZERO_SHADOW &&
			    IS_ALIGNED(address, P4D_SIZE) &&
			    end - address >= P4D_SIZE) {
				p4d_populate(&init_mm, p4_dir,
					     kasan_early_shadow_pud);
				address = (address + P4D_SIZE) & P4D_MASK;
				continue;
			}
			pu_dir = kasan_early_crst_alloc(_REGION3_ENTRY_EMPTY);
			p4d_populate(&init_mm, p4_dir, pu_dir);
		}

		pu_dir = pud_offset(p4_dir, address);
		if (pud_none(*pu_dir)) {
			if (mode == POPULATE_ZERO_SHADOW &&
			    IS_ALIGNED(address, PUD_SIZE) &&
			    end - address >= PUD_SIZE) {
				pud_populate(&init_mm, pu_dir,
					     kasan_early_shadow_pmd);
				address = (address + PUD_SIZE) & PUD_MASK;
				continue;
			}
			pm_dir = kasan_early_crst_alloc(_SEGMENT_ENTRY_EMPTY);
			pud_populate(&init_mm, pu_dir, pm_dir);
		}

		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir)) {
			if (IS_ALIGNED(address, PMD_SIZE) &&
			    end - address >= PMD_SIZE) {
				if (mode == POPULATE_ZERO_SHADOW) {
					pmd_populate(&init_mm, pm_dir, kasan_early_shadow_pte);
					address = (address + PMD_SIZE) & PMD_MASK;
					continue;
				} else if (has_edat && address) {
					void *page;

					if (mode == POPULATE_ONE2ONE) {
						page = (void *)address;
					} else {
						page = kasan_early_alloc_segment();
						memset(page, 0, _SEGMENT_SIZE);
					}
					pmd_val(*pm_dir) = __pa(page) | sgt_prot;
					address = (address + PMD_SIZE) & PMD_MASK;
					continue;
				}
			}
			pt_dir = kasan_early_pte_alloc();
			pmd_populate(&init_mm, pm_dir, pt_dir);
		} else if (pmd_large(*pm_dir)) {
			address = (address + PMD_SIZE) & PMD_MASK;
			continue;
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		if (pte_none(*pt_dir)) {
			void *page;

			switch (mode) {
			case POPULATE_ONE2ONE:
				page = (void *)address;
				pte_val(*pt_dir) = __pa(page) | pgt_prot;
				break;
			case POPULATE_MAP:
				page = kasan_early_alloc_pages(0);
				memset(page, 0, PAGE_SIZE);
				pte_val(*pt_dir) = __pa(page) | pgt_prot;
				break;
			case POPULATE_ZERO_SHADOW:
				page = kasan_early_shadow_page;
				pte_val(*pt_dir) = __pa(page) | pgt_prot_zero;
				break;
			}
		}
		address += PAGE_SIZE;
	}
}

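/*
 * Load the early page table as address space control element for the
 * primary (CR1), secondary (CR7) and home (CR13) address spaces, and
 * keep the lowcore copies in sync.
 */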
static void __init kasan_set_pgd(pgd_t *pgd, unsigned long asce_type)
{
	unsigned long asce_bits;

	asce_bits = asce_type | _ASCE_TABLE_LENGTH;
	S390_lowcore.kernel_asce = (__pa(pgd) & PAGE_MASK) | asce_bits;
	S390_lowcore.user_asce = S390_lowcore.kernel_asce;

	__ctl_load(S390_lowcore.kernel_asce, 1, 1);
	__ctl_load(S390_lowcore.kernel_asce, 7, 7);
	__ctl_load(S390_lowcore.kernel_asce, 13, 13);
}

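/* Build a PSW mask that enables DAT and selects the home address space */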
static void __init kasan_enable_dat(void)
{
	psw_t psw;

	psw.mask = __extract_psw();
	psw_bits(psw).dat = 1;
	psw_bits(psw).as = PSW_BITS_AS_HOME;
	__load_psw_mask(psw.mask);
}

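/*
 * Facility 8 is EDAT1 (1MB segment mappings), facility 130 provides the
 * no-execute support behind _PAGE_NOEXEC; the matching control register 0
 * bits enable them.
 */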
static void __init kasan_early_detect_facilities(void)
{
	if (test_facility(8)) {
		has_edat = true;
		__ctl_set_bit(0, 23);
	}
	if (!noexec_disabled && test_facility(130)) {
		has_nx = true;
		__ctl_set_bit(0, 20);
	}
}

void __init kasan_early_init(void)
{
	unsigned long untracked_mem_end;
	unsigned long shadow_alloc_size;
	unsigned long initrd_end;
	unsigned long asce_type;
	unsigned long memsize;
	unsigned long vmax;
	unsigned long pgt_prot = pgprot_val(PAGE_KERNEL_RO);
	pte_t pte_z;
	pmd_t pmd_z = __pmd(__pa(kasan_early_shadow_pte) | _SEGMENT_ENTRY);
	pud_t pud_z = __pud(__pa(kasan_early_shadow_pmd) | _REGION3_ENTRY);
	p4d_t p4d_z = __p4d(__pa(kasan_early_shadow_pud) | _REGION2_ENTRY);

	kasan_early_detect_facilities();
	if (!has_nx)
		pgt_prot &= ~_PAGE_NOEXEC;
	pte_z = __pte(__pa(kasan_early_shadow_page) | pgt_prot);

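	/*
	 * Determine the memory to cover with the 1:1 mapping: the detected
	 * memory size, clamped by mem=, by the old kernel's memory when
	 * running as a kdump kernel, and by KASAN_SHADOW_START so the
	 * identity mapping never overlaps the shadow area.
	 */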
	memsize = get_mem_detect_end();
	if (!memsize)
		kasan_early_panic("cannot detect physical memory size\n");
	/* respect mem= cmdline parameter */
	if (memory_end_set && memsize > memory_end)
		memsize = memory_end;
	if (IS_ENABLED(CONFIG_CRASH_DUMP) && OLDMEM_BASE)
		memsize = min(memsize, OLDMEM_SIZE);
	memsize = min(memsize, KASAN_SHADOW_START);

	if (IS_ENABLED(CONFIG_KASAN_S390_4_LEVEL_PAGING)) {
		/* 4 level paging */
		BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, P4D_SIZE));
		BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, P4D_SIZE));
		crst_table_init((unsigned long *)early_pg_dir,
				_REGION2_ENTRY_EMPTY);
		untracked_mem_end = vmax = _REGION1_SIZE;
		asce_type = _ASCE_TYPE_REGION2;
	} else {
		/* 3 level paging */
		BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, PUD_SIZE));
		BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, PUD_SIZE));
		crst_table_init((unsigned long *)early_pg_dir,
				_REGION3_ENTRY_EMPTY);
		untracked_mem_end = vmax = _REGION2_SIZE;
		asce_type = _ASCE_TYPE_REGION3;
	}

	/* init kasan zero shadow */
	crst_table_init((unsigned long *)kasan_early_shadow_p4d,
			p4d_val(p4d_z));
	crst_table_init((unsigned long *)kasan_early_shadow_pud,
			pud_val(pud_z));
	crst_table_init((unsigned long *)kasan_early_shadow_pmd,
			pmd_val(pmd_z));
	memset64((u64 *)kasan_early_shadow_pte, pte_val(pte_z), PTRS_PER_PTE);

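	/*
	 * The shadow covers memory at a 1/8 ratio (KASAN_SHADOW_SCALE_SHIFT).
	 * Early allocations must stay above the kernel image and the initrd,
	 * hence pgalloc_low.
	 */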
	shadow_alloc_size = memsize >> KASAN_SHADOW_SCALE_SHIFT;
	pgalloc_low = round_up((unsigned long)_end, _SEGMENT_SIZE);
	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD)) {
		initrd_end =
		    round_up(INITRD_START + INITRD_SIZE, _SEGMENT_SIZE);
		pgalloc_low = max(pgalloc_low, initrd_end);
	}

	if (pgalloc_low + shadow_alloc_size > memsize)
		kasan_early_panic("out of memory during initialisation\n");

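	/*
	 * With EDAT the shadow is mapped with 1MB segments carved from the
	 * top of memory down to segment_low, and page tables are allocated
	 * below that. Without EDAT everything comes from the top of memory
	 * in 4KB pages.
	 */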
	if (has_edat) {
		segment_pos = round_down(memsize, _SEGMENT_SIZE);
		segment_low = segment_pos - shadow_alloc_size;
		pgalloc_pos = segment_low;
	} else {
		pgalloc_pos = memsize;
	}
	init_mm.pgd = early_pg_dir;
	/*
	 * Current memory layout:
	 * +- 0 -------------+    +- shadow start -+
	 * | 1:1 ram mapping |   /| 1/8 ram        |
	 * +- end of ram ----+  / +----------------+
	 * | ... gap ...     |/   |      kasan     |
	 * +- shadow start --+    |      zero      |
	 * | 1/8 addr space  |    |      page      |
	 * +- shadow end    -+    |     mapping    |
	 * | ... gap ...     |\   |   (untracked)  |
	 * +- modules vaddr -+  \ +----------------+
	 * | 2Gb             |   \|    unmapped    | allocated per module
	 * +-----------------+    +- shadow end ---+
	 */
	/* populate kasan shadow (for identity mapping and zero page mapping) */
	kasan_early_vmemmap_populate(__sha(0), __sha(memsize), POPULATE_MAP);
	if (IS_ENABLED(CONFIG_MODULES))
		untracked_mem_end = vmax - MODULES_LEN;
	kasan_early_vmemmap_populate(__sha(max_physmem_end),
				     __sha(untracked_mem_end),
				     POPULATE_ZERO_SHADOW);
	/* memory allocated for identity mapping structs will be freed later */
	pgalloc_freeable = pgalloc_pos;
	/* populate identity mapping */
	kasan_early_vmemmap_populate(0, memsize, POPULATE_ONE2ONE);
	kasan_set_pgd(early_pg_dir, asce_type);
	kasan_enable_dat();
	/* enable kasan */
	init_task.kasan_depth = 0;
	memblock_reserve(pgalloc_pos, memsize - pgalloc_pos);
	sclp_early_printk("KernelAddressSanitizer initialized\n");
}

void __init kasan_copy_shadow(pgd_t *pg_dir)
{
	/*
	 * At this point we are still running on the early page tables set up
	 * in early_pg_dir, while swapper_pg_dir has just been initialized
	 * with the identity mapping. Carry over the shadow memory region from
	 * early_pg_dir to swapper_pg_dir.
	 */

	pgd_t *pg_dir_src;
	pgd_t *pg_dir_dst;
	p4d_t *p4_dir_src;
	p4d_t *p4_dir_dst;
	pud_t *pu_dir_src;
	pud_t *pu_dir_dst;

	pg_dir_src = pgd_offset_raw(early_pg_dir, KASAN_SHADOW_START);
	pg_dir_dst = pgd_offset_raw(pg_dir, KASAN_SHADOW_START);
	p4_dir_src = p4d_offset(pg_dir_src, KASAN_SHADOW_START);
	p4_dir_dst = p4d_offset(pg_dir_dst, KASAN_SHADOW_START);
	if (!p4d_folded(*p4_dir_src)) {
		/* 4 level paging */
		memcpy(p4_dir_dst, p4_dir_src,
		       (KASAN_SHADOW_SIZE >> P4D_SHIFT) * sizeof(p4d_t));
		return;
	}
	/* 3 level paging */
	pu_dir_src = pud_offset(p4_dir_src, KASAN_SHADOW_START);
	pu_dir_dst = pud_offset(p4_dir_dst, KASAN_SHADOW_START);
	memcpy(pu_dir_dst, pu_dir_src,
	       (KASAN_SHADOW_SIZE >> PUD_SHIFT) * sizeof(pud_t));
}

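/*
 * Hand the page tables that were only needed for the early identity
 * mapping back to memblock once the final page tables are in place.
 */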
void __init kasan_free_early_identity(void)
{
	memblock_free(pgalloc_pos, pgalloc_freeable - pgalloc_pos);
}
381}