// SPDX-License-Identifier: GPL-2.0
/*
 *  S390 version
 *    Copyright IBM Corp. 1999, 2012
 *    Author(s): Hartmut Penner (hp@de.ibm.com),
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "arch/i386/kernel/setup.c"
 *    Copyright (C) 1995, Linus Torvalds
 */

/*
 * This file handles the architecture-dependent parts of initialization
 */

#define KMSG_COMPONENT "setup"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/errno.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/random.h>
#include <linux/user.h>
#include <linux/tty.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/root_dev.h>
#include <linux/console.h>
#include <linux/kernel_stat.h>
#include <linux/dma-contiguous.h>
#include <linux/device.h>
#include <linux/notifier.h>
#include <linux/pfn.h>
#include <linux/ctype.h>
#include <linux/reboot.h>
#include <linux/topology.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/memory.h>
#include <linux/compat.h>
#include <linux/start_kernel.h>

#include <asm/boot_data.h>
#include <asm/ipl.h>
#include <asm/facility.h>
#include <asm/smp.h>
#include <asm/mmu_context.h>
#include <asm/cpcmd.h>
#include <asm/lowcore.h>
#include <asm/nmi.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/sections.h>
#include <asm/ebcdic.h>
#include <asm/diag.h>
#include <asm/os_info.h>
#include <asm/sclp.h>
#include <asm/stacktrace.h>
#include <asm/sysinfo.h>
#include <asm/numa.h>
#include <asm/alternative.h>
#include <asm/nospec-branch.h>
#include <asm/mem_detect.h>
#include <asm/uv.h>
#include <asm/asm-offsets.h>
#include "entry.h"

/*
 * Machine setup..
 */
unsigned int console_mode = 0;
EXPORT_SYMBOL(console_mode);

unsigned int console_devno = -1;
EXPORT_SYMBOL(console_devno);

unsigned int console_irq = -1;
EXPORT_SYMBOL(console_irq);

unsigned long elf_hwcap __read_mostly = 0;
char elf_platform[ELF_PLATFORM_SIZE];

unsigned long int_hwcap = 0;

#ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST
int __bootdata_preserved(prot_virt_guest);
#endif

int __bootdata(noexec_disabled);
int __bootdata(memory_end_set);
unsigned long __bootdata(memory_end);
unsigned long __bootdata(vmalloc_size);
unsigned long __bootdata(max_physmem_end);
struct mem_detect_info __bootdata(mem_detect);

struct exception_table_entry *__bootdata_preserved(__start_dma_ex_table);
struct exception_table_entry *__bootdata_preserved(__stop_dma_ex_table);
unsigned long __bootdata_preserved(__swsusp_reset_dma);
unsigned long __bootdata_preserved(__stext_dma);
unsigned long __bootdata_preserved(__etext_dma);
unsigned long __bootdata_preserved(__sdma);
unsigned long __bootdata_preserved(__edma);
unsigned long __bootdata_preserved(__kaslr_offset);

unsigned long VMALLOC_START;
EXPORT_SYMBOL(VMALLOC_START);

unsigned long VMALLOC_END;
EXPORT_SYMBOL(VMALLOC_END);

struct page *vmemmap;
EXPORT_SYMBOL(vmemmap);

unsigned long MODULES_VADDR;
unsigned long MODULES_END;

/* An array with a pointer to the lowcore of every CPU. */
struct lowcore *lowcore_ptr[NR_CPUS];
EXPORT_SYMBOL(lowcore_ptr);

/*
 * This is set up by the setup routine at boot time.
 * For S390 we need to find out what we have to set up
 * using address 0x10400 ...
 */

#include <asm/setup.h>

/*
 * condev= and conmode= setup parameters.
 */

static int __init condev_setup(char *str)
{
	int vdev;

	vdev = simple_strtoul(str, &str, 0);
	if (vdev >= 0 && vdev < 65536) {
		console_devno = vdev;
		console_irq = -1;
	}
	return 1;
}

__setup("condev=", condev_setup);

static void __init set_preferred_console(void)
{
	if (CONSOLE_IS_3215 || CONSOLE_IS_SCLP)
		add_preferred_console("ttyS", 0, NULL);
	else if (CONSOLE_IS_3270)
		add_preferred_console("tty3270", 0, NULL);
	else if (CONSOLE_IS_VT220)
		add_preferred_console("ttysclp", 0, NULL);
	else if (CONSOLE_IS_HVC)
		add_preferred_console("hvc", 0, NULL);
}

static int __init conmode_setup(char *str)
{
#if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
	if (!strcmp(str, "hwc") || !strcmp(str, "sclp"))
		SET_CONSOLE_SCLP;
#endif
#if defined(CONFIG_TN3215_CONSOLE)
	if (!strcmp(str, "3215"))
		SET_CONSOLE_3215;
#endif
#if defined(CONFIG_TN3270_CONSOLE)
	if (!strcmp(str, "3270"))
		SET_CONSOLE_3270;
#endif
	set_preferred_console();
	return 1;
}

__setup("conmode=", conmode_setup);

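/*
 * Pick a default console, depending on whether Linux runs under z/VM,
 * under KVM or in any other environment (e.g. an LPAR).
 */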
190static void __init conmode_default(void)
191{
192 char query_buffer[1024];
193 char *ptr;
194
195 if (MACHINE_IS_VM) {
196 cpcmd("QUERY CONSOLE", query_buffer, 1024, NULL);
197 console_devno = simple_strtoul(query_buffer + 5, NULL, 16);
198 ptr = strstr(query_buffer, "SUBCHANNEL =");
199 console_irq = simple_strtoul(ptr + 13, NULL, 16);
200 cpcmd("QUERY TERM", query_buffer, 1024, NULL);
201 ptr = strstr(query_buffer, "CONMODE");
202 /*
203 * Set the conmode to 3215 so that the device recognition
204 * will set the cu_type of the console to 3215. If the
205 * conmode is 3270 and we don't set it back then both
206 * 3215 and the 3270 driver will try to access the console
207 * device (3215 as console and 3270 as normal tty).
208 */
209 cpcmd("TERM CONMODE 3215", NULL, 0, NULL);
210 if (ptr == NULL) {
211#if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
212 SET_CONSOLE_SCLP;
213#endif
214 return;
215 }
David Brazdil0f672f62019-12-10 10:32:29 +0000216 if (str_has_prefix(ptr + 8, "3270")) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000217#if defined(CONFIG_TN3270_CONSOLE)
218 SET_CONSOLE_3270;
219#elif defined(CONFIG_TN3215_CONSOLE)
220 SET_CONSOLE_3215;
221#elif defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
222 SET_CONSOLE_SCLP;
223#endif
David Brazdil0f672f62019-12-10 10:32:29 +0000224 } else if (str_has_prefix(ptr + 8, "3215")) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +0000225#if defined(CONFIG_TN3215_CONSOLE)
226 SET_CONSOLE_3215;
227#elif defined(CONFIG_TN3270_CONSOLE)
228 SET_CONSOLE_3270;
229#elif defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
230 SET_CONSOLE_SCLP;
231#endif
232 }
233 } else if (MACHINE_IS_KVM) {
234 if (sclp.has_vt220 && IS_ENABLED(CONFIG_SCLP_VT220_CONSOLE))
235 SET_CONSOLE_VT220;
236 else if (sclp.has_linemode && IS_ENABLED(CONFIG_SCLP_CONSOLE))
237 SET_CONSOLE_SCLP;
238 else
239 SET_CONSOLE_HVC;
240 } else {
241#if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
242 SET_CONSOLE_SCLP;
243#endif
244 }
245 if (IS_ENABLED(CONFIG_VT) && IS_ENABLED(CONFIG_DUMMY_CONSOLE))
246 conswitchp = &dummy_con;
247}
248
249#ifdef CONFIG_CRASH_DUMP
250static void __init setup_zfcpdump(void)
251{
252 if (ipl_info.type != IPL_TYPE_FCP_DUMP)
253 return;
254 if (OLDMEM_BASE)
255 return;
256 strcat(boot_command_line, " cio_ignore=all,!ipldev,!condev");
257 console_loglevel = 2;
258}
259#else
260static inline void setup_zfcpdump(void) {}
261#endif /* CONFIG_CRASH_DUMP */
262
/*
 * Reboot, halt and power_off stubs. They just call _machine_restart,
 * _machine_halt or _machine_power_off.
 */

void machine_restart(char *command)
{
	if ((!in_interrupt() && !in_atomic()) || oops_in_progress)
		/*
		 * Only unblank the console if we are called in enabled
		 * context or a bust_spinlocks cleared the way for us.
		 */
		console_unblank();
	_machine_restart(command);
}

void machine_halt(void)
{
	if (!in_interrupt() || oops_in_progress)
		/*
		 * Only unblank the console if we are called in enabled
		 * context or a bust_spinlocks cleared the way for us.
		 */
		console_unblank();
	_machine_halt();
}

void machine_power_off(void)
{
	if (!in_interrupt() || oops_in_progress)
		/*
		 * Only unblank the console if we are called in enabled
		 * context or a bust_spinlocks cleared the way for us.
		 */
		console_unblank();
	_machine_power_off();
}

/*
 * Dummy power off function.
 */
void (*pm_power_off)(void) = machine_power_off;
EXPORT_SYMBOL_GPL(pm_power_off);

void *restart_stack __section(.data);

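/*
 * Allocate/free a kernel stack of THREAD_SIZE bytes. With
 * CONFIG_VMAP_STACK the stack lives in the vmalloc area, so an overflow
 * runs into a guard area and faults instead of silently corrupting the
 * neighbouring allocation; otherwise it comes from the linear mapping.
 */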
unsigned long stack_alloc(void)
{
#ifdef CONFIG_VMAP_STACK
	return (unsigned long)
		__vmalloc_node_range(THREAD_SIZE, THREAD_SIZE,
				     VMALLOC_START, VMALLOC_END,
				     THREADINFO_GFP,
				     PAGE_KERNEL, 0, NUMA_NO_NODE,
				     __builtin_return_address(0));
#else
	return __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
#endif
}

void stack_free(unsigned long stack)
{
#ifdef CONFIG_VMAP_STACK
	vfree((void *) stack);
#else
	free_pages(stack, THREAD_SIZE_ORDER);
#endif
}

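/*
 * Allocate the async (interrupt) stack for the boot CPU. This runs too
 * early for stack_alloc(), presumably because the vmalloc area is not
 * usable yet, so the stack is taken from the linear mapping and swapped
 * later in async_stack_realloc().
 */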
int __init arch_early_irq_init(void)
{
	unsigned long stack;

	stack = __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
	if (!stack)
		panic("Couldn't allocate async stack");
	S390_lowcore.async_stack = stack + STACK_INIT_OFFSET;
	return 0;
}

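/*
 * Replace the early async stack with one from stack_alloc() as soon as
 * the vmalloc area is available, so that CONFIG_VMAP_STACK covers the
 * async stack as well.
 */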
static int __init async_stack_realloc(void)
{
	unsigned long old, new;

	old = S390_lowcore.async_stack - STACK_INIT_OFFSET;
	new = stack_alloc();
	if (!new)
		panic("Couldn't allocate async stack");
	S390_lowcore.async_stack = new + STACK_INIT_OFFSET;
	free_pages(old, THREAD_SIZE_ORDER);
	return 0;
}
early_initcall(async_stack_realloc);

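/*
 * Switch the init task to its final kernel stack: allocate it, point
 * task_struct and lowcore at it and continue with rest_init() on the
 * new stack. This function does not return.
 */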
void __init arch_call_rest_init(void)
{
	unsigned long stack;

	stack = stack_alloc();
	if (!stack)
		panic("Couldn't allocate kernel stack");
	current->stack = (void *) stack;
#ifdef CONFIG_VMAP_STACK
	current->stack_vm_area = (void *) stack;
#endif
	set_task_stack_end_magic(current);
	stack += STACK_INIT_OFFSET;
	S390_lowcore.kernel_stack = stack;
	CALL_ON_STACK_NORETURN(rest_init, stack);
}

static void __init setup_lowcore_dat_off(void)
{
	struct lowcore *lc;

	/*
	 * Setup lowcore for boot cpu
	 */
	BUILD_BUG_ON(sizeof(struct lowcore) != LC_PAGES * PAGE_SIZE);
	lc = memblock_alloc_low(sizeof(*lc), sizeof(*lc));
	if (!lc)
		panic("%s: Failed to allocate %zu bytes align=%zx\n",
		      __func__, sizeof(*lc), sizeof(*lc));

	lc->restart_psw.mask = PSW_KERNEL_BITS;
	lc->restart_psw.addr = (unsigned long) restart_int_handler;
	lc->external_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_MCHECK;
	lc->external_new_psw.addr = (unsigned long) ext_int_handler;
	lc->svc_new_psw.mask = PSW_KERNEL_BITS |
		PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
	lc->svc_new_psw.addr = (unsigned long) system_call;
	lc->program_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_MCHECK;
	lc->program_new_psw.addr = (unsigned long) pgm_check_handler;
	lc->mcck_new_psw.mask = PSW_KERNEL_BITS;
	lc->mcck_new_psw.addr = (unsigned long) mcck_int_handler;
	lc->io_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_MCHECK;
	lc->io_new_psw.addr = (unsigned long) io_int_handler;
	lc->clock_comparator = clock_comparator_max;
	lc->nodat_stack = ((unsigned long) &init_thread_union)
		+ THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
	lc->current_task = (unsigned long)&init_task;
	lc->lpp = LPP_MAGIC;
	lc->machine_flags = S390_lowcore.machine_flags;
	lc->preempt_count = S390_lowcore.preempt_count;
	lc->stfl_fac_list = S390_lowcore.stfl_fac_list;
	memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
	       sizeof(lc->stfle_fac_list));
	memcpy(lc->alt_stfle_fac_list, S390_lowcore.alt_stfle_fac_list,
	       sizeof(lc->alt_stfle_fac_list));
	nmi_alloc_boot_cpu(lc);
	vdso_alloc_boot_cpu(lc);
	lc->sync_enter_timer = S390_lowcore.sync_enter_timer;
	lc->async_enter_timer = S390_lowcore.async_enter_timer;
	lc->exit_timer = S390_lowcore.exit_timer;
	lc->user_timer = S390_lowcore.user_timer;
	lc->system_timer = S390_lowcore.system_timer;
	lc->steal_timer = S390_lowcore.steal_timer;
	lc->last_update_timer = S390_lowcore.last_update_timer;
	lc->last_update_clock = S390_lowcore.last_update_clock;

	/*
	 * Allocate the global restart stack which is the same for
	 * all CPUs in case *one* of them does a PSW restart.
	 */
	restart_stack = memblock_alloc(THREAD_SIZE, THREAD_SIZE);
	if (!restart_stack)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, THREAD_SIZE, THREAD_SIZE);
	restart_stack += STACK_INIT_OFFSET;

	/*
	 * Set up PSW restart to call ipl.c:do_restart(). Copy the relevant
	 * restart data to the absolute zero lowcore. This is necessary if
	 * PSW restart is done on an offline CPU that has lowcore zero.
	 */
	lc->restart_stack = (unsigned long) restart_stack;
	lc->restart_fn = (unsigned long) do_restart;
	lc->restart_data = 0;
	lc->restart_source = -1UL;

	/* Setup absolute zero lowcore */
	mem_assign_absolute(S390_lowcore.restart_stack, lc->restart_stack);
	mem_assign_absolute(S390_lowcore.restart_fn, lc->restart_fn);
	mem_assign_absolute(S390_lowcore.restart_data, lc->restart_data);
	mem_assign_absolute(S390_lowcore.restart_source, lc->restart_source);
	mem_assign_absolute(S390_lowcore.restart_psw, lc->restart_psw);

	lc->spinlock_lockval = arch_spin_lockval(0);
	lc->spinlock_index = 0;
	arch_spin_lock_setup(0);
	lc->br_r1_trampoline = 0x07f1;	/* br %r1 */
	lc->return_lpswe = gen_lpswe(__LC_RETURN_PSW);
	lc->return_mcck_lpswe = gen_lpswe(__LC_RETURN_MCCK_PSW);

	set_prefix((u32)(unsigned long) lc);
	lowcore_ptr[0] = lc;
}

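/*
 * Called after paging_init() has created the kernel page tables; from
 * now on the external/svc/program/io new PSWs run with DAT enabled.
 */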
static void __init setup_lowcore_dat_on(void)
{
	__ctl_clear_bit(0, 28);
	S390_lowcore.external_new_psw.mask |= PSW_MASK_DAT;
	S390_lowcore.svc_new_psw.mask |= PSW_MASK_DAT;
	S390_lowcore.program_new_psw.mask |= PSW_MASK_DAT;
	S390_lowcore.io_new_psw.mask |= PSW_MASK_DAT;
	__ctl_set_bit(0, 28);
}

static struct resource code_resource = {
	.name = "Kernel code",
	.flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
};

static struct resource data_resource = {
	.name = "Kernel data",
	.flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
};

static struct resource bss_resource = {
	.name = "Kernel bss",
	.flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
};

static struct resource __initdata *standard_resources[] = {
	&code_resource,
	&data_resource,
	&bss_resource,
};

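/*
 * Register a "System RAM" resource for every memblock region and hang
 * the kernel code/data/bss resources underneath, splitting a standard
 * resource when it spans more than one memory chunk.
 */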
static void __init setup_resources(void)
{
	struct resource *res, *std_res, *sub_res;
	struct memblock_region *reg;
	int j;

	code_resource.start = (unsigned long) _text;
	code_resource.end = (unsigned long) _etext - 1;
	data_resource.start = (unsigned long) _etext;
	data_resource.end = (unsigned long) _edata - 1;
	bss_resource.start = (unsigned long) __bss_start;
	bss_resource.end = (unsigned long) __bss_stop - 1;

	for_each_memblock(memory, reg) {
		res = memblock_alloc(sizeof(*res), 8);
		if (!res)
			panic("%s: Failed to allocate %zu bytes align=0x%x\n",
			      __func__, sizeof(*res), 8);
		res->flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM;

		res->name = "System RAM";
		res->start = reg->base;
		res->end = reg->base + reg->size - 1;
		request_resource(&iomem_resource, res);

		for (j = 0; j < ARRAY_SIZE(standard_resources); j++) {
			std_res = standard_resources[j];
			if (std_res->start < res->start ||
			    std_res->start > res->end)
				continue;
			if (std_res->end > res->end) {
				sub_res = memblock_alloc(sizeof(*sub_res), 8);
				if (!sub_res)
					panic("%s: Failed to allocate %zu bytes align=0x%x\n",
					      __func__, sizeof(*sub_res), 8);
				*sub_res = *std_res;
				sub_res->end = res->end;
				std_res->start = res->end + 1;
				request_resource(res, sub_res);
			} else {
				request_resource(res, std_res);
			}
		}
	}
#ifdef CONFIG_CRASH_DUMP
	/*
	 * Re-add removed crash kernel memory as reserved memory. This makes
	 * sure it will be mapped with the identity mapping and struct pages
	 * will be created, so it can be resized later on.
	 * However add it later since the crash kernel resource should not be
	 * part of the System RAM resource.
	 */
	if (crashk_res.end) {
		memblock_add_node(crashk_res.start, resource_size(&crashk_res), 0);
		memblock_reserve(crashk_res.start, resource_size(&crashk_res));
		insert_resource(&iomem_resource, &crashk_res);
	}
#endif
}

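/*
 * Choose between a 3-level and a 4-level kernel address space, put the
 * module and vmalloc areas at its top and clamp memory_end so that the
 * 1:1 mapping, the vmemmap array and (with KASAN) the shadow region all
 * fit below the chosen limit.
 */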
static void __init setup_memory_end(void)
{
	unsigned long vmax, tmp;

	/* Choose kernel address space layout: 3 or 4 levels. */
	if (IS_ENABLED(CONFIG_KASAN)) {
		vmax = IS_ENABLED(CONFIG_KASAN_S390_4_LEVEL_PAGING)
			? _REGION1_SIZE
			: _REGION2_SIZE;
	} else {
		tmp = (memory_end ?: max_physmem_end) / PAGE_SIZE;
		tmp = tmp * (sizeof(struct page) + PAGE_SIZE);
		if (tmp + vmalloc_size + MODULES_LEN <= _REGION2_SIZE)
			vmax = _REGION2_SIZE; /* 3-level kernel page table */
		else
			vmax = _REGION1_SIZE; /* 4-level kernel page table */
	}

	/* module area is at the end of the kernel address space. */
	MODULES_END = vmax;
	MODULES_VADDR = MODULES_END - MODULES_LEN;
	VMALLOC_END = MODULES_VADDR;
	VMALLOC_START = VMALLOC_END - vmalloc_size;

	/* Split remaining virtual space between 1:1 mapping & vmemmap array */
	tmp = VMALLOC_START / (PAGE_SIZE + sizeof(struct page));
	/* vmemmap contains a multiple of PAGES_PER_SECTION struct pages */
	tmp = SECTION_ALIGN_UP(tmp);
	tmp = VMALLOC_START - tmp * sizeof(struct page);
	tmp &= ~((vmax >> 11) - 1);	/* align to page table level */
	tmp = min(tmp, 1UL << MAX_PHYSMEM_BITS);
	vmemmap = (struct page *) tmp;

	/* Take care that memory_end is set and <= vmemmap */
	memory_end = min(memory_end ?: max_physmem_end, (unsigned long)vmemmap);
#ifdef CONFIG_KASAN
	/* fit in kasan shadow memory region between 1:1 and vmemmap */
	memory_end = min(memory_end, KASAN_SHADOW_START);
	vmemmap = max(vmemmap, (struct page *)KASAN_SHADOW_END);
#endif
	max_pfn = max_low_pfn = PFN_DOWN(memory_end);
	memblock_remove(memory_end, ULONG_MAX);

	pr_notice("The maximum memory size is %luMB\n", memory_end >> 20);
}

#ifdef CONFIG_CRASH_DUMP

/*
 * When kdump is enabled, we have to ensure that no memory from
 * the area [0 - crashkernel memory size] and
 * [crashk_res.start - crashk_res.end] is set offline.
 */
static int kdump_mem_notifier(struct notifier_block *nb,
			      unsigned long action, void *data)
{
	struct memory_notify *arg = data;

	if (action != MEM_GOING_OFFLINE)
		return NOTIFY_OK;
	if (arg->start_pfn < PFN_DOWN(resource_size(&crashk_res)))
		return NOTIFY_BAD;
	if (arg->start_pfn > PFN_DOWN(crashk_res.end))
		return NOTIFY_OK;
	if (arg->start_pfn + arg->nr_pages - 1 < PFN_DOWN(crashk_res.start))
		return NOTIFY_OK;
	return NOTIFY_BAD;
}

static struct notifier_block kdump_mem_nb = {
	.notifier_call = kdump_mem_notifier,
};

#endif

/*
 * Make sure that the area behind memory_end is protected
 */
static void __init reserve_memory_end(void)
{
	if (memory_end_set)
		memblock_reserve(memory_end, ULONG_MAX);
}

/*
 * Make sure that oldmem, where the dump is stored, is protected
 */
static void __init reserve_oldmem(void)
{
#ifdef CONFIG_CRASH_DUMP
	if (OLDMEM_BASE)
		/* Forget all memory above the running kdump system */
		memblock_reserve(OLDMEM_SIZE, (phys_addr_t)ULONG_MAX);
#endif
}

/*
 * Remove the memory above the running kdump system from the memblock
 * memory list so that it is not used as regular memory
 */
static void __init remove_oldmem(void)
{
#ifdef CONFIG_CRASH_DUMP
	if (OLDMEM_BASE)
		/* Forget all memory above the running kdump system */
		memblock_remove(OLDMEM_SIZE, (phys_addr_t)ULONG_MAX);
#endif
}

/*
 * Reserve memory for kdump kernel to be loaded with kexec
 */
static void __init reserve_crashkernel(void)
{
#ifdef CONFIG_CRASH_DUMP
	unsigned long long crash_base, crash_size;
	phys_addr_t low, high;
	int rc;

	rc = parse_crashkernel(boot_command_line, memory_end, &crash_size,
			       &crash_base);

	crash_base = ALIGN(crash_base, KEXEC_CRASH_MEM_ALIGN);
	crash_size = ALIGN(crash_size, KEXEC_CRASH_MEM_ALIGN);
	if (rc || crash_size == 0)
		return;

	if (memblock.memory.regions[0].size < crash_size) {
		pr_info("crashkernel reservation failed: %s\n",
			"first memory chunk must be at least crashkernel size");
		return;
	}

	low = crash_base ?: OLDMEM_BASE;
	high = low + crash_size;
	if (low >= OLDMEM_BASE && high <= OLDMEM_BASE + OLDMEM_SIZE) {
		/* The crashkernel fits into OLDMEM, reuse OLDMEM */
		crash_base = low;
	} else {
		/* Find suitable area in free memory */
		low = max_t(unsigned long, crash_size, sclp.hsa_size);
		high = crash_base ? crash_base + crash_size : ULONG_MAX;

		if (crash_base && crash_base < low) {
			pr_info("crashkernel reservation failed: %s\n",
				"crash_base too low");
			return;
		}
		low = crash_base ?: low;
		crash_base = memblock_find_in_range(low, high, crash_size,
						    KEXEC_CRASH_MEM_ALIGN);
	}

	if (!crash_base) {
		pr_info("crashkernel reservation failed: %s\n",
			"no suitable area found");
		return;
	}

	if (register_memory_notifier(&kdump_mem_nb))
		return;

	if (!OLDMEM_BASE && MACHINE_IS_VM)
		diag10_range(PFN_DOWN(crash_base), PFN_DOWN(crash_size));
	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
	memblock_remove(crash_base, crash_size);
	pr_info("Reserving %lluMB of memory at %lluMB "
		"for crashkernel (System RAM: %luMB)\n",
		crash_size >> 20, crash_base >> 20,
		(unsigned long)memblock.memory.total_size >> 20);
	os_info_crashkernel_add(crash_base, crash_size);
#endif
}

/*
 * Reserve the initrd from being used by memblock
 */
static void __init reserve_initrd(void)
{
#ifdef CONFIG_BLK_DEV_INITRD
	if (!INITRD_START || !INITRD_SIZE)
		return;
	initrd_start = INITRD_START;
	initrd_end = initrd_start + INITRD_SIZE;
	memblock_reserve(INITRD_START, INITRD_SIZE);
#endif
}

/*
 * Reserve the memory area used to pass the certificate lists
 */
static void __init reserve_certificate_list(void)
{
	if (ipl_cert_list_addr)
		memblock_reserve(ipl_cert_list_addr, ipl_cert_list_size);
}

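/*
 * Keep the area in which the early boot code stored its extended memory
 * detection information (if any) from being overwritten before
 * memblock_add_mem_detect_info() has consumed it; free_mem_detect_info()
 * below gives it back afterwards.
 */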
static void __init reserve_mem_detect_info(void)
{
	unsigned long start, size;

	get_mem_detect_reserved(&start, &size);
	if (size)
		memblock_reserve(start, size);
}

static void __init free_mem_detect_info(void)
{
	unsigned long start, size;

	get_mem_detect_reserved(&start, &size);
	if (size)
		memblock_free(start, size);
}

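/*
 * Register a detected memory range in both memblock.memory (the usable
 * memory list) and memblock.physmem (the s390-specific list of all
 * physically installed memory).
 */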
static void __init memblock_physmem_add(phys_addr_t start, phys_addr_t size)
{
	memblock_dbg("memblock_physmem_add: [%#016llx-%#016llx]\n",
		     start, start + size - 1);
	memblock_add_range(&memblock.memory, start, size, 0, 0);
	memblock_add_range(&memblock.physmem, start, size, 0, 0);
}

static const char * __init get_mem_info_source(void)
{
	switch (mem_detect.info_source) {
	case MEM_DETECT_SCLP_STOR_INFO:
		return "sclp storage info";
	case MEM_DETECT_DIAG260:
		return "diag260";
	case MEM_DETECT_SCLP_READ_INFO:
		return "sclp read info";
	case MEM_DETECT_BIN_SEARCH:
		return "binary search";
	}
	return "none";
}

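/*
 * Feed all detected memory ranges into memblock. Bottom-up allocation
 * mode is enabled temporarily so that early memblock allocations end up
 * close to the kernel image.
 */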
static void __init memblock_add_mem_detect_info(void)
{
	unsigned long start, end;
	int i;

	memblock_dbg("physmem info source: %s (%hhd)\n",
		     get_mem_info_source(), mem_detect.info_source);
	/* keep memblock lists close to the kernel */
	memblock_set_bottom_up(true);
	for_each_mem_detect_block(i, &start, &end)
		memblock_physmem_add(start, end - start);
	memblock_set_bottom_up(false);
	memblock_dump_all();
}

/*
 * Check for initrd being in usable memory
 */
static void __init check_initrd(void)
{
#ifdef CONFIG_BLK_DEV_INITRD
	if (INITRD_START && INITRD_SIZE &&
	    !memblock_is_region_memory(INITRD_START, INITRD_SIZE)) {
		pr_err("The initial RAM disk does not fit into the memory\n");
		memblock_free(INITRD_START, INITRD_SIZE);
		initrd_start = initrd_end = 0;
	}
#endif
}

/*
 * Reserve memory used for lowcore/command line/kernel image.
 */
static void __init reserve_kernel(void)
{
	unsigned long start_pfn = PFN_UP(__pa(_end));

	memblock_reserve(0, HEAD_END);
	memblock_reserve((unsigned long)_stext, PFN_PHYS(start_pfn)
			 - (unsigned long)_stext);
	memblock_reserve(__sdma, __edma - __sdma);
}

static void __init setup_memory(void)
{
	struct memblock_region *reg;

	/*
	 * Init storage key for present memory
	 */
	for_each_memblock(memory, reg) {
		storage_key_init_range(reg->base, reg->base + reg->size);
	}
	psw_set_key(PAGE_DEFAULT_KEY);

	/* Only cosmetics */
	memblock_enforce_memory_limit(memblock_end_of_DRAM());
}

/*
 * Setup hardware capabilities.
 */
static int __init setup_hwcaps(void)
{
	static const int stfl_bits[6] = { 0, 2, 7, 17, 19, 21 };
	struct cpuid cpu_id;
	int i;

	/*
	 * The store facility list bits, as found in the principles
	 * of operation, are numbered with bit 1UL<<31 as number 0 to
	 * bit 1UL<<0 as number 31.
	 *  Bit 0: instructions named N3, "backported" to esa-mode
	 *  Bit 2: z/Architecture mode is active
	 *  Bit 7: the store-facility-list-extended facility is installed
	 *  Bit 17: the message-security assist is installed
	 *  Bit 19: the long-displacement facility is installed
	 *  Bit 21: the extended-immediate facility is installed
	 *  Bit 22: extended-translation facility 3 is installed
	 *  Bit 30: extended-translation facility 3 enhancement facility
	 * These get translated to:
	 * HWCAP_S390_ESAN3 bit 0, HWCAP_S390_ZARCH bit 1,
	 * HWCAP_S390_STFLE bit 2, HWCAP_S390_MSA bit 3,
	 * HWCAP_S390_LDISP bit 4, HWCAP_S390_EIMM bit 5 and
	 * HWCAP_S390_ETF3EH bit 8 (22 && 30).
	 */
	for (i = 0; i < 6; i++)
		if (test_facility(stfl_bits[i]))
			elf_hwcap |= 1UL << i;

	if (test_facility(22) && test_facility(30))
		elf_hwcap |= HWCAP_S390_ETF3EH;

	/*
	 * Check for additional facilities with store-facility-list-extended.
	 * stfle stores doublewords (8 byte) with bit 1ULL<<63 as bit 0
	 * and 1ULL<<0 as bit 63. Bits 0-31 contain the same information
	 * as stored by stfl, bits 32-xxx contain additional facilities.
	 * How many facility words are stored depends on the number of
	 * doublewords passed to the instruction. The additional facilities
	 * are:
	 * Bit 42: decimal floating point facility is installed
	 * Bit 44: perform floating point operation facility is installed
	 * translated to:
	 * HWCAP_S390_DFP bit 6 (42 && 44).
	 */
	if ((elf_hwcap & (1UL << 2)) && test_facility(42) && test_facility(44))
		elf_hwcap |= HWCAP_S390_DFP;

	/*
	 * Huge page support HWCAP_S390_HPAGE is bit 7.
	 */
	if (MACHINE_HAS_EDAT1)
		elf_hwcap |= HWCAP_S390_HPAGE;

	/*
	 * 64-bit register support for 31-bit processes
	 * HWCAP_S390_HIGH_GPRS is bit 9.
	 */
	elf_hwcap |= HWCAP_S390_HIGH_GPRS;

	/*
	 * Transactional execution support HWCAP_S390_TE is bit 10.
	 */
	if (MACHINE_HAS_TE)
		elf_hwcap |= HWCAP_S390_TE;

	/*
	 * Vector extension HWCAP_S390_VXRS is bit 11. The Vector extension
	 * can be disabled with the "novx" parameter. Use MACHINE_HAS_VX
	 * instead of facility bit 129.
	 */
	if (MACHINE_HAS_VX) {
		elf_hwcap |= HWCAP_S390_VXRS;
		if (test_facility(134))
			elf_hwcap |= HWCAP_S390_VXRS_BCD;
		if (test_facility(135))
			elf_hwcap |= HWCAP_S390_VXRS_EXT;
		if (test_facility(148))
			elf_hwcap |= HWCAP_S390_VXRS_EXT2;
		if (test_facility(152))
			elf_hwcap |= HWCAP_S390_VXRS_PDE;
	}
	if (test_facility(150))
		elf_hwcap |= HWCAP_S390_SORT;
	if (test_facility(151))
		elf_hwcap |= HWCAP_S390_DFLT;

	/*
	 * Guarded storage support HWCAP_S390_GS is bit 12.
	 */
	if (MACHINE_HAS_GS)
		elf_hwcap |= HWCAP_S390_GS;

	get_cpu_id(&cpu_id);
	add_device_randomness(&cpu_id, sizeof(cpu_id));
	switch (cpu_id.machine) {
	case 0x2064:
	case 0x2066:
	default:	/* Use "z900" as default for 64 bit kernels. */
		strcpy(elf_platform, "z900");
		break;
	case 0x2084:
	case 0x2086:
		strcpy(elf_platform, "z990");
		break;
	case 0x2094:
	case 0x2096:
		strcpy(elf_platform, "z9-109");
		break;
	case 0x2097:
	case 0x2098:
		strcpy(elf_platform, "z10");
		break;
	case 0x2817:
	case 0x2818:
		strcpy(elf_platform, "z196");
		break;
	case 0x2827:
	case 0x2828:
		strcpy(elf_platform, "zEC12");
		break;
	case 0x2964:
	case 0x2965:
		strcpy(elf_platform, "z13");
		break;
	case 0x3906:
	case 0x3907:
		strcpy(elf_platform, "z14");
		break;
	case 0x8561:
	case 0x8562:
		strcpy(elf_platform, "z15");
		break;
	}

	/*
	 * Virtualization support HWCAP_INT_SIE is bit 0.
	 */
	if (sclp.has_sief2)
		int_hwcap |= HWCAP_INT_SIE;

	return 0;
}
arch_initcall(setup_hwcaps);

/*
 * Add system information as device randomness
 */
static void __init setup_randomness(void)
{
	struct sysinfo_3_2_2 *vmms;

	vmms = (struct sysinfo_3_2_2 *) memblock_phys_alloc(PAGE_SIZE,
							    PAGE_SIZE);
	if (!vmms)
		panic("Failed to allocate memory for sysinfo structure\n");

	if (stsi(vmms, 3, 2, 2) == 0 && vmms->count)
		add_device_randomness(&vmms->vm, sizeof(vmms->vm[0]) * vmms->count);
	memblock_free((unsigned long) vmms, PAGE_SIZE);
}

/*
 * Find the correct size for the task_struct. This depends on
 * the size of the struct fpu at the end of the thread_struct
 * which is embedded in the task_struct.
 */
static void __init setup_task_size(void)
{
	int task_size = sizeof(struct task_struct);

	if (!MACHINE_HAS_VX) {
		task_size -= sizeof(__vector128) * __NUM_VXRS;
		task_size += sizeof(freg_t) * __NUM_FPRS;
	}
	arch_task_struct_size = task_size;
}

/*
 * Issue diagnose 318 to set the control program name and
 * version codes.
 */
static void __init setup_control_program_code(void)
{
	union diag318_info diag318_info = {
		.cpnc = CPNC_LINUX,
		.cpvc_linux = 0,
		.cpvc_distro = {0},
	};

	if (!sclp.has_diag318)
		return;

	diag_stat_inc(DIAG_STAT_X318);
	asm volatile("diag %0,0,0x318\n" : : "d" (diag318_info.val));
}

/*
 * Print the component list from the IPL report
 */
static void __init log_component_list(void)
{
	struct ipl_rb_component_entry *ptr, *end;
	char *str;

	if (!early_ipl_comp_list_addr)
		return;
	if (ipl_block.hdr.flags & IPL_PL_FLAG_SIPL)
		pr_info("Linux is running with Secure-IPL enabled\n");
	else
		pr_info("Linux is running with Secure-IPL disabled\n");
	ptr = (void *) early_ipl_comp_list_addr;
	end = (void *) ptr + early_ipl_comp_list_size;
	pr_info("The IPL report contains the following components:\n");
	while (ptr < end) {
		if (ptr->flags & IPL_RB_COMPONENT_FLAG_SIGNED) {
			if (ptr->flags & IPL_RB_COMPONENT_FLAG_VERIFIED)
				str = "signed, verified";
			else
				str = "signed, verification failed";
		} else {
			str = "not signed";
		}
		pr_info("%016llx - %016llx (%s)\n",
			ptr->addr, ptr->addr + ptr->len, str);
		ptr++;
	}
}

/*
 * Setup function called from init/main.c just after the banner
 * was printed.
 */

void __init setup_arch(char **cmdline_p)
{
	/*
	 * print what head.S has found out about the machine
	 */
	if (MACHINE_IS_VM)
		pr_info("Linux is running as a z/VM "
			"guest operating system in 64-bit mode\n");
	else if (MACHINE_IS_KVM)
		pr_info("Linux is running under KVM in 64-bit mode\n");
	else if (MACHINE_IS_LPAR)
		pr_info("Linux is running natively in 64-bit mode\n");
	else
		pr_info("Linux is running as a guest in 64-bit mode\n");

	log_component_list();

	/* Have one command line that is parsed and saved in /proc/cmdline */
	/* boot_command_line has been already set up in early.c */
	*cmdline_p = boot_command_line;

	ROOT_DEV = Root_RAM0;

	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code = (unsigned long) _etext;
	init_mm.end_data = (unsigned long) _edata;
	init_mm.brk = (unsigned long) _end;

	if (IS_ENABLED(CONFIG_EXPOLINE_AUTO))
		nospec_auto_detect();

	jump_label_init();
	parse_early_param();
#ifdef CONFIG_CRASH_DUMP
	/* Deactivate elfcorehdr= kernel parameter */
	elfcorehdr_addr = ELFCORE_ADDR_MAX;
#endif

	os_info_init();
	setup_ipl();
	setup_task_size();
	setup_control_program_code();

	/* Do some memory reservations *before* memory is added to memblock */
	reserve_memory_end();
	reserve_oldmem();
	reserve_kernel();
	reserve_initrd();
	reserve_certificate_list();
	reserve_mem_detect_info();
	memblock_allow_resize();

	/* Get information about *all* installed memory */
	memblock_add_mem_detect_info();

	free_mem_detect_info();
	remove_oldmem();

	/*
	 * Make sure all chunks are MAX_ORDER aligned so we don't need the
	 * extra checks that HOLES_IN_ZONE would require.
	 *
	 * Is this still required?
	 */
	memblock_trim_memory(1UL << (MAX_ORDER - 1 + PAGE_SHIFT));

	setup_memory_end();
	setup_memory();
	dma_contiguous_reserve(memory_end);
	vmcp_cma_reserve();

	check_initrd();
	reserve_crashkernel();
#ifdef CONFIG_CRASH_DUMP
	/*
	 * Be aware that smp_save_dump_cpus() triggers a system reset.
	 * Therefore CPU and device initialization should be done afterwards.
	 */
	smp_save_dump_cpus();
#endif

	setup_resources();
	setup_lowcore_dat_off();
	smp_fill_possible_mask();
	cpu_detect_mhz_feature();
	cpu_init();
	numa_setup();
	smp_detect_cpus();
	topology_init_early();

	/*
	 * Create kernel page tables and switch to virtual addressing.
	 */
	paging_init();

	/*
	 * After paging_init created the kernel page table, the new PSWs
	 * in lowcore can now run with DAT enabled.
	 */
	setup_lowcore_dat_on();

	/* Setup default console */
	conmode_default();
	set_preferred_console();

	apply_alternative_instructions();
	if (IS_ENABLED(CONFIG_EXPOLINE))
		nospec_init_branches();

	/* Setup zfcpdump support */
	setup_zfcpdump();

	/* Add system specific data to the random pool */
	setup_randomness();
}