Update Linux to v5.4.2
Change-Id: Idf6911045d9d382da2cfe01b1edff026404ac8fd

diff --git a/init/main.c b/init/main.c
index 18f8f01..91f6ebb 100644
--- a/init/main.c
+++ b/init/main.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/init/main.c
*
@@ -25,7 +26,7 @@
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/initrd.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/acpi.h>
#include <linux/console.h>
#include <linux/nmi.h>
@@ -105,7 +106,6 @@
static int kernel_init(void *);

extern void init_IRQ(void);
-extern void fork_init(void);
extern void radix_tree_init(void);

/*
@@ -374,11 +374,20 @@
*/
static void __init setup_command_line(char *command_line)
{
- saved_command_line =
- memblock_virt_alloc(strlen(boot_command_line) + 1, 0);
- initcall_command_line =
- memblock_virt_alloc(strlen(boot_command_line) + 1, 0);
- static_command_line = memblock_virt_alloc(strlen(command_line) + 1, 0);
+ size_t len = strlen(boot_command_line) + 1;
+
+ saved_command_line = memblock_alloc(len, SMP_CACHE_BYTES);
+ if (!saved_command_line)
+ panic("%s: Failed to allocate %zu bytes\n", __func__, len);
+
+ initcall_command_line = memblock_alloc(len, SMP_CACHE_BYTES);
+ if (!initcall_command_line)
+ panic("%s: Failed to allocate %zu bytes\n", __func__, len);
+
+ static_command_line = memblock_alloc(len, SMP_CACHE_BYTES);
+ if (!static_command_line)
+ panic("%s: Failed to allocate %zu bytes\n", __func__, len);
+
strcpy(saved_command_line, boot_command_line);
strcpy(static_command_line, command_line);
}
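The memblock_virt_alloc() calls become memblock_alloc() because the bootmem allocator was removed and folded into memblock; the new API returns NULL on failure instead of panicking internally, which is why every early call site now checks the result and panics explicitly. A minimal sketch of the resulting idiom (early_buf is hypothetical, not part of this patch):

#include <linux/memblock.h>

static char *early_buf __initdata;

static void __init early_buf_alloc(void)
{
	/* memblock_alloc() zeroes the block and returns NULL on failure */
	early_buf = memblock_alloc(PAGE_SIZE, SMP_CACHE_BYTES);
	if (!early_buf)
		panic("%s: Failed to allocate %lu bytes\n", __func__, PAGE_SIZE);
}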
@@ -394,7 +403,7 @@

static __initdata DECLARE_COMPLETION(kthreadd_done);

-static noinline void __ref rest_init(void)
+noinline void __ref rest_init(void)
{
struct task_struct *tsk;
int pid;
@@ -424,7 +433,7 @@

/*
* Enable might_sleep() and smp_processor_id() checks.
- * They cannot be enabled earlier because with CONFIG_PREEMPT=y
+ * They cannot be enabled earlier because with CONFIG_PREEMPTION=y
* kernel_thread() would trigger might_sleep() splats. With
* CONFIG_PREEMPT_VOLUNTARY=y the init task might have scheduled
* already, but it's stuck on the kthreadd_done completion.
@@ -496,6 +505,10 @@

void __init __weak mem_encrypt_init(void) { }

+void __init __weak poking_init(void) { }
+
+void __init __weak pgtable_cache_init(void) { }
+
bool initcall_debug;
core_param(initcall_debug, initcall_debug, bool, 0644);
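poking_init() and pgtable_cache_init() get empty __weak stubs here so start_kernel() can call them unconditionally: an architecture that needs real work defines a strong version of the symbol, and the linker silently prefers it over the weak default. The pattern, sketched with a hypothetical arch_foo_init() hook:

/* generic code: weak default that does nothing */
void __init __weak arch_foo_init(void) { }

/* arch code, in a separate object file: the strong definition wins */
void __init arch_foo_init(void)
{
	/* architecture-specific setup goes here */
}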
@@ -507,6 +520,29 @@
}
#endif

+/* Report memory auto-initialization states for this boot. */
+static void __init report_meminit(void)
+{
+ const char *stack;
+
+ if (IS_ENABLED(CONFIG_INIT_STACK_ALL))
+ stack = "all";
+ else if (IS_ENABLED(CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF_ALL))
+ stack = "byref_all";
+ else if (IS_ENABLED(CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF))
+ stack = "byref";
+ else if (IS_ENABLED(CONFIG_GCC_PLUGIN_STRUCTLEAK_USER))
+ stack = "__user";
+ else
+ stack = "off";
+
+ pr_info("mem auto-init: stack:%s, heap alloc:%s, heap free:%s\n",
+ stack, want_init_on_alloc(GFP_KERNEL) ? "on" : "off",
+ want_init_on_free() ? "on" : "off");
+ if (want_init_on_free())
+ pr_info("mem auto-init: clearing system memory may take some time...\n");
+}
+
/*
* Set up kernel memory allocators
*/
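report_meminit() relies on IS_ENABLED(), which expands to a compile-time 0 or 1, so the untaken branches are dropped by the compiler. The heap half of the report queries want_init_on_alloc()/want_init_on_free(), which are backed by static keys wired to the init_on_alloc=/init_on_free= boot options; roughly (a simplified sketch of the mm/ side, not code from this file):

DECLARE_STATIC_KEY_FALSE(init_on_alloc);

static inline bool want_init_on_alloc(gfp_t flags)
{
	if (static_branch_unlikely(&init_on_alloc))
		return true;
	/* explicit __GFP_ZERO requests are zeroed either way */
	return flags & __GFP_ZERO;
}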
@@ -517,9 +553,12 @@
* bigger than MAX_ORDER unless SPARSEMEM.
*/
page_ext_init_flatmem();
+ report_meminit();
mem_init();
kmem_cache_init();
+ kmemleak_init();
pgtable_init();
+ debug_objects_mem_init();
vmalloc_init();
ioremap_huge_init();
/* Should be run before the first non-init thread is created */
@@ -528,6 +567,11 @@
pti_init();
}

+void __init __weak arch_call_rest_init(void)
+{
+ rest_init();
+}
+
asmlinkage __visible void __init start_kernel(void)
{
char *command_line;
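arch_call_rest_init() adds a __weak indirection around rest_init(): the generic version above just forwards the call, and an architecture can interpose its own step (s390, for example, uses this point to switch off its early boot stack). A sketch of such an override, with an illustrative body:

/* arch code: the strong definition replaces the weak one in init/main.c */
void __init arch_call_rest_init(void)
{
	/* e.g. move off the early boot stack before the init task runs */
	rest_init();
}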
@@ -549,15 +593,8 @@
boot_cpu_init();
page_address_init();
pr_notice("%s", linux_banner);
+ early_security_init();
setup_arch(&command_line);
- /*
- * Set up the the initial canary and entropy after arch
- * and after adding latent and command line entropy.
- */
- add_latent_entropy();
- add_device_randomness(command_line, strlen(command_line));
- boot_init_stack_canary();
- mm_init_cpumask(&init_mm);
setup_command_line(command_line);
setup_nr_cpu_ids();
setup_per_cpu_areas();
@@ -568,6 +605,8 @@
page_alloc_init();

pr_notice("Kernel command line: %s\n", boot_command_line);
+ /* parameters may set static keys */
+ jump_label_init();
parse_early_param();
after_dashes = parse_args("Booting kernel",
static_command_line, __start___param,
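jump_label_init() moves above parse_early_param() because, as the new comment notes, parameters may set static keys, and the patching machinery must be ready before the first toggle. A minimal sketch of a boot-parameter-driven static key (all names are illustrative):

#include <linux/init.h>
#include <linux/jump_label.h>

static DEFINE_STATIC_KEY_FALSE(my_early_feature);

static int __init my_feature_setup(char *unused)
{
	/* safe only once jump_label_init() has run */
	static_branch_enable(&my_early_feature);
	return 0;
}
early_param("my_feature", my_feature_setup);

static void hot_path(void)
{
	/* compiles to a patchable jump, not a load-and-test */
	if (static_branch_unlikely(&my_early_feature))
		pr_debug("my_feature is on\n");
}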
@@ -577,8 +616,6 @@
parse_args("Setting init args", after_dashes, NULL, 0, -1, -1,
NULL, set_init_arg);

- jump_label_init();
-
/*
* These use large bootmem allocations and must precede
* kmem_cache_init()
@@ -641,6 +678,20 @@
hrtimers_init();
softirq_init();
timekeeping_init();
+
+ /*
+ * For best initial stack canary entropy, prepare it after:
+ * - setup_arch() for any UEFI RNG entropy and boot cmdline access
+ * - timekeeping_init() for ktime entropy used in rand_initialize()
+ * - rand_initialize() to get any arch-specific entropy like RDRAND
+ * - add_latent_entropy() to get any latent entropy
+ * - adding command line entropy
+ */
+ rand_initialize();
+ add_latent_entropy();
+ add_device_randomness(command_line, strlen(command_line));
+ boot_init_stack_canary();
+
time_init();
printk_safe_init();
perf_event_init();
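The canary setup moves from just after setup_arch() down to here so that rand_initialize() runs first with timekeeping available; the comment spells out the dependency chain. For context, on x86 boot_init_stack_canary() mixes the random pool with the TSC before storing the per-task canary, roughly (a simplified sketch of arch code, not part of this patch):

static __always_inline void boot_init_stack_canary(void)
{
	u64 canary;
	u64 tsc;

	/* this is why rand_initialize() must already have run */
	get_random_bytes(&canary, sizeof(canary));
	tsc = rdtsc();
	canary += tsc + (tsc << 32UL);

	current->stack_canary = canary;
}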
@@ -689,9 +740,6 @@
initrd_start = 0;
}
#endif
- page_ext_init();
- kmemleak_init();
- debug_objects_mem_init();
setup_per_cpu_pageset();
numa_policy_init();
acpi_early_init();
@@ -725,18 +773,15 @@
taskstats_init_early();
delayacct_init();

+ poking_init();
check_bugs();

acpi_subsystem_init();
arch_post_acpi_subsys_init();
sfi_init_late();

- if (efi_enabled(EFI_RUNTIME_SERVICES)) {
- efi_free_boot_services();
- }
-
/* Do the rest non-__init'ed, we're now alive */
- rest_init();
+ arch_call_rest_init();
}

/* Call all constructor functions linked into the kernel. */
@@ -768,8 +813,16 @@
str_entry = strsep(&str, ",");
if (str_entry) {
pr_debug("blacklisting initcall %s\n", str_entry);
- entry = alloc_bootmem(sizeof(*entry));
- entry->buf = alloc_bootmem(strlen(str_entry) + 1);
+ entry = memblock_alloc(sizeof(*entry),
+ SMP_CACHE_BYTES);
+ if (!entry)
+ panic("%s: Failed to allocate %zu bytes\n",
+ __func__, sizeof(*entry));
+ entry->buf = memblock_alloc(strlen(str_entry) + 1,
+ SMP_CACHE_BYTES);
+ if (!entry->buf)
+ panic("%s: Failed to allocate %zu bytes\n",
+ __func__, strlen(str_entry) + 1);
strcpy(entry->buf, str_entry);
list_add(&entry->next, &blacklisted_initcalls);
}
@@ -824,7 +877,7 @@
{
ktime_t *calltime = (ktime_t *)data;

- printk(KERN_DEBUG "calling %pF @ %i\n", fn, task_pid_nr(current));
+ printk(KERN_DEBUG "calling %pS @ %i\n", fn, task_pid_nr(current));
*calltime = ktime_get();
}

@@ -838,7 +891,7 @@
rettime = ktime_get();
delta = ktime_sub(rettime, *calltime);
duration = (unsigned long long) ktime_to_ns(delta) >> 10;
- printk(KERN_DEBUG "initcall %pF returned %d after %lld usecs\n",
+ printk(KERN_DEBUG "initcall %pS returned %d after %lld usecs\n",
fn, ret, duration);
}

@@ -895,7 +948,7 @@
strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
local_irq_enable();
}
- WARN(msgbuf[0], "initcall %pF returned with %s\n", fn, msgbuf);
+ WARN(msgbuf[0], "initcall %pS returned with %s\n", fn, msgbuf);

add_latent_entropy();
return ret;
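The %pF-to-%pS conversions in this and the two preceding hunks track the vsprintf deprecation of %pF/%pf: those specifiers existed for ABIs where a function pointer is a descriptor (ia64, ppc64 ELFv1), and once the symbol-lookup path learned to dereference descriptors itself, %pS covers both cases. Either way the output is a resolved symbol, e.g. (report_initcall() is a hypothetical wrapper):

static void __init report_initcall(initcall_t fn)
{
	/* prints e.g. "calling ipc_init+0x0/0x28 @ 1" */
	printk(KERN_DEBUG "calling %pS @ %i\n", fn, task_pid_nr(current));
}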
@@ -926,7 +979,7 @@
};

/* Keep these in sync with initcalls in include/linux/init.h */
-static char *initcall_level_names[] __initdata = {
+static const char *initcall_level_names[] __initdata = {
"pure",
"core",
"postcore",
@@ -971,7 +1024,6 @@
static void __init do_basic_setup(void)
{
cpuset_init_smp();
- shmem_init();
driver_init();
init_irq_proc();
do_ctors();
@@ -988,17 +1040,6 @@
do_one_initcall(initcall_from_entry(fn));
}

-/*
- * This function requests modules which should be loaded by default and is
- * called twice right after initrd is mounted and right before init is
- * exec'd. If such modules are on either initrd or rootfs, they will be
- * loaded before control is passed to userland.
- */
-void __init load_default_modules(void)
-{
- load_default_elevator_module();
-}
-
static int run_init_process(const char *init_filename)
{
argv_init[0] = init_filename;
@@ -1038,12 +1079,12 @@
{
if (rodata_enabled) {
/*
- * load_module() results in W+X mappings, which are cleaned up
- * with call_rcu_sched(). Let's make sure that queued work is
+ * load_module() results in W+X mappings, which are cleaned
+ * up with call_rcu(). Let's make sure that queued work is
* flushed so that we don't hit false positives looking for
* insecure pages which are W+X.
*/
- rcu_barrier_sched();
+ rcu_barrier();
mark_rodata_ro();
rodata_test();
} else
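rcu_barrier_sched() disappears because the RCU-sched flavor was merged into plain RCU in the v4.20/v5.0 flavor consolidation, so rcu_barrier() now waits for the call_rcu() callbacks that load_module() queues to tear down its temporary W+X mappings. The pairing rule it depends on, sketched with a hypothetical structure:

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct my_mapping {
	struct rcu_head rcu;
	/* ... */
};

static void free_mapping_cb(struct rcu_head *rcu)
{
	kfree(container_of(rcu, struct my_mapping, rcu));
}

static void drop_mapping(struct my_mapping *m)
{
	call_rcu(&m->rcu, free_mapping_cb);	/* deferred free */
}

static void flush_mappings(void)
{
	rcu_barrier();	/* returns only after every queued callback has run */
}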
@@ -1056,6 +1097,11 @@
}
#endif

+void __weak free_initmem(void)
+{
+ free_initmem_default(POISON_FREE_INITMEM);
+}
+
static int __ref kernel_init(void *unused)
{
int ret;
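With a __weak free_initmem() in generic code, architectures with no special requirements can drop their private copies. The common helper frees the [__init_begin, __init_end) section back to the page allocator, filling it with POISON_FREE_INITMEM so that a stale jump into __init code fails recognizably; it looks roughly like:

static inline unsigned long free_initmem_default(int poison)
{
	extern char __init_begin[], __init_end[];

	return free_reserved_area(&__init_begin, &__init_end,
				  poison, "unused kernel");
}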
@@ -1064,7 +1110,6 @@
/* need to finish all async __init code before freeing the memory */
async_synchronize_full();
ftrace_free_init_mem();
- jump_label_invalidate_initmem();
free_initmem();
mark_readonly();

@@ -1140,6 +1185,8 @@
sched_init_smp();

page_alloc_init_late();
+ /* Initialize page ext after all struct pages are initialized. */
+ page_ext_init();

do_basic_setup();

@@ -1173,5 +1220,4 @@
*/

integrity_load_keys();
- load_default_modules();
}