Update Linux to v5.10.109
Sourced from the upstream stable kernel tarball [1]
[1] https://cdn.kernel.org/pub/linux/kernel/v5.x/linux-5.10.109.tar.xz
Change-Id: I19bca9fc6762d4e63bcf3e4cba88bbe560d9c76c
Signed-off-by: Olivier Deprez <olivier.deprez@arm.com>
diff --git a/kernel/module.c b/kernel/module.c
index 59d487b..5f44031 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -4,6 +4,9 @@
Copyright (C) 2001 Rusty Russell, 2002, 2010 Rusty Russell IBM.
*/
+
+#define INCLUDE_VERMAGIC
+
#include <linux/export.h>
#include <linux/extable.h>
#include <linux/moduleloader.h>
@@ -15,6 +18,7 @@
#include <linux/fs.h>
#include <linux/sysfs.h>
#include <linux/kernel.h>
+#include <linux/kernel_read_file.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/elf.h>
@@ -1411,7 +1415,7 @@
char *imported_namespace;
namespace = kernel_symbol_namespace(sym);
- if (namespace) {
+ if (namespace && namespace[0]) {
imported_namespace = get_modinfo(info, "import_ns");
while (imported_namespace) {
if (strcmp(namespace, imported_namespace) == 0)
@@ -1547,7 +1551,7 @@
struct module_sect_attrs {
struct attribute_group grp;
unsigned int nsections;
- struct module_sect_attr attrs[0];
+ struct module_sect_attr attrs[];
};
#define MODULE_SECT_READ_SIZE (3 /* "0x", "\n" */ + (BITS_PER_LONG / 4))
@@ -1660,7 +1664,7 @@
struct module_notes_attrs {
struct kobject *dir;
unsigned int notes;
- struct bin_attribute attrs[0];
+ struct bin_attribute attrs[];
};
static ssize_t module_notes_read(struct file *filp, struct kobject *kobj,
@@ -1993,7 +1997,6 @@
mod_sysfs_fini(mod);
}
-#ifdef CONFIG_ARCH_HAS_STRICT_MODULE_RWX
/*
* LKM RO/NX protection: protect module's text/ro-data
* from modification and any data from execution.
@@ -2007,6 +2010,14 @@
*
* These values are always page-aligned (as is base)
*/
+
+/*
+ * Since some arches are moving towards PAGE_KERNEL module allocations instead
+ * of PAGE_KERNEL_EXEC, keep frob_text() and module_enable_x() outside of the
+ * CONFIG_STRICT_MODULE_RWX block below because they are needed regardless of
+ * whether we are strict.
+ */
+#ifdef CONFIG_ARCH_HAS_STRICT_MODULE_RWX
static void frob_text(const struct module_layout *layout,
int (*set_memory)(unsigned long start, int num_pages))
{
@@ -2016,6 +2027,15 @@
layout->text_size >> PAGE_SHIFT);
}
+static void module_enable_x(const struct module *mod)
+{
+ frob_text(&mod->core_layout, set_memory_x);
+ frob_text(&mod->init_layout, set_memory_x);
+}
+#else /* !CONFIG_ARCH_HAS_STRICT_MODULE_RWX */
+static void module_enable_x(const struct module *mod) { }
+#endif /* CONFIG_ARCH_HAS_STRICT_MODULE_RWX */
+
#ifdef CONFIG_STRICT_MODULE_RWX
static void frob_rodata(const struct module_layout *layout,
int (*set_memory)(unsigned long start, int num_pages))
@@ -2047,20 +2067,7 @@
(layout->size - layout->ro_after_init_size) >> PAGE_SHIFT);
}
-/* livepatching wants to disable read-only so it can frob module. */
-void module_disable_ro(const struct module *mod)
-{
- if (!rodata_enabled)
- return;
-
- frob_text(&mod->core_layout, set_memory_rw);
- frob_rodata(&mod->core_layout, set_memory_rw);
- frob_ro_after_init(&mod->core_layout, set_memory_rw);
- frob_text(&mod->init_layout, set_memory_rw);
- frob_rodata(&mod->init_layout, set_memory_rw);
-}
-
-void module_enable_ro(const struct module *mod, bool after_init)
+static void module_enable_ro(const struct module *mod, bool after_init)
{
if (!rodata_enabled)
return;
@@ -2086,62 +2093,32 @@
frob_writable_data(&mod->init_layout, set_memory_nx);
}
-/* Iterate through all modules and set each module's text as RW */
-void set_all_modules_text_rw(void)
+static int module_enforce_rwx_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
+ char *secstrings, struct module *mod)
{
- struct module *mod;
+ const unsigned long shf_wx = SHF_WRITE|SHF_EXECINSTR;
+ int i;
- if (!rodata_enabled)
- return;
-
- mutex_lock(&module_mutex);
- list_for_each_entry_rcu(mod, &modules, list) {
- if (mod->state == MODULE_STATE_UNFORMED)
- continue;
-
- frob_text(&mod->core_layout, set_memory_rw);
- frob_text(&mod->init_layout, set_memory_rw);
+ for (i = 0; i < hdr->e_shnum; i++) {
+ if ((sechdrs[i].sh_flags & shf_wx) == shf_wx) {
+ pr_err("%s: section %s (index %d) has invalid WRITE|EXEC flags\n",
+ mod->name, secstrings + sechdrs[i].sh_name, i);
+ return -ENOEXEC;
+ }
}
- mutex_unlock(&module_mutex);
+
+ return 0;
}
-/* Iterate through all modules and set each module's text as RO */
-void set_all_modules_text_ro(void)
-{
- struct module *mod;
-
- if (!rodata_enabled)
- return;
-
- mutex_lock(&module_mutex);
- list_for_each_entry_rcu(mod, &modules, list) {
- /*
- * Ignore going modules since it's possible that ro
- * protection has already been disabled, otherwise we'll
- * run into protection faults at module deallocation.
- */
- if (mod->state == MODULE_STATE_UNFORMED ||
- mod->state == MODULE_STATE_GOING)
- continue;
-
- frob_text(&mod->core_layout, set_memory_ro);
- frob_text(&mod->init_layout, set_memory_ro);
- }
- mutex_unlock(&module_mutex);
-}
#else /* !CONFIG_STRICT_MODULE_RWX */
static void module_enable_nx(const struct module *mod) { }
-#endif /* CONFIG_STRICT_MODULE_RWX */
-static void module_enable_x(const struct module *mod)
+static void module_enable_ro(const struct module *mod, bool after_init) {}
+static int module_enforce_rwx_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
+ char *secstrings, struct module *mod)
{
- frob_text(&mod->core_layout, set_memory_x);
- frob_text(&mod->init_layout, set_memory_x);
+ return 0;
}
-#else /* !CONFIG_ARCH_HAS_STRICT_MODULE_RWX */
-static void module_enable_nx(const struct module *mod) { }
-static void module_enable_x(const struct module *mod) { }
-#endif /* CONFIG_ARCH_HAS_STRICT_MODULE_RWX */
-
+#endif /* CONFIG_STRICT_MODULE_RWX */
#ifdef CONFIG_LIVEPATCH
/*
@@ -2444,11 +2421,13 @@
if (!(info->sechdrs[infosec].sh_flags & SHF_ALLOC))
continue;
- /* Livepatch relocation sections are applied by livepatch */
if (info->sechdrs[i].sh_flags & SHF_RELA_LIVEPATCH)
- continue;
-
- if (info->sechdrs[i].sh_type == SHT_REL)
+ err = klp_apply_section_relocs(mod, info->sechdrs,
+ info->secstrings,
+ info->strtab,
+ info->index.sym, i,
+ NULL);
+ else if (info->sechdrs[i].sh_type == SHT_REL)
err = apply_relocate(info->sechdrs, info->strtab,
info->index.sym, i, mod);
else if (info->sechdrs[i].sh_type == SHT_RELA)
@@ -2510,7 +2489,7 @@
if ((s->sh_flags & masks[m][0]) != masks[m][0]
|| (s->sh_flags & masks[m][1])
|| s->sh_entsize != ~0UL
- || strstarts(sname, ".init"))
+ || module_init_section(sname))
continue;
s->sh_entsize = get_offset(mod, &mod->core_layout.size, s, i);
pr_debug("\t%s\n", sname);
@@ -2543,7 +2522,7 @@
if ((s->sh_flags & masks[m][0]) != masks[m][0]
|| (s->sh_flags & masks[m][1])
|| s->sh_entsize != ~0UL
- || !strstarts(sname, ".init"))
+ || !module_init_section(sname))
continue;
s->sh_entsize = (get_offset(mod, &mod->init_layout.size, s, i)
| INIT_OFFSET_MASK);
@@ -2875,7 +2854,14 @@
void * __weak module_alloc(unsigned long size)
{
- return vmalloc_exec(size);
+ return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
+ GFP_KERNEL, PAGE_KERNEL_EXEC, VM_FLUSH_RESET_PERMS,
+ NUMA_NO_NODE, __builtin_return_address(0));
+}
+
+bool __weak module_init_section(const char *name)
+{
+ return strstarts(name, ".init");
}
bool __weak module_exit_section(const char *name)
@@ -3143,22 +3129,27 @@
if (info->len < sizeof(*(info->hdr)))
return -ENOEXEC;
- err = security_kernel_load_data(LOADING_MODULE);
+ err = security_kernel_load_data(LOADING_MODULE, true);
if (err)
return err;
/* Suck in entire file: we'll want most of it. */
- info->hdr = __vmalloc(info->len,
- GFP_KERNEL | __GFP_NOWARN, PAGE_KERNEL);
+ info->hdr = __vmalloc(info->len, GFP_KERNEL | __GFP_NOWARN);
if (!info->hdr)
return -ENOMEM;
if (copy_chunked_from_user(info->hdr, umod, info->len) != 0) {
- vfree(info->hdr);
- return -EFAULT;
+ err = -EFAULT;
+ goto out;
}
- return 0;
+ err = security_kernel_post_load_data((char *)info->hdr, info->len,
+ LOADING_MODULE, "init_module");
+out:
+ if (err)
+ vfree(info->hdr);
+
+ return err;
}
static void free_copy(struct load_info *info)
@@ -3342,6 +3333,9 @@
}
#endif
+ mod->noinstr_text_start = section_objs(info, ".noinstr.text", 1,
+ &mod->noinstr_text_size);
+
#ifdef CONFIG_TRACEPOINTS
mod->tracepoints_ptrs = section_objs(info, "__tracepoints_ptrs",
sizeof(*mod->tracepoints_ptrs),
@@ -3377,7 +3371,7 @@
#endif
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
/* sechdrs[0].sh_size is always zero */
- mod->ftrace_callsites = section_objs(info, "__mcount_loc",
+ mod->ftrace_callsites = section_objs(info, FTRACE_CALLSITE_SECTION,
sizeof(*mod->ftrace_callsites),
&mod->num_ftrace_callsites);
#endif
@@ -3386,13 +3380,25 @@
sizeof(*mod->ei_funcs),
&mod->num_ei_funcs);
#endif
+#ifdef CONFIG_KPROBES
+ mod->kprobes_text_start = section_objs(info, ".kprobes.text", 1,
+ &mod->kprobes_text_size);
+ mod->kprobe_blacklist = section_objs(info, "_kprobe_blacklist",
+ sizeof(unsigned long),
+ &mod->num_kprobe_blacklist);
+#endif
+#ifdef CONFIG_HAVE_STATIC_CALL_INLINE
+ mod->static_call_sites = section_objs(info, ".static_call_sites",
+ sizeof(*mod->static_call_sites),
+ &mod->num_static_call_sites);
+#endif
mod->extable = section_objs(info, "__ex_table",
sizeof(*mod->extable), &mod->num_exentries);
if (section_addr(info, "__obsparm"))
pr_warn("%s: Ignoring obsolete parameters\n", mod->name);
- info->debug = section_objs(info, "__verbose",
+ info->debug = section_objs(info, "__dyndbg",
sizeof(*info->debug), &info->num_debug);
return 0;
@@ -3504,12 +3510,6 @@
static void flush_module_icache(const struct module *mod)
{
- mm_segment_t old_fs;
-
- /* flush the icache in correct context */
- old_fs = get_fs();
- set_fs(KERNEL_DS);
-
/*
* Flush the instruction cache, since we've played with text.
* Do it before processing of module parameters, so the module
@@ -3521,8 +3521,6 @@
+ mod->init_layout.size);
flush_icache_range((unsigned long)mod->core_layout.base,
(unsigned long)mod->core_layout.base + mod->core_layout.size);
-
- set_fs(old_fs);
}
int __weak module_frob_arch_sections(Elf_Ehdr *hdr,
@@ -3570,6 +3568,11 @@
if (err < 0)
return ERR_PTR(err);
+ err = module_enforce_rwx_sections(info->hdr, info->sechdrs,
+ info->secstrings, info->mod);
+ if (err < 0)
+ return ERR_PTR(err);
+
/* We will do a special allocation for per-cpu sections later. */
info->sechdrs[info->index.pcpu].sh_flags &= ~(unsigned long)SHF_ALLOC;
@@ -3711,12 +3714,6 @@
}
freeinit->module_init = mod->init_layout.base;
- /*
- * We want to find out whether @mod uses async during init. Clear
- * PF_USED_ASYNC. async_schedule*() will set it.
- */
- current->flags &= ~PF_USED_ASYNC;
-
do_mod_ctors(mod);
/* Start the module */
if (mod->init != NULL)
@@ -3742,22 +3739,13 @@
/*
* We need to finish all async code before the module init sequence
- * is done. This has potential to deadlock. For example, a newly
- * detected block device can trigger request_module() of the
- * default iosched from async probing task. Once userland helper
- * reaches here, async_synchronize_full() will wait on the async
- * task waiting on request_module() and deadlock.
+ * is done. This has potential to deadlock if synchronous module
+ * loading is requested from async (which is not allowed!).
*
- * This deadlock is avoided by perfomring async_synchronize_full()
- * iff module init queued any async jobs. This isn't a full
- * solution as it will deadlock the same if module loading from
- * async jobs nests more than once; however, due to the various
- * constraints, this hack seems to be the best option for now.
- * Please refer to the following thread for details.
- *
- * http://thread.gmane.org/gmane.linux.kernel/1420814
+ * See commit 0fdff3ec6d87 ("async, kmod: warn on synchronous
+ * request_module() from async workers") for more details.
*/
- if (!mod->async_probe_requested && (current->flags & PF_USED_ASYNC))
+ if (!mod->async_probe_requested)
async_synchronize_full();
ftrace_free_mem(mod, mod->init_layout.base, mod->init_layout.base +
@@ -3901,9 +3889,13 @@
if (err)
return err;
- blocking_notifier_call_chain(&module_notify_list,
- MODULE_STATE_COMING, mod);
- return 0;
+ err = blocking_notifier_call_chain_robust(&module_notify_list,
+ MODULE_STATE_COMING, MODULE_STATE_GOING, mod);
+ err = notifier_to_errno(err);
+ if (err)
+ klp_module_going(mod);
+
+ return err;
}
static int unknown_module_param_cb(char *param, char *val, const char *modname,
@@ -3973,6 +3965,7 @@
*/
if (blacklisted(info->name)) {
err = -EPERM;
+ pr_err("Module %s is blacklisted\n", info->name);
goto free_copy;
}
@@ -4170,8 +4163,7 @@
SYSCALL_DEFINE3(finit_module, int, fd, const char __user *, uargs, int, flags)
{
struct load_info info = { };
- loff_t size;
- void *hdr;
+ void *hdr = NULL;
int err;
err = may_init_module();
@@ -4184,12 +4176,12 @@
|MODULE_INIT_IGNORE_VERMAGIC))
return -EINVAL;
- err = kernel_read_file_from_fd(fd, &hdr, &size, INT_MAX,
+ err = kernel_read_file_from_fd(fd, 0, &hdr, INT_MAX, NULL,
READING_MODULE);
- if (err)
+ if (err < 0)
return err;
info.hdr = hdr;
- info.len = size;
+ info.len = err;
return load_module(&info, uargs, flags);
}
@@ -4568,16 +4560,17 @@
return err;
}
-static const struct file_operations proc_modules_operations = {
- .open = modules_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release,
+static const struct proc_ops modules_proc_ops = {
+ .proc_flags = PROC_ENTRY_PERMANENT,
+ .proc_open = modules_open,
+ .proc_read = seq_read,
+ .proc_lseek = seq_lseek,
+ .proc_release = seq_release,
};
static int __init proc_modules_init(void)
{
- proc_create("modules", 0, NULL, &proc_modules_operations);
+ proc_create("modules", 0, NULL, &modules_proc_ops);
return 0;
}
module_init(proc_modules_init);