// SPDX-License-Identifier: GPL-2.0-or-later
/*
   Copyright (C) 2002 Richard Henderson
   Copyright (C) 2001 Rusty Russell, 2002, 2010 Rusty Russell IBM.

*/
#include <linux/export.h>
#include <linux/extable.h>
#include <linux/moduleloader.h>
#include <linux/module_signature.h>
#include <linux/trace_events.h>
#include <linux/init.h>
#include <linux/kallsyms.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/sysfs.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/elf.h>
#include <linux/proc_fs.h>
#include <linux/security.h>
#include <linux/seq_file.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
#include <linux/rcupdate.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/moduleparam.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/vermagic.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/string.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
#include <linux/set_memory.h>
#include <asm/mmu_context.h>
#include <linux/license.h>
#include <asm/sections.h>
#include <linux/tracepoint.h>
#include <linux/ftrace.h>
#include <linux/livepatch.h>
#include <linux/async.h>
#include <linux/percpu.h>
#include <linux/kmemleak.h>
#include <linux/jump_label.h>
#include <linux/pfn.h>
#include <linux/bsearch.h>
#include <linux/dynamic_debug.h>
#include <linux/audit.h>
#include <uapi/linux/module.h>
#include "module-internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/module.h>

#ifndef ARCH_SHF_SMALL
#define ARCH_SHF_SMALL 0
#endif

/*
 * Modules' sections will be aligned on page boundaries
 * to ensure complete separation of code and data, but
 * only when CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y
 */
#ifdef CONFIG_ARCH_HAS_STRICT_MODULE_RWX
# define debug_align(X) ALIGN(X, PAGE_SIZE)
#else
# define debug_align(X) (X)
#endif

/* If this is set, the section belongs in the init part of the module */
#define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG-1))

/*
 * Mutex protects:
 * 1) List of modules (also safely readable with preempt_disable),
 * 2) module_use links,
 * 3) module_addr_min/module_addr_max.
 * (delete and add use RCU list operations).
 */
DEFINE_MUTEX(module_mutex);
EXPORT_SYMBOL_GPL(module_mutex);
static LIST_HEAD(modules);

/* Work queue for freeing init sections in success case */
static void do_free_init(struct work_struct *w);
static DECLARE_WORK(init_free_wq, do_free_init);
static LLIST_HEAD(init_free_list);
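
/*
 * Illustrative note (not upstream text): freeing of init sections is
 * deferred rather than done inline.  On successful load, do_init_module()
 * (later in this file) queues the init region on init_free_list and
 * schedules init_free_wq; do_free_init() then frees the entries only
 * after an RCU grace period, so lockless code that may still be looking
 * at init memory never sees it freed out from under it.
 */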

#ifdef CONFIG_MODULES_TREE_LOOKUP

/*
 * Use a latched RB-tree for __module_address(); this allows us to use
 * RCU-sched lookups of the address from any context.
 *
 * This is conditional on PERF_EVENTS || TRACING because those can really hit
 * __module_address() hard by doing a lot of stack unwinding; potentially from
 * NMI context.
 */

static __always_inline unsigned long __mod_tree_val(struct latch_tree_node *n)
{
	struct module_layout *layout = container_of(n, struct module_layout, mtn.node);

	return (unsigned long)layout->base;
}

static __always_inline unsigned long __mod_tree_size(struct latch_tree_node *n)
{
	struct module_layout *layout = container_of(n, struct module_layout, mtn.node);

	return (unsigned long)layout->size;
}

static __always_inline bool
mod_tree_less(struct latch_tree_node *a, struct latch_tree_node *b)
{
	return __mod_tree_val(a) < __mod_tree_val(b);
}

static __always_inline int
mod_tree_comp(void *key, struct latch_tree_node *n)
{
	unsigned long val = (unsigned long)key;
	unsigned long start, end;

	start = __mod_tree_val(n);
	if (val < start)
		return -1;

	end = start + __mod_tree_size(n);
	if (val >= end)
		return 1;

	return 0;
}

static const struct latch_tree_ops mod_tree_ops = {
	.less = mod_tree_less,
	.comp = mod_tree_comp,
};
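
/*
 * Illustrative note (not upstream text): mod_tree_comp() implements an
 * interval test rather than an exact-key test.  For a node covering
 * [base, base + size), an address below base yields -1, an address at or
 * beyond base + size yields 1, and anything in between yields 0, so
 * latch_tree_find() descends the tree and stops at the module_layout
 * whose range contains the address.
 */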

static struct mod_tree_root {
	struct latch_tree_root root;
	unsigned long addr_min;
	unsigned long addr_max;
} mod_tree __cacheline_aligned = {
	.addr_min = -1UL,
};

#define module_addr_min mod_tree.addr_min
#define module_addr_max mod_tree.addr_max

static noinline void __mod_tree_insert(struct mod_tree_node *node)
{
	latch_tree_insert(&node->node, &mod_tree.root, &mod_tree_ops);
}

static void __mod_tree_remove(struct mod_tree_node *node)
{
	latch_tree_erase(&node->node, &mod_tree.root, &mod_tree_ops);
}

/*
 * These modifications: insert, remove_init and remove; are serialized by the
 * module_mutex.
 */
static void mod_tree_insert(struct module *mod)
{
	mod->core_layout.mtn.mod = mod;
	mod->init_layout.mtn.mod = mod;

	__mod_tree_insert(&mod->core_layout.mtn);
	if (mod->init_layout.size)
		__mod_tree_insert(&mod->init_layout.mtn);
}

static void mod_tree_remove_init(struct module *mod)
{
	if (mod->init_layout.size)
		__mod_tree_remove(&mod->init_layout.mtn);
}

static void mod_tree_remove(struct module *mod)
{
	__mod_tree_remove(&mod->core_layout.mtn);
	mod_tree_remove_init(mod);
}

static struct module *mod_find(unsigned long addr)
{
	struct latch_tree_node *ltn;

	ltn = latch_tree_find((void *)addr, &mod_tree.root, &mod_tree_ops);
	if (!ltn)
		return NULL;

	return container_of(ltn, struct mod_tree_node, node)->mod;
}

#else /* MODULES_TREE_LOOKUP */

static unsigned long module_addr_min = -1UL, module_addr_max = 0;

static void mod_tree_insert(struct module *mod) { }
static void mod_tree_remove_init(struct module *mod) { }
static void mod_tree_remove(struct module *mod) { }

static struct module *mod_find(unsigned long addr)
{
	struct module *mod;

	list_for_each_entry_rcu(mod, &modules, list,
				lockdep_is_held(&module_mutex)) {
		if (within_module(addr, mod))
			return mod;
	}

	return NULL;
}

#endif /* MODULES_TREE_LOOKUP */

/*
 * Bounds of module text, for speeding up __module_address.
 * Protected by module_mutex.
 */
static void __mod_update_bounds(void *base, unsigned int size)
{
	unsigned long min = (unsigned long)base;
	unsigned long max = min + size;

	if (min < module_addr_min)
		module_addr_min = min;
	if (max > module_addr_max)
		module_addr_max = max;
}

static void mod_update_bounds(struct module *mod)
{
	__mod_update_bounds(mod->core_layout.base, mod->core_layout.size);
	if (mod->init_layout.size)
		__mod_update_bounds(mod->init_layout.base, mod->init_layout.size);
}
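
/*
 * Illustrative note (not upstream text): the bounds kept here allow a
 * two-compare fast path before any list or tree walk, roughly:
 *
 *	if (addr < module_addr_min || addr > module_addr_max)
 *		return NULL;	/- not a module address -/
 *
 * which is how __module_address() (later in this file) cheaply rejects
 * the vast majority of kernel addresses.
 */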

#ifdef CONFIG_KGDB_KDB
struct list_head *kdb_modules = &modules; /* kdb needs the list of modules */
#endif /* CONFIG_KGDB_KDB */

static void module_assert_mutex(void)
{
	lockdep_assert_held(&module_mutex);
}

static void module_assert_mutex_or_preempt(void)
{
#ifdef CONFIG_LOCKDEP
	if (unlikely(!debug_locks))
		return;

	WARN_ON_ONCE(!rcu_read_lock_sched_held() &&
		!lockdep_is_held(&module_mutex));
#endif
}

#ifdef CONFIG_MODULE_SIG
static bool sig_enforce = IS_ENABLED(CONFIG_MODULE_SIG_FORCE);
module_param(sig_enforce, bool_enable_only, 0644);

void set_module_sig_enforced(void)
{
	sig_enforce = true;
}
#else
#define sig_enforce false
#endif

/*
 * Export the sig_enforce kernel cmdline parameter so that other
 * subsystems can rely on it instead of poking directly at
 * CONFIG_MODULE_SIG_FORCE.
 */
bool is_module_sig_enforced(void)
{
	return sig_enforce;
}
EXPORT_SYMBOL(is_module_sig_enforced);

/* Block module loading/unloading? */
int modules_disabled = 0;
core_param(nomodule, modules_disabled, bint, 0);

/* Waiting for a module to finish initializing? */
static DECLARE_WAIT_QUEUE_HEAD(module_wq);

static BLOCKING_NOTIFIER_HEAD(module_notify_list);

int register_module_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&module_notify_list, nb);
}
EXPORT_SYMBOL(register_module_notifier);

int unregister_module_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&module_notify_list, nb);
}
EXPORT_SYMBOL(unregister_module_notifier);

/*
 * We require a truly strong try_module_get(): 0 means success.
 * Otherwise an error is returned due to ongoing or failed
 * initialization etc.
 */
static inline int strong_try_module_get(struct module *mod)
{
	BUG_ON(mod && mod->state == MODULE_STATE_UNFORMED);
	if (mod && mod->state == MODULE_STATE_COMING)
		return -EBUSY;
	if (try_module_get(mod))
		return 0;
	else
		return -ENOENT;
}
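
/*
 * Illustrative note (not upstream text): the three outcomes above map
 * directly onto module states.  UNFORMED modules must never be visible
 * to symbol resolution, hence the BUG_ON; COMING means "still running
 * init, worth retrying", hence -EBUSY (resolve_symbol_wait() keys its
 * 30 second wait off exactly this value); and a GOING module refuses
 * try_module_get(), which surfaces here as -ENOENT.
 */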

static inline void add_taint_module(struct module *mod, unsigned flag,
				    enum lockdep_ok lockdep_ok)
{
	add_taint(flag, lockdep_ok);
	set_bit(flag, &mod->taints);
}

/*
 * A thread that wants to hold a reference to a module only while it
 * is running can call this to safely exit.  nfsd and lockd use this.
 */
void __noreturn __module_put_and_exit(struct module *mod, long code)
{
	module_put(mod);
	do_exit(code);
}
EXPORT_SYMBOL(__module_put_and_exit);

/* Find a module section: 0 means not found. */
static unsigned int find_sec(const struct load_info *info, const char *name)
{
	unsigned int i;

	for (i = 1; i < info->hdr->e_shnum; i++) {
		Elf_Shdr *shdr = &info->sechdrs[i];
		/* Alloc bit cleared means "ignore it." */
		if ((shdr->sh_flags & SHF_ALLOC)
		    && strcmp(info->secstrings + shdr->sh_name, name) == 0)
			return i;
	}
	return 0;
}

/* Find a module section, or NULL. */
static void *section_addr(const struct load_info *info, const char *name)
{
	/* Section 0 has sh_addr 0. */
	return (void *)info->sechdrs[find_sec(info, name)].sh_addr;
}

/* Find a module section, or NULL.  Fill in number of "objects" in section. */
static void *section_objs(const struct load_info *info,
			  const char *name,
			  size_t object_size,
			  unsigned int *num)
{
	unsigned int sec = find_sec(info, name);

	/* Section 0 has sh_addr 0 and sh_size 0. */
	*num = info->sechdrs[sec].sh_size / object_size;
	return (void *)info->sechdrs[sec].sh_addr;
}
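
/*
 * Illustrative usage sketch (not upstream text): section_objs() is how
 * typed arrays are pulled out of the ELF image later in this file, e.g.
 *
 *	mod->syms = section_objs(info, "__ksymtab",
 *				 sizeof(*mod->syms), &mod->num_syms);
 *
 * A missing section degrades gracefully to a NULL pointer with a count
 * of zero, because section 0 has sh_addr == 0 and sh_size == 0.
 */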

/* Provided by the linker */
extern const struct kernel_symbol __start___ksymtab[];
extern const struct kernel_symbol __stop___ksymtab[];
extern const struct kernel_symbol __start___ksymtab_gpl[];
extern const struct kernel_symbol __stop___ksymtab_gpl[];
extern const struct kernel_symbol __start___ksymtab_gpl_future[];
extern const struct kernel_symbol __stop___ksymtab_gpl_future[];
extern const s32 __start___kcrctab[];
extern const s32 __start___kcrctab_gpl[];
extern const s32 __start___kcrctab_gpl_future[];
#ifdef CONFIG_UNUSED_SYMBOLS
extern const struct kernel_symbol __start___ksymtab_unused[];
extern const struct kernel_symbol __stop___ksymtab_unused[];
extern const struct kernel_symbol __start___ksymtab_unused_gpl[];
extern const struct kernel_symbol __stop___ksymtab_unused_gpl[];
extern const s32 __start___kcrctab_unused[];
extern const s32 __start___kcrctab_unused_gpl[];
#endif

#ifndef CONFIG_MODVERSIONS
#define symversion(base, idx) NULL
#else
#define symversion(base, idx) ((base != NULL) ? ((base) + (idx)) : NULL)
#endif

static bool each_symbol_in_section(const struct symsearch *arr,
				   unsigned int arrsize,
				   struct module *owner,
				   bool (*fn)(const struct symsearch *syms,
					      struct module *owner,
					      void *data),
				   void *data)
{
	unsigned int j;

	for (j = 0; j < arrsize; j++) {
		if (fn(&arr[j], owner, data))
			return true;
	}

	return false;
}

/* Returns true as soon as fn returns true, otherwise false. */
static bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
				    struct module *owner,
				    void *data),
			 void *data)
{
	struct module *mod;
	static const struct symsearch arr[] = {
		{ __start___ksymtab, __stop___ksymtab, __start___kcrctab,
		  NOT_GPL_ONLY, false },
		{ __start___ksymtab_gpl, __stop___ksymtab_gpl,
		  __start___kcrctab_gpl,
		  GPL_ONLY, false },
		{ __start___ksymtab_gpl_future, __stop___ksymtab_gpl_future,
		  __start___kcrctab_gpl_future,
		  WILL_BE_GPL_ONLY, false },
#ifdef CONFIG_UNUSED_SYMBOLS
		{ __start___ksymtab_unused, __stop___ksymtab_unused,
		  __start___kcrctab_unused,
		  NOT_GPL_ONLY, true },
		{ __start___ksymtab_unused_gpl, __stop___ksymtab_unused_gpl,
		  __start___kcrctab_unused_gpl,
		  GPL_ONLY, true },
#endif
	};

	module_assert_mutex_or_preempt();

	if (each_symbol_in_section(arr, ARRAY_SIZE(arr), NULL, fn, data))
		return true;

	list_for_each_entry_rcu(mod, &modules, list,
				lockdep_is_held(&module_mutex)) {
		struct symsearch arr[] = {
			{ mod->syms, mod->syms + mod->num_syms, mod->crcs,
			  NOT_GPL_ONLY, false },
			{ mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
			  mod->gpl_crcs,
			  GPL_ONLY, false },
			{ mod->gpl_future_syms,
			  mod->gpl_future_syms + mod->num_gpl_future_syms,
			  mod->gpl_future_crcs,
			  WILL_BE_GPL_ONLY, false },
#ifdef CONFIG_UNUSED_SYMBOLS
			{ mod->unused_syms,
			  mod->unused_syms + mod->num_unused_syms,
			  mod->unused_crcs,
			  NOT_GPL_ONLY, true },
			{ mod->unused_gpl_syms,
			  mod->unused_gpl_syms + mod->num_unused_gpl_syms,
			  mod->unused_gpl_crcs,
			  GPL_ONLY, true },
#endif
		};

		if (mod->state == MODULE_STATE_UNFORMED)
			continue;

		if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
			return true;
	}
	return false;
}

struct find_symbol_arg {
	/* Input */
	const char *name;
	bool gplok;
	bool warn;

	/* Output */
	struct module *owner;
	const s32 *crc;
	const struct kernel_symbol *sym;
	enum mod_license license;
};

static bool check_exported_symbol(const struct symsearch *syms,
				  struct module *owner,
				  unsigned int symnum, void *data)
{
	struct find_symbol_arg *fsa = data;

	if (!fsa->gplok) {
		if (syms->license == GPL_ONLY)
			return false;
		if (syms->license == WILL_BE_GPL_ONLY && fsa->warn) {
			pr_warn("Symbol %s is being used by a non-GPL module, "
				"which will not be allowed in the future\n",
				fsa->name);
		}
	}

#ifdef CONFIG_UNUSED_SYMBOLS
	if (syms->unused && fsa->warn) {
		pr_warn("Symbol %s is marked as UNUSED, however this module is "
			"using it.\n", fsa->name);
		pr_warn("This symbol will go away in the future.\n");
		pr_warn("Please evaluate if this is the right api to use and "
			"if it really is, submit a report to the linux kernel "
			"mailing list together with submitting your code for "
			"inclusion.\n");
	}
#endif

	fsa->owner = owner;
	fsa->crc = symversion(syms->crcs, symnum);
	fsa->sym = &syms->start[symnum];
	fsa->license = syms->license;
	return true;
}

static unsigned long kernel_symbol_value(const struct kernel_symbol *sym)
{
#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
	return (unsigned long)offset_to_ptr(&sym->value_offset);
#else
	return sym->value;
#endif
}
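
/*
 * Illustrative note (not upstream text): with
 * CONFIG_HAVE_ARCH_PREL32_RELOCATIONS the export tables hold 32-bit
 * place-relative offsets instead of absolute pointers, shrinking the
 * tables on 64-bit kernels and keeping them position independent.
 * offset_to_ptr() recovers the address roughly as:
 *
 *	(void *)((unsigned long)&sym->value_offset + sym->value_offset)
 */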

static const char *kernel_symbol_name(const struct kernel_symbol *sym)
{
#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
	return offset_to_ptr(&sym->name_offset);
#else
	return sym->name;
#endif
}

static const char *kernel_symbol_namespace(const struct kernel_symbol *sym)
{
#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
	if (!sym->namespace_offset)
		return NULL;
	return offset_to_ptr(&sym->namespace_offset);
#else
	return sym->namespace;
#endif
}

static int cmp_name(const void *name, const void *sym)
{
	return strcmp(name, kernel_symbol_name(sym));
}

static bool find_exported_symbol_in_section(const struct symsearch *syms,
					    struct module *owner,
					    void *data)
{
	struct find_symbol_arg *fsa = data;
	struct kernel_symbol *sym;

	sym = bsearch(fsa->name, syms->start, syms->stop - syms->start,
			sizeof(struct kernel_symbol), cmp_name);

	if (sym != NULL && check_exported_symbol(syms, owner,
						 sym - syms->start, data))
		return true;

	return false;
}

/*
 * Find an exported symbol and return it, along with (optionally) its crc
 * and the module that owns it.  Needs preempt disabled or module_mutex.
 */
static const struct kernel_symbol *find_symbol(const char *name,
					struct module **owner,
					const s32 **crc,
					enum mod_license *license,
					bool gplok,
					bool warn)
{
	struct find_symbol_arg fsa;

	fsa.name = name;
	fsa.gplok = gplok;
	fsa.warn = warn;

	if (each_symbol_section(find_exported_symbol_in_section, &fsa)) {
		if (owner)
			*owner = fsa.owner;
		if (crc)
			*crc = fsa.crc;
		if (license)
			*license = fsa.license;
		return fsa.sym;
	}

	pr_debug("Failed to find symbol %s\n", name);
	return NULL;
}

/*
 * Search for module by name: must hold module_mutex (or preempt disabled
 * for read-only access).
 */
static struct module *find_module_all(const char *name, size_t len,
				      bool even_unformed)
{
	struct module *mod;

	module_assert_mutex_or_preempt();

	list_for_each_entry_rcu(mod, &modules, list,
				lockdep_is_held(&module_mutex)) {
		if (!even_unformed && mod->state == MODULE_STATE_UNFORMED)
			continue;
		if (strlen(mod->name) == len && !memcmp(mod->name, name, len))
			return mod;
	}
	return NULL;
}

struct module *find_module(const char *name)
{
	module_assert_mutex();
	return find_module_all(name, strlen(name), false);
}
EXPORT_SYMBOL_GPL(find_module);

#ifdef CONFIG_SMP

static inline void __percpu *mod_percpu(struct module *mod)
{
	return mod->percpu;
}

static int percpu_modalloc(struct module *mod, struct load_info *info)
{
	Elf_Shdr *pcpusec = &info->sechdrs[info->index.pcpu];
	unsigned long align = pcpusec->sh_addralign;

	if (!pcpusec->sh_size)
		return 0;

	if (align > PAGE_SIZE) {
		pr_warn("%s: per-cpu alignment %li > %li\n",
			mod->name, align, PAGE_SIZE);
		align = PAGE_SIZE;
	}

	mod->percpu = __alloc_reserved_percpu(pcpusec->sh_size, align);
	if (!mod->percpu) {
		pr_warn("%s: Could not allocate %lu bytes percpu data\n",
			mod->name, (unsigned long)pcpusec->sh_size);
		return -ENOMEM;
	}
	mod->percpu_size = pcpusec->sh_size;
	return 0;
}

static void percpu_modfree(struct module *mod)
{
	free_percpu(mod->percpu);
}

static unsigned int find_pcpusec(struct load_info *info)
{
	return find_sec(info, ".data..percpu");
}

static void percpu_modcopy(struct module *mod,
			   const void *from, unsigned long size)
{
	int cpu;

	for_each_possible_cpu(cpu)
		memcpy(per_cpu_ptr(mod->percpu, cpu), from, size);
}

bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr)
{
	struct module *mod;
	unsigned int cpu;

	preempt_disable();

	list_for_each_entry_rcu(mod, &modules, list) {
		if (mod->state == MODULE_STATE_UNFORMED)
			continue;
		if (!mod->percpu_size)
			continue;
		for_each_possible_cpu(cpu) {
			void *start = per_cpu_ptr(mod->percpu, cpu);
			void *va = (void *)addr;

			if (va >= start && va < start + mod->percpu_size) {
				if (can_addr) {
					*can_addr = (unsigned long) (va - start);
					*can_addr += (unsigned long)
						per_cpu_ptr(mod->percpu,
							    get_boot_cpu_id());
				}
				preempt_enable();
				return true;
			}
		}
	}

	preempt_enable();
	return false;
}

/**
 * is_module_percpu_address - test whether address is from module static percpu
 * @addr: address to test
 *
 * Test whether @addr belongs to module static percpu area.
 *
 * RETURNS:
 * %true if @addr is from module static percpu area
 */
bool is_module_percpu_address(unsigned long addr)
{
	return __is_module_percpu_address(addr, NULL);
}

#else /* ... !CONFIG_SMP */

static inline void __percpu *mod_percpu(struct module *mod)
{
	return NULL;
}
static int percpu_modalloc(struct module *mod, struct load_info *info)
{
	/* UP modules shouldn't have this section: ENOMEM isn't quite right */
	if (info->sechdrs[info->index.pcpu].sh_size != 0)
		return -ENOMEM;
	return 0;
}
static inline void percpu_modfree(struct module *mod)
{
}
static unsigned int find_pcpusec(struct load_info *info)
{
	return 0;
}
static inline void percpu_modcopy(struct module *mod,
				  const void *from, unsigned long size)
{
	/* pcpusec should be 0, and size of that section should be 0. */
	BUG_ON(size != 0);
}
bool is_module_percpu_address(unsigned long addr)
{
	return false;
}

bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr)
{
	return false;
}

#endif /* CONFIG_SMP */

#define MODINFO_ATTR(field)	\
static void setup_modinfo_##field(struct module *mod, const char *s)  \
{                                                                     \
	mod->field = kstrdup(s, GFP_KERNEL);                          \
}                                                                     \
static ssize_t show_modinfo_##field(struct module_attribute *mattr,   \
			struct module_kobject *mk, char *buffer)      \
{                                                                     \
	return scnprintf(buffer, PAGE_SIZE, "%s\n", mk->mod->field);  \
}                                                                     \
static int modinfo_##field##_exists(struct module *mod)               \
{                                                                     \
	return mod->field != NULL;                                    \
}                                                                     \
static void free_modinfo_##field(struct module *mod)                  \
{                                                                     \
	kfree(mod->field);                                            \
	mod->field = NULL;                                            \
}                                                                     \
static struct module_attribute modinfo_##field = {                   \
	.attr = { .name = __stringify(field), .mode = 0444 },         \
	.show = show_modinfo_##field,                                 \
	.setup = setup_modinfo_##field,                               \
	.test = modinfo_##field##_exists,                             \
	.free = free_modinfo_##field,                                 \
};
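
/*
 * Illustrative note (not upstream text): MODINFO_ATTR(version) below
 * expands to setup_modinfo_version(), show_modinfo_version(),
 * modinfo_version_exists() and free_modinfo_version(), all wired into
 * one struct module_attribute; the value parsed out of the .modinfo
 * section then shows up as /sys/module/<name>/version.
 */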

MODINFO_ATTR(version);
MODINFO_ATTR(srcversion);

static char last_unloaded_module[MODULE_NAME_LEN+1];

#ifdef CONFIG_MODULE_UNLOAD

EXPORT_TRACEPOINT_SYMBOL(module_get);

/* MODULE_REF_BASE is the base reference count held by the module loader. */
#define MODULE_REF_BASE	1

/* Init the unload section of the module. */
static int module_unload_init(struct module *mod)
{
	/*
	 * Initialize reference counter to MODULE_REF_BASE.
	 * refcnt == 0 means module is going.
	 */
	atomic_set(&mod->refcnt, MODULE_REF_BASE);

	INIT_LIST_HEAD(&mod->source_list);
	INIT_LIST_HEAD(&mod->target_list);

	/* Hold reference count during initialization. */
	atomic_inc(&mod->refcnt);

	return 0;
}

/* Does a already use b? */
static int already_uses(struct module *a, struct module *b)
{
	struct module_use *use;

	list_for_each_entry(use, &b->source_list, source_list) {
		if (use->source == a) {
			pr_debug("%s uses %s!\n", a->name, b->name);
			return 1;
		}
	}
	pr_debug("%s does not use %s!\n", a->name, b->name);
	return 0;
}

/*
 * Module a uses b
 *  - we add 'a' as a "source", 'b' as a "target" of module use
 *  - the module_use is added to the list of 'b' sources (so
 *    'b' can walk the list to see who sourced them), and of 'a'
 *    targets (so 'a' can see what modules it targets).
 */
static int add_module_usage(struct module *a, struct module *b)
{
	struct module_use *use;

	pr_debug("Allocating new usage for %s.\n", a->name);
	use = kmalloc(sizeof(*use), GFP_ATOMIC);
	if (!use)
		return -ENOMEM;

	use->source = a;
	use->target = b;
	list_add(&use->source_list, &b->source_list);
	list_add(&use->target_list, &a->target_list);
	return 0;
}

/* Module a uses b: caller must hold module_mutex. */
static int ref_module(struct module *a, struct module *b)
{
	int err;

	if (b == NULL || already_uses(a, b))
		return 0;

	/* If module isn't available, we fail. */
	err = strong_try_module_get(b);
	if (err)
		return err;

	err = add_module_usage(a, b);
	if (err) {
		module_put(b);
		return err;
	}
	return 0;
}

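/*
 * Illustrative note (not upstream text): the use lists form a dependency
 * graph.  When module A resolves a symbol exported by module B,
 * ref_module(A, B) pins B via strong_try_module_get() and links one
 * struct module_use into both B->source_list and A->target_list;
 * delete_module() later refuses to unload B while B->source_list is
 * non-empty.
 */
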
/* Clear the unload stuff of the module. */
static void module_unload_free(struct module *mod)
{
	struct module_use *use, *tmp;

	mutex_lock(&module_mutex);
	list_for_each_entry_safe(use, tmp, &mod->target_list, target_list) {
		struct module *i = use->target;
		pr_debug("%s unusing %s\n", mod->name, i->name);
		module_put(i);
		list_del(&use->source_list);
		list_del(&use->target_list);
		kfree(use);
	}
	mutex_unlock(&module_mutex);
}

#ifdef CONFIG_MODULE_FORCE_UNLOAD
static inline int try_force_unload(unsigned int flags)
{
	int ret = (flags & O_TRUNC);
	if (ret)
		add_taint(TAINT_FORCED_RMMOD, LOCKDEP_NOW_UNRELIABLE);
	return ret;
}
#else
static inline int try_force_unload(unsigned int flags)
{
	return 0;
}
#endif /* CONFIG_MODULE_FORCE_UNLOAD */

/* Try to release refcount of module, 0 means success. */
static int try_release_module_ref(struct module *mod)
{
	int ret;

	/* Try to decrement refcnt which we set at loading */
	ret = atomic_sub_return(MODULE_REF_BASE, &mod->refcnt);
	BUG_ON(ret < 0);
	if (ret)
		/* Someone can put this right now, recover with checking */
		ret = atomic_add_unless(&mod->refcnt, MODULE_REF_BASE, 0);

	return ret;
}

static int try_stop_module(struct module *mod, int flags, int *forced)
{
	/* If it's not unused, quit unless we're forcing. */
	if (try_release_module_ref(mod) != 0) {
		*forced = try_force_unload(flags);
		if (!(*forced))
			return -EWOULDBLOCK;
	}

	/* Mark it as dying. */
	mod->state = MODULE_STATE_GOING;

	return 0;
}

/**
 * module_refcount - return the refcount or -1 if unloading
 *
 * @mod:	the module we're checking
 *
 * Returns:
 *	-1 if the module is in the process of unloading
 *	otherwise the number of references in the kernel to the module
 */
int module_refcount(struct module *mod)
{
	return atomic_read(&mod->refcnt) - MODULE_REF_BASE;
}
EXPORT_SYMBOL(module_refcount);
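
/*
 * Illustrative note (not upstream text): module_unload_init() starts the
 * counter at MODULE_REF_BASE plus one extra reference held across init,
 * and that extra reference is dropped once init succeeds.  A live module
 * with no users therefore reads back as module_refcount() == 0, and one
 * racing with try_release_module_ref() can transiently read as -1,
 * matching the kernel-doc above.
 */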

/* This exists whether we can unload or not */
static void free_module(struct module *mod);

SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
		unsigned int, flags)
{
	struct module *mod;
	char name[MODULE_NAME_LEN];
	int ret, forced = 0;

	if (!capable(CAP_SYS_MODULE) || modules_disabled)
		return -EPERM;

	if (strncpy_from_user(name, name_user, MODULE_NAME_LEN-1) < 0)
		return -EFAULT;
	name[MODULE_NAME_LEN-1] = '\0';

	audit_log_kern_module(name);

	if (mutex_lock_interruptible(&module_mutex) != 0)
		return -EINTR;

	mod = find_module(name);
	if (!mod) {
		ret = -ENOENT;
		goto out;
	}

	if (!list_empty(&mod->source_list)) {
		/* Other modules depend on us: get rid of them first. */
		ret = -EWOULDBLOCK;
		goto out;
	}

	/* Doing init or already dying? */
	if (mod->state != MODULE_STATE_LIVE) {
		/* FIXME: if (force), slam module count damn the torpedoes */
		pr_debug("%s already dying\n", mod->name);
		ret = -EBUSY;
		goto out;
	}

	/* If it has an init func, it must have an exit func to unload */
	if (mod->init && !mod->exit) {
		forced = try_force_unload(flags);
		if (!forced) {
			/* This module can't be removed */
			ret = -EBUSY;
			goto out;
		}
	}

	/* Stop the machine so refcounts can't move and disable module. */
	ret = try_stop_module(mod, flags, &forced);
	if (ret != 0)
		goto out;

	mutex_unlock(&module_mutex);
	/* Final destruction now no one is using it. */
	if (mod->exit != NULL)
		mod->exit();
	blocking_notifier_call_chain(&module_notify_list,
				     MODULE_STATE_GOING, mod);
	klp_module_going(mod);
	ftrace_release_mod(mod);

	async_synchronize_full();

	/* Store the name of the last unloaded module for diagnostic purposes */
	strlcpy(last_unloaded_module, mod->name, sizeof(last_unloaded_module));

	free_module(mod);
	/* someone could wait for the module in add_unformed_module() */
	wake_up_all(&module_wq);
	return 0;
out:
	mutex_unlock(&module_mutex);
	return ret;
}

static inline void print_unload_info(struct seq_file *m, struct module *mod)
{
	struct module_use *use;
	int printed_something = 0;

	seq_printf(m, " %i ", module_refcount(mod));

	/*
	 * Always include a trailing , so userspace can differentiate
	 * between this and the old multi-field proc format.
	 */
	list_for_each_entry(use, &mod->source_list, source_list) {
		printed_something = 1;
		seq_printf(m, "%s,", use->source->name);
	}

	if (mod->init != NULL && mod->exit == NULL) {
		printed_something = 1;
		seq_puts(m, "[permanent],");
	}

	if (!printed_something)
		seq_puts(m, "-");
}

void __symbol_put(const char *symbol)
{
	struct module *owner;

	preempt_disable();
	if (!find_symbol(symbol, &owner, NULL, NULL, true, false))
		BUG();
	module_put(owner);
	preempt_enable();
}
EXPORT_SYMBOL(__symbol_put);

/* Note this assumes addr is a function, which it currently always is. */
void symbol_put_addr(void *addr)
{
	struct module *modaddr;
	unsigned long a = (unsigned long)dereference_function_descriptor(addr);

	if (core_kernel_text(a))
		return;

	/*
	 * Even though we hold a reference on the module; we still need to
	 * disable preemption in order to safely traverse the data structure.
	 */
	preempt_disable();
	modaddr = __module_text_address(a);
	BUG_ON(!modaddr);
	module_put(modaddr);
	preempt_enable();
}
EXPORT_SYMBOL_GPL(symbol_put_addr);

static ssize_t show_refcnt(struct module_attribute *mattr,
			   struct module_kobject *mk, char *buffer)
{
	return sprintf(buffer, "%i\n", module_refcount(mk->mod));
}

static struct module_attribute modinfo_refcnt =
	__ATTR(refcnt, 0444, show_refcnt, NULL);

void __module_get(struct module *module)
{
	if (module) {
		preempt_disable();
		atomic_inc(&module->refcnt);
		trace_module_get(module, _RET_IP_);
		preempt_enable();
	}
}
EXPORT_SYMBOL(__module_get);

bool try_module_get(struct module *module)
{
	bool ret = true;

	if (module) {
		preempt_disable();
		/* Note: here, we can fail to get a reference */
		if (likely(module_is_live(module) &&
			   atomic_inc_not_zero(&module->refcnt) != 0))
			trace_module_get(module, _RET_IP_);
		else
			ret = false;

		preempt_enable();
	}
	return ret;
}
EXPORT_SYMBOL(try_module_get);
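
/*
 * Illustrative usage sketch (not upstream text): callers that invoke
 * into another module typically bracket the call like
 *
 *	if (try_module_get(owner)) {
 *		ops->callback(...);
 *		module_put(owner);
 *	}
 *
 * so the owning module cannot be unloaded while the callback runs;
 * "owner" and "ops" here are generic placeholders, not names from this
 * file.
 */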

void module_put(struct module *module)
{
	int ret;

	if (module) {
		preempt_disable();
		ret = atomic_dec_if_positive(&module->refcnt);
		WARN_ON(ret < 0);	/* Failed to put refcount */
		trace_module_put(module, _RET_IP_);
		preempt_enable();
	}
}
EXPORT_SYMBOL(module_put);

#else /* !CONFIG_MODULE_UNLOAD */
static inline void print_unload_info(struct seq_file *m, struct module *mod)
{
	/* We don't know the usage count, or what modules are using. */
	seq_puts(m, " - -");
}

static inline void module_unload_free(struct module *mod)
{
}

static int ref_module(struct module *a, struct module *b)
{
	return strong_try_module_get(b);
}

static inline int module_unload_init(struct module *mod)
{
	return 0;
}
#endif /* CONFIG_MODULE_UNLOAD */

static size_t module_flags_taint(struct module *mod, char *buf)
{
	size_t l = 0;
	int i;

	for (i = 0; i < TAINT_FLAGS_COUNT; i++) {
		if (taint_flags[i].module && test_bit(i, &mod->taints))
			buf[l++] = taint_flags[i].c_true;
	}

	return l;
}

static ssize_t show_initstate(struct module_attribute *mattr,
			      struct module_kobject *mk, char *buffer)
{
	const char *state = "unknown";

	switch (mk->mod->state) {
	case MODULE_STATE_LIVE:
		state = "live";
		break;
	case MODULE_STATE_COMING:
		state = "coming";
		break;
	case MODULE_STATE_GOING:
		state = "going";
		break;
	default:
		BUG();
	}
	return sprintf(buffer, "%s\n", state);
}

static struct module_attribute modinfo_initstate =
	__ATTR(initstate, 0444, show_initstate, NULL);

static ssize_t store_uevent(struct module_attribute *mattr,
			    struct module_kobject *mk,
			    const char *buffer, size_t count)
{
	int rc;

	rc = kobject_synth_uevent(&mk->kobj, buffer, count);
	return rc ? rc : count;
}

struct module_attribute module_uevent =
	__ATTR(uevent, 0200, NULL, store_uevent);

static ssize_t show_coresize(struct module_attribute *mattr,
			     struct module_kobject *mk, char *buffer)
{
	return sprintf(buffer, "%u\n", mk->mod->core_layout.size);
}

static struct module_attribute modinfo_coresize =
	__ATTR(coresize, 0444, show_coresize, NULL);

static ssize_t show_initsize(struct module_attribute *mattr,
			     struct module_kobject *mk, char *buffer)
{
	return sprintf(buffer, "%u\n", mk->mod->init_layout.size);
}

static struct module_attribute modinfo_initsize =
	__ATTR(initsize, 0444, show_initsize, NULL);

static ssize_t show_taint(struct module_attribute *mattr,
			  struct module_kobject *mk, char *buffer)
{
	size_t l;

	l = module_flags_taint(mk->mod, buffer);
	buffer[l++] = '\n';
	return l;
}

static struct module_attribute modinfo_taint =
	__ATTR(taint, 0444, show_taint, NULL);

static struct module_attribute *modinfo_attrs[] = {
	&module_uevent,
	&modinfo_version,
	&modinfo_srcversion,
	&modinfo_initstate,
	&modinfo_coresize,
	&modinfo_initsize,
	&modinfo_taint,
#ifdef CONFIG_MODULE_UNLOAD
	&modinfo_refcnt,
#endif
	NULL,
};

static const char vermagic[] = VERMAGIC_STRING;

static int try_to_force_load(struct module *mod, const char *reason)
{
#ifdef CONFIG_MODULE_FORCE_LOAD
	if (!test_taint(TAINT_FORCED_MODULE))
		pr_warn("%s: %s: kernel tainted.\n", mod->name, reason);
	add_taint_module(mod, TAINT_FORCED_MODULE, LOCKDEP_NOW_UNRELIABLE);
	return 0;
#else
	return -ENOEXEC;
#endif
}

#ifdef CONFIG_MODVERSIONS

static u32 resolve_rel_crc(const s32 *crc)
{
	return *(u32 *)((void *)crc + *crc);
}

static int check_version(const struct load_info *info,
			 const char *symname,
			 struct module *mod,
			 const s32 *crc)
{
	Elf_Shdr *sechdrs = info->sechdrs;
	unsigned int versindex = info->index.vers;
	unsigned int i, num_versions;
	struct modversion_info *versions;

	/* Exporting module didn't supply crcs?  OK, we're already tainted. */
	if (!crc)
		return 1;

	/* No versions at all?  modprobe --force does this. */
	if (versindex == 0)
		return try_to_force_load(mod, symname) == 0;

	versions = (void *) sechdrs[versindex].sh_addr;
	num_versions = sechdrs[versindex].sh_size
		/ sizeof(struct modversion_info);

	for (i = 0; i < num_versions; i++) {
		u32 crcval;

		if (strcmp(versions[i].name, symname) != 0)
			continue;

		if (IS_ENABLED(CONFIG_MODULE_REL_CRCS))
			crcval = resolve_rel_crc(crc);
		else
			crcval = *crc;
		if (versions[i].crc == crcval)
			return 1;
		pr_debug("Found checksum %X vs module %lX\n",
			 crcval, versions[i].crc);
		goto bad_version;
	}

	/* Broken toolchain.  Warn once, then let it go... */
	pr_warn_once("%s: no symbol version for %s\n", info->name, symname);
	return 1;

bad_version:
	pr_warn("%s: disagrees about version of symbol %s\n",
		info->name, symname);
	return 0;
}

static inline int check_modstruct_version(const struct load_info *info,
					  struct module *mod)
{
	const s32 *crc;

	/*
	 * Since this should be found in kernel (which can't be removed), no
	 * locking is necessary -- use preempt_disable() to placate lockdep.
	 */
	preempt_disable();
	if (!find_symbol("module_layout", NULL, &crc, NULL, true, false)) {
		preempt_enable();
		BUG();
	}
	preempt_enable();
	return check_version(info, "module_layout", mod, crc);
}

/* First part is kernel version, which we ignore if module has crcs. */
static inline int same_magic(const char *amagic, const char *bmagic,
			     bool has_crcs)
{
	if (has_crcs) {
		amagic += strcspn(amagic, " ");
		bmagic += strcspn(bmagic, " ");
	}
	return strcmp(amagic, bmagic) == 0;
}
#else
static inline int check_version(const struct load_info *info,
				const char *symname,
				struct module *mod,
				const s32 *crc)
{
	return 1;
}

static inline int check_modstruct_version(const struct load_info *info,
					  struct module *mod)
{
	return 1;
}

static inline int same_magic(const char *amagic, const char *bmagic,
			     bool has_crcs)
{
	return strcmp(amagic, bmagic) == 0;
}
#endif /* CONFIG_MODVERSIONS */
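
/*
 * Illustrative note (not upstream text): a vermagic string looks roughly
 * like "5.4.0 SMP mod_unload aarch64"; the exact contents depend on the
 * configuration (see VERMAGIC_STRING in <linux/vermagic.h>).  When both
 * sides carry symbol CRCs, same_magic() skips everything up to the first
 * space, i.e. the kernel release, and compares only the feature flags.
 */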

static char *get_modinfo(const struct load_info *info, const char *tag);
static char *get_next_modinfo(const struct load_info *info, const char *tag,
			      char *prev);

static int verify_namespace_is_imported(const struct load_info *info,
					const struct kernel_symbol *sym,
					struct module *mod)
{
	const char *namespace;
	char *imported_namespace;

	namespace = kernel_symbol_namespace(sym);
	if (namespace) {
		imported_namespace = get_modinfo(info, "import_ns");
		while (imported_namespace) {
			if (strcmp(namespace, imported_namespace) == 0)
				return 0;
			imported_namespace = get_next_modinfo(
				info, "import_ns", imported_namespace);
		}
#ifdef CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS
		pr_warn(
#else
		pr_err(
#endif
			"%s: module uses symbol (%s) from namespace %s, but does not import it.\n",
			mod->name, kernel_symbol_name(sym), namespace);
#ifndef CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS
		return -EINVAL;
#endif
	}
	return 0;
}
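
/*
 * Illustrative note (not upstream text): a namespaced export is created
 * with EXPORT_SYMBOL_NS(sym, MY_NS) and consumed after a
 * MODULE_IMPORT_NS(MY_NS), which records an "import_ns=MY_NS" modinfo
 * tag; that tag is what the get_modinfo()/get_next_modinfo() loop above
 * matches against.
 */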
1435
Olivier Deprez0e641232021-09-23 10:07:05 +02001436static bool inherit_taint(struct module *mod, struct module *owner)
1437{
1438 if (!owner || !test_bit(TAINT_PROPRIETARY_MODULE, &owner->taints))
1439 return true;
1440
1441 if (mod->using_gplonly_symbols) {
1442 pr_err("%s: module using GPL-only symbols uses symbols from proprietary module %s.\n",
1443 mod->name, owner->name);
1444 return false;
1445 }
1446
1447 if (!test_bit(TAINT_PROPRIETARY_MODULE, &mod->taints)) {
1448 pr_warn("%s: module uses symbols from proprietary module %s, inheriting taint.\n",
1449 mod->name, owner->name);
1450 set_bit(TAINT_PROPRIETARY_MODULE, &mod->taints);
1451 }
1452 return true;
1453}
David Brazdil0f672f62019-12-10 10:32:29 +00001454
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001455/* Resolve a symbol for this module. I.e. if we find one, record usage. */
1456static const struct kernel_symbol *resolve_symbol(struct module *mod,
1457 const struct load_info *info,
1458 const char *name,
1459 char ownername[])
1460{
1461 struct module *owner;
1462 const struct kernel_symbol *sym;
1463 const s32 *crc;
Olivier Deprez0e641232021-09-23 10:07:05 +02001464 enum mod_license license;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001465 int err;
1466
1467 /*
1468 * The module_mutex should not be a heavily contended lock;
1469 * if we get the occasional sleep here, we'll go an extra iteration
1470 * in the wait_event_interruptible(), which is harmless.
1471 */
1472 sched_annotate_sleep();
1473 mutex_lock(&module_mutex);
Olivier Deprez0e641232021-09-23 10:07:05 +02001474 sym = find_symbol(name, &owner, &crc, &license,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001475 !(mod->taints & (1 << TAINT_PROPRIETARY_MODULE)), true);
1476 if (!sym)
1477 goto unlock;
1478
Olivier Deprez0e641232021-09-23 10:07:05 +02001479 if (license == GPL_ONLY)
1480 mod->using_gplonly_symbols = true;
1481
1482 if (!inherit_taint(mod, owner)) {
1483 sym = NULL;
1484 goto getname;
1485 }
1486
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001487 if (!check_version(info, name, mod, crc)) {
1488 sym = ERR_PTR(-EINVAL);
1489 goto getname;
1490 }
1491
David Brazdil0f672f62019-12-10 10:32:29 +00001492 err = verify_namespace_is_imported(info, sym, mod);
1493 if (err) {
1494 sym = ERR_PTR(err);
1495 goto getname;
1496 }
1497
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001498 err = ref_module(mod, owner);
1499 if (err) {
1500 sym = ERR_PTR(err);
1501 goto getname;
1502 }
1503
1504getname:
1505 /* We must make copy under the lock if we failed to get ref. */
1506 strncpy(ownername, module_name(owner), MODULE_NAME_LEN);
1507unlock:
1508 mutex_unlock(&module_mutex);
1509 return sym;
1510}
1511
1512static const struct kernel_symbol *
1513resolve_symbol_wait(struct module *mod,
1514 const struct load_info *info,
1515 const char *name)
1516{
1517 const struct kernel_symbol *ksym;
1518 char owner[MODULE_NAME_LEN];
1519
1520 if (wait_event_interruptible_timeout(module_wq,
1521 !IS_ERR(ksym = resolve_symbol(mod, info, name, owner))
1522 || PTR_ERR(ksym) != -EBUSY,
1523 30 * HZ) <= 0) {
1524 pr_warn("%s: gave up waiting for init of module %s.\n",
1525 mod->name, owner);
1526 }
1527 return ksym;
1528}
1529
1530/*
1531 * /sys/module/foo/sections stuff
1532 * J. Corbet <corbet@lwn.net>
1533 */
1534#ifdef CONFIG_SYSFS
1535
1536#ifdef CONFIG_KALLSYMS
1537static inline bool sect_empty(const Elf_Shdr *sect)
1538{
1539 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
1540}
1541
1542struct module_sect_attr {
Olivier Deprez0e641232021-09-23 10:07:05 +02001543 struct bin_attribute battr;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001544 unsigned long address;
1545};
1546
1547struct module_sect_attrs {
1548 struct attribute_group grp;
1549 unsigned int nsections;
1550 struct module_sect_attr attrs[0];
1551};
1552
Olivier Deprez0e641232021-09-23 10:07:05 +02001553#define MODULE_SECT_READ_SIZE (3 /* "0x", "\n" */ + (BITS_PER_LONG / 4))
1554static ssize_t module_sect_read(struct file *file, struct kobject *kobj,
1555 struct bin_attribute *battr,
1556 char *buf, loff_t pos, size_t count)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001557{
1558 struct module_sect_attr *sattr =
Olivier Deprez0e641232021-09-23 10:07:05 +02001559 container_of(battr, struct module_sect_attr, battr);
1560 char bounce[MODULE_SECT_READ_SIZE + 1];
1561 size_t wrote;
1562
1563 if (pos != 0)
1564 return -EINVAL;
1565
1566 /*
1567 * Since we're a binary read handler, we must account for the
1568 * trailing NUL byte that sprintf will write: if "buf" is
1569 * too small to hold the NUL, or the NUL is exactly the last
1570 * byte, the read will look like it got truncated by one byte.
1571 * Since there is no way to ask sprintf nicely to not write
1572 * the NUL, we have to use a bounce buffer.
1573 */
1574 wrote = scnprintf(bounce, sizeof(bounce), "0x%px\n",
1575 kallsyms_show_value(file->f_cred)
1576 ? (void *)sattr->address : NULL);
1577 count = min(count, wrote);
1578 memcpy(buf, bounce, count);
1579
1580 return count;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001581}
1582
1583static void free_sect_attrs(struct module_sect_attrs *sect_attrs)
1584{
1585 unsigned int section;
1586
1587 for (section = 0; section < sect_attrs->nsections; section++)
Olivier Deprez0e641232021-09-23 10:07:05 +02001588 kfree(sect_attrs->attrs[section].battr.attr.name);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001589 kfree(sect_attrs);
1590}
1591
1592static void add_sect_attrs(struct module *mod, const struct load_info *info)
1593{
1594 unsigned int nloaded = 0, i, size[2];
1595 struct module_sect_attrs *sect_attrs;
1596 struct module_sect_attr *sattr;
Olivier Deprez0e641232021-09-23 10:07:05 +02001597 struct bin_attribute **gattr;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001598
1599 /* Count loaded sections and allocate structures */
1600 for (i = 0; i < info->hdr->e_shnum; i++)
1601 if (!sect_empty(&info->sechdrs[i]))
1602 nloaded++;
David Brazdil0f672f62019-12-10 10:32:29 +00001603 size[0] = ALIGN(struct_size(sect_attrs, attrs, nloaded),
Olivier Deprez0e641232021-09-23 10:07:05 +02001604 sizeof(sect_attrs->grp.bin_attrs[0]));
1605 size[1] = (nloaded + 1) * sizeof(sect_attrs->grp.bin_attrs[0]);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001606 sect_attrs = kzalloc(size[0] + size[1], GFP_KERNEL);
1607 if (sect_attrs == NULL)
1608 return;
1609
1610 /* Setup section attributes. */
1611 sect_attrs->grp.name = "sections";
Olivier Deprez0e641232021-09-23 10:07:05 +02001612 sect_attrs->grp.bin_attrs = (void *)sect_attrs + size[0];
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001613
1614 sect_attrs->nsections = 0;
1615 sattr = &sect_attrs->attrs[0];
Olivier Deprez0e641232021-09-23 10:07:05 +02001616 gattr = &sect_attrs->grp.bin_attrs[0];
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001617 for (i = 0; i < info->hdr->e_shnum; i++) {
1618 Elf_Shdr *sec = &info->sechdrs[i];
1619 if (sect_empty(sec))
1620 continue;
Olivier Deprez0e641232021-09-23 10:07:05 +02001621 sysfs_bin_attr_init(&sattr->battr);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001622 sattr->address = sec->sh_addr;
Olivier Deprez0e641232021-09-23 10:07:05 +02001623 sattr->battr.attr.name =
1624 kstrdup(info->secstrings + sec->sh_name, GFP_KERNEL);
1625 if (sattr->battr.attr.name == NULL)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001626 goto out;
1627 sect_attrs->nsections++;
Olivier Deprez0e641232021-09-23 10:07:05 +02001628 sattr->battr.read = module_sect_read;
1629 sattr->battr.size = MODULE_SECT_READ_SIZE;
1630 sattr->battr.attr.mode = 0400;
1631 *(gattr++) = &(sattr++)->battr;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001632 }
1633 *gattr = NULL;
1634
1635 if (sysfs_create_group(&mod->mkobj.kobj, &sect_attrs->grp))
1636 goto out;
1637
1638 mod->sect_attrs = sect_attrs;
1639 return;
1640 out:
1641 free_sect_attrs(sect_attrs);
1642}
1643
1644static void remove_sect_attrs(struct module *mod)
1645{
1646 if (mod->sect_attrs) {
1647 sysfs_remove_group(&mod->mkobj.kobj,
1648 &mod->sect_attrs->grp);
1649 /* We are positive that no one is using any sect attrs
1650 * at this point. Deallocate immediately. */
1651 free_sect_attrs(mod->sect_attrs);
1652 mod->sect_attrs = NULL;
1653 }
1654}
1655
1656/*
1657 * /sys/module/foo/notes/.section.name gives contents of SHT_NOTE sections.
1658 */
1659
1660struct module_notes_attrs {
1661 struct kobject *dir;
1662 unsigned int notes;
1663 struct bin_attribute attrs[0];
1664};
1665
1666static ssize_t module_notes_read(struct file *filp, struct kobject *kobj,
1667 struct bin_attribute *bin_attr,
1668 char *buf, loff_t pos, size_t count)
1669{
1670 /*
1671 * The caller checked the pos and count against our size.
1672 */
1673 memcpy(buf, bin_attr->private + pos, count);
1674 return count;
1675}
1676
1677static void free_notes_attrs(struct module_notes_attrs *notes_attrs,
1678 unsigned int i)
1679{
1680 if (notes_attrs->dir) {
1681 while (i-- > 0)
1682 sysfs_remove_bin_file(notes_attrs->dir,
1683 &notes_attrs->attrs[i]);
1684 kobject_put(notes_attrs->dir);
1685 }
1686 kfree(notes_attrs);
1687}
1688
1689static void add_notes_attrs(struct module *mod, const struct load_info *info)
1690{
1691 unsigned int notes, loaded, i;
1692 struct module_notes_attrs *notes_attrs;
1693 struct bin_attribute *nattr;
1694
1695 /* failed to create section attributes, so can't create notes */
1696 if (!mod->sect_attrs)
1697 return;
1698
1699 /* Count notes sections and allocate structures. */
1700 notes = 0;
1701 for (i = 0; i < info->hdr->e_shnum; i++)
1702 if (!sect_empty(&info->sechdrs[i]) &&
1703 (info->sechdrs[i].sh_type == SHT_NOTE))
1704 ++notes;
1705
1706 if (notes == 0)
1707 return;
1708
1709 notes_attrs = kzalloc(struct_size(notes_attrs, attrs, notes),
1710 GFP_KERNEL);
1711 if (notes_attrs == NULL)
1712 return;
1713
1714 notes_attrs->notes = notes;
1715 nattr = &notes_attrs->attrs[0];
1716 for (loaded = i = 0; i < info->hdr->e_shnum; ++i) {
1717 if (sect_empty(&info->sechdrs[i]))
1718 continue;
1719 if (info->sechdrs[i].sh_type == SHT_NOTE) {
1720 sysfs_bin_attr_init(nattr);
Olivier Deprez0e641232021-09-23 10:07:05 +02001721 nattr->attr.name = mod->sect_attrs->attrs[loaded].battr.attr.name;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001722 nattr->attr.mode = S_IRUGO;
1723 nattr->size = info->sechdrs[i].sh_size;
1724 nattr->private = (void *) info->sechdrs[i].sh_addr;
1725 nattr->read = module_notes_read;
1726 ++nattr;
1727 }
1728 ++loaded;
1729 }
1730
1731 notes_attrs->dir = kobject_create_and_add("notes", &mod->mkobj.kobj);
1732 if (!notes_attrs->dir)
1733 goto out;
1734
1735 for (i = 0; i < notes; ++i)
1736 if (sysfs_create_bin_file(notes_attrs->dir,
1737 &notes_attrs->attrs[i]))
1738 goto out;
1739
1740 mod->notes_attrs = notes_attrs;
1741 return;
1742
1743 out:
1744 free_notes_attrs(notes_attrs, i);
1745}
1746
1747static void remove_notes_attrs(struct module *mod)
1748{
1749 if (mod->notes_attrs)
1750 free_notes_attrs(mod->notes_attrs, mod->notes_attrs->notes);
1751}
1752
1753#else
1754
1755static inline void add_sect_attrs(struct module *mod,
1756 const struct load_info *info)
1757{
1758}
1759
1760static inline void remove_sect_attrs(struct module *mod)
1761{
1762}
1763
1764static inline void add_notes_attrs(struct module *mod,
1765 const struct load_info *info)
1766{
1767}
1768
1769static inline void remove_notes_attrs(struct module *mod)
1770{
1771}
1772#endif /* CONFIG_KALLSYMS */
1773
1774static void del_usage_links(struct module *mod)
1775{
1776#ifdef CONFIG_MODULE_UNLOAD
1777 struct module_use *use;
1778
1779 mutex_lock(&module_mutex);
1780 list_for_each_entry(use, &mod->target_list, target_list)
1781 sysfs_remove_link(use->target->holders_dir, mod->name);
1782 mutex_unlock(&module_mutex);
1783#endif
1784}
1785
1786static int add_usage_links(struct module *mod)
1787{
1788 int ret = 0;
1789#ifdef CONFIG_MODULE_UNLOAD
1790 struct module_use *use;
1791
1792 mutex_lock(&module_mutex);
1793 list_for_each_entry(use, &mod->target_list, target_list) {
1794 ret = sysfs_create_link(use->target->holders_dir,
1795 &mod->mkobj.kobj, mod->name);
1796 if (ret)
1797 break;
1798 }
1799 mutex_unlock(&module_mutex);
1800 if (ret)
1801 del_usage_links(mod);
1802#endif
1803 return ret;
1804}
1805
David Brazdil0f672f62019-12-10 10:32:29 +00001806static void module_remove_modinfo_attrs(struct module *mod, int end);
1807
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001808static int module_add_modinfo_attrs(struct module *mod)
1809{
1810 struct module_attribute *attr;
1811 struct module_attribute *temp_attr;
1812 int error = 0;
1813 int i;
1814
1815 mod->modinfo_attrs = kzalloc((sizeof(struct module_attribute) *
1816 (ARRAY_SIZE(modinfo_attrs) + 1)),
1817 GFP_KERNEL);
1818 if (!mod->modinfo_attrs)
1819 return -ENOMEM;
1820
1821 temp_attr = mod->modinfo_attrs;
David Brazdil0f672f62019-12-10 10:32:29 +00001822 for (i = 0; (attr = modinfo_attrs[i]); i++) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001823 if (!attr->test || attr->test(mod)) {
1824 memcpy(temp_attr, attr, sizeof(*temp_attr));
1825 sysfs_attr_init(&temp_attr->attr);
1826 error = sysfs_create_file(&mod->mkobj.kobj,
1827 &temp_attr->attr);
David Brazdil0f672f62019-12-10 10:32:29 +00001828 if (error)
1829 goto error_out;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001830 ++temp_attr;
1831 }
1832 }
David Brazdil0f672f62019-12-10 10:32:29 +00001833
1834 return 0;
1835
1836error_out:
1837 if (i > 0)
1838 module_remove_modinfo_attrs(mod, --i);
Olivier Deprez0e641232021-09-23 10:07:05 +02001839 else
1840 kfree(mod->modinfo_attrs);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001841 return error;
1842}
1843
David Brazdil0f672f62019-12-10 10:32:29 +00001844static void module_remove_modinfo_attrs(struct module *mod, int end)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001845{
1846 struct module_attribute *attr;
1847 int i;
1848
1849 for (i = 0; (attr = &mod->modinfo_attrs[i]); i++) {
David Brazdil0f672f62019-12-10 10:32:29 +00001850 if (end >= 0 && i > end)
1851 break;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001852 /* pick a field to test for end of list */
1853 if (!attr->attr.name)
1854 break;
1855 sysfs_remove_file(&mod->mkobj.kobj, &attr->attr);
1856 if (attr->free)
1857 attr->free(mod);
1858 }
1859 kfree(mod->modinfo_attrs);
1860}
1861
1862static void mod_kobject_put(struct module *mod)
1863{
1864 DECLARE_COMPLETION_ONSTACK(c);
1865 mod->mkobj.kobj_completion = &c;
1866 kobject_put(&mod->mkobj.kobj);
1867 wait_for_completion(&c);
1868}
1869
1870static int mod_sysfs_init(struct module *mod)
1871{
1872 int err;
1873 struct kobject *kobj;
1874
1875 if (!module_sysfs_initialized) {
1876 pr_err("%s: module sysfs not initialized\n", mod->name);
1877 err = -EINVAL;
1878 goto out;
1879 }
1880
1881 kobj = kset_find_obj(module_kset, mod->name);
1882 if (kobj) {
1883 pr_err("%s: module is already loaded\n", mod->name);
1884 kobject_put(kobj);
1885 err = -EINVAL;
1886 goto out;
1887 }
1888
1889 mod->mkobj.mod = mod;
1890
1891 memset(&mod->mkobj.kobj, 0, sizeof(mod->mkobj.kobj));
1892 mod->mkobj.kobj.kset = module_kset;
1893 err = kobject_init_and_add(&mod->mkobj.kobj, &module_ktype, NULL,
1894 "%s", mod->name);
1895 if (err)
1896 mod_kobject_put(mod);
1897
1898out:
1899 return err;
1900}
1901
1902static int mod_sysfs_setup(struct module *mod,
1903 const struct load_info *info,
1904 struct kernel_param *kparam,
1905 unsigned int num_params)
1906{
1907 int err;
1908
1909 err = mod_sysfs_init(mod);
1910 if (err)
1911 goto out;
1912
1913 mod->holders_dir = kobject_create_and_add("holders", &mod->mkobj.kobj);
1914 if (!mod->holders_dir) {
1915 err = -ENOMEM;
1916 goto out_unreg;
1917 }
1918
1919 err = module_param_sysfs_setup(mod, kparam, num_params);
1920 if (err)
1921 goto out_unreg_holders;
1922
1923 err = module_add_modinfo_attrs(mod);
1924 if (err)
1925 goto out_unreg_param;
1926
1927 err = add_usage_links(mod);
1928 if (err)
1929 goto out_unreg_modinfo_attrs;
1930
1931 add_sect_attrs(mod, info);
1932 add_notes_attrs(mod, info);
1933
1934 return 0;
1935
1936out_unreg_modinfo_attrs:
1937 module_remove_modinfo_attrs(mod, -1);
1938out_unreg_param:
1939 module_param_sysfs_remove(mod);
1940out_unreg_holders:
1941 kobject_put(mod->holders_dir);
1942out_unreg:
1943 mod_kobject_put(mod);
1944out:
1945 return err;
1946}
1947
1948static void mod_sysfs_fini(struct module *mod)
1949{
1950 remove_notes_attrs(mod);
1951 remove_sect_attrs(mod);
1952 mod_kobject_put(mod);
1953}
1954
1955static void init_param_lock(struct module *mod)
1956{
1957 mutex_init(&mod->param_lock);
1958}
1959#else /* !CONFIG_SYSFS */
1960
1961static int mod_sysfs_setup(struct module *mod,
1962 const struct load_info *info,
1963 struct kernel_param *kparam,
1964 unsigned int num_params)
1965{
1966 return 0;
1967}
1968
1969static void mod_sysfs_fini(struct module *mod)
1970{
1971}
1972
1973static void module_remove_modinfo_attrs(struct module *mod, int end)
1974{
1975}
1976
1977static void del_usage_links(struct module *mod)
1978{
1979}
1980
1981static void init_param_lock(struct module *mod)
1982{
1983}
1984#endif /* CONFIG_SYSFS */
1985
1986static void mod_sysfs_teardown(struct module *mod)
1987{
1988 del_usage_links(mod);
1989 module_remove_modinfo_attrs(mod, -1);
1990 module_param_sysfs_remove(mod);
1991 kobject_put(mod->mkobj.drivers_dir);
1992 kobject_put(mod->holders_dir);
1993 mod_sysfs_fini(mod);
1994}
1995
1996#ifdef CONFIG_ARCH_HAS_STRICT_MODULE_RWX
1997/*
1998 * LKM RO/NX protection: protect module's text/ro-data
1999 * from modification and any data from execution.
2000 *
2001 * General layout of module is:
2002 * [text] [read-only-data] [ro-after-init] [writable data]
2003 * text_size -----^ ^ ^ ^
2004 * ro_size ------------------------| | |
2005 * ro_after_init_size -----------------------------| |
2006 * size -----------------------------------------------------------|
2007 *
2008 * These values are always page-aligned (as is base)
2009 */
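/*
 * Worked example (illustrative, not from the original source): for a
 * module with text_size = 3 pages, ro_size = 5 pages,
 * ro_after_init_size = 6 pages and size = 8 pages, the frob_*()
 * helpers below cover:
 *
 *   frob_text():          pages [0, 3)  -- base .. base + text_size
 *   frob_rodata():        pages [3, 5)  -- text_size .. ro_size
 *   frob_ro_after_init(): pages [5, 6)  -- ro_size .. ro_after_init_size
 *   frob_writable_data(): pages [6, 8)  -- ro_after_init_size .. size
 */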
2010static void frob_text(const struct module_layout *layout,
2011 int (*set_memory)(unsigned long start, int num_pages))
2012{
2013 BUG_ON((unsigned long)layout->base & (PAGE_SIZE-1));
2014 BUG_ON((unsigned long)layout->text_size & (PAGE_SIZE-1));
2015 set_memory((unsigned long)layout->base,
2016 layout->text_size >> PAGE_SHIFT);
2017}
2018
2019#ifdef CONFIG_STRICT_MODULE_RWX
2020static void frob_rodata(const struct module_layout *layout,
2021 int (*set_memory)(unsigned long start, int num_pages))
2022{
2023 BUG_ON((unsigned long)layout->base & (PAGE_SIZE-1));
2024 BUG_ON((unsigned long)layout->text_size & (PAGE_SIZE-1));
2025 BUG_ON((unsigned long)layout->ro_size & (PAGE_SIZE-1));
2026 set_memory((unsigned long)layout->base + layout->text_size,
2027 (layout->ro_size - layout->text_size) >> PAGE_SHIFT);
2028}
2029
2030static void frob_ro_after_init(const struct module_layout *layout,
2031 int (*set_memory)(unsigned long start, int num_pages))
2032{
2033 BUG_ON((unsigned long)layout->base & (PAGE_SIZE-1));
2034 BUG_ON((unsigned long)layout->ro_size & (PAGE_SIZE-1));
2035 BUG_ON((unsigned long)layout->ro_after_init_size & (PAGE_SIZE-1));
2036 set_memory((unsigned long)layout->base + layout->ro_size,
2037 (layout->ro_after_init_size - layout->ro_size) >> PAGE_SHIFT);
2038}
2039
2040static void frob_writable_data(const struct module_layout *layout,
2041 int (*set_memory)(unsigned long start, int num_pages))
2042{
2043 BUG_ON((unsigned long)layout->base & (PAGE_SIZE-1));
2044 BUG_ON((unsigned long)layout->ro_after_init_size & (PAGE_SIZE-1));
2045 BUG_ON((unsigned long)layout->size & (PAGE_SIZE-1));
2046 set_memory((unsigned long)layout->base + layout->ro_after_init_size,
2047 (layout->size - layout->ro_after_init_size) >> PAGE_SHIFT);
2048}
2049
2050/* livepatching wants to disable read-only so it can frob module. */
2051void module_disable_ro(const struct module *mod)
2052{
2053 if (!rodata_enabled)
2054 return;
2055
2056 frob_text(&mod->core_layout, set_memory_rw);
2057 frob_rodata(&mod->core_layout, set_memory_rw);
2058 frob_ro_after_init(&mod->core_layout, set_memory_rw);
2059 frob_text(&mod->init_layout, set_memory_rw);
2060 frob_rodata(&mod->init_layout, set_memory_rw);
2061}
2062
2063void module_enable_ro(const struct module *mod, bool after_init)
2064{
2065 if (!rodata_enabled)
2066 return;
2067
2068 set_vm_flush_reset_perms(mod->core_layout.base);
2069 set_vm_flush_reset_perms(mod->init_layout.base);
2070 frob_text(&mod->core_layout, set_memory_ro);
2071
2072 frob_rodata(&mod->core_layout, set_memory_ro);
2073 frob_text(&mod->init_layout, set_memory_ro);
2074 frob_rodata(&mod->init_layout, set_memory_ro);
2075
2076 if (after_init)
2077 frob_ro_after_init(&mod->core_layout, set_memory_ro);
2078}
2079
2080static void module_enable_nx(const struct module *mod)
2081{
2082 frob_rodata(&mod->core_layout, set_memory_nx);
2083 frob_ro_after_init(&mod->core_layout, set_memory_nx);
2084 frob_writable_data(&mod->core_layout, set_memory_nx);
2085 frob_rodata(&mod->init_layout, set_memory_nx);
2086 frob_writable_data(&mod->init_layout, set_memory_nx);
2087}
2088
2089/* Iterate through all modules and set each module's text as RW */
2090void set_all_modules_text_rw(void)
2091{
2092 struct module *mod;
2093
2094 if (!rodata_enabled)
2095 return;
2096
2097 mutex_lock(&module_mutex);
2098 list_for_each_entry_rcu(mod, &modules, list) {
2099 if (mod->state == MODULE_STATE_UNFORMED)
2100 continue;
2101
2102 frob_text(&mod->core_layout, set_memory_rw);
2103 frob_text(&mod->init_layout, set_memory_rw);
2104 }
2105 mutex_unlock(&module_mutex);
2106}
2107
2108/* Iterate through all modules and set each module's text as RO */
2109void set_all_modules_text_ro(void)
2110{
2111 struct module *mod;
2112
2113 if (!rodata_enabled)
2114 return;
2115
2116 mutex_lock(&module_mutex);
2117 list_for_each_entry_rcu(mod, &modules, list) {
2118 /*
2119 * Ignore modules that are going away: their RO
2120 * protection may already have been disabled, and
2121 * re-enabling it would cause protection faults at
2122 * module deallocation.
2122 */
2123 if (mod->state == MODULE_STATE_UNFORMED ||
2124 mod->state == MODULE_STATE_GOING)
2125 continue;
2126
2127 frob_text(&mod->core_layout, set_memory_ro);
2128 frob_text(&mod->init_layout, set_memory_ro);
2129 }
2130 mutex_unlock(&module_mutex);
2131}
2132#else /* !CONFIG_STRICT_MODULE_RWX */
2133static void module_enable_nx(const struct module *mod) { }
2134#endif /* CONFIG_STRICT_MODULE_RWX */
2135static void module_enable_x(const struct module *mod)
2136{
2137 frob_text(&mod->core_layout, set_memory_x);
2138 frob_text(&mod->init_layout, set_memory_x);
2139}
2140#else /* !CONFIG_ARCH_HAS_STRICT_MODULE_RWX */
2141static void module_enable_nx(const struct module *mod) { }
2142static void module_enable_x(const struct module *mod) { }
2143#endif /* CONFIG_ARCH_HAS_STRICT_MODULE_RWX */
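/*
 * Usage sketch (an assumption about the load path, which lies outside
 * this excerpt): the loader applies the helpers above roughly as
 *
 *	module_enable_ro(mod, false);	// text and rodata -> RO
 *	module_enable_nx(mod);		// all data -> NX
 *	module_enable_x(mod);		// text -> executable
 *
 * and calls module_enable_ro(mod, true) again once init has finished,
 * sealing .data..ro_after_init.
 */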
2144
2145
2146#ifdef CONFIG_LIVEPATCH
2147/*
2148 * Persist Elf information about a module. Copy the Elf header,
2149 * section header table, section string table, and symtab section
2150 * index from info to mod->klp_info.
2151 */
2152static int copy_module_elf(struct module *mod, struct load_info *info)
2153{
2154 unsigned int size, symndx;
2155 int ret;
2156
2157 size = sizeof(*mod->klp_info);
2158 mod->klp_info = kmalloc(size, GFP_KERNEL);
2159 if (mod->klp_info == NULL)
2160 return -ENOMEM;
2161
2162 /* Elf header */
2163 size = sizeof(mod->klp_info->hdr);
2164 memcpy(&mod->klp_info->hdr, info->hdr, size);
2165
2166 /* Elf section header table */
2167 size = sizeof(*info->sechdrs) * info->hdr->e_shnum;
2168 mod->klp_info->sechdrs = kmemdup(info->sechdrs, size, GFP_KERNEL);
2169 if (mod->klp_info->sechdrs == NULL) {
2170 ret = -ENOMEM;
2171 goto free_info;
2172 }
2173
2174 /* Elf section name string table */
2175 size = info->sechdrs[info->hdr->e_shstrndx].sh_size;
2176 mod->klp_info->secstrings = kmemdup(info->secstrings, size, GFP_KERNEL);
2177 if (mod->klp_info->secstrings == NULL) {
2178 ret = -ENOMEM;
2179 goto free_sechdrs;
2180 }
2181
2182 /* Elf symbol section index */
2183 symndx = info->index.sym;
2184 mod->klp_info->symndx = symndx;
2185
2186 /*
2187 * For livepatch modules, core_kallsyms.symtab is a complete
2188 * copy of the original symbol table. Adjust sh_addr to point
2189 * to core_kallsyms.symtab since the copy of the symtab in module
2190 * init memory is freed at the end of do_init_module().
2191 */
2192 mod->klp_info->sechdrs[symndx].sh_addr =
2193 (unsigned long) mod->core_kallsyms.symtab;
2194
2195 return 0;
2196
2197free_sechdrs:
2198 kfree(mod->klp_info->sechdrs);
2199free_info:
2200 kfree(mod->klp_info);
2201 return ret;
2202}
2203
2204static void free_module_elf(struct module *mod)
2205{
2206 kfree(mod->klp_info->sechdrs);
2207 kfree(mod->klp_info->secstrings);
2208 kfree(mod->klp_info);
2209}
2210#else /* !CONFIG_LIVEPATCH */
2211static int copy_module_elf(struct module *mod, struct load_info *info)
2212{
2213 return 0;
2214}
2215
2216static void free_module_elf(struct module *mod)
2217{
2218}
2219#endif /* CONFIG_LIVEPATCH */
2220
2221void __weak module_memfree(void *module_region)
2222{
2223 /*
2224 * This memory may be RO, and freeing RO memory in an interrupt is not
2225 * supported by vmalloc.
2226 */
2227 WARN_ON(in_interrupt());
2228 vfree(module_region);
2229}
2230
2231void __weak module_arch_cleanup(struct module *mod)
2232{
2233}
2234
2235void __weak module_arch_freeing_init(struct module *mod)
2236{
2237}
2238
2239/* Free a module, remove from lists, etc. */
2240static void free_module(struct module *mod)
2241{
2242 trace_module_free(mod);
2243
2244 mod_sysfs_teardown(mod);
2245
2246 /* We leave it in the list to prevent duplicate loads, but make sure
2247 * that no one uses it while it's being deconstructed. */
2248 mutex_lock(&module_mutex);
2249 mod->state = MODULE_STATE_UNFORMED;
2250 mutex_unlock(&module_mutex);
2251
2252 /* Remove dynamic debug info */
2253 ddebug_remove_module(mod->name);
2254
2255 /* Arch-specific cleanup. */
2256 module_arch_cleanup(mod);
2257
2258 /* Module unload stuff */
2259 module_unload_free(mod);
2260
2261 /* Free any allocated parameters. */
2262 destroy_params(mod->kp, mod->num_kp);
2263
2264 if (is_livepatch_module(mod))
2265 free_module_elf(mod);
2266
2267 /* Now we can delete it from the lists */
2268 mutex_lock(&module_mutex);
2269 /* Unlink carefully: kallsyms could be walking list. */
2270 list_del_rcu(&mod->list);
2271 mod_tree_remove(mod);
2272 /* Remove this module from bug list, this uses list_del_rcu */
2273 module_bug_cleanup(mod);
2274 /* Wait for RCU-sched synchronizing before releasing mod->list and buglist. */
2275 synchronize_rcu();
2276 mutex_unlock(&module_mutex);
2277
2278 /* This may be empty, but that's OK */
2279 module_arch_freeing_init(mod);
2280 module_memfree(mod->init_layout.base);
2281 kfree(mod->args);
2282 percpu_modfree(mod);
2283
2284 /* Free lock-classes; relies on the preceding sync_rcu(). */
2285 lockdep_free_key_range(mod->core_layout.base, mod->core_layout.size);
2286
2287 /* Finally, free the core (containing the module structure) */
2288 module_memfree(mod->core_layout.base);
2289}
2290
2291void *__symbol_get(const char *symbol)
2292{
2293 struct module *owner;
2294 const struct kernel_symbol *sym;
2295
2296 preempt_disable();
2297 sym = find_symbol(symbol, &owner, NULL, NULL, true, true);
2298 if (sym && strong_try_module_get(owner))
2299 sym = NULL;
2300 preempt_enable();
2301
2302 return sym ? (void *)kernel_symbol_value(sym) : NULL;
2303}
2304EXPORT_SYMBOL_GPL(__symbol_get);
2305
2306/*
2307 * Ensure that an exported symbol [global namespace] does not already exist
2308 * in the kernel or in some other module's exported symbol table.
2309 *
2310 * You must hold the module_mutex.
2311 */
2312static int verify_exported_symbols(struct module *mod)
2313{
2314 unsigned int i;
2315 struct module *owner;
2316 const struct kernel_symbol *s;
2317 struct {
2318 const struct kernel_symbol *sym;
2319 unsigned int num;
2320 } arr[] = {
2321 { mod->syms, mod->num_syms },
2322 { mod->gpl_syms, mod->num_gpl_syms },
2323 { mod->gpl_future_syms, mod->num_gpl_future_syms },
2324#ifdef CONFIG_UNUSED_SYMBOLS
2325 { mod->unused_syms, mod->num_unused_syms },
2326 { mod->unused_gpl_syms, mod->num_unused_gpl_syms },
2327#endif
2328 };
2329
2330 for (i = 0; i < ARRAY_SIZE(arr); i++) {
2331 for (s = arr[i].sym; s < arr[i].sym + arr[i].num; s++) {
2332 if (find_symbol(kernel_symbol_name(s), &owner, NULL,
2333 NULL, true, false)) {
2334 pr_err("%s: exports duplicate symbol %s"
2335 " (owned by %s)\n",
2336 mod->name, kernel_symbol_name(s),
2337 module_name(owner));
2338 return -ENOEXEC;
2339 }
2340 }
2341 }
2342 return 0;
2343}
2344
2345static bool ignore_undef_symbol(Elf_Half emachine, const char *name)
2346{
2347 /*
2348 * On x86, PIC code and Clang non-PIC code may have call foo@PLT. GNU as
2349 * before 2.37 produces an unreferenced _GLOBAL_OFFSET_TABLE_ on x86-64.
2350 * i386 has a similar problem but may not deserve a fix.
2351 *
2352 * If we ever have to ignore many symbols, consider refactoring the code to
2353 * only warn if referenced by a relocation.
2354 */
2355 if (emachine == EM_386 || emachine == EM_X86_64)
2356 return !strcmp(name, "_GLOBAL_OFFSET_TABLE_");
2357 return false;
2358}
2359
2360/* Change all symbols so that st_value encodes the pointer directly. */
2361static int simplify_symbols(struct module *mod, const struct load_info *info)
2362{
2363 Elf_Shdr *symsec = &info->sechdrs[info->index.sym];
2364 Elf_Sym *sym = (void *)symsec->sh_addr;
2365 unsigned long secbase;
2366 unsigned int i;
2367 int ret = 0;
2368 const struct kernel_symbol *ksym;
2369
2370 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
2371 const char *name = info->strtab + sym[i].st_name;
2372
2373 switch (sym[i].st_shndx) {
2374 case SHN_COMMON:
2375 /* Ignore common symbols */
2376 if (!strncmp(name, "__gnu_lto", 9))
2377 break;
2378
2379 /* We compiled with -fno-common, so common
2380 symbols are not supposed to appear. */
2381 pr_debug("Common symbol: %s\n", name);
2382 pr_warn("%s: please compile with -fno-common\n",
2383 mod->name);
2384 ret = -ENOEXEC;
2385 break;
2386
2387 case SHN_ABS:
2388 /* Don't need to do anything */
2389 pr_debug("Absolute symbol: 0x%08lx\n",
2390 (long)sym[i].st_value);
2391 break;
2392
2393 case SHN_LIVEPATCH:
2394 /* Livepatch symbols are resolved by livepatch */
2395 break;
2396
2397 case SHN_UNDEF:
2398 ksym = resolve_symbol_wait(mod, info, name);
2399 /* Ok if resolved. */
2400 if (ksym && !IS_ERR(ksym)) {
2401 sym[i].st_value = kernel_symbol_value(ksym);
2402 break;
2403 }
2404
2405 /* Ok if weak or ignored. */
2406 if (!ksym &&
2407 (ELF_ST_BIND(sym[i].st_info) == STB_WEAK ||
2408 ignore_undef_symbol(info->hdr->e_machine, name)))
2409 break;
2410
2411 ret = PTR_ERR(ksym) ?: -ENOENT;
2412 pr_warn("%s: Unknown symbol %s (err %d)\n",
2413 mod->name, name, ret);
2414 break;
2415
2416 default:
2417 /* Divert to percpu allocation if a percpu var. */
2418 if (sym[i].st_shndx == info->index.pcpu)
2419 secbase = (unsigned long)mod_percpu(mod);
2420 else
2421 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
2422 sym[i].st_value += secbase;
2423 break;
2424 }
2425 }
2426
2427 return ret;
2428}
2429
2430static int apply_relocations(struct module *mod, const struct load_info *info)
2431{
2432 unsigned int i;
2433 int err = 0;
2434
2435 /* Now do relocations. */
2436 for (i = 1; i < info->hdr->e_shnum; i++) {
2437 unsigned int infosec = info->sechdrs[i].sh_info;
2438
2439 /* Not a valid relocation section? */
2440 if (infosec >= info->hdr->e_shnum)
2441 continue;
2442
2443 /* Don't bother with non-allocated sections */
2444 if (!(info->sechdrs[infosec].sh_flags & SHF_ALLOC))
2445 continue;
2446
2447 /* Livepatch relocation sections are applied by livepatch */
2448 if (info->sechdrs[i].sh_flags & SHF_RELA_LIVEPATCH)
2449 continue;
2450
2451 if (info->sechdrs[i].sh_type == SHT_REL)
2452 err = apply_relocate(info->sechdrs, info->strtab,
2453 info->index.sym, i, mod);
2454 else if (info->sechdrs[i].sh_type == SHT_RELA)
2455 err = apply_relocate_add(info->sechdrs, info->strtab,
2456 info->index.sym, i, mod);
2457 if (err < 0)
2458 break;
2459 }
2460 return err;
2461}
2462
2463/* Additional bytes needed by arch in front of individual sections */
2464unsigned int __weak arch_mod_section_prepend(struct module *mod,
2465 unsigned int section)
2466{
2467 /* default implementation just returns zero */
2468 return 0;
2469}
2470
2471/* Update size with this section: return offset. */
2472static long get_offset(struct module *mod, unsigned int *size,
2473 Elf_Shdr *sechdr, unsigned int section)
2474{
2475 long ret;
2476
2477 *size += arch_mod_section_prepend(mod, section);
2478 ret = ALIGN(*size, sechdr->sh_addralign ?: 1);
2479 *size = ret + sechdr->sh_size;
2480 return ret;
2481}
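/*
 * Worked example (illustrative): with *size == 0x1234, no arch
 * prepend, sh_addralign == 0x10 and sh_size == 0x80, get_offset()
 * returns ALIGN(0x1234, 0x10) == 0x1240 and leaves *size == 0x12c0
 * for the next section.
 */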
2482
2483/* Lay out the SHF_ALLOC sections in a way not dissimilar to how ld
2484 might -- code, read-only data, read-write data, small data. Tally
2485 sizes, and place the offsets into sh_entsize fields: high bit means it
2486 belongs in init. */
2487static void layout_sections(struct module *mod, struct load_info *info)
2488{
2489 static unsigned long const masks[][2] = {
2490 /* NOTE: all executable code must be the first section
2491 * in this array; otherwise modify the text_size
2492 * finder in the two loops below */
2493 { SHF_EXECINSTR | SHF_ALLOC, ARCH_SHF_SMALL },
2494 { SHF_ALLOC, SHF_WRITE | ARCH_SHF_SMALL },
2495 { SHF_RO_AFTER_INIT | SHF_ALLOC, ARCH_SHF_SMALL },
2496 { SHF_WRITE | SHF_ALLOC, ARCH_SHF_SMALL },
2497 { ARCH_SHF_SMALL | SHF_ALLOC, 0 }
2498 };
2499 unsigned int m, i;
2500
2501 for (i = 0; i < info->hdr->e_shnum; i++)
2502 info->sechdrs[i].sh_entsize = ~0UL;
2503
2504 pr_debug("Core section allocation order:\n");
2505 for (m = 0; m < ARRAY_SIZE(masks); ++m) {
2506 for (i = 0; i < info->hdr->e_shnum; ++i) {
2507 Elf_Shdr *s = &info->sechdrs[i];
2508 const char *sname = info->secstrings + s->sh_name;
2509
2510 if ((s->sh_flags & masks[m][0]) != masks[m][0]
2511 || (s->sh_flags & masks[m][1])
2512 || s->sh_entsize != ~0UL
2513 || strstarts(sname, ".init"))
2514 continue;
2515 s->sh_entsize = get_offset(mod, &mod->core_layout.size, s, i);
2516 pr_debug("\t%s\n", sname);
2517 }
2518 switch (m) {
2519 case 0: /* executable */
2520 mod->core_layout.size = debug_align(mod->core_layout.size);
2521 mod->core_layout.text_size = mod->core_layout.size;
2522 break;
2523 case 1: /* RO: text and ro-data */
2524 mod->core_layout.size = debug_align(mod->core_layout.size);
2525 mod->core_layout.ro_size = mod->core_layout.size;
2526 break;
2527 case 2: /* RO after init */
2528 mod->core_layout.size = debug_align(mod->core_layout.size);
2529 mod->core_layout.ro_after_init_size = mod->core_layout.size;
2530 break;
2531 case 4: /* whole core */
2532 mod->core_layout.size = debug_align(mod->core_layout.size);
2533 break;
2534 }
2535 }
2536
2537 pr_debug("Init section allocation order:\n");
2538 for (m = 0; m < ARRAY_SIZE(masks); ++m) {
2539 for (i = 0; i < info->hdr->e_shnum; ++i) {
2540 Elf_Shdr *s = &info->sechdrs[i];
2541 const char *sname = info->secstrings + s->sh_name;
2542
2543 if ((s->sh_flags & masks[m][0]) != masks[m][0]
2544 || (s->sh_flags & masks[m][1])
2545 || s->sh_entsize != ~0UL
2546 || !strstarts(sname, ".init"))
2547 continue;
2548 s->sh_entsize = (get_offset(mod, &mod->init_layout.size, s, i)
2549 | INIT_OFFSET_MASK);
2550 pr_debug("\t%s\n", sname);
2551 }
2552 switch (m) {
2553 case 0: /* executable */
2554 mod->init_layout.size = debug_align(mod->init_layout.size);
2555 mod->init_layout.text_size = mod->init_layout.size;
2556 break;
2557 case 1: /* RO: text and ro-data */
2558 mod->init_layout.size = debug_align(mod->init_layout.size);
2559 mod->init_layout.ro_size = mod->init_layout.size;
2560 break;
2561 case 2:
2562 /*
2563 * RO after init doesn't apply to init_layout (only
2564 * core_layout), so it just takes the value of ro_size.
2565 */
2566 mod->init_layout.ro_after_init_size = mod->init_layout.ro_size;
2567 break;
2568 case 4: /* whole init */
2569 mod->init_layout.size = debug_align(mod->init_layout.size);
2570 break;
2571 }
2572 }
2573}
2574
2575static void set_license(struct module *mod, const char *license)
2576{
2577 if (!license)
2578 license = "unspecified";
2579
2580 if (!license_is_gpl_compatible(license)) {
2581 if (!test_taint(TAINT_PROPRIETARY_MODULE))
2582 pr_warn("%s: module license '%s' taints kernel.\n",
2583 mod->name, license);
2584 add_taint_module(mod, TAINT_PROPRIETARY_MODULE,
2585 LOCKDEP_NOW_UNRELIABLE);
2586 }
2587}
2588
2589/* Parse tag=value strings from .modinfo section */
2590static char *next_string(char *string, unsigned long *secsize)
2591{
2592 /* Skip non-zero chars */
2593 while (string[0]) {
2594 string++;
2595 if ((*secsize)-- <= 1)
2596 return NULL;
2597 }
2598
2599 /* Skip any zero padding. */
2600 while (!string[0]) {
2601 string++;
2602 if ((*secsize)-- <= 1)
2603 return NULL;
2604 }
2605 return string;
2606}
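/*
 * Illustrative .modinfo layout (hypothetical values): the section is a
 * sequence of NUL-terminated "tag=value" strings, e.g.
 *
 *	license=GPL\0author=Jane Doe\0\0depends=\0
 *
 * next_string() steps over the current string and any NUL padding, and
 * returns NULL once secsize is exhausted, so get_next_modinfo() below
 * can visit each "tag=" entry in turn.
 */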
2607
2608static char *get_next_modinfo(const struct load_info *info, const char *tag,
2609 char *prev)
2610{
2611 char *p;
2612 unsigned int taglen = strlen(tag);
2613 Elf_Shdr *infosec = &info->sechdrs[info->index.info];
2614 unsigned long size = infosec->sh_size;
2615
2616 /*
2617 * get_modinfo() calls made before rewrite_section_headers()
2618 * must use sh_offset, as sh_addr isn't set!
2619 */
2620 char *modinfo = (char *)info->hdr + infosec->sh_offset;
2621
2622 if (prev) {
2623 size -= prev - modinfo;
2624 modinfo = next_string(prev, &size);
2625 }
2626
2627 for (p = modinfo; p; p = next_string(p, &size)) {
2628 if (strncmp(p, tag, taglen) == 0 && p[taglen] == '=')
2629 return p + taglen + 1;
2630 }
2631 return NULL;
2632}
2633
2634static char *get_modinfo(const struct load_info *info, const char *tag)
2635{
2636 return get_next_modinfo(info, tag, NULL);
2637}
2638
2639static void setup_modinfo(struct module *mod, struct load_info *info)
2640{
2641 struct module_attribute *attr;
2642 int i;
2643
2644 for (i = 0; (attr = modinfo_attrs[i]); i++) {
2645 if (attr->setup)
2646 attr->setup(mod, get_modinfo(info, attr->attr.name));
2647 }
2648}
2649
2650static void free_modinfo(struct module *mod)
2651{
2652 struct module_attribute *attr;
2653 int i;
2654
2655 for (i = 0; (attr = modinfo_attrs[i]); i++) {
2656 if (attr->free)
2657 attr->free(mod);
2658 }
2659}
2660
2661#ifdef CONFIG_KALLSYMS
2662
2663/* Lookup exported symbol in given range of kernel_symbols */
2664static const struct kernel_symbol *lookup_exported_symbol(const char *name,
2665 const struct kernel_symbol *start,
2666 const struct kernel_symbol *stop)
2667{
2668 return bsearch(name, start, stop - start,
2669 sizeof(struct kernel_symbol), cmp_name);
2670}
2671
2672static int is_exported(const char *name, unsigned long value,
2673 const struct module *mod)
2674{
2675 const struct kernel_symbol *ks;
2676 if (!mod)
2677 ks = lookup_exported_symbol(name, __start___ksymtab, __stop___ksymtab);
2678 else
2679 ks = lookup_exported_symbol(name, mod->syms, mod->syms + mod->num_syms);
2680
2681 return ks != NULL && kernel_symbol_value(ks) == value;
2682}
2683
2684/* As per nm */
2685static char elf_type(const Elf_Sym *sym, const struct load_info *info)
2686{
2687 const Elf_Shdr *sechdrs = info->sechdrs;
2688
2689 if (ELF_ST_BIND(sym->st_info) == STB_WEAK) {
2690 if (ELF_ST_TYPE(sym->st_info) == STT_OBJECT)
2691 return 'v';
2692 else
2693 return 'w';
2694 }
2695 if (sym->st_shndx == SHN_UNDEF)
2696 return 'U';
2697 if (sym->st_shndx == SHN_ABS || sym->st_shndx == info->index.pcpu)
2698 return 'a';
2699 if (sym->st_shndx >= SHN_LORESERVE)
2700 return '?';
2701 if (sechdrs[sym->st_shndx].sh_flags & SHF_EXECINSTR)
2702 return 't';
2703 if (sechdrs[sym->st_shndx].sh_flags & SHF_ALLOC
2704 && sechdrs[sym->st_shndx].sh_type != SHT_NOBITS) {
2705 if (!(sechdrs[sym->st_shndx].sh_flags & SHF_WRITE))
2706 return 'r';
2707 else if (sechdrs[sym->st_shndx].sh_flags & ARCH_SHF_SMALL)
2708 return 'g';
2709 else
2710 return 'd';
2711 }
2712 if (sechdrs[sym->st_shndx].sh_type == SHT_NOBITS) {
2713 if (sechdrs[sym->st_shndx].sh_flags & ARCH_SHF_SMALL)
2714 return 's';
2715 else
2716 return 'b';
2717 }
2718 if (strstarts(info->secstrings + sechdrs[sym->st_shndx].sh_name,
2719 ".debug")) {
2720 return 'n';
2721 }
2722 return '?';
2723}
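/*
 * Summary of the returns above (same letters nm would print): 'v'/'w'
 * weak, 'U' undefined, 'a' absolute or per-CPU, 't' text, 'r' rodata,
 * 'g'/'d' (small) data, 's'/'b' (small) bss, 'n' debug, '?' anything
 * else.
 */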
2724
2725static bool is_core_symbol(const Elf_Sym *src, const Elf_Shdr *sechdrs,
2726 unsigned int shnum, unsigned int pcpundx)
2727{
2728 const Elf_Shdr *sec;
2729
2730 if (src->st_shndx == SHN_UNDEF
2731 || src->st_shndx >= shnum
2732 || !src->st_name)
2733 return false;
2734
2735#ifdef CONFIG_KALLSYMS_ALL
2736 if (src->st_shndx == pcpundx)
2737 return true;
2738#endif
2739
2740 sec = sechdrs + src->st_shndx;
2741 if (!(sec->sh_flags & SHF_ALLOC)
2742#ifndef CONFIG_KALLSYMS_ALL
2743 || !(sec->sh_flags & SHF_EXECINSTR)
2744#endif
2745 || (sec->sh_entsize & INIT_OFFSET_MASK))
2746 return false;
2747
2748 return true;
2749}
2750
2751/*
2752 * We only allocate and copy the strings needed by the parts of symtab
2753 * we keep. This is simple, but has the effect of making multiple
2754 * copies of duplicates. We could be more sophisticated, see
2755 * linux-kernel thread starting with
2756 * <73defb5e4bca04a6431392cc341112b1@localhost>.
2757 */
2758static void layout_symtab(struct module *mod, struct load_info *info)
2759{
2760 Elf_Shdr *symsect = info->sechdrs + info->index.sym;
2761 Elf_Shdr *strsect = info->sechdrs + info->index.str;
2762 const Elf_Sym *src;
2763 unsigned int i, nsrc, ndst, strtab_size = 0;
2764
2765 /* Put symbol section at end of init part of module. */
2766 symsect->sh_flags |= SHF_ALLOC;
2767 symsect->sh_entsize = get_offset(mod, &mod->init_layout.size, symsect,
2768 info->index.sym) | INIT_OFFSET_MASK;
2769 pr_debug("\t%s\n", info->secstrings + symsect->sh_name);
2770
2771 src = (void *)info->hdr + symsect->sh_offset;
2772 nsrc = symsect->sh_size / sizeof(*src);
2773
2774 /* Compute total space required for the core symbols' strtab. */
2775 for (ndst = i = 0; i < nsrc; i++) {
2776 if (i == 0 || is_livepatch_module(mod) ||
2777 is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum,
2778 info->index.pcpu)) {
2779 strtab_size += strlen(&info->strtab[src[i].st_name])+1;
2780 ndst++;
2781 }
2782 }
2783
2784 /* Append room for core symbols at end of core part. */
2785 info->symoffs = ALIGN(mod->core_layout.size, symsect->sh_addralign ?: 1);
2786 info->stroffs = mod->core_layout.size = info->symoffs + ndst * sizeof(Elf_Sym);
2787 mod->core_layout.size += strtab_size;
2788 info->core_typeoffs = mod->core_layout.size;
2789 mod->core_layout.size += ndst * sizeof(char);
2790 mod->core_layout.size = debug_align(mod->core_layout.size);
2791
2792 /* Put string table section at end of init part of module. */
2793 strsect->sh_flags |= SHF_ALLOC;
2794 strsect->sh_entsize = get_offset(mod, &mod->init_layout.size, strsect,
2795 info->index.str) | INIT_OFFSET_MASK;
2796 pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
2797
2798 /* We'll tack temporary mod_kallsyms on the end. */
2799 mod->init_layout.size = ALIGN(mod->init_layout.size,
2800 __alignof__(struct mod_kallsyms));
2801 info->mod_kallsyms_init_off = mod->init_layout.size;
2802 mod->init_layout.size += sizeof(struct mod_kallsyms);
2803 info->init_typeoffs = mod->init_layout.size;
2804 mod->init_layout.size += nsrc * sizeof(char);
2805 mod->init_layout.size = debug_align(mod->init_layout.size);
2806}
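/*
 * Resulting layout sketch (illustrative):
 *
 *   core: [sections][core symtab][core strtab][core typetab]
 *   init: [sections][full symtab][full strtab][struct mod_kallsyms][typetab]
 *
 * The init-side copies go away with the init region; add_kallsyms()
 * below switches the module over to the trimmed core-side copies.
 */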
2807
2808/*
2809 * We use the full symtab and strtab which layout_symtab arranged to
2810 * be appended to the init section. Later we switch to the cut-down
2811 * core-only ones.
2812 */
2813static void add_kallsyms(struct module *mod, const struct load_info *info)
2814{
2815 unsigned int i, ndst;
2816 const Elf_Sym *src;
2817 Elf_Sym *dst;
2818 char *s;
2819 Elf_Shdr *symsec = &info->sechdrs[info->index.sym];
2820
2821 /* Set up to point into init section. */
2822 mod->kallsyms = mod->init_layout.base + info->mod_kallsyms_init_off;
2823
2824 mod->kallsyms->symtab = (void *)symsec->sh_addr;
2825 mod->kallsyms->num_symtab = symsec->sh_size / sizeof(Elf_Sym);
2826 /* Make sure we get permanent strtab: don't use info->strtab. */
2827 mod->kallsyms->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
2828 mod->kallsyms->typetab = mod->init_layout.base + info->init_typeoffs;
2829
2830 /*
2831 * Now populate the cut down core kallsyms for after init
2832 * and set types up while we still have access to sections.
2833 */
2834 mod->core_kallsyms.symtab = dst = mod->core_layout.base + info->symoffs;
2835 mod->core_kallsyms.strtab = s = mod->core_layout.base + info->stroffs;
2836 mod->core_kallsyms.typetab = mod->core_layout.base + info->core_typeoffs;
2837 src = mod->kallsyms->symtab;
2838 for (ndst = i = 0; i < mod->kallsyms->num_symtab; i++) {
2839 mod->kallsyms->typetab[i] = elf_type(src + i, info);
2840 if (i == 0 || is_livepatch_module(mod) ||
2841 is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum,
2842 info->index.pcpu)) {
2843 mod->core_kallsyms.typetab[ndst] =
2844 mod->kallsyms->typetab[i];
2845 dst[ndst] = src[i];
2846 dst[ndst++].st_name = s - mod->core_kallsyms.strtab;
2847 s += strlcpy(s, &mod->kallsyms->strtab[src[i].st_name],
2848 KSYM_NAME_LEN) + 1;
2849 }
2850 }
2851 mod->core_kallsyms.num_symtab = ndst;
2852}
2853#else
2854static inline void layout_symtab(struct module *mod, struct load_info *info)
2855{
2856}
2857
2858static void add_kallsyms(struct module *mod, const struct load_info *info)
2859{
2860}
2861#endif /* CONFIG_KALLSYMS */
2862
2863static void dynamic_debug_setup(struct module *mod, struct _ddebug *debug, unsigned int num)
2864{
2865 if (!debug)
2866 return;
2867 ddebug_add_module(debug, num, mod->name);
2868}
2869
2870static void dynamic_debug_remove(struct module *mod, struct _ddebug *debug)
2871{
2872 if (debug)
2873 ddebug_remove_module(mod->name);
2874}
2875
2876void * __weak module_alloc(unsigned long size)
2877{
2878 return vmalloc_exec(size);
2879}
2880
2881bool __weak module_exit_section(const char *name)
2882{
2883 return strstarts(name, ".exit");
2884}
2885
2886#ifdef CONFIG_DEBUG_KMEMLEAK
2887static void kmemleak_load_module(const struct module *mod,
2888 const struct load_info *info)
2889{
2890 unsigned int i;
2891
2892 /* only scan the sections containing data */
2893 kmemleak_scan_area(mod, sizeof(struct module), GFP_KERNEL);
2894
2895 for (i = 1; i < info->hdr->e_shnum; i++) {
2896 /* Scan all writable sections that are not executable */
2897 if (!(info->sechdrs[i].sh_flags & SHF_ALLOC) ||
2898 !(info->sechdrs[i].sh_flags & SHF_WRITE) ||
2899 (info->sechdrs[i].sh_flags & SHF_EXECINSTR))
2900 continue;
2901
2902 kmemleak_scan_area((void *)info->sechdrs[i].sh_addr,
2903 info->sechdrs[i].sh_size, GFP_KERNEL);
2904 }
2905}
2906#else
2907static inline void kmemleak_load_module(const struct module *mod,
2908 const struct load_info *info)
2909{
2910}
2911#endif
2912
2913#ifdef CONFIG_MODULE_SIG
2914static int module_sig_check(struct load_info *info, int flags)
2915{
2916 int err = -ENODATA;
2917 const unsigned long markerlen = sizeof(MODULE_SIG_STRING) - 1;
2918 const char *reason;
2919 const void *mod = info->hdr;
2920
2921 /*
2922 * Require flags == 0, as a module with version information
2923 * removed is no longer the module that was signed
2924 */
2925 if (flags == 0 &&
2926 info->len > markerlen &&
2927 memcmp(mod + info->len - markerlen, MODULE_SIG_STRING, markerlen) == 0) {
2928 /* We truncate the module to discard the signature */
2929 info->len -= markerlen;
2930 err = mod_verify_sig(mod, info);
2931 }
2932
2933 switch (err) {
2934 case 0:
2935 info->sig_ok = true;
2936 return 0;
2937
2938 /* We don't permit modules to be loaded into trusted kernels
2939 * without a valid signature on them, but if we're not
2940 * enforcing, certain errors are non-fatal.
2941 */
2942 case -ENODATA:
2943 reason = "unsigned module";
2944 break;
2945 case -ENOPKG:
2946 reason = "module with unsupported crypto";
2947 break;
2948 case -ENOKEY:
2949 reason = "module with unavailable key";
2950 break;
2951
2952 /* All other errors are fatal, including nomem, unparseable
2953 * signatures and signature check failures - even if signatures
2954 * aren't required.
2955 */
2956 default:
2957 return err;
2958 }
2959
2960 if (is_module_sig_enforced()) {
2961 pr_notice("Loading of %s is rejected\n", reason);
2962 return -EKEYREJECTED;
2963 }
2964
2965 return security_locked_down(LOCKDOWN_MODULE_SIGNATURE);
2966}
2967#else /* !CONFIG_MODULE_SIG */
2968static int module_sig_check(struct load_info *info, int flags)
2969{
2970 return 0;
2971}
2972#endif /* !CONFIG_MODULE_SIG */
2973
2974static int validate_section_offset(struct load_info *info, Elf_Shdr *shdr)
2975{
2976 unsigned long secend;
2977
2978 /*
2979 * Check for both overflow and offset/size being
2980 * too large.
2981 */
2982 secend = shdr->sh_offset + shdr->sh_size;
2983 if (secend < shdr->sh_offset || secend > info->len)
2984 return -ENOEXEC;
2985
2986 return 0;
2987}
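/*
 * Example of the wraparound this catches (illustrative, 32-bit):
 * sh_offset == 0xfffff000 and sh_size == 0x2000 give
 * secend == 0x1000, so "secend < shdr->sh_offset" rejects the section
 * even though secend alone would pass the length check.
 */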
2988
2989/*
2990 * Sanity checks against invalid binaries, wrong arch, weird elf version.
2991 *
2992 * Also do basic validity checks against section offsets and sizes, the
2993 * section name string table, and the indices used for it (sh_name).
2994 */
2995static int elf_validity_check(struct load_info *info)
2996{
2997 unsigned int i;
2998 Elf_Shdr *shdr, *strhdr;
2999 int err;
3000
3001 if (info->len < sizeof(*(info->hdr)))
3002 return -ENOEXEC;
3003
3004 if (memcmp(info->hdr->e_ident, ELFMAG, SELFMAG) != 0
3005 || info->hdr->e_type != ET_REL
3006 || !elf_check_arch(info->hdr)
3007 || info->hdr->e_shentsize != sizeof(Elf_Shdr))
3008 return -ENOEXEC;
3009
3010 /*
3011 * e_shnum is 16 bits, and sizeof(Elf_Shdr) is
3012 * known and small. So e_shnum * sizeof(Elf_Shdr)
3013 * will not overflow unsigned long on any platform.
3014 */
3015 if (info->hdr->e_shoff >= info->len
3016 || (info->hdr->e_shnum * sizeof(Elf_Shdr) >
3017 info->len - info->hdr->e_shoff))
3018 return -ENOEXEC;
3019
3020 info->sechdrs = (void *)info->hdr + info->hdr->e_shoff;
3021
3022 /*
3023 * Verify if the section name table index is valid.
3024 */
3025 if (info->hdr->e_shstrndx == SHN_UNDEF
3026 || info->hdr->e_shstrndx >= info->hdr->e_shnum)
3027 return -ENOEXEC;
3028
3029 strhdr = &info->sechdrs[info->hdr->e_shstrndx];
3030 err = validate_section_offset(info, strhdr);
3031 if (err < 0)
3032 return err;
3033
3034 /*
3035 * The section name table must be NUL-terminated, as required
3036 * by the spec. This makes strcmp and pr_* calls that access
3037 * strings in the section safe.
3038 */
3039 info->secstrings = (void *)info->hdr + strhdr->sh_offset;
3040 if (info->secstrings[strhdr->sh_size - 1] != '\0')
3041 return -ENOEXEC;
3042
3043 /*
3044 * The code assumes that section 0 has a length of zero and
3045 * an addr of zero, so check for it.
3046 */
3047 if (info->sechdrs[0].sh_type != SHT_NULL
3048 || info->sechdrs[0].sh_size != 0
3049 || info->sechdrs[0].sh_addr != 0)
3050 return -ENOEXEC;
3051
3052 for (i = 1; i < info->hdr->e_shnum; i++) {
3053 shdr = &info->sechdrs[i];
3054 switch (shdr->sh_type) {
3055 case SHT_NULL:
3056 case SHT_NOBITS:
3057 continue;
3058 case SHT_SYMTAB:
3059 if (shdr->sh_link == SHN_UNDEF
3060 || shdr->sh_link >= info->hdr->e_shnum)
3061 return -ENOEXEC;
3062 fallthrough;
3063 default:
3064 err = validate_section_offset(info, shdr);
3065 if (err < 0) {
3066 pr_err("Invalid ELF section in module (section %u type %u)\n",
3067 i, shdr->sh_type);
3068 return err;
3069 }
3070
3071 if (shdr->sh_flags & SHF_ALLOC) {
3072 if (shdr->sh_name >= strhdr->sh_size) {
3073 pr_err("Invalid ELF section name in module (section %u type %u)\n",
3074 i, shdr->sh_type);
3075 return -ENOEXEC;
3076 }
3077 }
3078 break;
3079 }
3080 }
3081
3082 return 0;
3083}
3084
3085#define COPY_CHUNK_SIZE (16*PAGE_SIZE)
3086
3087static int copy_chunked_from_user(void *dst, const void __user *usrc, unsigned long len)
3088{
3089 do {
3090 unsigned long n = min(len, COPY_CHUNK_SIZE);
3091
3092 if (copy_from_user(dst, usrc, n) != 0)
3093 return -EFAULT;
3094 cond_resched();
3095 dst += n;
3096 usrc += n;
3097 len -= n;
3098 } while (len);
3099 return 0;
3100}
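/*
 * (Design note, inferred from the code above: copying in 16-page
 * chunks with a cond_resched() between chunks keeps a very large
 * module image from hogging the CPU for the whole user-space copy.)
 */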
3101
3102#ifdef CONFIG_LIVEPATCH
3103static int check_modinfo_livepatch(struct module *mod, struct load_info *info)
3104{
3105 if (get_modinfo(info, "livepatch")) {
3106 mod->klp = true;
3107 add_taint_module(mod, TAINT_LIVEPATCH, LOCKDEP_STILL_OK);
3108 pr_notice_once("%s: tainting kernel with TAINT_LIVEPATCH\n",
3109 mod->name);
3110 }
3111
3112 return 0;
3113}
3114#else /* !CONFIG_LIVEPATCH */
3115static int check_modinfo_livepatch(struct module *mod, struct load_info *info)
3116{
3117 if (get_modinfo(info, "livepatch")) {
3118 pr_err("%s: module is marked as livepatch module, but livepatch support is disabled\n",
3119 mod->name);
3120 return -ENOEXEC;
3121 }
3122
3123 return 0;
3124}
3125#endif /* CONFIG_LIVEPATCH */
3126
3127static void check_modinfo_retpoline(struct module *mod, struct load_info *info)
3128{
3129 if (retpoline_module_ok(get_modinfo(info, "retpoline")))
3130 return;
3131
3132 pr_warn("%s: loading module not compiled with retpoline compiler.\n",
3133 mod->name);
3134}
3135
3136/* Sets info->hdr and info->len. */
3137static int copy_module_from_user(const void __user *umod, unsigned long len,
3138 struct load_info *info)
3139{
3140 int err;
3141
3142 info->len = len;
3143 if (info->len < sizeof(*(info->hdr)))
3144 return -ENOEXEC;
3145
3146 err = security_kernel_load_data(LOADING_MODULE);
3147 if (err)
3148 return err;
3149
3150 /* Suck in entire file: we'll want most of it. */
3151 info->hdr = __vmalloc(info->len,
3152 GFP_KERNEL | __GFP_NOWARN, PAGE_KERNEL);
3153 if (!info->hdr)
3154 return -ENOMEM;
3155
3156 if (copy_chunked_from_user(info->hdr, umod, info->len) != 0) {
3157 vfree(info->hdr);
3158 return -EFAULT;
3159 }
3160
3161 return 0;
3162}
3163
3164static void free_copy(struct load_info *info)
3165{
3166 vfree(info->hdr);
3167}
3168
3169static int rewrite_section_headers(struct load_info *info, int flags)
3170{
3171 unsigned int i;
3172
3173 /* This should always be true, but let's be sure. */
3174 info->sechdrs[0].sh_addr = 0;
3175
3176 for (i = 1; i < info->hdr->e_shnum; i++) {
3177 Elf_Shdr *shdr = &info->sechdrs[i];
3178
3179 /* Set each section's sh_addr to its address in the
3180 temporary image. */
3181 shdr->sh_addr = (size_t)info->hdr + shdr->sh_offset;
3182
3183#ifndef CONFIG_MODULE_UNLOAD
3184 /* Don't load .exit sections */
3185 if (module_exit_section(info->secstrings+shdr->sh_name))
3186 shdr->sh_flags &= ~(unsigned long)SHF_ALLOC;
3187#endif
3188 }
3189
3190 /* Track but don't keep modinfo and version sections. */
3191 info->sechdrs[info->index.vers].sh_flags &= ~(unsigned long)SHF_ALLOC;
3192 info->sechdrs[info->index.info].sh_flags &= ~(unsigned long)SHF_ALLOC;
3193
3194 return 0;
3195}
3196
3197/*
3198 * Set up our basic convenience variables (pointers to section headers,
3199 * search for module section index etc), and do some basic section
3200 * verification.
3201 *
3202 * Set info->mod to the temporary copy of the module in info->hdr. The final one
3203 * will be allocated in move_module().
3204 */
3205static int setup_load_info(struct load_info *info, int flags)
3206{
3207 unsigned int i;
3208
3209 /* Try to find a name early so we can log errors with a module name */
3210 info->index.info = find_sec(info, ".modinfo");
3211 if (info->index.info)
3212 info->name = get_modinfo(info, "name");
3213
3214 /* Find internal symbols and strings. */
3215 for (i = 1; i < info->hdr->e_shnum; i++) {
3216 if (info->sechdrs[i].sh_type == SHT_SYMTAB) {
3217 info->index.sym = i;
3218 info->index.str = info->sechdrs[i].sh_link;
3219 info->strtab = (char *)info->hdr
3220 + info->sechdrs[info->index.str].sh_offset;
3221 break;
3222 }
3223 }
3224
3225 if (info->index.sym == 0) {
3226 pr_warn("%s: module has no symbols (stripped?)\n",
3227 info->name ?: "(missing .modinfo section or name field)");
3228 return -ENOEXEC;
3229 }
3230
3231 info->index.mod = find_sec(info, ".gnu.linkonce.this_module");
3232 if (!info->index.mod) {
3233 pr_warn("%s: No module found in object\n",
3234 info->name ?: "(missing .modinfo section or name field)");
3235 return -ENOEXEC;
3236 }
3237 /* This is temporary: point mod into copy of data. */
3238 info->mod = (void *)info->hdr + info->sechdrs[info->index.mod].sh_offset;
3239
3240 /*
3241 * If we didn't load the .modinfo 'name' field earlier, fall back to
3242 * on-disk struct mod 'name' field.
3243 */
3244 if (!info->name)
3245 info->name = info->mod->name;
3246
3247 if (flags & MODULE_INIT_IGNORE_MODVERSIONS)
3248 info->index.vers = 0; /* Pretend no __versions section! */
3249 else
3250 info->index.vers = find_sec(info, "__versions");
3251
3252 info->index.pcpu = find_pcpusec(info);
3253
3254 return 0;
3255}
3256
3257static int check_modinfo(struct module *mod, struct load_info *info, int flags)
3258{
3259 const char *modmagic = get_modinfo(info, "vermagic");
3260 int err;
3261
3262 if (flags & MODULE_INIT_IGNORE_VERMAGIC)
3263 modmagic = NULL;
3264
3265 /* This is allowed: modprobe --force will invalidate it. */
3266 if (!modmagic) {
3267 err = try_to_force_load(mod, "bad vermagic");
3268 if (err)
3269 return err;
3270 } else if (!same_magic(modmagic, vermagic, info->index.vers)) {
3271 pr_err("%s: version magic '%s' should be '%s'\n",
3272 info->name, modmagic, vermagic);
3273 return -ENOEXEC;
3274 }
3275
3276 if (!get_modinfo(info, "intree")) {
3277 if (!test_taint(TAINT_OOT_MODULE))
3278 pr_warn("%s: loading out-of-tree module taints kernel.\n",
3279 mod->name);
3280 add_taint_module(mod, TAINT_OOT_MODULE, LOCKDEP_STILL_OK);
3281 }
3282
3283 check_modinfo_retpoline(mod, info);
3284
3285 if (get_modinfo(info, "staging")) {
3286 add_taint_module(mod, TAINT_CRAP, LOCKDEP_STILL_OK);
3287 pr_warn("%s: module is from the staging directory, the quality "
3288 "is unknown, you have been warned.\n", mod->name);
3289 }
3290
3291 err = check_modinfo_livepatch(mod, info);
3292 if (err)
3293 return err;
3294
3295 /* Set up license info based on the info section */
3296 set_license(mod, get_modinfo(info, "license"));
3297
3298 return 0;
3299}
3300
3301static int find_module_sections(struct module *mod, struct load_info *info)
3302{
3303 mod->kp = section_objs(info, "__param",
3304 sizeof(*mod->kp), &mod->num_kp);
3305 mod->syms = section_objs(info, "__ksymtab",
3306 sizeof(*mod->syms), &mod->num_syms);
3307 mod->crcs = section_addr(info, "__kcrctab");
3308 mod->gpl_syms = section_objs(info, "__ksymtab_gpl",
3309 sizeof(*mod->gpl_syms),
3310 &mod->num_gpl_syms);
3311 mod->gpl_crcs = section_addr(info, "__kcrctab_gpl");
3312 mod->gpl_future_syms = section_objs(info,
3313 "__ksymtab_gpl_future",
3314 sizeof(*mod->gpl_future_syms),
3315 &mod->num_gpl_future_syms);
3316 mod->gpl_future_crcs = section_addr(info, "__kcrctab_gpl_future");
3317
3318#ifdef CONFIG_UNUSED_SYMBOLS
3319 mod->unused_syms = section_objs(info, "__ksymtab_unused",
3320 sizeof(*mod->unused_syms),
3321 &mod->num_unused_syms);
3322 mod->unused_crcs = section_addr(info, "__kcrctab_unused");
3323 mod->unused_gpl_syms = section_objs(info, "__ksymtab_unused_gpl",
3324 sizeof(*mod->unused_gpl_syms),
3325 &mod->num_unused_gpl_syms);
3326 mod->unused_gpl_crcs = section_addr(info, "__kcrctab_unused_gpl");
3327#endif
3328#ifdef CONFIG_CONSTRUCTORS
3329 mod->ctors = section_objs(info, ".ctors",
3330 sizeof(*mod->ctors), &mod->num_ctors);
3331 if (!mod->ctors)
3332 mod->ctors = section_objs(info, ".init_array",
3333 sizeof(*mod->ctors), &mod->num_ctors);
3334 else if (find_sec(info, ".init_array")) {
3335 /*
3336 * This shouldn't happen with the same compiler and binutils
3337 * building all parts of the module.
3338 */
3339 pr_warn("%s: has both .ctors and .init_array.\n",
3340 mod->name);
3341 return -EINVAL;
3342 }
3343#endif
3344
3345#ifdef CONFIG_TRACEPOINTS
3346 mod->tracepoints_ptrs = section_objs(info, "__tracepoints_ptrs",
3347 sizeof(*mod->tracepoints_ptrs),
3348 &mod->num_tracepoints);
3349#endif
3350#ifdef CONFIG_TREE_SRCU
3351 mod->srcu_struct_ptrs = section_objs(info, "___srcu_struct_ptrs",
3352 sizeof(*mod->srcu_struct_ptrs),
3353 &mod->num_srcu_structs);
3354#endif
3355#ifdef CONFIG_BPF_EVENTS
3356 mod->bpf_raw_events = section_objs(info, "__bpf_raw_tp_map",
3357 sizeof(*mod->bpf_raw_events),
3358 &mod->num_bpf_raw_events);
3359#endif
3360#ifdef CONFIG_JUMP_LABEL
3361 mod->jump_entries = section_objs(info, "__jump_table",
3362 sizeof(*mod->jump_entries),
3363 &mod->num_jump_entries);
3364#endif
3365#ifdef CONFIG_EVENT_TRACING
3366 mod->trace_events = section_objs(info, "_ftrace_events",
3367 sizeof(*mod->trace_events),
3368 &mod->num_trace_events);
3369 mod->trace_evals = section_objs(info, "_ftrace_eval_map",
3370 sizeof(*mod->trace_evals),
3371 &mod->num_trace_evals);
3372#endif
3373#ifdef CONFIG_TRACING
3374 mod->trace_bprintk_fmt_start = section_objs(info, "__trace_printk_fmt",
3375 sizeof(*mod->trace_bprintk_fmt_start),
3376 &mod->num_trace_bprintk_fmt);
3377#endif
3378#ifdef CONFIG_FTRACE_MCOUNT_RECORD
3379 /* sechdrs[0].sh_size is always zero */
3380 mod->ftrace_callsites = section_objs(info, "__mcount_loc",
3381 sizeof(*mod->ftrace_callsites),
3382 &mod->num_ftrace_callsites);
3383#endif
3384#ifdef CONFIG_FUNCTION_ERROR_INJECTION
3385 mod->ei_funcs = section_objs(info, "_error_injection_whitelist",
3386 sizeof(*mod->ei_funcs),
3387 &mod->num_ei_funcs);
3388#endif
3389 mod->extable = section_objs(info, "__ex_table",
3390 sizeof(*mod->extable), &mod->num_exentries);
3391
3392 if (section_addr(info, "__obsparm"))
3393 pr_warn("%s: Ignoring obsolete parameters\n", mod->name);
3394
3395 info->debug = section_objs(info, "__verbose",
3396 sizeof(*info->debug), &info->num_debug);
3397
3398 return 0;
3399}
3400
3401static int move_module(struct module *mod, struct load_info *info)
3402{
3403 int i;
3404 void *ptr;
3405
3406 /* Do the allocs. */
3407 ptr = module_alloc(mod->core_layout.size);
3408 /*
3409 * The pointer to this block is stored in the module structure
3410 * which is inside the block. Just mark it as not being a
3411 * leak.
3412 */
3413 kmemleak_not_leak(ptr);
3414 if (!ptr)
3415 return -ENOMEM;
3416
3417 memset(ptr, 0, mod->core_layout.size);
3418 mod->core_layout.base = ptr;
3419
3420 if (mod->init_layout.size) {
3421 ptr = module_alloc(mod->init_layout.size);
3422 /*
3423 * The pointer to this block is stored in the module structure
3424 * which is inside the block. This block doesn't need to be
3425 * scanned as it contains data and code that will be freed
3426 * after the module is initialized.
3427 */
3428 kmemleak_ignore(ptr);
3429 if (!ptr) {
3430 module_memfree(mod->core_layout.base);
3431 return -ENOMEM;
3432 }
3433 memset(ptr, 0, mod->init_layout.size);
3434 mod->init_layout.base = ptr;
3435 } else
3436 mod->init_layout.base = NULL;
3437
3438 /* Transfer each section which specifies SHF_ALLOC */
3439 pr_debug("final section addresses:\n");
3440 for (i = 0; i < info->hdr->e_shnum; i++) {
3441 void *dest;
3442 Elf_Shdr *shdr = &info->sechdrs[i];
3443
3444 if (!(shdr->sh_flags & SHF_ALLOC))
3445 continue;
3446
3447 if (shdr->sh_entsize & INIT_OFFSET_MASK)
3448 dest = mod->init_layout.base
3449 + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
3450 else
3451 dest = mod->core_layout.base + shdr->sh_entsize;
3452
3453 if (shdr->sh_type != SHT_NOBITS)
3454 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
3455 /* Update sh_addr to point to copy in image. */
3456 shdr->sh_addr = (unsigned long)dest;
3457 pr_debug("\t0x%lx %s\n",
3458 (long)shdr->sh_addr, info->secstrings + shdr->sh_name);
3459 }
3460
3461 return 0;
3462}
3463
3464static int check_module_license_and_versions(struct module *mod)
3465{
3466 int prev_taint = test_taint(TAINT_PROPRIETARY_MODULE);
3467
3468 /*
3469 * ndiswrapper is under GPL by itself, but loads proprietary modules.
3470 * Don't use add_taint_module(), as it would prevent ndiswrapper from
3471 * using GPL-only symbols it needs.
3472 */
3473 if (strcmp(mod->name, "ndiswrapper") == 0)
3474 add_taint(TAINT_PROPRIETARY_MODULE, LOCKDEP_NOW_UNRELIABLE);
3475
3476 /* driverloader was caught wrongly pretending to be under GPL */
3477 if (strcmp(mod->name, "driverloader") == 0)
3478 add_taint_module(mod, TAINT_PROPRIETARY_MODULE,
3479 LOCKDEP_NOW_UNRELIABLE);
3480
3481 /* lve claims to be GPL but upstream won't provide source */
3482 if (strcmp(mod->name, "lve") == 0)
3483 add_taint_module(mod, TAINT_PROPRIETARY_MODULE,
3484 LOCKDEP_NOW_UNRELIABLE);
3485
3486 if (!prev_taint && test_taint(TAINT_PROPRIETARY_MODULE))
3487 pr_warn("%s: module license taints kernel.\n", mod->name);
3488
3489#ifdef CONFIG_MODVERSIONS
3490 if ((mod->num_syms && !mod->crcs)
3491 || (mod->num_gpl_syms && !mod->gpl_crcs)
3492 || (mod->num_gpl_future_syms && !mod->gpl_future_crcs)
3493#ifdef CONFIG_UNUSED_SYMBOLS
3494 || (mod->num_unused_syms && !mod->unused_crcs)
3495 || (mod->num_unused_gpl_syms && !mod->unused_gpl_crcs)
3496#endif
3497 ) {
3498 return try_to_force_load(mod,
3499 "no versions for exported symbols");
3500 }
3501#endif
3502 return 0;
3503}
3504
3505static void flush_module_icache(const struct module *mod)
3506{
3507 mm_segment_t old_fs;
3508
3509 /* flush the icache in correct context */
3510 old_fs = get_fs();
3511 set_fs(KERNEL_DS);
3512
3513 /*
3514 * Flush the instruction cache, since we've played with text.
3515 * Do it before processing of module parameters, so the module
3516 * can provide parameter accessor functions of its own.
3517 */
3518 if (mod->init_layout.base)
3519 flush_icache_range((unsigned long)mod->init_layout.base,
3520 (unsigned long)mod->init_layout.base
3521 + mod->init_layout.size);
3522 flush_icache_range((unsigned long)mod->core_layout.base,
3523 (unsigned long)mod->core_layout.base + mod->core_layout.size);
3524
3525 set_fs(old_fs);
3526}
3527
3528int __weak module_frob_arch_sections(Elf_Ehdr *hdr,
3529 Elf_Shdr *sechdrs,
3530 char *secstrings,
3531 struct module *mod)
3532{
3533 return 0;
3534}
3535
3536/* module_blacklist is a comma-separated list of module names */
3537static char *module_blacklist;
3538static bool blacklisted(const char *module_name)
3539{
3540 const char *p;
3541 size_t len;
3542
3543 if (!module_blacklist)
3544 return false;
3545
3546 for (p = module_blacklist; *p; p += len) {
3547 len = strcspn(p, ",");
3548 if (strlen(module_name) == len && !memcmp(module_name, p, len))
3549 return true;
3550 if (p[len] == ',')
3551 len++;
3552 }
3553 return false;
3554}
3555core_param(module_blacklist, module_blacklist, charp, 0400);
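/*
 * Usage sketch (illustrative): booting with
 *
 *	module_blacklist=nouveau,pcspkr
 *
 * makes blacklisted() return true for either name, so the loader can
 * refuse those modules (this kernel's load path uses -EPERM) before
 * doing any allocation.
 */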
3556
static struct module *layout_and_allocate(struct load_info *info, int flags)
{
	struct module *mod;
	unsigned int ndx;
	int err;

	err = check_modinfo(info->mod, info, flags);
	if (err)
		return ERR_PTR(err);

	/* Allow arches to frob section contents and sizes.  */
	err = module_frob_arch_sections(info->hdr, info->sechdrs,
					info->secstrings, info->mod);
	if (err < 0)
		return ERR_PTR(err);

	/* We will do a special allocation for per-cpu sections later. */
	info->sechdrs[info->index.pcpu].sh_flags &= ~(unsigned long)SHF_ALLOC;

	/*
	 * Mark ro_after_init section with SHF_RO_AFTER_INIT so that
	 * layout_sections() can put it in the right place.
	 * Note: ro_after_init sections also have SHF_{WRITE,ALLOC} set.
	 */
	ndx = find_sec(info, ".data..ro_after_init");
	if (ndx)
		info->sechdrs[ndx].sh_flags |= SHF_RO_AFTER_INIT;
	/*
	 * Mark the __jump_table section as ro_after_init as well: these data
	 * structures are never modified, with the exception of entries that
	 * refer to code in the __init section, which are annotated as such
	 * at module load time.
	 */
	ndx = find_sec(info, "__jump_table");
	if (ndx)
		info->sechdrs[ndx].sh_flags |= SHF_RO_AFTER_INIT;

	/* Determine total sizes, and put offsets in sh_entsize.  For now
	   this is done generically; there don't appear to be any
	   special cases for the architectures. */
	layout_sections(info->mod, info);
	layout_symtab(info->mod, info);

	/* Allocate and move to the final place */
	err = move_module(info->mod, info);
	if (err)
		return ERR_PTR(err);

	/* Module has been copied to its final place now: return it. */
	mod = (void *)info->sechdrs[info->index.mod].sh_addr;
	kmemleak_load_module(mod, info);
	return mod;
}

/* mod is no longer valid after this! */
static void module_deallocate(struct module *mod, struct load_info *info)
{
	percpu_modfree(mod);
	module_arch_freeing_init(mod);
	module_memfree(mod->init_layout.base);
	module_memfree(mod->core_layout.base);
}

int __weak module_finalize(const Elf_Ehdr *hdr,
			   const Elf_Shdr *sechdrs,
			   struct module *me)
{
	return 0;
}

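/* Fixups that must wait until all relocations have been applied. */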
static int post_relocation(struct module *mod, const struct load_info *info)
{
	/* Sort exception table now relocations are done. */
	sort_extable(mod->extable, mod->extable + mod->num_exentries);

	/* Copy relocated percpu area over. */
	percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr,
		       info->sechdrs[info->index.pcpu].sh_size);

	/* Setup kallsyms-specific fields. */
	add_kallsyms(mod, info);

	/* Arch-specific module finalizing. */
	return module_finalize(info->hdr, info->sechdrs, mod);
}

/* Is this module of this name done loading? No locks held. */
static bool finished_loading(const char *name)
{
	struct module *mod;
	bool ret;

	/*
	 * The module_mutex should not be a heavily contended lock;
	 * if we get the occasional sleep here, we'll go an extra iteration
	 * in the wait_event_interruptible(), which is harmless.
	 */
	sched_annotate_sleep();
	mutex_lock(&module_mutex);
	mod = find_module_all(name, strlen(name), true);
	ret = !mod || mod->state == MODULE_STATE_LIVE;
	mutex_unlock(&module_mutex);

	return ret;
}

/* Call module constructors. */
static void do_mod_ctors(struct module *mod)
{
#ifdef CONFIG_CONSTRUCTORS
	unsigned long i;

	for (i = 0; i < mod->num_ctors; i++)
		mod->ctors[i]();
#endif
}

/* For freeing module_init on success, in case kallsyms is still traversing it */
struct mod_initfree {
	struct llist_node node;
	void *module_init;
};

static void do_free_init(struct work_struct *w)
{
	struct llist_node *pos, *n, *list;
	struct mod_initfree *initfree;

	list = llist_del_all(&init_free_list);

	synchronize_rcu();

	llist_for_each_safe(pos, n, list) {
		initfree = container_of(pos, struct mod_initfree, node);
		module_memfree(initfree->module_init);
		kfree(initfree);
	}
}

/*
 * This is where the real work happens.
 *
 * Keep it uninlined to provide a reliable breakpoint target, e.g. for the gdb
 * helper command 'lx-symbols'.
 */
static noinline int do_init_module(struct module *mod)
{
	int ret = 0;
	struct mod_initfree *freeinit;

	freeinit = kmalloc(sizeof(*freeinit), GFP_KERNEL);
	if (!freeinit) {
		ret = -ENOMEM;
		goto fail;
	}
	freeinit->module_init = mod->init_layout.base;

	/*
	 * We want to find out whether @mod uses async during init.  Clear
	 * PF_USED_ASYNC.  async_schedule*() will set it.
	 */
	current->flags &= ~PF_USED_ASYNC;

	do_mod_ctors(mod);
	/* Start the module */
	if (mod->init != NULL)
		ret = do_one_initcall(mod->init);
	if (ret < 0) {
		goto fail_free_freeinit;
	}
	if (ret > 0) {
		pr_warn("%s: '%s'->init suspiciously returned %d, it should "
			"follow 0/-E convention\n"
			"%s: loading module anyway...\n",
			__func__, mod->name, ret, __func__);
		dump_stack();
	}

	/* Now it's a first class citizen! */
	mod->state = MODULE_STATE_LIVE;
	blocking_notifier_call_chain(&module_notify_list,
				     MODULE_STATE_LIVE, mod);

	/* Delay uevent until module has finished its init routine */
	kobject_uevent(&mod->mkobj.kobj, KOBJ_ADD);

	/*
	 * We need to finish all async code before the module init sequence
	 * is done.  This has potential to deadlock.  For example, a newly
	 * detected block device can trigger request_module() of the
	 * default iosched from async probing task.  Once userland helper
	 * reaches here, async_synchronize_full() will wait on the async
	 * task waiting on request_module() and deadlock.
	 *
	 * This deadlock is avoided by performing async_synchronize_full()
	 * iff module init queued any async jobs.  This isn't a full
	 * solution as it will deadlock the same if module loading from
	 * async jobs nests more than once; however, due to the various
	 * constraints, this hack seems to be the best option for now.
	 * Please refer to the following thread for details.
	 *
	 * http://thread.gmane.org/gmane.linux.kernel/1420814
	 */
	if (!mod->async_probe_requested && (current->flags & PF_USED_ASYNC))
		async_synchronize_full();

	ftrace_free_mem(mod, mod->init_layout.base, mod->init_layout.base +
			mod->init_layout.size);
	mutex_lock(&module_mutex);
	/* Drop initial reference. */
	module_put(mod);
	trim_init_extable(mod);
#ifdef CONFIG_KALLSYMS
	/* Switch to core kallsyms now init is done: kallsyms may be walking! */
	rcu_assign_pointer(mod->kallsyms, &mod->core_kallsyms);
#endif
	module_enable_ro(mod, true);
	mod_tree_remove_init(mod);
	module_arch_freeing_init(mod);
	mod->init_layout.base = NULL;
	mod->init_layout.size = 0;
	mod->init_layout.ro_size = 0;
	mod->init_layout.ro_after_init_size = 0;
	mod->init_layout.text_size = 0;
	/*
	 * We want to free module_init, but be aware that kallsyms may be
	 * walking this with preempt disabled.  In all the failure paths, we
	 * call synchronize_rcu(), but we don't want to slow down the success
	 * path. module_memfree() cannot be called in an interrupt, so do the
	 * work and call synchronize_rcu() in a work queue.
	 *
	 * Note that module_alloc() on most architectures creates W+X page
	 * mappings which won't be cleaned up until do_free_init() runs.  Any
	 * code such as mark_rodata_ro() which depends on those mappings to
	 * be cleaned up needs to sync with the queued work - i.e.
	 * rcu_barrier()
	 */
	if (llist_add(&freeinit->node, &init_free_list))
		schedule_work(&init_free_wq);

	mutex_unlock(&module_mutex);
	wake_up_all(&module_wq);

	return 0;

fail_free_freeinit:
	kfree(freeinit);
fail:
	/* Try to protect us from buggy refcounters. */
	mod->state = MODULE_STATE_GOING;
	synchronize_rcu();
	module_put(mod);
	blocking_notifier_call_chain(&module_notify_list,
				     MODULE_STATE_GOING, mod);
	klp_module_going(mod);
	ftrace_release_mod(mod);
	free_module(mod);
	wake_up_all(&module_wq);
	return ret;
}

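/*
 * Module loading requires CAP_SYS_MODULE and must not have been disabled
 * via the modules_disabled sysctl.
 */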
static int may_init_module(void)
{
	if (!capable(CAP_SYS_MODULE) || modules_disabled)
		return -EPERM;

	return 0;
}

/*
 * We try to place it in the list now to make sure it's unique before
 * we dedicate too many resources.  In particular, temporary percpu
 * memory exhaustion.
 */
static int add_unformed_module(struct module *mod)
{
	int err;
	struct module *old;

	mod->state = MODULE_STATE_UNFORMED;

again:
	mutex_lock(&module_mutex);
	old = find_module_all(mod->name, strlen(mod->name), true);
	if (old != NULL) {
		if (old->state != MODULE_STATE_LIVE) {
			/* Wait in case it fails to load. */
			mutex_unlock(&module_mutex);
			err = wait_event_interruptible(module_wq,
					       finished_loading(mod->name));
			if (err)
				goto out_unlocked;
			goto again;
		}
		err = -EEXIST;
		goto out;
	}
	mod_update_bounds(mod);
	list_add_rcu(&mod->list, &modules);
	mod_tree_insert(mod);
	err = 0;

out:
	mutex_unlock(&module_mutex);
out_unlocked:
	return err;
}

static int complete_formation(struct module *mod, struct load_info *info)
{
	int err;

	mutex_lock(&module_mutex);

	/* Find duplicate symbols (must be called under lock). */
	err = verify_exported_symbols(mod);
	if (err < 0)
		goto out;

	/* This relies on module_mutex for list integrity. */
	module_bug_finalize(info->hdr, info->sechdrs, mod);

	module_enable_ro(mod, false);
	module_enable_nx(mod);
	module_enable_x(mod);

	/* Mark state as coming so strong_try_module_get() ignores us,
	 * but kallsyms etc. can see us. */
	mod->state = MODULE_STATE_COMING;
	mutex_unlock(&module_mutex);

	return 0;

out:
	mutex_unlock(&module_mutex);
	return err;
}

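/*
 * Notify ftrace, livepatch and the module notifier chain that @mod is
 * about to start running (MODULE_STATE_COMING).
 */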
static int prepare_coming_module(struct module *mod)
{
	int err;

	ftrace_module_enable(mod);
	err = klp_module_coming(mod);
	if (err)
		return err;

	blocking_notifier_call_chain(&module_notify_list,
				     MODULE_STATE_COMING, mod);
	return 0;
}

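/*
 * Fallback for parameters the module does not declare itself: handle the
 * magic "async_probe" and dyndbg arguments, and warn about the rest.
 */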
static int unknown_module_param_cb(char *param, char *val, const char *modname,
				   void *arg)
{
	struct module *mod = arg;
	int ret;

	if (strcmp(param, "async_probe") == 0) {
		mod->async_probe_requested = true;
		return 0;
	}

	/* Check for magic 'dyndbg' arg */
	ret = ddebug_dyndbg_module_param_cb(param, val, modname);
	if (ret != 0)
		pr_warn("%s: unknown parameter '%s' ignored\n", modname, param);
	return 0;
}

/* Allocate and load the module: note that size of section 0 is always
   zero, and we rely on this for optional sections. */
static int load_module(struct load_info *info, const char __user *uargs,
		       int flags)
{
	struct module *mod;
	long err = 0;
	char *after_dashes;

	/*
	 * Do the signature check (if any) first. All that
	 * the signature check needs is info->len, it does
	 * not need any of the section info. That can be
	 * set up later. This will minimize the chances
	 * of a corrupt module causing problems before
	 * we even get to the signature check.
	 *
	 * The check will also adjust info->len by stripping
	 * off the sig length at the end of the module, making
	 * checks against info->len more correct.
	 */
	err = module_sig_check(info, flags);
	if (err)
		goto free_copy;

	/*
	 * Do basic sanity checks against the ELF header and
	 * sections.
	 */
	err = elf_validity_check(info);
	if (err) {
		pr_err("Module has invalid ELF structures\n");
		goto free_copy;
	}

	/*
	 * Everything checks out, so set up the section info
	 * in the info structure.
	 */
	err = setup_load_info(info, flags);
	if (err)
		goto free_copy;

	/*
	 * Now that we know we have the correct module name, check
	 * if it's blacklisted.
	 */
	if (blacklisted(info->name)) {
		err = -EPERM;
		goto free_copy;
	}

	err = rewrite_section_headers(info, flags);
	if (err)
		goto free_copy;

	/* Check module struct version now, before we try to use module. */
	if (!check_modstruct_version(info, info->mod)) {
		err = -ENOEXEC;
		goto free_copy;
	}

	/* Figure out module layout, and allocate all the memory. */
	mod = layout_and_allocate(info, flags);
	if (IS_ERR(mod)) {
		err = PTR_ERR(mod);
		goto free_copy;
	}

	audit_log_kern_module(mod->name);

	/* Reserve our place in the list. */
	err = add_unformed_module(mod);
	if (err)
		goto free_module;

#ifdef CONFIG_MODULE_SIG
	mod->sig_ok = info->sig_ok;
	if (!mod->sig_ok) {
		pr_notice_once("%s: module verification failed: signature "
			       "and/or required key missing - tainting "
			       "kernel\n", mod->name);
		add_taint_module(mod, TAINT_UNSIGNED_MODULE, LOCKDEP_STILL_OK);
	}
#endif

	/* To avoid stressing percpu allocator, do this once we're unique. */
	err = percpu_modalloc(mod, info);
	if (err)
		goto unlink_mod;

	/* Now module is in final location, initialize linked lists, etc. */
	err = module_unload_init(mod);
	if (err)
		goto unlink_mod;

	init_param_lock(mod);

	/* Now we've got everything in the final locations, we can
	 * find optional sections. */
	err = find_module_sections(mod, info);
	if (err)
		goto free_unload;

	err = check_module_license_and_versions(mod);
	if (err)
		goto free_unload;

	/* Set up MODINFO_ATTR fields */
	setup_modinfo(mod, info);

	/* Fix up syms, so that st_value is a pointer to location. */
	err = simplify_symbols(mod, info);
	if (err < 0)
		goto free_modinfo;

	err = apply_relocations(mod, info);
	if (err < 0)
		goto free_modinfo;

	err = post_relocation(mod, info);
	if (err < 0)
		goto free_modinfo;

	flush_module_icache(mod);

	/* Now copy in args */
	mod->args = strndup_user(uargs, ~0UL >> 1);
	if (IS_ERR(mod->args)) {
		err = PTR_ERR(mod->args);
		goto free_arch_cleanup;
	}

	dynamic_debug_setup(mod, info->debug, info->num_debug);

	/* Ftrace init must be called in the MODULE_STATE_UNFORMED state */
	ftrace_module_init(mod);

	/* Finally it's fully formed, ready to start executing. */
	err = complete_formation(mod, info);
	if (err)
		goto ddebug_cleanup;

	err = prepare_coming_module(mod);
	if (err)
		goto bug_cleanup;

	/* Module is ready to execute: parsing args may do that. */
	after_dashes = parse_args(mod->name, mod->args, mod->kp, mod->num_kp,
				  -32768, 32767, mod,
				  unknown_module_param_cb);
	if (IS_ERR(after_dashes)) {
		err = PTR_ERR(after_dashes);
		goto coming_cleanup;
	} else if (after_dashes) {
		pr_warn("%s: parameters '%s' after `--' ignored\n",
			mod->name, after_dashes);
	}

	/* Link in to sysfs. */
	err = mod_sysfs_setup(mod, info, mod->kp, mod->num_kp);
	if (err < 0)
		goto coming_cleanup;

	if (is_livepatch_module(mod)) {
		err = copy_module_elf(mod, info);
		if (err < 0)
			goto sysfs_cleanup;
	}

	/* Get rid of temporary copy. */
	free_copy(info);

	/* Done! */
	trace_module_load(mod);

	return do_init_module(mod);

 sysfs_cleanup:
	mod_sysfs_teardown(mod);
 coming_cleanup:
	mod->state = MODULE_STATE_GOING;
	destroy_params(mod->kp, mod->num_kp);
	blocking_notifier_call_chain(&module_notify_list,
				     MODULE_STATE_GOING, mod);
	klp_module_going(mod);
 bug_cleanup:
	mod->state = MODULE_STATE_GOING;
	/* module_bug_cleanup needs module_mutex protection */
	mutex_lock(&module_mutex);
	module_bug_cleanup(mod);
	mutex_unlock(&module_mutex);

 ddebug_cleanup:
	ftrace_release_mod(mod);
	dynamic_debug_remove(mod, info->debug);
	synchronize_rcu();
	kfree(mod->args);
 free_arch_cleanup:
	module_arch_cleanup(mod);
 free_modinfo:
	free_modinfo(mod);
 free_unload:
	module_unload_free(mod);
 unlink_mod:
	mutex_lock(&module_mutex);
	/* Unlink carefully: kallsyms could be walking list. */
	list_del_rcu(&mod->list);
	mod_tree_remove(mod);
	wake_up_all(&module_wq);
	/* Wait for RCU-sched synchronization before releasing mod->list. */
	synchronize_rcu();
	mutex_unlock(&module_mutex);
 free_module:
	/* Free lock-classes; relies on the preceding sync_rcu() */
	lockdep_free_key_range(mod->core_layout.base, mod->core_layout.size);

	module_deallocate(mod, info);
 free_copy:
	free_copy(info);
	return err;
}

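/*
 * Entry point for the classic insmod(8) flow: the whole module image is
 * passed in from userspace.  A minimal caller (sketch, see init_module(2)):
 *
 *	syscall(__NR_init_module, image, image_len, "");
 */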
SYSCALL_DEFINE3(init_module, void __user *, umod,
		unsigned long, len, const char __user *, uargs)
{
	int err;
	struct load_info info = { };

	err = may_init_module();
	if (err)
		return err;

	pr_debug("init_module: umod=%p, len=%lu, uargs=%p\n",
	       umod, len, uargs);

	err = copy_module_from_user(umod, len, &info);
	if (err)
		return err;

	return load_module(&info, uargs, 0);
}

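/*
 * fd-based variant used by modern modprobe/libkmod so the kernel can read
 * the file itself.  A minimal caller (sketch, see finit_module(2)):
 *
 *	int fd = open("/path/to/foo.ko", O_RDONLY | O_CLOEXEC);
 *	syscall(__NR_finit_module, fd, "", 0);
 */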
SYSCALL_DEFINE3(finit_module, int, fd, const char __user *, uargs, int, flags)
{
	struct load_info info = { };
	loff_t size;
	void *hdr;
	int err;

	err = may_init_module();
	if (err)
		return err;

	pr_debug("finit_module: fd=%d, uargs=%p, flags=%i\n", fd, uargs, flags);

	if (flags & ~(MODULE_INIT_IGNORE_MODVERSIONS
		      |MODULE_INIT_IGNORE_VERMAGIC))
		return -EINVAL;

	err = kernel_read_file_from_fd(fd, &hdr, &size, INT_MAX,
				       READING_MODULE);
	if (err)
		return err;
	info.hdr = hdr;
	info.len = size;

	return load_module(&info, uargs, flags);
}

static inline int within(unsigned long addr, void *start, unsigned long size)
{
	return ((void *)addr >= start && (void *)addr < start + size);
}

#ifdef CONFIG_KALLSYMS
/*
 * This ignores the intensely annoying "mapping symbols" found
 * in ARM ELF files: $a, $t and $d.
 */
static inline int is_arm_mapping_symbol(const char *str)
{
	if (str[0] == '.' && str[1] == 'L')
		return true;
	return str[0] == '$' && strchr("axtd", str[1])
	       && (str[2] == '\0' || str[2] == '.');
}

static const char *kallsyms_symbol_name(struct mod_kallsyms *kallsyms, unsigned int symnum)
{
	return kallsyms->strtab + kallsyms->symtab[symnum].st_name;
}

/*
 * Given a module and address, find the corresponding symbol and return its name
 * while providing its size and offset if needed.
 */
static const char *find_kallsyms_symbol(struct module *mod,
					unsigned long addr,
					unsigned long *size,
					unsigned long *offset)
{
	unsigned int i, best = 0;
	unsigned long nextval, bestval;
	struct mod_kallsyms *kallsyms = rcu_dereference_sched(mod->kallsyms);

	/* At worst, next value is at end of module */
	if (within_module_init(addr, mod))
		nextval = (unsigned long)mod->init_layout.base+mod->init_layout.text_size;
	else
		nextval = (unsigned long)mod->core_layout.base+mod->core_layout.text_size;

	bestval = kallsyms_symbol_value(&kallsyms->symtab[best]);

	/* Scan for closest preceding symbol, and next symbol. (ELF
	   starts real symbols at 1). */
	for (i = 1; i < kallsyms->num_symtab; i++) {
		const Elf_Sym *sym = &kallsyms->symtab[i];
		unsigned long thisval = kallsyms_symbol_value(sym);

		if (sym->st_shndx == SHN_UNDEF)
			continue;

		/* We ignore unnamed symbols: they're uninformative
		 * and inserted at a whim. */
		if (*kallsyms_symbol_name(kallsyms, i) == '\0'
		    || is_arm_mapping_symbol(kallsyms_symbol_name(kallsyms, i)))
			continue;

		if (thisval <= addr && thisval > bestval) {
			best = i;
			bestval = thisval;
		}
		if (thisval > addr && thisval < nextval)
			nextval = thisval;
	}

	if (!best)
		return NULL;

	if (size)
		*size = nextval - bestval;
	if (offset)
		*offset = addr - bestval;

	return kallsyms_symbol_name(kallsyms, best);
}

void * __weak dereference_module_function_descriptor(struct module *mod,
						     void *ptr)
{
	return ptr;
}

/* For kallsyms to ask for address resolution.  NULL means not found.  Careful
 * not to lock, to avoid deadlock on oopses; simply disable preemption. */
const char *module_address_lookup(unsigned long addr,
			    unsigned long *size,
			    unsigned long *offset,
			    char **modname,
			    char *namebuf)
{
	const char *ret = NULL;
	struct module *mod;

	preempt_disable();
	mod = __module_address(addr);
	if (mod) {
		if (modname)
			*modname = mod->name;

		ret = find_kallsyms_symbol(mod, addr, size, offset);
	}
	/* Make a copy in here where it's safe */
	if (ret) {
		strncpy(namebuf, ret, KSYM_NAME_LEN - 1);
		ret = namebuf;
	}
	preempt_enable();

	return ret;
}

int lookup_module_symbol_name(unsigned long addr, char *symname)
{
	struct module *mod;

	preempt_disable();
	list_for_each_entry_rcu(mod, &modules, list) {
		if (mod->state == MODULE_STATE_UNFORMED)
			continue;
		if (within_module(addr, mod)) {
			const char *sym;

			sym = find_kallsyms_symbol(mod, addr, NULL, NULL);
			if (!sym)
				goto out;

			strlcpy(symname, sym, KSYM_NAME_LEN);
			preempt_enable();
			return 0;
		}
	}
out:
	preempt_enable();
	return -ERANGE;
}

int lookup_module_symbol_attrs(unsigned long addr, unsigned long *size,
			unsigned long *offset, char *modname, char *name)
{
	struct module *mod;

	preempt_disable();
	list_for_each_entry_rcu(mod, &modules, list) {
		if (mod->state == MODULE_STATE_UNFORMED)
			continue;
		if (within_module(addr, mod)) {
			const char *sym;

			sym = find_kallsyms_symbol(mod, addr, size, offset);
			if (!sym)
				goto out;
			if (modname)
				strlcpy(modname, mod->name, MODULE_NAME_LEN);
			if (name)
				strlcpy(name, sym, KSYM_NAME_LEN);
			preempt_enable();
			return 0;
		}
	}
out:
	preempt_enable();
	return -ERANGE;
}

int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
			char *name, char *module_name, int *exported)
{
	struct module *mod;

	preempt_disable();
	list_for_each_entry_rcu(mod, &modules, list) {
		struct mod_kallsyms *kallsyms;

		if (mod->state == MODULE_STATE_UNFORMED)
			continue;
		kallsyms = rcu_dereference_sched(mod->kallsyms);
		if (symnum < kallsyms->num_symtab) {
			const Elf_Sym *sym = &kallsyms->symtab[symnum];

			*value = kallsyms_symbol_value(sym);
			*type = kallsyms->typetab[symnum];
			strlcpy(name, kallsyms_symbol_name(kallsyms, symnum), KSYM_NAME_LEN);
			strlcpy(module_name, mod->name, MODULE_NAME_LEN);
			*exported = is_exported(name, *value, mod);
			preempt_enable();
			return 0;
		}
		symnum -= kallsyms->num_symtab;
	}
	preempt_enable();
	return -ERANGE;
}

/* Given a module and name of symbol, find and return the symbol's value */
static unsigned long find_kallsyms_symbol_value(struct module *mod, const char *name)
{
	unsigned int i;
	struct mod_kallsyms *kallsyms = rcu_dereference_sched(mod->kallsyms);

	for (i = 0; i < kallsyms->num_symtab; i++) {
		const Elf_Sym *sym = &kallsyms->symtab[i];

		if (strcmp(name, kallsyms_symbol_name(kallsyms, i)) == 0 &&
		    sym->st_shndx != SHN_UNDEF)
			return kallsyms_symbol_value(sym);
	}
	return 0;
}

/* Look for this name: can be of form module:name. */
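/* e.g. module_kallsyms_lookup_name("usbcore:usb_find_device"), or a bare symbol name. */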
unsigned long module_kallsyms_lookup_name(const char *name)
{
	struct module *mod;
	char *colon;
	unsigned long ret = 0;

	/* Don't lock: we're in enough trouble already. */
	preempt_disable();
	if ((colon = strnchr(name, MODULE_NAME_LEN, ':')) != NULL) {
		if ((mod = find_module_all(name, colon - name, false)) != NULL)
			ret = find_kallsyms_symbol_value(mod, colon+1);
	} else {
		list_for_each_entry_rcu(mod, &modules, list) {
			if (mod->state == MODULE_STATE_UNFORMED)
				continue;
			if ((ret = find_kallsyms_symbol_value(mod, name)) != 0)
				break;
		}
	}
	preempt_enable();
	return ret;
}

int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
					     struct module *, unsigned long),
				   void *data)
{
	struct module *mod;
	unsigned int i;
	int ret;

	module_assert_mutex();

	list_for_each_entry(mod, &modules, list) {
		/* We hold module_mutex: no need for rcu_dereference_sched */
		struct mod_kallsyms *kallsyms = mod->kallsyms;

		if (mod->state == MODULE_STATE_UNFORMED)
			continue;
		for (i = 0; i < kallsyms->num_symtab; i++) {
			const Elf_Sym *sym = &kallsyms->symtab[i];

			if (sym->st_shndx == SHN_UNDEF)
				continue;

			ret = fn(data, kallsyms_symbol_name(kallsyms, i),
				 mod, kallsyms_symbol_value(sym));
			if (ret != 0)
				return ret;
		}
	}
	return 0;
}
#endif /* CONFIG_KALLSYMS */

/* Maximum number of characters written by module_flags() */
#define MODULE_FLAGS_BUF_SIZE (TAINT_FLAGS_COUNT + 4)

/* Keep in sync with MODULE_FLAGS_BUF_SIZE !!! */
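/*
 * e.g. an unsigned out-of-tree module that is still loading is typically
 * reported as "(OE+)"; the taint letters come from module_flags_taint().
 */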
static char *module_flags(struct module *mod, char *buf)
{
	int bx = 0;

	BUG_ON(mod->state == MODULE_STATE_UNFORMED);
	if (mod->taints ||
	    mod->state == MODULE_STATE_GOING ||
	    mod->state == MODULE_STATE_COMING) {
		buf[bx++] = '(';
		bx += module_flags_taint(mod, buf + bx);
		/* Show a - for module-is-being-unloaded */
		if (mod->state == MODULE_STATE_GOING)
			buf[bx++] = '-';
		/* Show a + for module-is-being-loaded */
		if (mod->state == MODULE_STATE_COMING)
			buf[bx++] = '+';
		buf[bx++] = ')';
	}
	buf[bx] = '\0';

	return buf;
}

#ifdef CONFIG_PROC_FS
/* Called by the /proc file system to return a list of modules. */
static void *m_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&module_mutex);
	return seq_list_start(&modules, *pos);
}

static void *m_next(struct seq_file *m, void *p, loff_t *pos)
{
	return seq_list_next(p, &modules, pos);
}

static void m_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&module_mutex);
}

static int m_show(struct seq_file *m, void *p)
{
	struct module *mod = list_entry(p, struct module, list);
	char buf[MODULE_FLAGS_BUF_SIZE];
	void *value;

	/* We always ignore unformed modules. */
	if (mod->state == MODULE_STATE_UNFORMED)
		return 0;

	seq_printf(m, "%s %u",
		   mod->name, mod->init_layout.size + mod->core_layout.size);
	print_unload_info(m, mod);

	/* Informative for users. */
	seq_printf(m, " %s",
		   mod->state == MODULE_STATE_GOING ? "Unloading" :
		   mod->state == MODULE_STATE_COMING ? "Loading" :
		   "Live");
	/* Used by oprofile and other similar tools. */
	value = m->private ? NULL : mod->core_layout.base;
	seq_printf(m, " 0x%px", value);

	/* Taints info */
	if (mod->taints)
		seq_printf(m, " %s", module_flags(mod, buf));

	seq_puts(m, "\n");
	return 0;
}

/* Format: modulename size refcount deps address

   Where refcount is a number or -, and deps is a comma-separated list
   of depends or -.
*/
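/* A typical line: "loop 40960 0 - Live 0xffffffffc06b1000", with the
   address shown as zeroes when hidden from unprivileged readers. */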
static const struct seq_operations modules_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= m_show
};

/*
 * This also sets the "private" pointer to non-NULL if the
 * kernel pointers should be hidden (so you can just test
 * "m->private" to see if you should keep the values private).
 *
 * We use the same logic as for /proc/kallsyms.
 */
static int modules_open(struct inode *inode, struct file *file)
{
	int err = seq_open(file, &modules_op);

	if (!err) {
		struct seq_file *m = file->private_data;
		m->private = kallsyms_show_value(file->f_cred) ? NULL : (void *)8ul;
	}

	return err;
}

static const struct file_operations proc_modules_operations = {
	.open		= modules_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static int __init proc_modules_init(void)
{
	proc_create("modules", 0, NULL, &proc_modules_operations);
	return 0;
}
module_init(proc_modules_init);
#endif

/* Given an address, look for it in the module exception tables. */
const struct exception_table_entry *search_module_extables(unsigned long addr)
{
	const struct exception_table_entry *e = NULL;
	struct module *mod;

	preempt_disable();
	mod = __module_address(addr);
	if (!mod)
		goto out;

	if (!mod->num_exentries)
		goto out;

	e = search_extable(mod->extable,
			   mod->num_exentries,
			   addr);
out:
	preempt_enable();

	/*
	 * Now, if we found one, we are running inside it now, hence
	 * we cannot unload the module, hence no refcnt needed.
	 */
	return e;
}

/*
 * is_module_address - is this address inside a module?
 * @addr: the address to check.
 *
 * See is_module_text_address() if you simply want to see if the address
 * is code (not data).
 */
bool is_module_address(unsigned long addr)
{
	bool ret;

	preempt_disable();
	ret = __module_address(addr) != NULL;
	preempt_enable();

	return ret;
}

/*
 * __module_address - get the module which contains an address.
 * @addr: the address.
 *
 * Must be called with preempt disabled or module mutex held so that
 * module doesn't get freed during this.
 */
struct module *__module_address(unsigned long addr)
{
	struct module *mod;

	if (addr < module_addr_min || addr > module_addr_max)
		return NULL;

	module_assert_mutex_or_preempt();

	mod = mod_find(addr);
	if (mod) {
		BUG_ON(!within_module(addr, mod));
		if (mod->state == MODULE_STATE_UNFORMED)
			mod = NULL;
	}
	return mod;
}

/*
 * is_module_text_address - is this address inside module code?
 * @addr: the address to check.
 *
 * See is_module_address() if you simply want to see if the address is
 * anywhere in a module.  See kernel_text_address() for testing if an
 * address corresponds to kernel or module code.
 */
bool is_module_text_address(unsigned long addr)
{
	bool ret;

	preempt_disable();
	ret = __module_text_address(addr) != NULL;
	preempt_enable();

	return ret;
}

/*
 * __module_text_address - get the module whose code contains an address.
 * @addr: the address.
 *
 * Must be called with preempt disabled or module mutex held so that
 * module doesn't get freed during this.
 */
struct module *__module_text_address(unsigned long addr)
{
	struct module *mod = __module_address(addr);
	if (mod) {
		/* Make sure it's within the text section. */
		if (!within(addr, mod->init_layout.base, mod->init_layout.text_size)
		    && !within(addr, mod->core_layout.base, mod->core_layout.text_size))
			mod = NULL;
	}
	return mod;
}

/* Don't grab lock, we're oopsing. */
void print_modules(void)
{
	struct module *mod;
	char buf[MODULE_FLAGS_BUF_SIZE];

	printk(KERN_DEFAULT "Modules linked in:");
	/* Most callers should already have preempt disabled, but make sure */
	preempt_disable();
	list_for_each_entry_rcu(mod, &modules, list) {
		if (mod->state == MODULE_STATE_UNFORMED)
			continue;
		pr_cont(" %s%s", mod->name, module_flags(mod, buf));
	}
	preempt_enable();
	if (last_unloaded_module[0])
		pr_cont(" [last unloaded: %s]", last_unloaded_module);
	pr_cont("\n");
}

#ifdef CONFIG_MODVERSIONS
/* Generate the signature for all relevant module structures here.
 * If these change, we don't want to try to parse the module. */
void module_layout(struct module *mod,
		   struct modversion_info *ver,
		   struct kernel_param *kp,
		   struct kernel_symbol *ks,
		   struct tracepoint * const *tp)
{
}
EXPORT_SYMBOL(module_layout);
#endif