Update Linux to v5.4.2
Change-Id: Idf6911045d9d382da2cfe01b1edff026404ac8fd
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index e23eb9f..f296d89 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -18,8 +18,8 @@
#include <linux/clocksource.h>
#include <linux/sched/task.h>
#include <linux/kallsyms.h>
+#include <linux/security.h>
#include <linux/seq_file.h>
-#include <linux/suspend.h>
#include <linux/tracefs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
@@ -34,12 +34,14 @@
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/rcupdate.h>
+#include <linux/kprobes.h>
#include <trace/events/sched.h>
#include <asm/sections.h>
#include <asm/setup.h>
+#include "ftrace_internal.h"
#include "trace_output.h"
#include "trace_stat.h"
@@ -69,15 +71,16 @@
#define INIT_OPS_HASH(opsname) \
.func_hash = &opsname.local_hash, \
.local_hash.regex_lock = __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
-#define ASSIGN_OPS_HASH(opsname, val) \
- .func_hash = val, \
- .local_hash.regex_lock = __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
#else
#define INIT_OPS_HASH(opsname)
-#define ASSIGN_OPS_HASH(opsname, val)
#endif
-static struct ftrace_ops ftrace_list_end __read_mostly = {
+enum {
+ FTRACE_MODIFY_ENABLE_FL = (1 << 0),
+ FTRACE_MODIFY_MAY_SLEEP_FL = (1 << 1),
+};
+
+struct ftrace_ops ftrace_list_end __read_mostly = {
.func = ftrace_stub,
.flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_STUB,
INIT_OPS_HASH(ftrace_list_end)
@@ -112,11 +115,11 @@
*/
static int ftrace_disabled __read_mostly;
-static DEFINE_MUTEX(ftrace_lock);
+DEFINE_MUTEX(ftrace_lock);
-static struct ftrace_ops __rcu *ftrace_ops_list __read_mostly = &ftrace_list_end;
+struct ftrace_ops __rcu *ftrace_ops_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
-static struct ftrace_ops global_ops;
+struct ftrace_ops global_ops;
#if ARCH_SUPPORTS_FTRACE_OPS
static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
@@ -127,26 +130,6 @@
#define ftrace_ops_list_func ((ftrace_func_t)ftrace_ops_no_ops)
#endif
-/*
- * Traverse the ftrace_global_list, invoking all entries. The reason that we
- * can use rcu_dereference_raw_notrace() is that elements removed from this list
- * are simply leaked, so there is no need to interact with a grace-period
- * mechanism. The rcu_dereference_raw_notrace() calls are needed to handle
- * concurrent insertions into the ftrace_global_list.
- *
- * Silly Alpha and silly pointer-speculation compiler optimizations!
- */
-#define do_for_each_ftrace_op(op, list) \
- op = rcu_dereference_raw_notrace(list); \
- do
-
-/*
- * Optimized for just a single item in the list (as that is the normal case).
- */
-#define while_for_each_ftrace_op(op) \
- while (likely(op = rcu_dereference_raw_notrace((op)->next)) && \
- unlikely((op) != &ftrace_list_end))
-
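
Note: the do_for_each_ftrace_op()/while_for_each_ftrace_op() pair is not deleted here, only moved out of ftrace.c so it can be shared with the split-out fgraph code. The idiom works because the first macro ends in a bare "do" and the second supplies the matching "while" condition. A minimal userspace sketch of the same trick, with hypothetical names (the kernel version terminates on the ftrace_list_end sentinel rather than NULL):

/*
 * Sketch of the do/while macro-pair traversal idiom. Compile with any
 * C compiler; prints 1 2 3.
 */
#include <stdio.h>

struct node { int val; struct node *next; };

/* First macro ends in "do", second supplies the matching "while". */
#define do_for_each_node(n, list) \
	n = (list);               \
	do

#define while_for_each_node(n) \
	while ((n = (n)->next) != NULL)

int main(void)
{
	struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
	struct node *n;

	do_for_each_node(n, &a) {
		printf("%d\n", n->val);
	} while_for_each_node(n);

	return 0;
}
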
static inline void ftrace_ops_init(struct ftrace_ops *ops)
{
#ifdef CONFIG_DYNAMIC_FTRACE
@@ -173,7 +156,7 @@
{
/*
* This function is just a stub to implement a hard force
- * of synchronize_sched(). This requires synchronizing
+ * of synchronize_rcu(). This requires synchronizing
* tasks even in userspace and idle.
*
* Yes, function tracing is rude.
@@ -186,18 +169,6 @@
smp_rmb();
}
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-static void update_function_graph_func(void);
-
-/* Both enabled by default (can be cleared by function_graph tracer flags */
-static bool fgraph_sleep_time = true;
-static bool fgraph_graph_time = true;
-
-#else
-static inline void update_function_graph_func(void) { }
-#endif
-
-
static ftrace_func_t ftrace_ops_get_list_func(struct ftrace_ops *ops)
{
/*
@@ -334,7 +305,7 @@
static void ftrace_update_trampoline(struct ftrace_ops *ops);
-static int __register_ftrace_function(struct ftrace_ops *ops)
+int __register_ftrace_function(struct ftrace_ops *ops)
{
if (ops->flags & FTRACE_OPS_FL_DELETED)
return -EINVAL;
@@ -375,7 +346,7 @@
return 0;
}
-static int __unregister_ftrace_function(struct ftrace_ops *ops)
+int __unregister_ftrace_function(struct ftrace_ops *ops)
{
int ret;
@@ -815,9 +786,16 @@
}
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+static bool fgraph_graph_time = true;
+
+void ftrace_graph_graph_time_control(bool enable)
+{
+ fgraph_graph_time = enable;
+}
+
static int profile_graph_entry(struct ftrace_graph_ent *trace)
{
- int index = current->curr_ret_stack;
+ struct ftrace_ret_stack *ret_stack;
function_profile_call(trace->func, 0, NULL, NULL);
@@ -825,14 +803,16 @@
if (!current->ret_stack)
return 0;
- if (index >= 0 && index < FTRACE_RETFUNC_DEPTH)
- current->ret_stack[index].subtime = 0;
+ ret_stack = ftrace_graph_get_ret_stack(current, 0);
+ if (ret_stack)
+ ret_stack->subtime = 0;
return 1;
}
static void profile_graph_return(struct ftrace_graph_ret *trace)
{
+ struct ftrace_ret_stack *ret_stack;
struct ftrace_profile_stat *stat;
unsigned long long calltime;
struct ftrace_profile *rec;
@@ -850,16 +830,15 @@
calltime = trace->rettime - trace->calltime;
if (!fgraph_graph_time) {
- int index;
-
- index = current->curr_ret_stack;
/* Append this call time to the parent time to subtract */
- if (index)
- current->ret_stack[index - 1].subtime += calltime;
+ ret_stack = ftrace_graph_get_ret_stack(current, 1);
+ if (ret_stack)
+ ret_stack->subtime += calltime;
- if (current->ret_stack[index].subtime < calltime)
- calltime -= current->ret_stack[index].subtime;
+ ret_stack = ftrace_graph_get_ret_stack(current, 0);
+ if (ret_stack && ret_stack->subtime < calltime)
+ calltime -= ret_stack->subtime;
else
calltime = 0;
}
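
The profiler no longer indexes current->ret_stack by hand: ftrace_graph_get_ret_stack(task, idx) returns the frame idx levels below the top of the shadow stack (0 = current frame, 1 = its caller), or NULL. A hedged sketch of a return callback using the accessor; note that the subtime field only exists with CONFIG_FUNCTION_PROFILER:

/*
 * Sketch of reading a shadow-stack frame through the new accessor
 * instead of touching current->curr_ret_stack directly. Assumes only
 * the semantics shown in this hunk (0 = current frame).
 */
static void my_graph_return(struct ftrace_graph_ret *trace)
{
	struct ftrace_ret_stack *frame;

	frame = ftrace_graph_get_ret_stack(current, 0);
	if (frame)	/* NULL if the stack is empty or idx is out of range */
		pr_debug("func %ps subtime %llu\n",
			 (void *)frame->func, frame->subtime);
}
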
@@ -874,15 +853,19 @@
local_irq_restore(flags);
}
+static struct fgraph_ops fprofiler_ops = {
+ .entryfunc = &profile_graph_entry,
+ .retfunc = &profile_graph_return,
+};
+
static int register_ftrace_profiler(void)
{
- return register_ftrace_graph(&profile_graph_return,
- &profile_graph_entry);
+ return register_ftrace_graph(&fprofiler_ops);
}
static void unregister_ftrace_profiler(void)
{
- unregister_ftrace_graph();
+ unregister_ftrace_graph(&fprofiler_ops);
}
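
The registration API changes shape here: callbacks now travel in a struct fgraph_ops, and unregister_ftrace_graph() names the same struct, so the core can tell clients apart instead of assuming a single global pair of hooks. An in-tree-style sketch of a client under the new API (names are illustrative; register_ftrace_graph() is an in-kernel interface, not exported to modules in this release):

#include <linux/ftrace.h>

static int my_entry(struct ftrace_graph_ent *trace)
{
	return 1;		/* nonzero: trace this function */
}

static void my_return(struct ftrace_graph_ret *trace)
{
}

static struct fgraph_ops my_gops = {
	.entryfunc	= my_entry,
	.retfunc	= my_return,
};

static int my_tracer_enable(void)
{
	return register_ftrace_graph(&my_gops);
}

static void my_tracer_disable(void)
{
	unregister_ftrace_graph(&my_gops);
}
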
#else
static struct ftrace_ops ftrace_profile_ops __read_mostly = {
@@ -934,7 +917,7 @@
ftrace_profile_enabled = 0;
/*
* unregister_ftrace_profiler calls stop_machine
- * so this acts like an synchronize_sched.
+ * so this acts like a synchronize_rcu().
*/
unregister_ftrace_profiler();
}
@@ -1021,12 +1004,6 @@
}
#endif /* CONFIG_FUNCTION_PROFILER */
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-static int ftrace_graph_active;
-#else
-# define ftrace_graph_active 0
-#endif
-
#ifdef CONFIG_DYNAMIC_FTRACE
static struct ftrace_ops *removed_ops;
@@ -1067,7 +1044,7 @@
};
#define EMPTY_HASH ((struct ftrace_hash *)&empty_hash)
-static struct ftrace_ops global_ops = {
+struct ftrace_ops global_ops = {
.func = ftrace_stub,
.local_hash.notrace_hash = EMPTY_HASH,
.local_hash.filter_hash = EMPTY_HASH,
@@ -1086,7 +1063,7 @@
/*
* Some of the ops may be dynamically allocated,
- * they are freed after a synchronize_sched().
+ * they are freed after a synchronize_rcu().
*/
preempt_disable_notrace();
@@ -1286,7 +1263,7 @@
{
if (!hash || hash == EMPTY_HASH)
return;
- call_rcu_sched(&hash->rcu, __free_ftrace_hash_rcu);
+ call_rcu(&hash->rcu, __free_ftrace_hash_rcu);
}
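
The call_rcu_sched() -> call_rcu() conversions throughout this patch follow from the RCU flavor consolidation (v4.20+): plain call_rcu() now also covers readers in preempt-disabled sections, so the sched flavor is gone. The underlying deferred-free pattern, as a generic sketch ("my_obj" is illustrative, not kernel API):

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct my_obj {
	int data;
	struct rcu_head rcu;	/* embedded, like ftrace_hash->rcu */
};

static void my_obj_free_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct my_obj, rcu));
}

static void my_obj_free(struct my_obj *obj)
{
	/*
	 * Readers may still hold a reference under rcu_read_lock()
	 * (or, post-consolidation, any non-preemptible section);
	 * defer the actual free past a grace period.
	 */
	call_rcu(&obj->rcu, my_obj_free_rcu);
}
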
void ftrace_free_filter(struct ftrace_ops *ops)
@@ -1501,9 +1478,9 @@
* the ip is not in the ops->notrace_hash.
*
* This needs to be called with preemption disabled as
- * the hashes are freed with call_rcu_sched().
+ * the hashes are freed with call_rcu().
*/
-static int
+int
ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
{
struct ftrace_ops_hash hash;
@@ -1646,6 +1623,11 @@
return keep_regs;
}
+static struct ftrace_ops *
+ftrace_find_tramp_ops_any(struct dyn_ftrace *rec);
+static struct ftrace_ops *
+ftrace_find_tramp_ops_next(struct dyn_ftrace *rec, struct ftrace_ops *ops);
+
static bool __ftrace_hash_rec_update(struct ftrace_ops *ops,
int filter_hash,
bool inc)
@@ -1774,15 +1756,17 @@
}
/*
- * If the rec had TRAMP enabled, then it needs to
- * be cleared. As TRAMP can only be enabled iff
- * there is only a single ops attached to it.
- * In otherwords, always disable it on decrementing.
- * In the future, we may set it if rec count is
- * decremented to one, and the ops that is left
- * has a trampoline.
+ * The TRAMP needs to be set only if rec count
+ * is decremented to one, and the ops that is
+ * left has a trampoline, as TRAMP can only be
+ * enabled if there is only a single ops attached
+ * to it.
*/
- rec->flags &= ~FTRACE_FL_TRAMP;
+ if (ftrace_rec_count(rec) == 1 &&
+ ftrace_find_tramp_ops_any(rec))
+ rec->flags |= FTRACE_FL_TRAMP;
+ else
+ rec->flags &= ~FTRACE_FL_TRAMP;
/*
* flags will be cleared in ftrace_check_record()
@@ -1792,7 +1776,7 @@
count++;
/* Must match FTRACE_UPDATE_CALLS in ftrace_modify_all_code() */
- update |= ftrace_test_record(rec, 1) != FTRACE_UPDATE_IGNORE;
+ update |= ftrace_test_record(rec, true) != FTRACE_UPDATE_IGNORE;
/* Shortcut, if we handled all records, we are done. */
if (!all && count == hash->count)
@@ -1975,11 +1959,6 @@
printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
}
-static struct ftrace_ops *
-ftrace_find_tramp_ops_any(struct dyn_ftrace *rec);
-static struct ftrace_ops *
-ftrace_find_tramp_ops_next(struct dyn_ftrace *rec, struct ftrace_ops *ops);
-
enum ftrace_bug_type ftrace_bug_type;
const void *ftrace_expected;
@@ -2013,7 +1992,7 @@
* modifying the code. @failed should be one of either:
* EFAULT - if the problem happens on reading the @ip address
* EINVAL - if what is read at @ip is not what was expected
- * EPERM - if the problem happens on writting to the @ip address
+ * EPERM - if the problem happens on writing to the @ip address
*/
void ftrace_bug(int failed, struct dyn_ftrace *rec)
{
@@ -2071,7 +2050,7 @@
}
}
-static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update)
+static int ftrace_check_record(struct dyn_ftrace *rec, bool enable, bool update)
{
unsigned long flag = 0UL;
@@ -2170,28 +2149,28 @@
/**
* ftrace_update_record, set a record that now is tracing or not
* @rec: the record to update
- * @enable: set to 1 if the record is tracing, zero to force disable
+ * @enable: set to true if the record is tracing, false to force disable
*
* The records that represent all functions that can be traced need
* to be updated when tracing has been enabled.
*/
-int ftrace_update_record(struct dyn_ftrace *rec, int enable)
+int ftrace_update_record(struct dyn_ftrace *rec, bool enable)
{
- return ftrace_check_record(rec, enable, 1);
+ return ftrace_check_record(rec, enable, true);
}
/**
* ftrace_test_record, check if the record has been enabled or not
* @rec: the record to test
- * @enable: set to 1 to check if enabled, 0 if it is disabled
+ * @enable: set to true to check if enabled, false if it is disabled
*
* The arch code may need to test if a record is already set to
* tracing to determine how to modify the function code that it
* represents.
*/
-int ftrace_test_record(struct dyn_ftrace *rec, int enable)
+int ftrace_test_record(struct dyn_ftrace *rec, bool enable)
{
- return ftrace_check_record(rec, enable, 0);
+ return ftrace_check_record(rec, enable, false);
}
static struct ftrace_ops *
@@ -2380,7 +2359,7 @@
}
static int
-__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
+__ftrace_replace_code(struct dyn_ftrace *rec, bool enable)
{
unsigned long ftrace_old_addr;
unsigned long ftrace_addr;
@@ -2412,13 +2391,15 @@
return ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr);
}
- return -1; /* unknow ftrace bug */
+ return -1; /* unknown ftrace bug */
}
-void __weak ftrace_replace_code(int enable)
+void __weak ftrace_replace_code(int mod_flags)
{
struct dyn_ftrace *rec;
struct ftrace_page *pg;
+ bool enable = mod_flags & FTRACE_MODIFY_ENABLE_FL;
+ int schedulable = mod_flags & FTRACE_MODIFY_MAY_SLEEP_FL;
int failed;
if (unlikely(ftrace_disabled))
@@ -2435,6 +2416,8 @@
/* Stop processing */
return;
}
+ if (schedulable)
+ cond_resched();
} while_for_each_ftrace_rec();
}
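
Since ftrace_replace_code() is weak and its parameter changes meaning from a boolean to a flag word, arch overrides have to decode it the same way. A hedged sketch of what an override might look like, assuming the FTRACE_MODIFY_* flags are visible where the override lives; the actual patching is elided and arch-specific:

void ftrace_replace_code(int mod_flags)
{
	bool enable = mod_flags & FTRACE_MODIFY_ENABLE_FL;
	bool schedulable = mod_flags & FTRACE_MODIFY_MAY_SLEEP_FL;
	struct ftrace_rec_iter *iter;

	for_ftrace_rec_iter(iter) {
		struct dyn_ftrace *rec = ftrace_rec_iter_record(iter);

		/* ... patch rec according to 'enable' ... */

		if (schedulable)
			cond_resched();	/* only when FTRACE_MAY_SLEEP was passed */
	}
}
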
@@ -2548,8 +2531,12 @@
void ftrace_modify_all_code(int command)
{
int update = command & FTRACE_UPDATE_TRACE_FUNC;
+ int mod_flags = 0;
int err = 0;
+ if (command & FTRACE_MAY_SLEEP)
+ mod_flags = FTRACE_MODIFY_MAY_SLEEP_FL;
+
/*
* If the ftrace_caller calls a ftrace_ops func directly,
* we need to make sure that it only traces functions it
@@ -2567,9 +2554,9 @@
}
if (command & FTRACE_UPDATE_CALLS)
- ftrace_replace_code(1);
+ ftrace_replace_code(mod_flags | FTRACE_MODIFY_ENABLE_FL);
else if (command & FTRACE_DISABLE_CALLS)
- ftrace_replace_code(0);
+ ftrace_replace_code(mod_flags);
if (update && ftrace_trace_function != ftrace_ops_list_func) {
function_trace_op = set_function_trace_op;
@@ -2682,7 +2669,7 @@
update_all_ops = false;
}
-static int ftrace_startup(struct ftrace_ops *ops, int command)
+int ftrace_startup(struct ftrace_ops *ops, int command)
{
int ret;
@@ -2724,7 +2711,7 @@
return 0;
}
-static int ftrace_shutdown(struct ftrace_ops *ops, int command)
+int ftrace_shutdown(struct ftrace_ops *ops, int command)
{
int ret;
@@ -2828,7 +2815,7 @@
* synchronize_rcu_tasks() will wait for those tasks to
* execute and either schedule voluntarily or enter user space.
*/
- if (IS_ENABLED(CONFIG_PREEMPT))
+ if (IS_ENABLED(CONFIG_PREEMPTION))
synchronize_rcu_tasks();
free_ops:
@@ -2951,14 +2938,13 @@
p = &pg->records[i];
p->flags = rec_flags;
-#ifndef CC_USING_NOP_MCOUNT
/*
* Do the initial record conversion from mcount jump
* to the NOP instructions.
*/
- if (!ftrace_code_disable(mod, p))
+ if (!__is_defined(CC_USING_NOP_MCOUNT) &&
+ !ftrace_code_disable(mod, p))
break;
-#endif
update_cnt++;
}
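
Replacing the #ifndef with __is_defined() keeps the loop body compiled (and type-checked) in both configurations while the optimizer drops the dead branch. __is_defined() comes from include/linux/kconfig.h and works by macro argument-counting; a condensed, self-contained demonstration of the trick (it detects macros defined as 1, which is what -DCC_USING_NOP_MCOUNT gives you):

#include <stdio.h>

#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(__ignored, val, ...) val
#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
#define __is_defined(x) ___is_defined(x)

#define FOO 1
/* BAR deliberately left undefined */

int main(void)
{
	if (__is_defined(FOO))		/* expands to 1 */
		printf("FOO is defined\n");
	if (!__is_defined(BAR))		/* expands to 0 */
		printf("BAR is not defined\n");
	return 0;
}
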
@@ -3017,7 +3003,7 @@
int cnt;
if (!num_to_init)
- return 0;
+ return NULL;
start_pg = pg = kzalloc(sizeof(*pg), GFP_KERNEL);
if (!pg)
@@ -3110,6 +3096,14 @@
hnd = &iter->probe_entry->hlist;
hash = iter->probe->ops.func_hash->filter_hash;
+
+ /*
+ * A probe being registered may temporarily have an empty hash
+ * and it's at the end of the func_probes list.
+ */
+ if (!hash || hash == EMPTY_HASH)
+ return NULL;
+
size = 1 << hash->size_bits;
retry:
@@ -3493,6 +3487,11 @@
ftrace_avail_open(struct inode *inode, struct file *file)
{
struct ftrace_iterator *iter;
+ int ret;
+
+ ret = security_locked_down(LOCKDOWN_TRACEFS);
+ if (ret)
+ return ret;
if (unlikely(ftrace_disabled))
return -ENODEV;
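
This is the shape of the kernel-lockdown checks added across the tracefs handlers in this update: ask the LSM first, before allocating or exposing anything. A sketch of the pattern in a hypothetical open handler (security_locked_down() returns 0 or -EPERM in confidentiality mode):

#include <linux/security.h>

static int my_tracefs_open(struct inode *inode, struct file *file)
{
	int ret;

	ret = security_locked_down(LOCKDOWN_TRACEFS);
	if (ret)		/* -EPERM when the kernel is locked down */
		return ret;

	/* ... normal open path ... */
	return 0;
}
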
@@ -3512,6 +3511,15 @@
{
struct ftrace_iterator *iter;
+ /*
+ * This shows us what functions are currently being
+ * traced and by what. Not sure if we want lockdown
+ * to hide such critical information for an admin.
+ * Although, perhaps it can show information we don't
+ * want people to see, but if something is tracing
+ * something, we probably want to know about it.
+ */
+
iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
if (!iter)
return -ENOMEM;
@@ -3547,21 +3555,22 @@
struct ftrace_hash *hash;
struct list_head *mod_head;
struct trace_array *tr = ops->private;
- int ret = 0;
+ int ret = -ENOMEM;
ftrace_ops_init(ops);
if (unlikely(ftrace_disabled))
return -ENODEV;
+ if (tracing_check_open_get_tr(tr))
+ return -ENODEV;
+
iter = kzalloc(sizeof(*iter), GFP_KERNEL);
if (!iter)
- return -ENOMEM;
+ goto out;
- if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) {
- kfree(iter);
- return -ENOMEM;
- }
+ if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX))
+ goto out;
iter->ops = ops;
iter->flags = flag;
@@ -3591,13 +3600,13 @@
if (!iter->hash) {
trace_parser_put(&iter->parser);
- kfree(iter);
- ret = -ENOMEM;
goto out_unlock;
}
} else
iter->hash = hash;
+ ret = 0;
+
if (file->f_mode & FMODE_READ) {
iter->pg = ftrace_pages_start;
@@ -3609,7 +3618,6 @@
/* Failed */
free_ftrace_hash(iter->hash);
trace_parser_put(&iter->parser);
- kfree(iter);
}
} else
file->private_data = iter;
@@ -3617,6 +3625,13 @@
out_unlock:
mutex_unlock(&ops->func_hash->regex_lock);
+ out:
+ if (ret) {
+ kfree(iter);
+ if (tr)
+ trace_array_put(tr);
+ }
+
return ret;
}
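
The rework above defaults ret to -ENOMEM and funnels every failure through a single out label that frees iter and drops the trace_array reference, instead of duplicating kfree() at each exit. The same centralized-cleanup shape in miniature (my_ctx/my_parser_init are illustrative names, not kernel API):

static int my_open(struct my_ctx **out)
{
	struct my_ctx *ctx;
	int ret = -ENOMEM;	/* default error; success path clears it */

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		goto out;

	if (my_parser_init(&ctx->parser))
		goto out;	/* ret is still -ENOMEM */

	ret = 0;
	*out = ctx;
out:
	if (ret)
		kfree(ctx);	/* kfree(NULL) is a no-op */
	return ret;
}
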
@@ -3625,6 +3640,7 @@
{
struct ftrace_ops *ops = inode->i_private;
+ /* Checks for tracefs lockdown */
return ftrace_regex_open(ops,
FTRACE_ITER_FILTER | FTRACE_ITER_DO_PROBES,
inode, file);
@@ -3635,6 +3651,7 @@
{
struct ftrace_ops *ops = inode->i_private;
+ /* Checks for tracefs lockdown */
return ftrace_regex_open(ops, FTRACE_ITER_NOTRACE,
inode, file);
}
@@ -3715,6 +3732,31 @@
}
static int
+add_rec_by_index(struct ftrace_hash *hash, struct ftrace_glob *func_g,
+ int clear_filter)
+{
+ long index = simple_strtoul(func_g->search, NULL, 0);
+ struct ftrace_page *pg;
+ struct dyn_ftrace *rec;
+
+ /* The index starts at 1 */
+ if (--index < 0)
+ return 0;
+
+ do_for_each_ftrace_rec(pg, rec) {
+ if (pg->index <= index) {
+ index -= pg->index;
+ /* this is a double loop, break goes to the next page */
+ break;
+ }
+ rec = &pg->records[index];
+ enter_record(hash, rec, clear_filter);
+ return 1;
+ } while_for_each_ftrace_rec();
+ return 0;
+}
+
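
Worth noting for review: do_for_each_ftrace_rec() is a nested loop over pages and then records, so the bare "break" above only exits the inner loop and moves on to the next page, while index counts down by whole pages. A standalone sketch of the same arithmetic with the nesting flattened ("continue" plays the role of the diff's "break"):

#include <stdio.h>

#define NPAGES 3

struct rec_page { int index; int records[8]; };	/* index = used slots */

static int find_by_index(struct rec_page *pages, long index)
{
	int p;

	for (p = 0; p < NPAGES; p++) {
		struct rec_page *pg = &pages[p];

		if (pg->index <= index) {
			index -= pg->index;	/* skip this whole page */
			continue;
		}
		return pg->records[index];
	}
	return -1;	/* index past the last record */
}

int main(void)
{
	struct rec_page pages[NPAGES] = {
		{ 2, { 10, 11 } }, { 3, { 20, 21, 22 } }, { 1, { 30 } },
	};

	printf("%d\n", find_by_index(pages, 4));	/* prints 22 */
	return 0;
}
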
+static int
ftrace_match_record(struct dyn_ftrace *rec, struct ftrace_glob *func_g,
struct ftrace_glob *mod_g, int exclude_mod)
{
@@ -3782,6 +3824,11 @@
if (unlikely(ftrace_disabled))
goto out_unlock;
+ if (func_g.type == MATCH_INDEX) {
+ found = add_rec_by_index(hash, &func_g, clear_filter);
+ goto out_unlock;
+ }
+
do_for_each_ftrace_rec(pg, rec) {
if (rec->flags & FTRACE_FL_DISABLED)
@@ -3862,7 +3909,7 @@
static bool module_exists(const char *module)
{
/* All modules have the symbol __this_module */
- const char this_mod[] = "__this_module";
+ static const char this_mod[] = "__this_module";
char modname[MAX_PARAM_PREFIX_LEN + sizeof(this_mod) + 2];
unsigned long val;
int n;
@@ -4207,10 +4254,13 @@
struct ftrace_func_entry *entry;
struct ftrace_func_map *map;
struct hlist_head *hhd;
- int size = 1 << mapper->hash.size_bits;
- int i;
+ int size, i;
+
+ if (!mapper)
+ return;
if (free_func && mapper->hash.count) {
+ size = 1 << mapper->hash.size_bits;
for (i = 0; i < size; i++) {
hhd = &mapper->hash.buckets[i];
hlist_for_each_entry(entry, hhd, hlist) {
@@ -4302,12 +4352,21 @@
mutex_unlock(&ftrace_lock);
+ /*
+ * Note, there's a small window here where the func_hash->filter_hash
+ * may be NULL or empty. Need to be careful when reading the loop.
+ */
mutex_lock(&probe->ops.func_hash->regex_lock);
orig_hash = &probe->ops.func_hash->filter_hash;
old_hash = *orig_hash;
hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash);
+ if (!hash) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
ret = ftrace_match_records(hash, glob, strlen(glob));
/* Nothing found? */
@@ -4496,7 +4555,7 @@
if (ftrace_enabled && !ftrace_hash_empty(hash))
ftrace_run_modify_code(&probe->ops, FTRACE_UPDATE_CALLS,
&old_hash_ops);
- synchronize_sched();
+ synchronize_rcu();
hlist_for_each_entry_safe(entry, tmp, &hhd, hlist) {
hlist_del(&entry->hlist);
@@ -4738,7 +4797,7 @@
ftrace_set_addr(struct ftrace_ops *ops, unsigned long ip, int remove,
int reset, int enable)
{
- return ftrace_set_hash(ops, 0, 0, ip, remove, reset, enable);
+ return ftrace_set_hash(ops, NULL, 0, ip, remove, reset, enable);
}
/**
@@ -5002,6 +5061,8 @@
mutex_unlock(&iter->ops->func_hash->regex_lock);
free_ftrace_hash(iter->hash);
+ if (iter->tr)
+ trace_array_put(iter->tr);
kfree(iter);
return 0;
@@ -5159,9 +5220,13 @@
__ftrace_graph_open(struct inode *inode, struct file *file,
struct ftrace_graph_data *fgd)
{
- int ret = 0;
+ int ret;
struct ftrace_hash *new_hash = NULL;
+ ret = security_locked_down(LOCKDOWN_TRACEFS);
+ if (ret)
+ return ret;
+
if (file->f_mode & FMODE_WRITE) {
const int size_bits = FTRACE_HASH_DEFAULT_BITS;
@@ -5314,7 +5379,7 @@
mutex_unlock(&graph_lock);
/* Wait till all users are no longer using the old hash */
- synchronize_sched();
+ synchronize_rcu();
free_ftrace_hash(old_hash);
}
@@ -5446,7 +5511,7 @@
/*
* The name "destroy_filter_files" is really a misnomer. Although
- * in the future, it may actualy delete the files, but this is
+ * in the future, it may actually delete the files, but this is
* really intended to make sure the ops passed in are disabled
* and that when this function returns, the caller is free to
* free the ops.
@@ -5708,7 +5773,7 @@
list_for_each_entry_safe(mod_map, n, &ftrace_mod_maps, list) {
if (mod_map->mod == mod) {
list_del_rcu(&mod_map->list);
- call_rcu_sched(&mod_map->rcu, ftrace_free_mod_map);
+ call_rcu(&mod_map->rcu, ftrace_free_mod_map);
break;
}
}
@@ -5769,7 +5834,7 @@
/*
* If the tracing is enabled, go ahead and enable the record.
*
- * The reason not to enable the record immediatelly is the
+ * The reason not to enable the record immediately is the
* inherent check of ftrace_make_nop/ftrace_make_call for
* correct previous instructions. Making first the NOP
* conversion puts the module to the correct state, thus
@@ -5928,7 +5993,7 @@
struct ftrace_mod_map *mod_map;
const char *ret = NULL;
- /* mod_map is freed via call_rcu_sched() */
+ /* mod_map is freed via call_rcu() */
preempt_disable();
list_for_each_entry_rcu(mod_map, &ftrace_mod_maps, list) {
ret = ftrace_func_address_lookup(mod_map, addr, size, off, sym);
@@ -6001,11 +6066,7 @@
{
struct ftrace_func_entry *entry;
- if (ftrace_hash_empty(hash))
- return;
-
- entry = __ftrace_lookup_ip(hash, func->ip);
-
+ entry = ftrace_lookup_ip(hash, func->ip);
/*
* Do not allow this rec to match again.
* Yeah, it may waste some memory, but will be removed
@@ -6178,7 +6239,7 @@
}
#else
-static struct ftrace_ops global_ops = {
+struct ftrace_ops global_ops = {
.func = ftrace_stub,
.flags = FTRACE_OPS_FL_RECURSION_SAFE |
FTRACE_OPS_FL_INITIALIZED |
@@ -6195,31 +6256,10 @@
static inline int ftrace_init_dyn_tracefs(struct dentry *d_tracer) { return 0; }
static inline void ftrace_startup_enable(int command) { }
static inline void ftrace_startup_all(int command) { }
-/* Keep as macros so we do not need to define the commands */
-# define ftrace_startup(ops, command) \
- ({ \
- int ___ret = __register_ftrace_function(ops); \
- if (!___ret) \
- (ops)->flags |= FTRACE_OPS_FL_ENABLED; \
- ___ret; \
- })
-# define ftrace_shutdown(ops, command) \
- ({ \
- int ___ret = __unregister_ftrace_function(ops); \
- if (!___ret) \
- (ops)->flags &= ~FTRACE_OPS_FL_ENABLED; \
- ___ret; \
- })
# define ftrace_startup_sysctl() do { } while (0)
# define ftrace_shutdown_sysctl() do { } while (0)
-static inline int
-ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
-{
- return 1;
-}
-
static void ftrace_update_trampoline(struct ftrace_ops *ops)
{
}
@@ -6250,7 +6290,7 @@
tr->ops->func = ftrace_stub;
}
-static inline void
+static nokprobe_inline void
__ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *ignored, struct pt_regs *regs)
{
@@ -6263,11 +6303,14 @@
/*
* Some of the ops may be dynamically allocated,
- * they must be freed after a synchronize_sched().
+ * they must be freed after a synchronize_rcu().
*/
preempt_disable_notrace();
do_for_each_ftrace_op(op, ftrace_ops_list) {
+ /* Stub functions don't need to be called or tested */
+ if (op->flags & FTRACE_OPS_FL_STUB)
+ continue;
/*
* Check the following for each ops before calling their func:
* if RCU flag is set, then rcu_is_watching() must be true
@@ -6310,11 +6353,13 @@
{
__ftrace_ops_list_func(ip, parent_ip, NULL, regs);
}
+NOKPROBE_SYMBOL(ftrace_ops_list_func);
#else
static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip)
{
__ftrace_ops_list_func(ip, parent_ip, NULL, NULL);
}
+NOKPROBE_SYMBOL(ftrace_ops_no_ops);
#endif
/*
@@ -6341,6 +6386,7 @@
preempt_enable_notrace();
trace_clear_recursion(bit);
}
+NOKPROBE_SYMBOL(ftrace_ops_assist_func);
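
The NOKPROBE_SYMBOL() annotations added in this hunk keep kprobes out of the ftrace dispatch path: since kprobes can itself sit on top of ftrace, a probe firing inside these helpers could recurse. nokprobe_inline keeps inlined copies of a shared helper out of probe range too, while NOKPROBE_SYMBOL() blacklists the emitted symbol. The pattern, sketched with illustrative names:

#include <linux/kprobes.h>

static nokprobe_inline void helper(unsigned long ip)
{
	/* ... work shared by the entry points, inlined into them ... */
}

static void entry_point(unsigned long ip)
{
	helper(ip);
}
NOKPROBE_SYMBOL(entry_point);
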
/**
* ftrace_ops_get_func - get the function a trampoline should call
@@ -6434,7 +6480,7 @@
rcu_assign_pointer(tr->function_pids, NULL);
/* Wait till all users are no longer using pid filtering */
- synchronize_sched();
+ synchronize_rcu();
trace_free_pid_list(pid_list);
}
@@ -6521,8 +6567,9 @@
struct seq_file *m;
int ret = 0;
- if (trace_array_get(tr) < 0)
- return -ENODEV;
+ ret = tracing_check_open_get_tr(tr);
+ if (ret)
+ return ret;
if ((file->f_mode & FMODE_WRITE) &&
(file->f_flags & O_TRUNC))
@@ -6581,7 +6628,7 @@
rcu_assign_pointer(tr->function_pids, pid_list);
if (filtered_pids) {
- synchronize_sched();
+ synchronize_rcu();
trace_free_pid_list(filtered_pids);
} else if (pid_list) {
/* Register a probe to set whether to ignore the tracing of a task */
@@ -6746,353 +6793,3 @@
mutex_unlock(&ftrace_lock);
return ret;
}
-
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-
-static struct ftrace_ops graph_ops = {
- .func = ftrace_stub,
- .flags = FTRACE_OPS_FL_RECURSION_SAFE |
- FTRACE_OPS_FL_INITIALIZED |
- FTRACE_OPS_FL_PID |
- FTRACE_OPS_FL_STUB,
-#ifdef FTRACE_GRAPH_TRAMP_ADDR
- .trampoline = FTRACE_GRAPH_TRAMP_ADDR,
- /* trampoline_size is only needed for dynamically allocated tramps */
-#endif
- ASSIGN_OPS_HASH(graph_ops, &global_ops.local_hash)
-};
-
-void ftrace_graph_sleep_time_control(bool enable)
-{
- fgraph_sleep_time = enable;
-}
-
-void ftrace_graph_graph_time_control(bool enable)
-{
- fgraph_graph_time = enable;
-}
-
-int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
-{
- return 0;
-}
-
-/* The callbacks that hook a function */
-trace_func_graph_ret_t ftrace_graph_return =
- (trace_func_graph_ret_t)ftrace_stub;
-trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
-static trace_func_graph_ent_t __ftrace_graph_entry = ftrace_graph_entry_stub;
-
-/* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
-static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
-{
- int i;
- int ret = 0;
- int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
- struct task_struct *g, *t;
-
- for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
- ret_stack_list[i] =
- kmalloc_array(FTRACE_RETFUNC_DEPTH,
- sizeof(struct ftrace_ret_stack),
- GFP_KERNEL);
- if (!ret_stack_list[i]) {
- start = 0;
- end = i;
- ret = -ENOMEM;
- goto free;
- }
- }
-
- read_lock(&tasklist_lock);
- do_each_thread(g, t) {
- if (start == end) {
- ret = -EAGAIN;
- goto unlock;
- }
-
- if (t->ret_stack == NULL) {
- atomic_set(&t->tracing_graph_pause, 0);
- atomic_set(&t->trace_overrun, 0);
- t->curr_ret_stack = -1;
- t->curr_ret_depth = -1;
- /* Make sure the tasks see the -1 first: */
- smp_wmb();
- t->ret_stack = ret_stack_list[start++];
- }
- } while_each_thread(g, t);
-
-unlock:
- read_unlock(&tasklist_lock);
-free:
- for (i = start; i < end; i++)
- kfree(ret_stack_list[i]);
- return ret;
-}
-
-static void
-ftrace_graph_probe_sched_switch(void *ignore, bool preempt,
- struct task_struct *prev, struct task_struct *next)
-{
- unsigned long long timestamp;
- int index;
-
- /*
- * Does the user want to count the time a function was asleep.
- * If so, do not update the time stamps.
- */
- if (fgraph_sleep_time)
- return;
-
- timestamp = trace_clock_local();
-
- prev->ftrace_timestamp = timestamp;
-
- /* only process tasks that we timestamped */
- if (!next->ftrace_timestamp)
- return;
-
- /*
- * Update all the counters in next to make up for the
- * time next was sleeping.
- */
- timestamp -= next->ftrace_timestamp;
-
- for (index = next->curr_ret_stack; index >= 0; index--)
- next->ret_stack[index].calltime += timestamp;
-}
-
-/* Allocate a return stack for each task */
-static int start_graph_tracing(void)
-{
- struct ftrace_ret_stack **ret_stack_list;
- int ret, cpu;
-
- ret_stack_list = kmalloc_array(FTRACE_RETSTACK_ALLOC_SIZE,
- sizeof(struct ftrace_ret_stack *),
- GFP_KERNEL);
-
- if (!ret_stack_list)
- return -ENOMEM;
-
- /* The cpu_boot init_task->ret_stack will never be freed */
- for_each_online_cpu(cpu) {
- if (!idle_task(cpu)->ret_stack)
- ftrace_graph_init_idle_task(idle_task(cpu), cpu);
- }
-
- do {
- ret = alloc_retstack_tasklist(ret_stack_list);
- } while (ret == -EAGAIN);
-
- if (!ret) {
- ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
- if (ret)
- pr_info("ftrace_graph: Couldn't activate tracepoint"
- " probe to kernel_sched_switch\n");
- }
-
- kfree(ret_stack_list);
- return ret;
-}
-
-/*
- * Hibernation protection.
- * The state of the current task is too much unstable during
- * suspend/restore to disk. We want to protect against that.
- */
-static int
-ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
- void *unused)
-{
- switch (state) {
- case PM_HIBERNATION_PREPARE:
- pause_graph_tracing();
- break;
-
- case PM_POST_HIBERNATION:
- unpause_graph_tracing();
- break;
- }
- return NOTIFY_DONE;
-}
-
-static int ftrace_graph_entry_test(struct ftrace_graph_ent *trace)
-{
- if (!ftrace_ops_test(&global_ops, trace->func, NULL))
- return 0;
- return __ftrace_graph_entry(trace);
-}
-
-/*
- * The function graph tracer should only trace the functions defined
- * by set_ftrace_filter and set_ftrace_notrace. If another function
- * tracer ops is registered, the graph tracer requires testing the
- * function against the global ops, and not just trace any function
- * that any ftrace_ops registered.
- */
-static void update_function_graph_func(void)
-{
- struct ftrace_ops *op;
- bool do_test = false;
-
- /*
- * The graph and global ops share the same set of functions
- * to test. If any other ops is on the list, then
- * the graph tracing needs to test if its the function
- * it should call.
- */
- do_for_each_ftrace_op(op, ftrace_ops_list) {
- if (op != &global_ops && op != &graph_ops &&
- op != &ftrace_list_end) {
- do_test = true;
- /* in double loop, break out with goto */
- goto out;
- }
- } while_for_each_ftrace_op(op);
- out:
- if (do_test)
- ftrace_graph_entry = ftrace_graph_entry_test;
- else
- ftrace_graph_entry = __ftrace_graph_entry;
-}
-
-static struct notifier_block ftrace_suspend_notifier = {
- .notifier_call = ftrace_suspend_notifier_call,
-};
-
-int register_ftrace_graph(trace_func_graph_ret_t retfunc,
- trace_func_graph_ent_t entryfunc)
-{
- int ret = 0;
-
- mutex_lock(&ftrace_lock);
-
- /* we currently allow only one tracer registered at a time */
- if (ftrace_graph_active) {
- ret = -EBUSY;
- goto out;
- }
-
- register_pm_notifier(&ftrace_suspend_notifier);
-
- ftrace_graph_active++;
- ret = start_graph_tracing();
- if (ret) {
- ftrace_graph_active--;
- goto out;
- }
-
- ftrace_graph_return = retfunc;
-
- /*
- * Update the indirect function to the entryfunc, and the
- * function that gets called to the entry_test first. Then
- * call the update fgraph entry function to determine if
- * the entryfunc should be called directly or not.
- */
- __ftrace_graph_entry = entryfunc;
- ftrace_graph_entry = ftrace_graph_entry_test;
- update_function_graph_func();
-
- ret = ftrace_startup(&graph_ops, FTRACE_START_FUNC_RET);
-out:
- mutex_unlock(&ftrace_lock);
- return ret;
-}
-
-void unregister_ftrace_graph(void)
-{
- mutex_lock(&ftrace_lock);
-
- if (unlikely(!ftrace_graph_active))
- goto out;
-
- ftrace_graph_active--;
- ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
- ftrace_graph_entry = ftrace_graph_entry_stub;
- __ftrace_graph_entry = ftrace_graph_entry_stub;
- ftrace_shutdown(&graph_ops, FTRACE_STOP_FUNC_RET);
- unregister_pm_notifier(&ftrace_suspend_notifier);
- unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
-
- out:
- mutex_unlock(&ftrace_lock);
-}
-
-static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);
-
-static void
-graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
-{
- atomic_set(&t->tracing_graph_pause, 0);
- atomic_set(&t->trace_overrun, 0);
- t->ftrace_timestamp = 0;
- /* make curr_ret_stack visible before we add the ret_stack */
- smp_wmb();
- t->ret_stack = ret_stack;
-}
-
-/*
- * Allocate a return stack for the idle task. May be the first
- * time through, or it may be done by CPU hotplug online.
- */
-void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
-{
- t->curr_ret_stack = -1;
- t->curr_ret_depth = -1;
- /*
- * The idle task has no parent, it either has its own
- * stack or no stack at all.
- */
- if (t->ret_stack)
- WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));
-
- if (ftrace_graph_active) {
- struct ftrace_ret_stack *ret_stack;
-
- ret_stack = per_cpu(idle_ret_stack, cpu);
- if (!ret_stack) {
- ret_stack =
- kmalloc_array(FTRACE_RETFUNC_DEPTH,
- sizeof(struct ftrace_ret_stack),
- GFP_KERNEL);
- if (!ret_stack)
- return;
- per_cpu(idle_ret_stack, cpu) = ret_stack;
- }
- graph_init_task(t, ret_stack);
- }
-}
-
-/* Allocate a return stack for newly created task */
-void ftrace_graph_init_task(struct task_struct *t)
-{
- /* Make sure we do not use the parent ret_stack */
- t->ret_stack = NULL;
- t->curr_ret_stack = -1;
- t->curr_ret_depth = -1;
-
- if (ftrace_graph_active) {
- struct ftrace_ret_stack *ret_stack;
-
- ret_stack = kmalloc_array(FTRACE_RETFUNC_DEPTH,
- sizeof(struct ftrace_ret_stack),
- GFP_KERNEL);
- if (!ret_stack)
- return;
- graph_init_task(t, ret_stack);
- }
-}
-
-void ftrace_graph_exit_task(struct task_struct *t)
-{
- struct ftrace_ret_stack *ret_stack = t->ret_stack;
-
- t->ret_stack = NULL;
- /* NULL must become visible to IRQs before we free it: */
- barrier();
-
- kfree(ret_stack);
-}
-#endif