// SPDX-License-Identifier: GPL-2.0
#include <trace/syscall.h>
#include <trace/events/syscalls.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>       /* for MODULE_NAME_LEN via KSYM_SYMBOL_LEN */
#include <linux/ftrace.h>
#include <linux/perf_event.h>
#include <linux/xarray.h>
#include <asm/syscall.h>

#include "trace_output.h"
#include "trace.h"

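/*
 * Serializes (un)registration of the sys_enter/sys_exit tracepoint
 * probes and the refcounts below that track how many users each has.
 */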
static DEFINE_MUTEX(syscall_trace_lock);

static int syscall_enter_register(struct trace_event_call *event,
                                  enum trace_reg type, void *data);
static int syscall_exit_register(struct trace_event_call *event,
                                 enum trace_reg type, void *data);

static struct list_head *
syscall_get_enter_fields(struct trace_event_call *call)
{
        struct syscall_metadata *entry = call->data;

        return &entry->enter_fields;
}

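/*
 * Bounds of the __syscalls_metadata linker section, which collects the
 * metadata pointers emitted by SYSCALL_METADATA()/SYSCALL_DEFINEx().
 */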
extern struct syscall_metadata *__start_syscalls_metadata[];
extern struct syscall_metadata *__stop_syscalls_metadata[];

static DEFINE_XARRAY(syscalls_metadata_sparse);
static struct syscall_metadata **syscalls_metadata;

#ifndef ARCH_HAS_SYSCALL_MATCH_SYM_NAME
static inline bool arch_syscall_match_sym_name(const char *sym, const char *name)
{
        /*
         * Only compare after the "sys" prefix. Archs that use
         * syscall wrappers may have syscall symbols aliased with
         * a ".SyS" or ".sys" prefix instead of "sys", leading to
         * an unwanted mismatch.
         */
        return !strcmp(sym + 3, name + 3);
}
#endif

#ifdef ARCH_TRACE_IGNORE_COMPAT_SYSCALLS
/*
 * Some architectures that allow 32bit applications to run on a
 * 64bit kernel do not map the syscalls for 32bit tasks the same
 * way they do for 64bit tasks.
 *
 * *cough*x86*cough*
 *
 * In such a case, instead of reporting the wrong syscalls,
 * simply ignore them.
 *
 * For an arch to ignore the compat syscalls it needs to define
 * ARCH_TRACE_IGNORE_COMPAT_SYSCALLS as well as the function
 * arch_trace_is_compat_syscall(), which tells the tracing system
 * which syscalls to ignore.
 */
static int
trace_get_syscall_nr(struct task_struct *task, struct pt_regs *regs)
{
        if (unlikely(arch_trace_is_compat_syscall(regs)))
                return -1;

        return syscall_get_nr(task, regs);
}
#else
static inline int
trace_get_syscall_nr(struct task_struct *task, struct pt_regs *regs)
{
        return syscall_get_nr(task, regs);
}
#endif /* ARCH_TRACE_IGNORE_COMPAT_SYSCALLS */

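/*
 * Resolve the metadata entry for the syscall at address @syscall by
 * looking up its symbol name and matching it against each entry in the
 * metadata section. Returns NULL for unimplemented table slots.
 */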
static __init struct syscall_metadata *
find_syscall_meta(unsigned long syscall)
{
        struct syscall_metadata **start;
        struct syscall_metadata **stop;
        char str[KSYM_SYMBOL_LEN];

        start = __start_syscalls_metadata;
        stop = __stop_syscalls_metadata;
        kallsyms_lookup(syscall, NULL, NULL, NULL, str);

        if (arch_syscall_match_sym_name(str, "sys_ni_syscall"))
                return NULL;

        for ( ; start < stop; start++) {
                if ((*start)->name && arch_syscall_match_sym_name(str, (*start)->name))
                        return *start;
        }
        return NULL;
}

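/*
 * Map a syscall number to its metadata. Architectures with sparse
 * syscall numbers (CONFIG_HAVE_SPARSE_SYSCALL_NR) use an xarray;
 * everyone else indexes a flat array of NR_syscalls entries.
 */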
static struct syscall_metadata *syscall_nr_to_meta(int nr)
{
        if (IS_ENABLED(CONFIG_HAVE_SPARSE_SYSCALL_NR))
                return xa_load(&syscalls_metadata_sparse, (unsigned long)nr);

        if (!syscalls_metadata || nr >= NR_syscalls || nr < 0)
                return NULL;

        return syscalls_metadata[nr];
}

const char *get_syscall_name(int syscall)
{
        struct syscall_metadata *entry;

        entry = syscall_nr_to_meta(syscall);
        if (!entry)
                return NULL;

        return entry->name;
}

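/*
 * Text output callback for sys_enter events: prints "name(arg: val, ...)"
 * from the recorded metadata. Argument types are prepended when the
 * "verbose" trace option is set.
 */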
static enum print_line_t
print_syscall_enter(struct trace_iterator *iter, int flags,
                    struct trace_event *event)
{
        struct trace_array *tr = iter->tr;
        struct trace_seq *s = &iter->seq;
        struct trace_entry *ent = iter->ent;
        struct syscall_trace_enter *trace;
        struct syscall_metadata *entry;
        int i, syscall;

        trace = (typeof(trace))ent;
        syscall = trace->nr;
        entry = syscall_nr_to_meta(syscall);

        if (!entry)
                goto end;

        if (entry->enter_event->event.type != ent->type) {
                WARN_ON_ONCE(1);
                goto end;
        }

        trace_seq_printf(s, "%s(", entry->name);

        for (i = 0; i < entry->nb_args; i++) {

                if (trace_seq_has_overflowed(s))
                        goto end;

                /* parameter types */
                if (tr->trace_flags & TRACE_ITER_VERBOSE)
                        trace_seq_printf(s, "%s ", entry->types[i]);

                /* parameter values */
                trace_seq_printf(s, "%s: %lx%s", entry->args[i],
                                 trace->args[i],
                                 i == entry->nb_args - 1 ? "" : ", ");
        }

        trace_seq_putc(s, ')');
end:
        trace_seq_putc(s, '\n');

        return trace_handle_return(s);
}

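/*
 * Text output callback for sys_exit events: prints "name -> 0x<ret>".
 */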
static enum print_line_t
print_syscall_exit(struct trace_iterator *iter, int flags,
                   struct trace_event *event)
{
        struct trace_seq *s = &iter->seq;
        struct trace_entry *ent = iter->ent;
        struct syscall_trace_exit *trace;
        int syscall;
        struct syscall_metadata *entry;

        trace = (typeof(trace))ent;
        syscall = trace->nr;
        entry = syscall_nr_to_meta(syscall);

        if (!entry) {
                trace_seq_putc(s, '\n');
                goto out;
        }

        if (entry->exit_event->event.type != ent->type) {
                WARN_ON_ONCE(1);
                return TRACE_TYPE_UNHANDLED;
        }

        trace_seq_printf(s, "%s -> 0x%lx\n", entry->name,
                         trace->ret);

out:
        return trace_handle_return(s);
}

extern char *__bad_type_size(void);

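/*
 * Static initializer for one fixed trace_event_fields entry, e.g.
 * SYSCALL_FIELD(int, __syscall_nr) expands to
 * { .type = "int", .name = "__syscall_nr", .size = sizeof(int), ... }
 */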
#define SYSCALL_FIELD(_type, _name) {                                   \
        .type = #_type, .name = #_name,                                 \
        .size = sizeof(_type), .align = __alignof__(_type),             \
        .is_signed = is_signed_type(_type), .filter_type = FILTER_OTHER }

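/*
 * Build the print_fmt string for a syscall entry event. On a 64-bit
 * arch, sys_read for example ends up with:
 *   "fd: 0x%08lx, buf: 0x%08lx, count: 0x%08lx",
 *   ((unsigned long)(REC->fd)), ((unsigned long)(REC->buf)),
 *   ((unsigned long)(REC->count))
 * Called once with len == 0 to size the buffer, then again to fill it.
 */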
static int __init
__set_enter_print_fmt(struct syscall_metadata *entry, char *buf, int len)
{
        int i;
        int pos = 0;

        /* When len=0, we just calculate the needed length */
#define LEN_OR_ZERO (len ? len - pos : 0)

        pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
        for (i = 0; i < entry->nb_args; i++) {
                pos += snprintf(buf + pos, LEN_OR_ZERO, "%s: 0x%%0%zulx%s",
                                entry->args[i], sizeof(unsigned long),
                                i == entry->nb_args - 1 ? "" : ", ");
        }
        pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");

        for (i = 0; i < entry->nb_args; i++) {
                pos += snprintf(buf + pos, LEN_OR_ZERO,
                                ", ((unsigned long)(REC->%s))", entry->args[i]);
        }

#undef LEN_OR_ZERO

        /* return the length of print_fmt */
        return pos;
}

static int __init set_syscall_print_fmt(struct trace_event_call *call)
{
        char *print_fmt;
        int len;
        struct syscall_metadata *entry = call->data;

        if (entry->enter_event != call) {
                call->print_fmt = "\"0x%lx\", REC->ret";
                return 0;
        }

        /* First: called with 0 length to calculate the needed length */
        len = __set_enter_print_fmt(entry, NULL, 0);

        print_fmt = kmalloc(len + 1, GFP_KERNEL);
        if (!print_fmt)
                return -ENOMEM;

        /* Second: actually write the @print_fmt */
        __set_enter_print_fmt(entry, print_fmt, len + 1);
        call->print_fmt = print_fmt;

        return 0;
}

static void __init free_syscall_print_fmt(struct trace_event_call *call)
{
        struct syscall_metadata *entry = call->data;

        if (entry->enter_event == call)
                kfree(call->print_fmt);
}

static int __init syscall_enter_define_fields(struct trace_event_call *call)
{
        struct syscall_trace_enter trace;
        struct syscall_metadata *meta = call->data;
        int offset = offsetof(typeof(trace), args);
        int ret = 0;
        int i;

        for (i = 0; i < meta->nb_args; i++) {
                ret = trace_define_field(call, meta->types[i],
                                         meta->args[i], offset,
                                         sizeof(unsigned long), 0,
                                         FILTER_OTHER);
                if (ret)
                        break;
                offset += sizeof(unsigned long);
        }

        return ret;
}

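/*
 * Probe attached to the sys_enter tracepoint for ftrace: captures the
 * syscall number and raw arguments into this trace array's ring buffer,
 * honoring per-file soft-disable state and event triggers.
 */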
static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
{
        struct trace_array *tr = data;
        struct trace_event_file *trace_file;
        struct syscall_trace_enter *entry;
        struct syscall_metadata *sys_data;
        struct ring_buffer_event *event;
        struct trace_buffer *buffer;
        unsigned long irq_flags;
        unsigned long args[6];
        int pc;
        int syscall_nr;
        int size;

        syscall_nr = trace_get_syscall_nr(current, regs);
        if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
                return;

        /* Here we're inside tp handler's rcu_read_lock_sched (__DO_TRACE) */
        trace_file = rcu_dereference_sched(tr->enter_syscall_files[syscall_nr]);
        if (!trace_file)
                return;

        if (trace_trigger_soft_disabled(trace_file))
                return;

        sys_data = syscall_nr_to_meta(syscall_nr);
        if (!sys_data)
                return;

        size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args;

        local_save_flags(irq_flags);
        pc = preempt_count();

        buffer = tr->array_buffer.buffer;
        event = trace_buffer_lock_reserve(buffer,
                        sys_data->enter_event->event.type, size, irq_flags, pc);
        if (!event)
                return;

        entry = ring_buffer_event_data(event);
        entry->nr = syscall_nr;
        syscall_get_arguments(current, regs, args);
        memcpy(entry->args, args, sizeof(unsigned long) * sys_data->nb_args);

        event_trigger_unlock_commit(trace_file, buffer, event, entry,
                                    irq_flags, pc);
}

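/*
 * Probe attached to the sys_exit tracepoint for ftrace: records the
 * syscall number and return value.
 */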
static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
{
        struct trace_array *tr = data;
        struct trace_event_file *trace_file;
        struct syscall_trace_exit *entry;
        struct syscall_metadata *sys_data;
        struct ring_buffer_event *event;
        struct trace_buffer *buffer;
        unsigned long irq_flags;
        int pc;
        int syscall_nr;

        syscall_nr = trace_get_syscall_nr(current, regs);
        if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
                return;

        /* Here we're inside tp handler's rcu_read_lock_sched (__DO_TRACE()) */
        trace_file = rcu_dereference_sched(tr->exit_syscall_files[syscall_nr]);
        if (!trace_file)
                return;

        if (trace_trigger_soft_disabled(trace_file))
                return;

        sys_data = syscall_nr_to_meta(syscall_nr);
        if (!sys_data)
                return;

        local_save_flags(irq_flags);
        pc = preempt_count();

        buffer = tr->array_buffer.buffer;
        event = trace_buffer_lock_reserve(buffer,
                        sys_data->exit_event->event.type, sizeof(*entry),
                        irq_flags, pc);
        if (!event)
                return;

        entry = ring_buffer_event_data(event);
        entry->nr = syscall_nr;
        entry->ret = syscall_get_return_value(current, regs);

        event_trigger_unlock_commit(trace_file, buffer, event, entry,
                                    irq_flags, pc);
}

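/*
 * The reg/unreg helpers below attach the tracepoint probe on first use
 * for a trace array, publish the event file so the probe can find it,
 * and tear everything down again when the last user goes away. The
 * per-array refcounts are protected by syscall_trace_lock.
 */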
static int reg_event_syscall_enter(struct trace_event_file *file,
                                   struct trace_event_call *call)
{
        struct trace_array *tr = file->tr;
        int ret = 0;
        int num;

        num = ((struct syscall_metadata *)call->data)->syscall_nr;
        if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
                return -ENOSYS;
        mutex_lock(&syscall_trace_lock);
        if (!tr->sys_refcount_enter)
                ret = register_trace_sys_enter(ftrace_syscall_enter, tr);
        if (!ret) {
                rcu_assign_pointer(tr->enter_syscall_files[num], file);
                tr->sys_refcount_enter++;
        }
        mutex_unlock(&syscall_trace_lock);
        return ret;
}

static void unreg_event_syscall_enter(struct trace_event_file *file,
                                      struct trace_event_call *call)
{
        struct trace_array *tr = file->tr;
        int num;

        num = ((struct syscall_metadata *)call->data)->syscall_nr;
        if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
                return;
        mutex_lock(&syscall_trace_lock);
        tr->sys_refcount_enter--;
        RCU_INIT_POINTER(tr->enter_syscall_files[num], NULL);
        if (!tr->sys_refcount_enter)
                unregister_trace_sys_enter(ftrace_syscall_enter, tr);
        mutex_unlock(&syscall_trace_lock);
}

static int reg_event_syscall_exit(struct trace_event_file *file,
                                  struct trace_event_call *call)
{
        struct trace_array *tr = file->tr;
        int ret = 0;
        int num;

        num = ((struct syscall_metadata *)call->data)->syscall_nr;
        if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
                return -ENOSYS;
        mutex_lock(&syscall_trace_lock);
        if (!tr->sys_refcount_exit)
                ret = register_trace_sys_exit(ftrace_syscall_exit, tr);
        if (!ret) {
                rcu_assign_pointer(tr->exit_syscall_files[num], file);
                tr->sys_refcount_exit++;
        }
        mutex_unlock(&syscall_trace_lock);
        return ret;
}

static void unreg_event_syscall_exit(struct trace_event_file *file,
                                     struct trace_event_call *call)
{
        struct trace_array *tr = file->tr;
        int num;

        num = ((struct syscall_metadata *)call->data)->syscall_nr;
        if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
                return;
        mutex_lock(&syscall_trace_lock);
        tr->sys_refcount_exit--;
        RCU_INIT_POINTER(tr->exit_syscall_files[num], NULL);
        if (!tr->sys_refcount_exit)
                unregister_trace_sys_exit(ftrace_syscall_exit, tr);
        mutex_unlock(&syscall_trace_lock);
}

static int __init init_syscall_trace(struct trace_event_call *call)
{
        int id;
        int num;

        num = ((struct syscall_metadata *)call->data)->syscall_nr;
        if (num < 0 || num >= NR_syscalls) {
                pr_debug("syscall %s metadata not mapped, disabling ftrace event\n",
                         ((struct syscall_metadata *)call->data)->name);
                return -ENOSYS;
        }

        if (set_syscall_print_fmt(call) < 0)
                return -ENOMEM;

        id = trace_event_raw_init(call);

        if (id < 0) {
                free_syscall_print_fmt(call);
                return id;
        }

        return id;
}

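/*
 * Fixed fields of syscall entry records; the per-syscall arguments are
 * filled in at init time by syscall_enter_define_fields() above.
 */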
static struct trace_event_fields __refdata syscall_enter_fields_array[] = {
        SYSCALL_FIELD(int, __syscall_nr),
        { .type = TRACE_FUNCTION_TYPE,
          .define_fields = syscall_enter_define_fields },
        {}
};

struct trace_event_functions enter_syscall_print_funcs = {
        .trace = print_syscall_enter,
};

struct trace_event_functions exit_syscall_print_funcs = {
        .trace = print_syscall_exit,
};

struct trace_event_class __refdata event_class_syscall_enter = {
        .system = "syscalls",
        .reg = syscall_enter_register,
        .fields_array = syscall_enter_fields_array,
        .get_fields = syscall_get_enter_fields,
        .raw_init = init_syscall_trace,
};

struct trace_event_class __refdata event_class_syscall_exit = {
        .system = "syscalls",
        .reg = syscall_exit_register,
        .fields_array = (struct trace_event_fields[]){
                SYSCALL_FIELD(int, __syscall_nr),
                SYSCALL_FIELD(long, ret),
                {}
        },
        .fields = LIST_HEAD_INIT(event_class_syscall_exit.fields),
        .raw_init = init_syscall_trace,
};

unsigned long __init __weak arch_syscall_addr(int nr)
{
        return (unsigned long)sys_call_table[nr];
}

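/*
 * Boot-time setup: resolve the metadata for every syscall table slot
 * and index it by syscall number, either in the flat array or, for
 * sparse syscall numbers, in the xarray.
 */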
void __init init_ftrace_syscalls(void)
{
        struct syscall_metadata *meta;
        unsigned long addr;
        int i;
        void *ret;

        if (!IS_ENABLED(CONFIG_HAVE_SPARSE_SYSCALL_NR)) {
                syscalls_metadata = kcalloc(NR_syscalls,
                                            sizeof(*syscalls_metadata),
                                            GFP_KERNEL);
                if (!syscalls_metadata) {
                        WARN_ON(1);
                        return;
                }
        }

        for (i = 0; i < NR_syscalls; i++) {
                addr = arch_syscall_addr(i);
                meta = find_syscall_meta(addr);
                if (!meta)
                        continue;

                meta->syscall_nr = i;

                if (!IS_ENABLED(CONFIG_HAVE_SPARSE_SYSCALL_NR)) {
                        syscalls_metadata[i] = meta;
                } else {
                        ret = xa_store(&syscalls_metadata_sparse, i, meta,
                                       GFP_KERNEL);
                        WARN(xa_is_err(ret),
                             "Syscall memory allocation failed\n");
                }
        }
}

#ifdef CONFIG_PERF_EVENTS

static DECLARE_BITMAP(enabled_perf_enter_syscalls, NR_syscalls);
static DECLARE_BITMAP(enabled_perf_exit_syscalls, NR_syscalls);
static int sys_perf_refcount_enter;
static int sys_perf_refcount_exit;

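/*
 * Hand the record off to any attached BPF program. The local
 * syscall_tp_t mirrors the layout BPF programs expect for this
 * tracepoint: the pt_regs pointer in the first word, then the
 * syscall number and arguments.
 */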
static int perf_call_bpf_enter(struct trace_event_call *call, struct pt_regs *regs,
                               struct syscall_metadata *sys_data,
                               struct syscall_trace_enter *rec)
{
        struct syscall_tp_t {
                unsigned long long regs;
                unsigned long syscall_nr;
                unsigned long args[SYSCALL_DEFINE_MAXARGS];
        } param;
        int i;

        *(struct pt_regs **)&param = regs;
        param.syscall_nr = rec->nr;
        for (i = 0; i < sys_data->nb_args; i++)
                param.args[i] = rec->args[i];
        return trace_call_bpf(call, &param);
}

static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
{
        struct syscall_metadata *sys_data;
        struct syscall_trace_enter *rec;
        struct hlist_head *head;
        unsigned long args[6];
        bool valid_prog_array;
        int syscall_nr;
        int rctx;
        int size;

        syscall_nr = trace_get_syscall_nr(current, regs);
        if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
                return;
        if (!test_bit(syscall_nr, enabled_perf_enter_syscalls))
                return;

        sys_data = syscall_nr_to_meta(syscall_nr);
        if (!sys_data)
                return;

        head = this_cpu_ptr(sys_data->enter_event->perf_events);
        valid_prog_array = bpf_prog_array_valid(sys_data->enter_event);
        if (!valid_prog_array && hlist_empty(head))
                return;

        /* get the size after alignment with the u32 buffer size field */
        size = sizeof(unsigned long) * sys_data->nb_args + sizeof(*rec);
        size = ALIGN(size + sizeof(u32), sizeof(u64));
        size -= sizeof(u32);

        rec = perf_trace_buf_alloc(size, NULL, &rctx);
        if (!rec)
                return;

        rec->nr = syscall_nr;
        syscall_get_arguments(current, regs, args);
        memcpy(&rec->args, args, sizeof(unsigned long) * sys_data->nb_args);

        if ((valid_prog_array &&
             !perf_call_bpf_enter(sys_data->enter_event, regs, sys_data, rec)) ||
            hlist_empty(head)) {
                perf_swevent_put_recursion_context(rctx);
                return;
        }

        perf_trace_buf_submit(rec, size, rctx,
                              sys_data->enter_event->event.type, 1, regs,
                              head, NULL);
}

static int perf_sysenter_enable(struct trace_event_call *call)
{
        int ret = 0;
        int num;

        num = ((struct syscall_metadata *)call->data)->syscall_nr;

        mutex_lock(&syscall_trace_lock);
        if (!sys_perf_refcount_enter)
                ret = register_trace_sys_enter(perf_syscall_enter, NULL);
        if (ret) {
                pr_info("event trace: Could not activate syscall entry trace point");
        } else {
                set_bit(num, enabled_perf_enter_syscalls);
                sys_perf_refcount_enter++;
        }
        mutex_unlock(&syscall_trace_lock);
        return ret;
}

static void perf_sysenter_disable(struct trace_event_call *call)
{
        int num;

        num = ((struct syscall_metadata *)call->data)->syscall_nr;

        mutex_lock(&syscall_trace_lock);
        sys_perf_refcount_enter--;
        clear_bit(num, enabled_perf_enter_syscalls);
        if (!sys_perf_refcount_enter)
                unregister_trace_sys_enter(perf_syscall_enter, NULL);
        mutex_unlock(&syscall_trace_lock);
}

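/* Exit-side BPF handoff: pt_regs pointer, syscall number, return value. */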
static int perf_call_bpf_exit(struct trace_event_call *call, struct pt_regs *regs,
                              struct syscall_trace_exit *rec)
{
        struct syscall_tp_t {
                unsigned long long regs;
                unsigned long syscall_nr;
                unsigned long ret;
        } param;

        *(struct pt_regs **)&param = regs;
        param.syscall_nr = rec->nr;
        param.ret = rec->ret;
        return trace_call_bpf(call, &param);
}

static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
{
        struct syscall_metadata *sys_data;
        struct syscall_trace_exit *rec;
        struct hlist_head *head;
        bool valid_prog_array;
        int syscall_nr;
        int rctx;
        int size;

        syscall_nr = trace_get_syscall_nr(current, regs);
        if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
                return;
        if (!test_bit(syscall_nr, enabled_perf_exit_syscalls))
                return;

        sys_data = syscall_nr_to_meta(syscall_nr);
        if (!sys_data)
                return;

        head = this_cpu_ptr(sys_data->exit_event->perf_events);
        valid_prog_array = bpf_prog_array_valid(sys_data->exit_event);
        if (!valid_prog_array && hlist_empty(head))
                return;

        /* We can probably do that at build time */
        size = ALIGN(sizeof(*rec) + sizeof(u32), sizeof(u64));
        size -= sizeof(u32);

        rec = perf_trace_buf_alloc(size, NULL, &rctx);
        if (!rec)
                return;

        rec->nr = syscall_nr;
        rec->ret = syscall_get_return_value(current, regs);

        if ((valid_prog_array &&
             !perf_call_bpf_exit(sys_data->exit_event, regs, rec)) ||
            hlist_empty(head)) {
                perf_swevent_put_recursion_context(rctx);
                return;
        }

        perf_trace_buf_submit(rec, size, rctx, sys_data->exit_event->event.type,
                              1, regs, head, NULL);
}

static int perf_sysexit_enable(struct trace_event_call *call)
{
        int ret = 0;
        int num;

        num = ((struct syscall_metadata *)call->data)->syscall_nr;

        mutex_lock(&syscall_trace_lock);
        if (!sys_perf_refcount_exit)
                ret = register_trace_sys_exit(perf_syscall_exit, NULL);
        if (ret) {
                pr_info("event trace: Could not activate syscall exit trace point");
        } else {
                set_bit(num, enabled_perf_exit_syscalls);
                sys_perf_refcount_exit++;
        }
        mutex_unlock(&syscall_trace_lock);
        return ret;
}

static void perf_sysexit_disable(struct trace_event_call *call)
{
        int num;

        num = ((struct syscall_metadata *)call->data)->syscall_nr;

        mutex_lock(&syscall_trace_lock);
        sys_perf_refcount_exit--;
        clear_bit(num, enabled_perf_exit_syscalls);
        if (!sys_perf_refcount_exit)
                unregister_trace_sys_exit(perf_syscall_exit, NULL);
        mutex_unlock(&syscall_trace_lock);
}

#endif /* CONFIG_PERF_EVENTS */

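/*
 * The ->reg() callbacks below multiplex ftrace and perf (un)registration
 * requests for the syscall entry and exit event classes.
 */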
static int syscall_enter_register(struct trace_event_call *event,
                                  enum trace_reg type, void *data)
{
        struct trace_event_file *file = data;

        switch (type) {
        case TRACE_REG_REGISTER:
                return reg_event_syscall_enter(file, event);
        case TRACE_REG_UNREGISTER:
                unreg_event_syscall_enter(file, event);
                return 0;

#ifdef CONFIG_PERF_EVENTS
        case TRACE_REG_PERF_REGISTER:
                return perf_sysenter_enable(event);
        case TRACE_REG_PERF_UNREGISTER:
                perf_sysenter_disable(event);
                return 0;
        case TRACE_REG_PERF_OPEN:
        case TRACE_REG_PERF_CLOSE:
        case TRACE_REG_PERF_ADD:
        case TRACE_REG_PERF_DEL:
                return 0;
#endif
        }
        return 0;
}

static int syscall_exit_register(struct trace_event_call *event,
                                 enum trace_reg type, void *data)
{
        struct trace_event_file *file = data;

        switch (type) {
        case TRACE_REG_REGISTER:
                return reg_event_syscall_exit(file, event);
        case TRACE_REG_UNREGISTER:
                unreg_event_syscall_exit(file, event);
                return 0;

#ifdef CONFIG_PERF_EVENTS
        case TRACE_REG_PERF_REGISTER:
                return perf_sysexit_enable(event);
        case TRACE_REG_PERF_UNREGISTER:
                perf_sysexit_disable(event);
                return 0;
        case TRACE_REG_PERF_OPEN:
        case TRACE_REG_PERF_CLOSE:
        case TRACE_REG_PERF_ADD:
        case TRACE_REG_PERF_DEL:
                return 0;
#endif
        }
        return 0;
}