// SPDX-License-Identifier: GPL-2.0
/*
 * bpf-loader.c
 *
 * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
 * Copyright (C) 2015 Huawei Inc.
 */

#include <linux/bpf.h>
#include <bpf/libbpf.h>
#include <bpf/bpf.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/zalloc.h>
#include <errno.h>
#include <stdlib.h>
#include "debug.h"
#include "evlist.h"
#include "bpf-loader.h"
#include "bpf-prologue.h"
#include "probe-event.h"
#include "probe-finder.h" // for MAX_PROBES
#include "parse-events.h"
#include "strfilter.h"
#include "util.h"
#include "llvm-utils.h"
#include "c++/clang-c.h"

#include <internal/xyarray.h>

static int libbpf_perf_print(enum libbpf_print_level level __attribute__((unused)),
			     const char *fmt, va_list args)
{
	return veprintf(1, verbose, pr_fmt(fmt), args);
}

struct bpf_prog_priv {
	bool is_tp;
	char *sys_name;
	char *evt_name;
	struct perf_probe_event pev;
	bool need_prologue;
	struct bpf_insn *insns_buf;
	int nr_types;
	int *type_mapping;
};

static bool libbpf_initialized;

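/*
 * Open a BPF object from an in-memory ELF buffer rather than a file,
 * initializing libbpf's print callback on first use.
 */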
struct bpf_object *
bpf__prepare_load_buffer(void *obj_buf, size_t obj_buf_sz, const char *name)
{
	struct bpf_object *obj;

	if (!libbpf_initialized) {
		libbpf_set_print(libbpf_perf_print);
		libbpf_initialized = true;
	}

	obj = bpf_object__open_buffer(obj_buf, obj_buf_sz, name);
	if (IS_ERR_OR_NULL(obj)) {
		pr_debug("bpf: failed to load buffer\n");
		return ERR_PTR(-EINVAL);
	}

	return obj;
}

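/*
 * Open a BPF object from @filename. With @source set, the file is
 * treated as C source and compiled first, preferring the builtin clang
 * support and falling back to an external LLVM compiler.
 */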
struct bpf_object *bpf__prepare_load(const char *filename, bool source)
{
	struct bpf_object *obj;

	if (!libbpf_initialized) {
		libbpf_set_print(libbpf_perf_print);
		libbpf_initialized = true;
	}

	if (source) {
		int err;
		void *obj_buf;
		size_t obj_buf_sz;

		perf_clang__init();
		err = perf_clang__compile_bpf(filename, &obj_buf, &obj_buf_sz);
		perf_clang__cleanup();
		if (err) {
			pr_debug("bpf: builtin compilation failed: %d, try external compiler\n", err);
			err = llvm__compile_bpf(filename, &obj_buf, &obj_buf_sz);
			if (err)
				return ERR_PTR(-BPF_LOADER_ERRNO__COMPILE);
		} else
			pr_debug("bpf: successful builtin compilation\n");
		obj = bpf_object__open_buffer(obj_buf, obj_buf_sz, filename);

		if (!IS_ERR_OR_NULL(obj) && llvm_param.dump_obj)
			llvm__dump_obj(filename, obj_buf, obj_buf_sz);

		free(obj_buf);
	} else
		obj = bpf_object__open(filename);

	if (IS_ERR_OR_NULL(obj)) {
		pr_debug("bpf: failed to load %s\n", filename);
		return obj;
	}

	return obj;
}

void bpf__clear(void)
{
	struct bpf_object *obj, *tmp;

	bpf_object__for_each_safe(obj, tmp) {
		bpf__unprobe(obj);
		bpf_object__close(obj);
	}
}

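/* Destructor for the program private data set up by config_bpf_program(). */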
static void
clear_prog_priv(struct bpf_program *prog __maybe_unused,
		void *_priv)
{
	struct bpf_prog_priv *priv = _priv;

	cleanup_perf_probe_events(&priv->pev, 1);
	zfree(&priv->insns_buf);
	zfree(&priv->type_mapping);
	zfree(&priv->sys_name);
	zfree(&priv->evt_name);
	free(priv);
}

static int
prog_config__exec(const char *value, struct perf_probe_event *pev)
{
	pev->uprobes = true;
	pev->target = strdup(value);
	if (!pev->target)
		return -ENOMEM;
	return 0;
}

static int
prog_config__module(const char *value, struct perf_probe_event *pev)
{
	pev->uprobes = false;
	pev->target = strdup(value);
	if (!pev->target)
		return -ENOMEM;
	return 0;
}

static int
prog_config__bool(const char *value, bool *pbool, bool invert)
{
	int err;
	bool bool_value;

	if (!pbool)
		return -EINVAL;

	err = strtobool(value, &bool_value);
	if (err)
		return err;

	*pbool = invert ? !bool_value : bool_value;
	return 0;
}

static int
prog_config__inlines(const char *value,
		     struct perf_probe_event *pev __maybe_unused)
{
	return prog_config__bool(value, &probe_conf.no_inlines, true);
}

static int
prog_config__force(const char *value,
		   struct perf_probe_event *pev __maybe_unused)
{
	return prog_config__bool(value, &probe_conf.force_add, false);
}

static struct {
	const char *key;
	const char *usage;
	const char *desc;
	int (*func)(const char *, struct perf_probe_event *);
} bpf_prog_config_terms[] = {
	{
		.key = "exec",
		.usage = "exec=<full path of file>",
		.desc = "Set uprobe target",
		.func = prog_config__exec,
	},
	{
		.key = "module",
		.usage = "module=<module name> ",
		.desc = "Set kprobe module",
		.func = prog_config__module,
	},
	{
		.key = "inlines",
		.usage = "inlines=[yes|no] ",
		.desc = "Probe at inline symbol",
		.func = prog_config__inlines,
	},
	{
		.key = "force",
		.usage = "force=[yes|no] ",
		.desc = "Forcibly add events with existing name",
		.func = prog_config__force,
	},
};

static int
do_prog_config(const char *key, const char *value,
	       struct perf_probe_event *pev)
{
	unsigned int i;

	pr_debug("config bpf program: %s=%s\n", key, value);
	for (i = 0; i < ARRAY_SIZE(bpf_prog_config_terms); i++)
		if (strcmp(key, bpf_prog_config_terms[i].key) == 0)
			return bpf_prog_config_terms[i].func(value, pev);

	pr_debug("BPF: ERROR: invalid program config option: %s=%s\n",
		 key, value);

	pr_debug("\nHint: Valid options are:\n");
	for (i = 0; i < ARRAY_SIZE(bpf_prog_config_terms); i++)
		pr_debug("\t%s:\t%s\n", bpf_prog_config_terms[i].usage,
			 bpf_prog_config_terms[i].desc);
	pr_debug("\n");

	return -BPF_LOADER_ERRNO__PROGCONF_TERM;
}

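/*
 * Consume the leading "key=value;" terms of a config string and return
 * a pointer to the remainder (the probe definition or tracepoint name).
 */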
static const char *
parse_prog_config_kvpair(const char *config_str, struct perf_probe_event *pev)
{
	char *text = strdup(config_str);
	char *sep, *line;
	const char *main_str = NULL;
	int err = 0;

	if (!text) {
		pr_debug("Not enough memory: dup config_str failed\n");
		return ERR_PTR(-ENOMEM);
	}

	line = text;
	while ((sep = strchr(line, ';'))) {
		char *equ;

		*sep = '\0';
		equ = strchr(line, '=');
		if (!equ) {
			pr_warning("WARNING: invalid config in BPF object: %s\n",
				   line);
			pr_warning("\tShould be 'key=value'.\n");
			goto nextline;
		}
		*equ = '\0';

		err = do_prog_config(line, equ + 1, pev);
		if (err)
			break;
nextline:
		line = sep + 1;
	}

	if (!err)
		main_str = config_str + (line - text);
	free(text);

	return err ? ERR_PTR(err) : main_str;
}

static int
parse_prog_config(const char *config_str, const char **p_main_str,
		  bool *is_tp, struct perf_probe_event *pev)
{
	int err;
	const char *main_str = parse_prog_config_kvpair(config_str, pev);

	if (IS_ERR(main_str))
		return PTR_ERR(main_str);

	*p_main_str = main_str;
	if (!strchr(main_str, '=')) {
		/* Is a tracepoint event? */
		const char *s = strchr(main_str, ':');

		if (!s) {
			pr_debug("bpf: '%s' is not a valid tracepoint\n",
				 config_str);
			return -BPF_LOADER_ERRNO__CONFIG;
		}

		*is_tp = true;
		return 0;
	}

	*is_tp = false;
	err = parse_perf_probe_command(main_str, pev);
	if (err < 0) {
		pr_debug("bpf: '%s' is not a valid config string\n",
			 config_str);
		/* parse failed, no need to clear pev. */
		return -BPF_LOADER_ERRNO__CONFIG;
	}
	return 0;
}

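/*
 * Parse the program's section name as its configuration: either a
 * "sys:event" tracepoint or a perf probe definition, optionally
 * preceded by "key=value;" terms, and attach the result to the program
 * as private data.
 */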
static int
config_bpf_program(struct bpf_program *prog)
{
	struct perf_probe_event *pev = NULL;
	struct bpf_prog_priv *priv = NULL;
	const char *config_str, *main_str;
	bool is_tp = false;
	int err;

	/* Initialize per-program probing setting */
	probe_conf.no_inlines = false;
	probe_conf.force_add = false;

	config_str = bpf_program__title(prog, false);
	if (IS_ERR(config_str)) {
		pr_debug("bpf: unable to get title for program\n");
		return PTR_ERR(config_str);
	}

	priv = calloc(sizeof(*priv), 1);
	if (!priv) {
		pr_debug("bpf: failed to alloc priv\n");
		return -ENOMEM;
	}
	pev = &priv->pev;

	pr_debug("bpf: config program '%s'\n", config_str);
	err = parse_prog_config(config_str, &main_str, &is_tp, pev);
	if (err)
		goto errout;

	if (is_tp) {
		char *s = strchr(main_str, ':');

		priv->is_tp = true;
		priv->sys_name = strndup(main_str, s - main_str);
		priv->evt_name = strdup(s + 1);
		goto set_priv;
	}

	if (pev->group && strcmp(pev->group, PERF_BPF_PROBE_GROUP)) {
		pr_debug("bpf: '%s': group for event is set and not '%s'.\n",
			 config_str, PERF_BPF_PROBE_GROUP);
		err = -BPF_LOADER_ERRNO__GROUP;
		goto errout;
	} else if (!pev->group)
		pev->group = strdup(PERF_BPF_PROBE_GROUP);

	if (!pev->group) {
		pr_debug("bpf: strdup failed\n");
		err = -ENOMEM;
		goto errout;
	}

	if (!pev->event) {
		pr_debug("bpf: '%s': event name is missing. Section name should be 'key=value'\n",
			 config_str);
		err = -BPF_LOADER_ERRNO__EVENTNAME;
		goto errout;
	}
	pr_debug("bpf: config '%s' is ok\n", config_str);

set_priv:
	err = bpf_program__set_priv(prog, priv, clear_prog_priv);
	if (err) {
		pr_debug("Failed to set priv for program '%s'\n", config_str);
		goto errout;
	}

	return 0;

errout:
	if (pev)
		clear_perf_probe_event(pev);
	free(priv);
	return err;
}

static int bpf__prepare_probe(void)
{
	static int err = 0;
	static bool initialized = false;

	/*
	 * Make err static, so if init fails the first time,
	 * bpf__prepare_probe() fails each subsequent call without
	 * calling init_probe_symbol_maps() multiple times.
	 */
	if (initialized)
		return err;

	initialized = true;
	err = init_probe_symbol_maps(false);
	if (err < 0)
		pr_debug("Failed to init_probe_symbol_maps\n");
	probe_conf.max_probes = MAX_PROBES;
	return err;
}

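/*
 * Preprocessor hook called by libbpf while loading: prepend the
 * prologue generated for prologue type @n to the original instructions.
 */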
static int
preproc_gen_prologue(struct bpf_program *prog, int n,
		     struct bpf_insn *orig_insns, int orig_insns_cnt,
		     struct bpf_prog_prep_result *res)
{
	struct bpf_prog_priv *priv = bpf_program__priv(prog);
	struct probe_trace_event *tev;
	struct perf_probe_event *pev;
	struct bpf_insn *buf;
	size_t prologue_cnt = 0;
	int i, err;

	if (IS_ERR(priv) || !priv || priv->is_tp)
		goto errout;

	pev = &priv->pev;

	if (n < 0 || n >= priv->nr_types)
		goto errout;

	/* Find a tev that belongs to that type */
	for (i = 0; i < pev->ntevs; i++) {
		if (priv->type_mapping[i] == n)
			break;
	}

	if (i >= pev->ntevs) {
		pr_debug("Internal error: prologue type %d not found\n", n);
		return -BPF_LOADER_ERRNO__PROLOGUE;
	}

	tev = &pev->tevs[i];

	buf = priv->insns_buf;
	err = bpf__gen_prologue(tev->args, tev->nargs,
				buf, &prologue_cnt,
				BPF_MAXINSNS - orig_insns_cnt);
	if (err) {
		const char *title;

		title = bpf_program__title(prog, false);
		if (!title)
			title = "[unknown]";

		pr_debug("Failed to generate prologue for program %s\n",
			 title);
		return err;
	}

	memcpy(&buf[prologue_cnt], orig_insns,
	       sizeof(struct bpf_insn) * orig_insns_cnt);

	res->new_insn_ptr = buf;
	res->new_insn_cnt = prologue_cnt + orig_insns_cnt;
	res->pfd = NULL;
	return 0;

errout:
	pr_debug("Internal error in preproc_gen_prologue\n");
	return -BPF_LOADER_ERRNO__PROLOGUE;
}

/*
 * compare_tev_args is reflexive, transitive and antisymmetric.
 * I can prove it but this margin is too narrow to contain.
 */
static int compare_tev_args(const void *ptev1, const void *ptev2)
{
	int i, ret;
	const struct probe_trace_event *tev1 =
		*(const struct probe_trace_event **)ptev1;
	const struct probe_trace_event *tev2 =
		*(const struct probe_trace_event **)ptev2;

	ret = tev2->nargs - tev1->nargs;
	if (ret)
		return ret;

	for (i = 0; i < tev1->nargs; i++) {
		struct probe_trace_arg *arg1, *arg2;
		struct probe_trace_arg_ref *ref1, *ref2;

		arg1 = &tev1->args[i];
		arg2 = &tev2->args[i];

		ret = strcmp(arg1->value, arg2->value);
		if (ret)
			return ret;

		ref1 = arg1->ref;
		ref2 = arg2->ref;

		while (ref1 && ref2) {
			ret = ref2->offset - ref1->offset;
			if (ret)
				return ret;

			ref1 = ref1->next;
			ref2 = ref2->next;
		}

		if (ref1 || ref2)
			return ref2 ? 1 : -1;
	}

	return 0;
}

/*
 * Assign a type number to each tev in a pev.
 * mapping is an array with the same number of slots as tevs in that pev.
 * nr_types will be set to the number of distinct types.
 */
static int map_prologue(struct perf_probe_event *pev, int *mapping,
			int *nr_types)
{
	int i, type = 0;
	struct probe_trace_event **ptevs;

	size_t array_sz = sizeof(*ptevs) * pev->ntevs;

	ptevs = malloc(array_sz);
	if (!ptevs) {
		pr_debug("Not enough memory: alloc ptevs failed\n");
		return -ENOMEM;
	}

	pr_debug("In map_prologue, ntevs=%d\n", pev->ntevs);
	for (i = 0; i < pev->ntevs; i++)
		ptevs[i] = &pev->tevs[i];

	qsort(ptevs, pev->ntevs, sizeof(*ptevs),
	      compare_tev_args);

	for (i = 0; i < pev->ntevs; i++) {
		int n;

		n = ptevs[i] - pev->tevs;
		if (i == 0) {
			mapping[n] = type;
			pr_debug("mapping[%d]=%d\n", n, type);
			continue;
		}

		if (compare_tev_args(ptevs + i, ptevs + i - 1) == 0)
			mapping[n] = type;
		else
			mapping[n] = ++type;

		pr_debug("mapping[%d]=%d\n", n, mapping[n]);
	}
	free(ptevs);
	*nr_types = type + 1;

	return 0;
}

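/*
 * Check whether a program needs a prologue (i.e. any of its tevs has
 * arguments); if so, allocate the instruction buffer and type mapping
 * and register preproc_gen_prologue() with libbpf.
 */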
static int hook_load_preprocessor(struct bpf_program *prog)
{
	struct bpf_prog_priv *priv = bpf_program__priv(prog);
	struct perf_probe_event *pev;
	bool need_prologue = false;
	int err, i;

	if (IS_ERR(priv) || !priv) {
		pr_debug("Internal error when hooking preprocessor\n");
		return -BPF_LOADER_ERRNO__INTERNAL;
	}

	if (priv->is_tp) {
		priv->need_prologue = false;
		return 0;
	}

	pev = &priv->pev;
	for (i = 0; i < pev->ntevs; i++) {
		struct probe_trace_event *tev = &pev->tevs[i];

		if (tev->nargs > 0) {
			need_prologue = true;
			break;
		}
	}

	/*
	 * Since none of the tevs has an argument, we don't need to
	 * generate a prologue.
	 */
	if (!need_prologue) {
		priv->need_prologue = false;
		return 0;
	}

	priv->need_prologue = true;
	priv->insns_buf = malloc(sizeof(struct bpf_insn) * BPF_MAXINSNS);
	if (!priv->insns_buf) {
		pr_debug("Not enough memory: alloc insns_buf failed\n");
		return -ENOMEM;
	}

	priv->type_mapping = malloc(sizeof(int) * pev->ntevs);
	if (!priv->type_mapping) {
		pr_debug("Not enough memory: alloc type_mapping failed\n");
		return -ENOMEM;
	}
	memset(priv->type_mapping, -1,
	       sizeof(int) * pev->ntevs);

	err = map_prologue(pev, priv->type_mapping, &priv->nr_types);
	if (err)
		return err;

	err = bpf_program__set_prep(prog, priv->nr_types,
				    preproc_gen_prologue);
	return err;
}

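/*
 * Configure every program in @obj and, for kprobe/uprobe style
 * programs, convert and apply the corresponding perf probe events.
 */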
int bpf__probe(struct bpf_object *obj)
{
	int err = 0;
	struct bpf_program *prog;
	struct bpf_prog_priv *priv;
	struct perf_probe_event *pev;

	err = bpf__prepare_probe();
	if (err) {
		pr_debug("bpf__prepare_probe failed\n");
		return err;
	}

	bpf_object__for_each_program(prog, obj) {
		err = config_bpf_program(prog);
		if (err)
			goto out;

		priv = bpf_program__priv(prog);
		if (IS_ERR(priv) || !priv) {
			err = PTR_ERR(priv);
			goto out;
		}

		if (priv->is_tp) {
			bpf_program__set_tracepoint(prog);
			continue;
		}

		bpf_program__set_kprobe(prog);
		pev = &priv->pev;

		err = convert_perf_probe_events(pev, 1);
		if (err < 0) {
			pr_debug("bpf_probe: failed to convert perf probe events\n");
			goto out;
		}

		err = apply_perf_probe_events(pev, 1);
		if (err < 0) {
			pr_debug("bpf_probe: failed to apply perf probe events\n");
			goto out;
		}

		/*
		 * After probing, consider the prologue, which adds an
		 * argument fetcher to BPF programs.
		 *
		 * hook_load_preprocessor() hooks a pre-processor to the
		 * bpf_program, letting it generate the prologue
		 * dynamically during loading.
		 */
		err = hook_load_preprocessor(prog);
		if (err)
			goto out;
	}
out:
	return err < 0 ? err : 0;
}

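/* Remove the probe points installed by bpf__probe() for @obj. */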
#define EVENTS_WRITE_BUFSIZE 4096
int bpf__unprobe(struct bpf_object *obj)
{
	int err, ret = 0;
	struct bpf_program *prog;

	bpf_object__for_each_program(prog, obj) {
		struct bpf_prog_priv *priv = bpf_program__priv(prog);
		int i;

		if (IS_ERR(priv) || !priv || priv->is_tp)
			continue;

		for (i = 0; i < priv->pev.ntevs; i++) {
			struct probe_trace_event *tev = &priv->pev.tevs[i];
			char name_buf[EVENTS_WRITE_BUFSIZE];
			struct strfilter *delfilter;

			snprintf(name_buf, EVENTS_WRITE_BUFSIZE,
				 "%s:%s", tev->group, tev->event);
			name_buf[EVENTS_WRITE_BUFSIZE - 1] = '\0';

			delfilter = strfilter__new(name_buf, NULL);
			if (!delfilter) {
				pr_debug("Failed to create filter for unprobing\n");
				ret = -ENOMEM;
				continue;
			}

			err = del_perf_probe_events(delfilter);
			strfilter__delete(delfilter);
			if (err) {
				pr_debug("Failed to delete %s\n", name_buf);
				ret = err;
				continue;
			}
		}
	}
	return ret;
}

int bpf__load(struct bpf_object *obj)
{
	int err;

	err = bpf_object__load(obj);
	if (err) {
		char bf[128];
		libbpf_strerror(err, bf, sizeof(bf));
		pr_debug("bpf: load objects failed: err=%d: (%s)\n", err, bf);
		return err;
	}
	return 0;
}

int bpf__foreach_event(struct bpf_object *obj,
		       bpf_prog_iter_callback_t func,
		       void *arg)
{
	struct bpf_program *prog;
	int err;

	bpf_object__for_each_program(prog, obj) {
		struct bpf_prog_priv *priv = bpf_program__priv(prog);
		struct probe_trace_event *tev;
		struct perf_probe_event *pev;
		int i, fd;

		if (IS_ERR(priv) || !priv) {
			pr_debug("bpf: failed to get private field\n");
			return -BPF_LOADER_ERRNO__INTERNAL;
		}

		if (priv->is_tp) {
			fd = bpf_program__fd(prog);
			err = (*func)(priv->sys_name, priv->evt_name, fd, obj, arg);
			if (err) {
				pr_debug("bpf: tracepoint callback failed, stop iterating\n");
				return err;
			}
			continue;
		}

		pev = &priv->pev;
		for (i = 0; i < pev->ntevs; i++) {
			tev = &pev->tevs[i];

			if (priv->need_prologue) {
				int type = priv->type_mapping[i];

				fd = bpf_program__nth_fd(prog, type);
			} else {
				fd = bpf_program__fd(prog);
			}

			if (fd < 0) {
				pr_debug("bpf: failed to get file descriptor\n");
				return fd;
			}

			err = (*func)(tev->group, tev->event, fd, obj, arg);
			if (err) {
				pr_debug("bpf: callback failed, stop iterating\n");
				return err;
			}
		}
	}
	return 0;
}

enum bpf_map_op_type {
	BPF_MAP_OP_SET_VALUE,
	BPF_MAP_OP_SET_EVSEL,
};

enum bpf_map_key_type {
	BPF_MAP_KEY_ALL,
	BPF_MAP_KEY_RANGES,
};

struct bpf_map_op {
	struct list_head list;
	enum bpf_map_op_type op_type;
	enum bpf_map_key_type key_type;
	union {
		struct parse_events_array array;
	} k;
	union {
		u64 value;
		struct evsel *evsel;
	} v;
};

struct bpf_map_priv {
	struct list_head ops_list;
};

static void
bpf_map_op__delete(struct bpf_map_op *op)
{
	if (!list_empty(&op->list))
		list_del_init(&op->list);
	if (op->key_type == BPF_MAP_KEY_RANGES)
		parse_events__clear_array(&op->k.array);
	free(op);
}

static void
bpf_map_priv__purge(struct bpf_map_priv *priv)
{
	struct bpf_map_op *pos, *n;

	list_for_each_entry_safe(pos, n, &priv->ops_list, list) {
		list_del_init(&pos->list);
		bpf_map_op__delete(pos);
	}
}

static void
bpf_map_priv__clear(struct bpf_map *map __maybe_unused,
		    void *_priv)
{
	struct bpf_map_priv *priv = _priv;

	bpf_map_priv__purge(priv);
	free(priv);
}

static int
bpf_map_op_setkey(struct bpf_map_op *op, struct parse_events_term *term)
{
	op->key_type = BPF_MAP_KEY_ALL;
	if (!term)
		return 0;

	if (term->array.nr_ranges) {
		size_t memsz = term->array.nr_ranges *
				sizeof(op->k.array.ranges[0]);

		op->k.array.ranges = memdup(term->array.ranges, memsz);
		if (!op->k.array.ranges) {
			pr_debug("Not enough memory to alloc indices for map\n");
			return -ENOMEM;
		}
		op->key_type = BPF_MAP_KEY_RANGES;
		op->k.array.nr_ranges = term->array.nr_ranges;
	}
	return 0;
}

static struct bpf_map_op *
bpf_map_op__new(struct parse_events_term *term)
{
	struct bpf_map_op *op;
	int err;

	op = zalloc(sizeof(*op));
	if (!op) {
		pr_debug("Failed to alloc bpf_map_op\n");
		return ERR_PTR(-ENOMEM);
	}
	INIT_LIST_HEAD(&op->list);

	err = bpf_map_op_setkey(op, term);
	if (err) {
		free(op);
		return ERR_PTR(err);
	}
	return op;
}

static struct bpf_map_op *
bpf_map_op__clone(struct bpf_map_op *op)
{
	struct bpf_map_op *newop;

	newop = memdup(op, sizeof(*op));
	if (!newop) {
		pr_debug("Failed to alloc bpf_map_op\n");
		return NULL;
	}

	INIT_LIST_HEAD(&newop->list);
	if (op->key_type == BPF_MAP_KEY_RANGES) {
		size_t memsz = op->k.array.nr_ranges *
			       sizeof(op->k.array.ranges[0]);

		newop->k.array.ranges = memdup(op->k.array.ranges, memsz);
		if (!newop->k.array.ranges) {
			pr_debug("Failed to alloc indices for map\n");
			free(newop);
			return NULL;
		}
	}

	return newop;
}

static struct bpf_map_priv *
bpf_map_priv__clone(struct bpf_map_priv *priv)
{
	struct bpf_map_priv *newpriv;
	struct bpf_map_op *pos, *newop;

	newpriv = zalloc(sizeof(*newpriv));
	if (!newpriv) {
		pr_debug("Not enough memory to alloc map private\n");
		return NULL;
	}
	INIT_LIST_HEAD(&newpriv->ops_list);

	list_for_each_entry(pos, &priv->ops_list, list) {
		newop = bpf_map_op__clone(pos);
		if (!newop) {
			bpf_map_priv__purge(newpriv);
			return NULL;
		}
		list_add_tail(&newop->list, &newpriv->ops_list);
	}

	return newpriv;
}

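/*
 * Queue @op on the map's ops list, allocating and attaching the
 * private data on first use.
 */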
static int
bpf_map__add_op(struct bpf_map *map, struct bpf_map_op *op)
{
	const char *map_name = bpf_map__name(map);
	struct bpf_map_priv *priv = bpf_map__priv(map);

	if (IS_ERR(priv)) {
		pr_debug("Failed to get private from map %s\n", map_name);
		return PTR_ERR(priv);
	}

	if (!priv) {
		priv = zalloc(sizeof(*priv));
		if (!priv) {
			pr_debug("Not enough memory to alloc map private\n");
			return -ENOMEM;
		}
		INIT_LIST_HEAD(&priv->ops_list);

		if (bpf_map__set_priv(map, priv, bpf_map_priv__clear)) {
			free(priv);
			return -BPF_LOADER_ERRNO__INTERNAL;
		}
	}

	list_add_tail(&op->list, &priv->ops_list);
	return 0;
}

static struct bpf_map_op *
bpf_map__add_newop(struct bpf_map *map, struct parse_events_term *term)
{
	struct bpf_map_op *op;
	int err;

	op = bpf_map_op__new(term);
	if (IS_ERR(op))
		return op;

	err = bpf_map__add_op(map, op);
	if (err) {
		bpf_map_op__delete(op);
		return ERR_PTR(err);
	}
	return op;
}

static int
__bpf_map__config_value(struct bpf_map *map,
			struct parse_events_term *term)
{
	struct bpf_map_op *op;
	const char *map_name = bpf_map__name(map);
	const struct bpf_map_def *def = bpf_map__def(map);

	if (IS_ERR(def)) {
		pr_debug("Unable to get map definition from '%s'\n",
			 map_name);
		return -BPF_LOADER_ERRNO__INTERNAL;
	}

	if (def->type != BPF_MAP_TYPE_ARRAY) {
		pr_debug("Map %s type is not BPF_MAP_TYPE_ARRAY\n",
			 map_name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE;
	}
	if (def->key_size < sizeof(unsigned int)) {
		pr_debug("Map %s has incorrect key size\n", map_name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_KEYSIZE;
	}
	switch (def->value_size) {
	case 1:
	case 2:
	case 4:
	case 8:
		break;
	default:
		pr_debug("Map %s has incorrect value size\n", map_name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUESIZE;
	}

	op = bpf_map__add_newop(map, term);
	if (IS_ERR(op))
		return PTR_ERR(op);
	op->op_type = BPF_MAP_OP_SET_VALUE;
	op->v.value = term->val.num;
	return 0;
}

static int
bpf_map__config_value(struct bpf_map *map,
		      struct parse_events_term *term,
		      struct evlist *evlist __maybe_unused)
{
	if (!term->err_val) {
		pr_debug("Config value not set\n");
		return -BPF_LOADER_ERRNO__OBJCONF_CONF;
	}

	if (term->type_val != PARSE_EVENTS__TERM_TYPE_NUM) {
		pr_debug("ERROR: wrong value type for 'value'\n");
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUE;
	}

	return __bpf_map__config_value(map, term);
}

static int
__bpf_map__config_event(struct bpf_map *map,
			struct parse_events_term *term,
			struct evlist *evlist)
{
	struct evsel *evsel;
	const struct bpf_map_def *def;
	struct bpf_map_op *op;
	const char *map_name = bpf_map__name(map);

	evsel = perf_evlist__find_evsel_by_str(evlist, term->val.str);
	if (!evsel) {
		pr_debug("Event (for '%s') '%s' doesn't exist\n",
			 map_name, term->val.str);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_NOEVT;
	}

	def = bpf_map__def(map);
	if (IS_ERR(def)) {
		pr_debug("Unable to get map definition from '%s'\n",
			 map_name);
		return PTR_ERR(def);
	}

	/*
	 * No need to check key_size and value_size:
	 * kernel has already checked them.
	 */
	if (def->type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) {
		pr_debug("Map %s type is not BPF_MAP_TYPE_PERF_EVENT_ARRAY\n",
			 map_name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE;
	}

	op = bpf_map__add_newop(map, term);
	if (IS_ERR(op))
		return PTR_ERR(op);
	op->op_type = BPF_MAP_OP_SET_EVSEL;
	op->v.evsel = evsel;
	return 0;
}

static int
bpf_map__config_event(struct bpf_map *map,
		      struct parse_events_term *term,
		      struct evlist *evlist)
{
	if (!term->err_val) {
		pr_debug("Config value not set\n");
		return -BPF_LOADER_ERRNO__OBJCONF_CONF;
	}

	if (term->type_val != PARSE_EVENTS__TERM_TYPE_STR) {
		pr_debug("ERROR: wrong value type for 'event'\n");
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUE;
	}

	return __bpf_map__config_event(map, term, evlist);
}

struct bpf_obj_config__map_func {
	const char *config_opt;
	int (*config_func)(struct bpf_map *, struct parse_events_term *,
			   struct evlist *);
};

struct bpf_obj_config__map_func bpf_obj_config__map_funcs[] = {
	{"value", bpf_map__config_value},
	{"event", bpf_map__config_event},
};

static int
config_map_indices_range_check(struct parse_events_term *term,
			       struct bpf_map *map,
			       const char *map_name)
{
	struct parse_events_array *array = &term->array;
	const struct bpf_map_def *def;
	unsigned int i;

	if (!array->nr_ranges)
		return 0;
	if (!array->ranges) {
		pr_debug("ERROR: map %s: array->nr_ranges is %d but range array is NULL\n",
			 map_name, (int)array->nr_ranges);
		return -BPF_LOADER_ERRNO__INTERNAL;
	}

	def = bpf_map__def(map);
	if (IS_ERR(def)) {
		pr_debug("ERROR: Unable to get map definition from '%s'\n",
			 map_name);
		return -BPF_LOADER_ERRNO__INTERNAL;
	}

	for (i = 0; i < array->nr_ranges; i++) {
		unsigned int start = array->ranges[i].start;
		size_t length = array->ranges[i].length;
		unsigned int idx = start + length - 1;

		if (idx >= def->max_entries) {
			pr_debug("ERROR: index %d too large\n", idx);
			return -BPF_LOADER_ERRNO__OBJCONF_MAP_IDX2BIG;
		}
	}
	return 0;
}

static int
bpf__obj_config_map(struct bpf_object *obj,
		    struct parse_events_term *term,
		    struct evlist *evlist,
		    int *key_scan_pos)
{
	/* key is "map:<mapname>.<config opt>" */
	char *map_name = strdup(term->config + sizeof("map:") - 1);
	struct bpf_map *map;
	int err = -BPF_LOADER_ERRNO__OBJCONF_OPT;
	char *map_opt;
	size_t i;

	if (!map_name)
		return -ENOMEM;

	map_opt = strchr(map_name, '.');
	if (!map_opt) {
		pr_debug("ERROR: Invalid map config: %s\n", map_name);
		goto out;
	}

	*map_opt++ = '\0';
	if (*map_opt == '\0') {
		pr_debug("ERROR: Invalid map option: %s\n", term->config);
		goto out;
	}

	map = bpf_object__find_map_by_name(obj, map_name);
	if (!map) {
		pr_debug("ERROR: Map %s doesn't exist\n", map_name);
		err = -BPF_LOADER_ERRNO__OBJCONF_MAP_NOTEXIST;
		goto out;
	}

	*key_scan_pos += strlen(map_opt);
	err = config_map_indices_range_check(term, map, map_name);
	if (err)
		goto out;
	*key_scan_pos -= strlen(map_opt);

	for (i = 0; i < ARRAY_SIZE(bpf_obj_config__map_funcs); i++) {
		struct bpf_obj_config__map_func *func =
				&bpf_obj_config__map_funcs[i];

		if (strcmp(map_opt, func->config_opt) == 0) {
			err = func->config_func(map, term, evlist);
			goto out;
		}
	}

	pr_debug("ERROR: Invalid map config option '%s'\n", map_opt);
	err = -BPF_LOADER_ERRNO__OBJCONF_MAP_OPT;
out:
	if (!err)
		*key_scan_pos += strlen(map_opt);
	free(map_name);
	return err;
}

int bpf__config_obj(struct bpf_object *obj,
		    struct parse_events_term *term,
		    struct evlist *evlist,
		    int *error_pos)
{
	int key_scan_pos = 0;
	int err;

	if (!obj || !term || !term->config)
		return -EINVAL;

	if (strstarts(term->config, "map:")) {
		key_scan_pos = sizeof("map:") - 1;
		err = bpf__obj_config_map(obj, term, evlist, &key_scan_pos);
		goto out;
	}
	err = -BPF_LOADER_ERRNO__OBJCONF_OPT;
out:
	if (error_pos)
		*error_pos = key_scan_pos;
	return err;
}

typedef int (*map_config_func_t)(const char *name, int map_fd,
				 const struct bpf_map_def *pdef,
				 struct bpf_map_op *op,
				 void *pkey, void *arg);

static int
foreach_key_array_all(map_config_func_t func,
		      void *arg, const char *name,
		      int map_fd, const struct bpf_map_def *pdef,
		      struct bpf_map_op *op)
{
	unsigned int i;
	int err;

	for (i = 0; i < pdef->max_entries; i++) {
		err = func(name, map_fd, pdef, op, &i, arg);
		if (err) {
			pr_debug("ERROR: failed to insert value to %s[%u]\n",
				 name, i);
			return err;
		}
	}
	return 0;
}

static int
foreach_key_array_ranges(map_config_func_t func, void *arg,
			 const char *name, int map_fd,
			 const struct bpf_map_def *pdef,
			 struct bpf_map_op *op)
{
	unsigned int i, j;
	int err;

	for (i = 0; i < op->k.array.nr_ranges; i++) {
		unsigned int start = op->k.array.ranges[i].start;
		size_t length = op->k.array.ranges[i].length;

		for (j = 0; j < length; j++) {
			unsigned int idx = start + j;

			err = func(name, map_fd, pdef, op, &idx, arg);
			if (err) {
				pr_debug("ERROR: failed to insert value to %s[%u]\n",
					 name, idx);
				return err;
			}
		}
	}
	return 0;
}

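/*
 * Walk every queued op of @map and call @func once per affected key,
 * expanding BPF_MAP_KEY_ALL and BPF_MAP_KEY_RANGES into concrete
 * indices.
 */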
static int
bpf_map_config_foreach_key(struct bpf_map *map,
			   map_config_func_t func,
			   void *arg)
{
	int err, map_fd;
	struct bpf_map_op *op;
	const struct bpf_map_def *def;
	const char *name = bpf_map__name(map);
	struct bpf_map_priv *priv = bpf_map__priv(map);

	if (IS_ERR(priv)) {
		pr_debug("ERROR: failed to get private from map %s\n", name);
		return -BPF_LOADER_ERRNO__INTERNAL;
	}
	if (!priv || list_empty(&priv->ops_list)) {
		pr_debug("INFO: nothing to config for map %s\n", name);
		return 0;
	}

	def = bpf_map__def(map);
	if (IS_ERR(def)) {
		pr_debug("ERROR: failed to get definition from map %s\n", name);
		return -BPF_LOADER_ERRNO__INTERNAL;
	}
	map_fd = bpf_map__fd(map);
	if (map_fd < 0) {
		pr_debug("ERROR: failed to get fd from map %s\n", name);
		return map_fd;
	}

	list_for_each_entry(op, &priv->ops_list, list) {
		switch (def->type) {
		case BPF_MAP_TYPE_ARRAY:
		case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
			switch (op->key_type) {
			case BPF_MAP_KEY_ALL:
				err = foreach_key_array_all(func, arg, name,
							    map_fd, def, op);
				break;
			case BPF_MAP_KEY_RANGES:
				err = foreach_key_array_ranges(func, arg, name,
							       map_fd, def,
							       op);
				break;
			default:
				pr_debug("ERROR: keytype for map '%s' invalid\n",
					 name);
				return -BPF_LOADER_ERRNO__INTERNAL;
			}
			if (err)
				return err;
			break;
		default:
			pr_debug("ERROR: type of '%s' incorrect\n", name);
			return -BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE;
		}
	}

	return 0;
}

static int
apply_config_value_for_key(int map_fd, void *pkey,
			   size_t val_size, u64 val)
{
	int err = 0;

	switch (val_size) {
	case 1: {
		u8 _val = (u8)(val);
		err = bpf_map_update_elem(map_fd, pkey, &_val, BPF_ANY);
		break;
	}
	case 2: {
		u16 _val = (u16)(val);
		err = bpf_map_update_elem(map_fd, pkey, &_val, BPF_ANY);
		break;
	}
	case 4: {
		u32 _val = (u32)(val);
		err = bpf_map_update_elem(map_fd, pkey, &_val, BPF_ANY);
		break;
	}
	case 8: {
		err = bpf_map_update_elem(map_fd, pkey, &val, BPF_ANY);
		break;
	}
	default:
		pr_debug("ERROR: invalid value size\n");
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUESIZE;
	}
	if (err && errno)
		err = -errno;
	return err;
}

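/*
 * Store the perf event fd of @evsel into @map_fd at @pkey, after
 * checking that the event type is allowed in a
 * BPF_MAP_TYPE_PERF_EVENT_ARRAY.
 */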
static int
apply_config_evsel_for_key(const char *name, int map_fd, void *pkey,
			   struct evsel *evsel)
{
	struct xyarray *xy = evsel->core.fd;
	struct perf_event_attr *attr;
	unsigned int key, events;
	bool check_pass = false;
	int *evt_fd;
	int err;

	if (!xy) {
		pr_debug("ERROR: evsel not ready for map %s\n", name);
		return -BPF_LOADER_ERRNO__INTERNAL;
	}

	if (xy->row_size / xy->entry_size != 1) {
		pr_debug("ERROR: Dimension of target event is incorrect for map %s\n",
			 name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_EVTDIM;
	}

	attr = &evsel->core.attr;
	if (attr->inherit) {
		pr_debug("ERROR: Can't put inherit event into map %s\n", name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_EVTINH;
	}

	if (perf_evsel__is_bpf_output(evsel))
		check_pass = true;
	if (attr->type == PERF_TYPE_RAW)
		check_pass = true;
	if (attr->type == PERF_TYPE_HARDWARE)
		check_pass = true;
	if (!check_pass) {
		pr_debug("ERROR: Event type is wrong for map %s\n", name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_EVTTYPE;
	}

	events = xy->entries / (xy->row_size / xy->entry_size);
	key = *((unsigned int *)pkey);
	if (key >= events) {
		pr_debug("ERROR: there is no event %d for map %s\n",
			 key, name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_MAPSIZE;
	}
	evt_fd = xyarray__entry(xy, key, 0);
	err = bpf_map_update_elem(map_fd, pkey, evt_fd, BPF_ANY);
	if (err && errno)
		err = -errno;
	return err;
}

static int
apply_obj_config_map_for_key(const char *name, int map_fd,
			     const struct bpf_map_def *pdef,
			     struct bpf_map_op *op,
			     void *pkey, void *arg __maybe_unused)
{
	int err;

	switch (op->op_type) {
	case BPF_MAP_OP_SET_VALUE:
		err = apply_config_value_for_key(map_fd, pkey,
						 pdef->value_size,
						 op->v.value);
		break;
	case BPF_MAP_OP_SET_EVSEL:
		err = apply_config_evsel_for_key(name, map_fd, pkey,
						 op->v.evsel);
		break;
	default:
		pr_debug("ERROR: unknown value type for '%s'\n", name);
		err = -BPF_LOADER_ERRNO__INTERNAL;
	}
	return err;
}

static int
apply_obj_config_map(struct bpf_map *map)
{
	return bpf_map_config_foreach_key(map,
					  apply_obj_config_map_for_key,
					  NULL);
}

static int
apply_obj_config_object(struct bpf_object *obj)
{
	struct bpf_map *map;
	int err;

	bpf_object__for_each_map(map, obj) {
		err = apply_obj_config_map(map);
		if (err)
			return err;
	}
	return 0;
}

int bpf__apply_obj_config(void)
{
	struct bpf_object *obj, *tmp;
	int err;

	bpf_object__for_each_safe(obj, tmp) {
		err = apply_obj_config_object(obj);
		if (err)
			return err;
	}

	return 0;
}

#define bpf__for_each_map(pos, obj, objtmp)	\
	bpf_object__for_each_safe(obj, objtmp)	\
		bpf_object__for_each_map(pos, obj)

#define bpf__for_each_map_named(pos, obj, objtmp, name)	\
	bpf__for_each_map(pos, obj, objtmp)		\
		if (bpf_map__name(pos) &&		\
		    (strcmp(name,			\
			    bpf_map__name(pos)) == 0))

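/*
 * Ensure each map named @name in the loaded objects has an output
 * event to write to: reuse an already-configured map's setting as a
 * template, or create a new bpf-output evsel and attach it.
 */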
struct evsel *bpf__setup_output_event(struct evlist *evlist, const char *name)
{
	struct bpf_map_priv *tmpl_priv = NULL;
	struct bpf_object *obj, *tmp;
	struct evsel *evsel = NULL;
	struct bpf_map *map;
	int err;
	bool need_init = false;

	bpf__for_each_map_named(map, obj, tmp, name) {
		struct bpf_map_priv *priv = bpf_map__priv(map);

		if (IS_ERR(priv))
			return ERR_PTR(-BPF_LOADER_ERRNO__INTERNAL);

		/*
		 * No need to check map type: type should have been
		 * verified by kernel.
		 */
		if (!need_init && !priv)
			need_init = !priv;
		if (!tmpl_priv && priv)
			tmpl_priv = priv;
	}

	if (!need_init)
		return NULL;

	if (!tmpl_priv) {
		char *event_definition = NULL;

		if (asprintf(&event_definition, "bpf-output/no-inherit=1,name=%s/", name) < 0)
			return ERR_PTR(-ENOMEM);

		err = parse_events(evlist, event_definition, NULL);
		free(event_definition);

		if (err) {
			pr_debug("ERROR: failed to create the \"%s\" bpf-output event\n", name);
			return ERR_PTR(-err);
		}

		evsel = evlist__last(evlist);
	}

	bpf__for_each_map_named(map, obj, tmp, name) {
		struct bpf_map_priv *priv = bpf_map__priv(map);

		if (IS_ERR(priv))
			return ERR_PTR(-BPF_LOADER_ERRNO__INTERNAL);
		if (priv)
			continue;

		if (tmpl_priv) {
			priv = bpf_map_priv__clone(tmpl_priv);
			if (!priv)
				return ERR_PTR(-ENOMEM);

			err = bpf_map__set_priv(map, priv, bpf_map_priv__clear);
			if (err) {
				bpf_map_priv__clear(map, priv);
				return ERR_PTR(err);
			}
		} else if (evsel) {
			struct bpf_map_op *op;

			op = bpf_map__add_newop(map, NULL);
			if (IS_ERR(op))
				return ERR_CAST(op);
			op->op_type = BPF_MAP_OP_SET_EVSEL;
			op->v.evsel = evsel;
		}
	}

	return evsel;
}

int bpf__setup_stdout(struct evlist *evlist)
{
	struct evsel *evsel = bpf__setup_output_event(evlist, "__bpf_stdout__");
	return PTR_ERR_OR_ZERO(evsel);
}

#define ERRNO_OFFSET(e)		((e) - __BPF_LOADER_ERRNO__START)
#define ERRCODE_OFFSET(c)	ERRNO_OFFSET(BPF_LOADER_ERRNO__##c)
#define NR_ERRNO	(__BPF_LOADER_ERRNO__END - __BPF_LOADER_ERRNO__START)

static const char *bpf_loader_strerror_table[NR_ERRNO] = {
	[ERRCODE_OFFSET(CONFIG)]	= "Invalid config string",
	[ERRCODE_OFFSET(GROUP)]		= "Invalid group name",
	[ERRCODE_OFFSET(EVENTNAME)]	= "No event name found in config string",
	[ERRCODE_OFFSET(INTERNAL)]	= "BPF loader internal error",
	[ERRCODE_OFFSET(COMPILE)]	= "Error when compiling BPF scriptlet",
	[ERRCODE_OFFSET(PROGCONF_TERM)]	= "Invalid program config term in config string",
	[ERRCODE_OFFSET(PROLOGUE)]	= "Failed to generate prologue",
	[ERRCODE_OFFSET(PROLOGUE2BIG)]	= "Prologue too big for program",
	[ERRCODE_OFFSET(PROLOGUEOOB)]	= "Offset out of bound for prologue",
	[ERRCODE_OFFSET(OBJCONF_OPT)]	= "Invalid object config option",
	[ERRCODE_OFFSET(OBJCONF_CONF)]	= "Config value not set (missing '=')",
	[ERRCODE_OFFSET(OBJCONF_MAP_OPT)]	= "Invalid object map config option",
	[ERRCODE_OFFSET(OBJCONF_MAP_NOTEXIST)]	= "Target map doesn't exist",
	[ERRCODE_OFFSET(OBJCONF_MAP_VALUE)]	= "Incorrect value type for map",
	[ERRCODE_OFFSET(OBJCONF_MAP_TYPE)]	= "Incorrect map type",
	[ERRCODE_OFFSET(OBJCONF_MAP_KEYSIZE)]	= "Incorrect map key size",
	[ERRCODE_OFFSET(OBJCONF_MAP_VALUESIZE)]	= "Incorrect map value size",
	[ERRCODE_OFFSET(OBJCONF_MAP_NOEVT)]	= "Event not found for map setting",
	[ERRCODE_OFFSET(OBJCONF_MAP_MAPSIZE)]	= "Invalid map size for event setting",
	[ERRCODE_OFFSET(OBJCONF_MAP_EVTDIM)]	= "Event dimension too large",
	[ERRCODE_OFFSET(OBJCONF_MAP_EVTINH)]	= "Doesn't support inherit event",
	[ERRCODE_OFFSET(OBJCONF_MAP_EVTTYPE)]	= "Wrong event type for map",
	[ERRCODE_OFFSET(OBJCONF_MAP_IDX2BIG)]	= "Index too large",
};

static int
bpf_loader_strerror(int err, char *buf, size_t size)
{
	char sbuf[STRERR_BUFSIZE];
	const char *msg;

	if (!buf || !size)
		return -1;

	err = err > 0 ? err : -err;

	if (err >= __LIBBPF_ERRNO__START)
		return libbpf_strerror(err, buf, size);

	if (err >= __BPF_LOADER_ERRNO__START && err < __BPF_LOADER_ERRNO__END) {
		msg = bpf_loader_strerror_table[ERRNO_OFFSET(err)];
		snprintf(buf, size, "%s", msg);
		buf[size - 1] = '\0';
		return 0;
	}

	if (err >= __BPF_LOADER_ERRNO__END)
		snprintf(buf, size, "Unknown bpf loader error %d", err);
	else
		snprintf(buf, size, "%s",
			 str_error_r(err, sbuf, sizeof(sbuf)));

	buf[size - 1] = '\0';
	return -1;
}

#define bpf__strerror_head(err, buf, size) \
	char sbuf[STRERR_BUFSIZE], *emsg;\
	if (!size)\
		return 0;\
	if (err < 0)\
		err = -err;\
	bpf_loader_strerror(err, sbuf, sizeof(sbuf));\
	emsg = sbuf;\
	switch (err) {\
	default:\
		scnprintf(buf, size, "%s", emsg);\
		break;

#define bpf__strerror_entry(val, fmt...)\
	case val: {\
		scnprintf(buf, size, fmt);\
		break;\
	}

#define bpf__strerror_end(buf, size)\
	}\
	buf[size - 1] = '\0';

int bpf__strerror_prepare_load(const char *filename, bool source,
			       int err, char *buf, size_t size)
{
	size_t n;
	int ret;

	n = snprintf(buf, size, "Failed to load %s%s: ",
		     filename, source ? " from source" : "");
	if (n >= size) {
		buf[size - 1] = '\0';
		return 0;
	}
	buf += n;
	size -= n;

	ret = bpf_loader_strerror(err, buf, size);
	buf[size - 1] = '\0';
	return ret;
}

int bpf__strerror_probe(struct bpf_object *obj __maybe_unused,
			int err, char *buf, size_t size)
{
	bpf__strerror_head(err, buf, size);
	case BPF_LOADER_ERRNO__PROGCONF_TERM: {
		scnprintf(buf, size, "%s (add -v to see detail)", emsg);
		break;
	}
	bpf__strerror_entry(EEXIST, "Probe point exists. Try 'perf probe -d \"*\"' and set 'force=yes'");
	bpf__strerror_entry(EACCES, "You need to be root");
	bpf__strerror_entry(EPERM, "You need to be root, and /proc/sys/kernel/kptr_restrict should be 0");
	bpf__strerror_entry(ENOENT, "You need to check probing points in BPF file");
	bpf__strerror_end(buf, size);
	return 0;
}

int bpf__strerror_load(struct bpf_object *obj,
		       int err, char *buf, size_t size)
{
	bpf__strerror_head(err, buf, size);
	case LIBBPF_ERRNO__KVER: {
		unsigned int obj_kver = bpf_object__kversion(obj);
		unsigned int real_kver;

		if (fetch_kernel_version(&real_kver, NULL, 0)) {
			scnprintf(buf, size, "Unable to fetch kernel version");
			break;
		}

		if (obj_kver != real_kver) {
			scnprintf(buf, size,
				  "'version' ("KVER_FMT") doesn't match running kernel ("KVER_FMT")",
				  KVER_PARAM(obj_kver),
				  KVER_PARAM(real_kver));
			break;
		}

		scnprintf(buf, size, "Failed to load program for unknown reason");
		break;
	}
	bpf__strerror_end(buf, size);
	return 0;
}

int bpf__strerror_config_obj(struct bpf_object *obj __maybe_unused,
			     struct parse_events_term *term __maybe_unused,
			     struct evlist *evlist __maybe_unused,
			     int *error_pos __maybe_unused, int err,
			     char *buf, size_t size)
{
	bpf__strerror_head(err, buf, size);
	bpf__strerror_entry(BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE,
			    "Can't use this config term with this map type");
	bpf__strerror_end(buf, size);
	return 0;
}

int bpf__strerror_apply_obj_config(int err, char *buf, size_t size)
{
	bpf__strerror_head(err, buf, size);
	bpf__strerror_entry(BPF_LOADER_ERRNO__OBJCONF_MAP_EVTDIM,
			    "Cannot set event to BPF map in multi-thread tracing");
	bpf__strerror_entry(BPF_LOADER_ERRNO__OBJCONF_MAP_EVTINH,
			    "%s (Hint: use -i to turn off inherit)", emsg);
	bpf__strerror_entry(BPF_LOADER_ERRNO__OBJCONF_MAP_EVTTYPE,
			    "Can only put raw, hardware and BPF output event into a BPF map");
	bpf__strerror_end(buf, size);
	return 0;
}

int bpf__strerror_setup_output_event(struct evlist *evlist __maybe_unused,
				     int err, char *buf, size_t size)
{
	bpf__strerror_head(err, buf, size);
	bpf__strerror_end(buf, size);
	return 0;
}