// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2016 Facebook
 */
#include <linux/bpf.h>
#include <linux/jhash.h>
#include <linux/filter.h>
#include <linux/stacktrace.h>
#include <linux/perf_event.h>
#include <linux/elf.h>
#include <linux/pagemap.h>
#include <linux/irq_work.h>
#include "percpu_freelist.h"

#define STACK_CREATE_FLAG_MASK \
	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY | \
	 BPF_F_STACK_BUILD_ID)

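/*
 * Each bucket holds one deduplicated stack trace. data[] stores either raw
 * instruction pointers (one u64 per frame) or struct bpf_stack_build_id
 * entries when the map was created with BPF_F_STACK_BUILD_ID.
 */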
struct stack_map_bucket {
	struct pcpu_freelist_node fnode;
	u32 hash;
	u32 nr;
	u64 data[];
};

struct bpf_stack_map {
	struct bpf_map map;
	void *elems;
	struct pcpu_freelist freelist;
	u32 n_buckets;
	struct stack_map_bucket *buckets[];
};

/* irq_work to run up_read() for build_id lookup in nmi context */
struct stack_map_irq_work {
	struct irq_work irq_work;
	struct rw_semaphore *sem;
};

static void do_up_read(struct irq_work *entry)
{
	struct stack_map_irq_work *work;

	work = container_of(entry, struct stack_map_irq_work, irq_work);
	up_read_non_owner(work->sem);
	work->sem = NULL;
}

static DEFINE_PER_CPU(struct stack_map_irq_work, up_read_work);

static inline bool stack_map_use_build_id(struct bpf_map *map)
{
	return (map->map_flags & BPF_F_STACK_BUILD_ID);
}

static inline int stack_map_data_size(struct bpf_map *map)
{
	return stack_map_use_build_id(map) ?
		sizeof(struct bpf_stack_build_id) : sizeof(u64);
}

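/*
 * All buckets are allocated up front and handed out through a per-cpu
 * freelist, so bpf_get_stackid() never has to allocate memory while it may
 * be running in NMI or hard-irq context.
 */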
static int prealloc_elems_and_freelist(struct bpf_stack_map *smap)
{
	u32 elem_size = sizeof(struct stack_map_bucket) + smap->map.value_size;
	int err;

	smap->elems = bpf_map_area_alloc(elem_size * smap->map.max_entries,
					 smap->map.numa_node);
	if (!smap->elems)
		return -ENOMEM;

	err = pcpu_freelist_init(&smap->freelist);
	if (err)
		goto free_elems;

	pcpu_freelist_populate(&smap->freelist, smap->elems, elem_size,
			       smap->map.max_entries);
	return 0;

free_elems:
	bpf_map_area_free(smap->elems);
	return err;
}

/* Called from syscall */
static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
{
	u32 value_size = attr->value_size;
	struct bpf_stack_map *smap;
	struct bpf_map_memory mem;
	u64 cost, n_buckets;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return ERR_PTR(-EPERM);

	if (attr->map_flags & ~STACK_CREATE_FLAG_MASK)
		return ERR_PTR(-EINVAL);

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    value_size < 8 || value_size % 8)
		return ERR_PTR(-EINVAL);

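	/*
	 * With BPF_F_STACK_BUILD_ID each value slot is a struct
	 * bpf_stack_build_id instead of a bare u64 ip, so value_size must be
	 * a multiple of that struct and must not allow more entries than
	 * sysctl_perf_event_max_stack.
	 */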
	BUILD_BUG_ON(sizeof(struct bpf_stack_build_id) % sizeof(u64));
	if (attr->map_flags & BPF_F_STACK_BUILD_ID) {
		if (value_size % sizeof(struct bpf_stack_build_id) ||
		    value_size / sizeof(struct bpf_stack_build_id)
		    > sysctl_perf_event_max_stack)
			return ERR_PTR(-EINVAL);
	} else if (value_size / 8 > sysctl_perf_event_max_stack)
		return ERR_PTR(-EINVAL);

	/* hash table size must be power of 2 */
	n_buckets = roundup_pow_of_two(attr->max_entries);
	if (!n_buckets)
		return ERR_PTR(-E2BIG);

	cost = n_buckets * sizeof(struct stack_map_bucket *) + sizeof(*smap);
	cost += n_buckets * (value_size + sizeof(struct stack_map_bucket));
	err = bpf_map_charge_init(&mem, cost);
	if (err)
		return ERR_PTR(err);

	smap = bpf_map_area_alloc(cost, bpf_map_attr_numa_node(attr));
	if (!smap) {
		bpf_map_charge_finish(&mem);
		return ERR_PTR(-ENOMEM);
	}

	bpf_map_init_from_attr(&smap->map, attr);
	smap->map.value_size = value_size;
	smap->n_buckets = n_buckets;

	err = get_callchain_buffers(sysctl_perf_event_max_stack);
	if (err)
		goto free_charge;

	err = prealloc_elems_and_freelist(smap);
	if (err)
		goto put_buffers;

	bpf_map_charge_move(&smap->map.memory, &mem);

	return &smap->map;

put_buffers:
	put_callchain_buffers();
free_charge:
	bpf_map_charge_finish(&mem);
	bpf_map_area_free(smap);
	return ERR_PTR(err);
}

#define BPF_BUILD_ID 3
/*
 * Parse build id from the note segment. This logic can be shared between
 * 32-bit and 64-bit systems, because Elf32_Nhdr and Elf64_Nhdr are
 * identical.
 */
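/*
 * A note segment is a sequence of (Elf32_Nhdr, owner name, descriptor)
 * records, each field padded to 4 bytes; for the GNU build-id note the
 * owner name is "GNU" and the descriptor is the build id itself.
 */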
static inline int stack_map_parse_build_id(void *page_addr,
					   unsigned char *build_id,
					   void *note_start,
					   Elf32_Word note_size)
{
	Elf32_Word note_offs = 0, new_offs;

	/* check for overflow */
	if (note_start < page_addr || note_start + note_size < note_start)
		return -EINVAL;

	/* only supports a note that fits in the first page */
	if (note_start + note_size > page_addr + PAGE_SIZE)
		return -EINVAL;

	while (note_offs + sizeof(Elf32_Nhdr) < note_size) {
		Elf32_Nhdr *nhdr = (Elf32_Nhdr *)(note_start + note_offs);

		if (nhdr->n_type == BPF_BUILD_ID &&
		    nhdr->n_namesz == sizeof("GNU") &&
		    nhdr->n_descsz > 0 &&
		    nhdr->n_descsz <= BPF_BUILD_ID_SIZE) {
			memcpy(build_id,
			       note_start + note_offs +
			       ALIGN(sizeof("GNU"), 4) + sizeof(Elf32_Nhdr),
			       nhdr->n_descsz);
			memset(build_id + nhdr->n_descsz, 0,
			       BPF_BUILD_ID_SIZE - nhdr->n_descsz);
			return 0;
		}
		new_offs = note_offs + sizeof(Elf32_Nhdr) +
			ALIGN(nhdr->n_namesz, 4) + ALIGN(nhdr->n_descsz, 4);
		if (new_offs <= note_offs) /* overflow */
			break;
		note_offs = new_offs;
	}
	return -EINVAL;
}

/* Parse build ID from 32-bit ELF */
static int stack_map_get_build_id_32(void *page_addr,
				     unsigned char *build_id)
{
	Elf32_Ehdr *ehdr = (Elf32_Ehdr *)page_addr;
	Elf32_Phdr *phdr;
	int i;

	/* only supports phdr that fits in one page */
	if (ehdr->e_phnum >
	    (PAGE_SIZE - sizeof(Elf32_Ehdr)) / sizeof(Elf32_Phdr))
		return -EINVAL;

	phdr = (Elf32_Phdr *)(page_addr + sizeof(Elf32_Ehdr));

	for (i = 0; i < ehdr->e_phnum; ++i)
		if (phdr[i].p_type == PT_NOTE)
			return stack_map_parse_build_id(page_addr, build_id,
					page_addr + phdr[i].p_offset,
					phdr[i].p_filesz);
	return -EINVAL;
}

/* Parse build ID from 64-bit ELF */
static int stack_map_get_build_id_64(void *page_addr,
				     unsigned char *build_id)
{
	Elf64_Ehdr *ehdr = (Elf64_Ehdr *)page_addr;
	Elf64_Phdr *phdr;
	int i;

	/* only supports phdr that fits in one page */
	if (ehdr->e_phnum >
	    (PAGE_SIZE - sizeof(Elf64_Ehdr)) / sizeof(Elf64_Phdr))
		return -EINVAL;

	phdr = (Elf64_Phdr *)(page_addr + sizeof(Elf64_Ehdr));

	for (i = 0; i < ehdr->e_phnum; ++i)
		if (phdr[i].p_type == PT_NOTE)
			return stack_map_parse_build_id(page_addr, build_id,
					page_addr + phdr[i].p_offset,
					phdr[i].p_filesz);
	return -EINVAL;
}

/* Parse build ID of ELF file mapped to vma */
static int stack_map_get_build_id(struct vm_area_struct *vma,
				  unsigned char *build_id)
{
	Elf32_Ehdr *ehdr;
	struct page *page;
	void *page_addr;
	int ret;

	/* only works for page backed storage */
	if (!vma->vm_file)
		return -EINVAL;

	page = find_get_page(vma->vm_file->f_mapping, 0);
	if (!page)
		return -EFAULT;	/* page not mapped */

	ret = -EINVAL;
	page_addr = kmap_atomic(page);
	ehdr = (Elf32_Ehdr *)page_addr;

	/* compare magic: 0x7f "ELF" */
	if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG) != 0)
		goto out;

	/* only support executable files and shared object files */
	if (ehdr->e_type != ET_EXEC && ehdr->e_type != ET_DYN)
		goto out;

	if (ehdr->e_ident[EI_CLASS] == ELFCLASS32)
		ret = stack_map_get_build_id_32(page_addr, build_id);
	else if (ehdr->e_ident[EI_CLASS] == ELFCLASS64)
		ret = stack_map_get_build_id_64(page_addr, build_id);
out:
	kunmap_atomic(page_addr);
	put_page(page);
	return ret;
}

static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
					  u64 *ips, u32 trace_nr, bool user)
{
	int i;
	struct vm_area_struct *vma;
	bool irq_work_busy = false;
	struct stack_map_irq_work *work = NULL;

	if (irqs_disabled()) {
		work = this_cpu_ptr(&up_read_work);
		if (work->irq_work.flags & IRQ_WORK_BUSY)
			/* cannot queue more up_read, fallback */
			irq_work_busy = true;
	}

	/*
	 * We cannot do up_read() when irqs are disabled, because of the risk
	 * of deadlocking on rq_lock. To do the build_id lookup while irqs
	 * are disabled, we need to run up_read() in irq_work. We use a
	 * percpu variable to do the irq_work. If the irq_work is already
	 * used by another lookup, we fall back to reporting ips.
	 *
	 * The same fallback is used for kernel stacks (!user) on a stackmap
	 * with build_id.
	 */
	if (!user || !current || !current->mm || irq_work_busy ||
	    down_read_trylock(&current->mm->mmap_sem) == 0) {
		/* cannot access current->mm, fall back to ips */
		for (i = 0; i < trace_nr; i++) {
			id_offs[i].status = BPF_STACK_BUILD_ID_IP;
			id_offs[i].ip = ips[i];
			memset(id_offs[i].build_id, 0, BPF_BUILD_ID_SIZE);
		}
		return;
	}

	for (i = 0; i < trace_nr; i++) {
		vma = find_vma(current->mm, ips[i]);
		if (!vma || stack_map_get_build_id(vma, id_offs[i].build_id)) {
			/* per entry fall back to ips */
			id_offs[i].status = BPF_STACK_BUILD_ID_IP;
			id_offs[i].ip = ips[i];
			memset(id_offs[i].build_id, 0, BPF_BUILD_ID_SIZE);
			continue;
		}
		id_offs[i].offset = (vma->vm_pgoff << PAGE_SHIFT) + ips[i]
			- vma->vm_start;
		id_offs[i].status = BPF_STACK_BUILD_ID_VALID;
	}

	if (!work) {
		up_read(&current->mm->mmap_sem);
	} else {
		work->sem = &current->mm->mmap_sem;
		irq_work_queue(&work->irq_work);
		/*
		 * The irq_work will release the mmap_sem with
		 * up_read_non_owner(). The rwsem_release() is called
		 * here to release the lock from lockdep's perspective.
		 */
		rwsem_release(&current->mm->mmap_sem.dep_map, 1, _RET_IP_);
	}
}

BPF_CALL_3(bpf_get_stackid, struct pt_regs *, regs, struct bpf_map *, map,
	   u64, flags)
{
	struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);
	struct perf_callchain_entry *trace;
	struct stack_map_bucket *bucket, *new_bucket, *old_bucket;
	u32 max_depth = map->value_size / stack_map_data_size(map);
	/* stack_map_alloc() checks that max_depth <= sysctl_perf_event_max_stack */
	u32 init_nr = sysctl_perf_event_max_stack - max_depth;
	u32 skip = flags & BPF_F_SKIP_FIELD_MASK;
	u32 hash, id, trace_nr, trace_len;
	bool user = flags & BPF_F_USER_STACK;
	bool kernel = !user;
	u64 *ips;
	bool hash_matches;

	if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK |
			       BPF_F_FAST_STACK_CMP | BPF_F_REUSE_STACKID)))
		return -EINVAL;

	trace = get_perf_callchain(regs, init_nr, kernel, user,
				   sysctl_perf_event_max_stack, false, false);

	if (unlikely(!trace))
		/* couldn't fetch the stack trace */
		return -EFAULT;

	/* get_perf_callchain() guarantees that trace->nr >= init_nr
	 * and trace->nr <= sysctl_perf_event_max_stack, so trace_nr <= max_depth
	 */
	trace_nr = trace->nr - init_nr;

	if (trace_nr <= skip)
		/* skipping more than usable stack trace */
		return -EFAULT;

	trace_nr -= skip;
	trace_len = trace_nr * sizeof(u64);
	ips = trace->ip + skip + init_nr;
	hash = jhash2((u32 *)ips, trace_len / sizeof(u32), 0);
	id = hash & (smap->n_buckets - 1);
	bucket = READ_ONCE(smap->buckets[id]);

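	/*
	 * The bucket index is taken from the low bits of the hash. On a
	 * hash-bucket collision the existing trace is kept and its id reused
	 * only if it is identical; otherwise the call fails with -EEXIST
	 * unless the caller passed BPF_F_REUSE_STACKID, which overwrites the
	 * old trace.
	 */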
	hash_matches = bucket && bucket->hash == hash;
	/* fast cmp */
	if (hash_matches && flags & BPF_F_FAST_STACK_CMP)
		return id;

	if (stack_map_use_build_id(map)) {
		/* for build_id+offset, pop a bucket before slow cmp */
		new_bucket = (struct stack_map_bucket *)
			pcpu_freelist_pop(&smap->freelist);
		if (unlikely(!new_bucket))
			return -ENOMEM;
		new_bucket->nr = trace_nr;
		stack_map_get_build_id_offset(
			(struct bpf_stack_build_id *)new_bucket->data,
			ips, trace_nr, user);
		trace_len = trace_nr * sizeof(struct bpf_stack_build_id);
		if (hash_matches && bucket->nr == trace_nr &&
		    memcmp(bucket->data, new_bucket->data, trace_len) == 0) {
			pcpu_freelist_push(&smap->freelist, &new_bucket->fnode);
			return id;
		}
		if (bucket && !(flags & BPF_F_REUSE_STACKID)) {
			pcpu_freelist_push(&smap->freelist, &new_bucket->fnode);
			return -EEXIST;
		}
	} else {
		if (hash_matches && bucket->nr == trace_nr &&
		    memcmp(bucket->data, ips, trace_len) == 0)
			return id;
		if (bucket && !(flags & BPF_F_REUSE_STACKID))
			return -EEXIST;

		new_bucket = (struct stack_map_bucket *)
			pcpu_freelist_pop(&smap->freelist);
		if (unlikely(!new_bucket))
			return -ENOMEM;
		memcpy(new_bucket->data, ips, trace_len);
	}

	new_bucket->hash = hash;
	new_bucket->nr = trace_nr;

	old_bucket = xchg(&smap->buckets[id], new_bucket);
	if (old_bucket)
		pcpu_freelist_push(&smap->freelist, &old_bucket->fnode);
	return id;
}

const struct bpf_func_proto bpf_get_stackid_proto = {
	.func		= bpf_get_stackid,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};
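
/*
 * Minimal usage sketch (illustrative only, not part of this file): a tracing
 * BPF program with a BPF_MAP_TYPE_STACK_TRACE map, here called "stackmap" as
 * a hypothetical name, could collect user stacks with
 *
 *	int key = bpf_get_stackid(ctx, &stackmap, BPF_F_USER_STACK);
 *
 * A non-negative key identifies a deduplicated trace that user space can
 * later read with a lookup on the map fd, which is served by
 * bpf_stackmap_copy() below.
 */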

BPF_CALL_4(bpf_get_stack, struct pt_regs *, regs, void *, buf, u32, size,
	   u64, flags)
{
	u32 init_nr, trace_nr, copy_len, elem_size, num_elem;
	bool user_build_id = flags & BPF_F_USER_BUILD_ID;
	u32 skip = flags & BPF_F_SKIP_FIELD_MASK;
	bool user = flags & BPF_F_USER_STACK;
	struct perf_callchain_entry *trace;
	bool kernel = !user;
	int err = -EINVAL;
	u64 *ips;

	if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK |
			       BPF_F_USER_BUILD_ID)))
		goto clear;
	if (kernel && user_build_id)
		goto clear;

	elem_size = (user && user_build_id) ? sizeof(struct bpf_stack_build_id)
					    : sizeof(u64);
	if (unlikely(size % elem_size))
		goto clear;

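	/*
	 * init_nr tells get_perf_callchain() at which index to start filling
	 * the callchain entry, so at most num_elem frames are collected and
	 * the result fits in the caller's buffer.
	 */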
	num_elem = size / elem_size;
	if (sysctl_perf_event_max_stack < num_elem)
		init_nr = 0;
	else
		init_nr = sysctl_perf_event_max_stack - num_elem;
	trace = get_perf_callchain(regs, init_nr, kernel, user,
				   sysctl_perf_event_max_stack, false, false);
	if (unlikely(!trace))
		goto err_fault;

	trace_nr = trace->nr - init_nr;
	if (trace_nr < skip)
		goto err_fault;

	trace_nr -= skip;
	trace_nr = (trace_nr <= num_elem) ? trace_nr : num_elem;
	copy_len = trace_nr * elem_size;
	ips = trace->ip + skip + init_nr;
	if (user && user_build_id)
		stack_map_get_build_id_offset(buf, ips, trace_nr, user);
	else
		memcpy(buf, ips, copy_len);

	if (size > copy_len)
		memset(buf + copy_len, 0, size - copy_len);
	return copy_len;

err_fault:
	err = -EFAULT;
clear:
	memset(buf, 0, size);
	return err;
}

const struct bpf_func_proto bpf_get_stack_proto = {
	.func		= bpf_get_stack,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg4_type	= ARG_ANYTHING,
};

/* Called from eBPF program */
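/*
 * Lookup from a BPF program is not supported for stack trace maps; user
 * space reads traces through the bpf(2) lookup syscall, which is routed to
 * bpf_stackmap_copy() instead.
 */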
static void *stack_map_lookup_elem(struct bpf_map *map, void *key)
{
	return ERR_PTR(-EOPNOTSUPP);
}

/* Called from syscall */
int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
{
	struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);
	struct stack_map_bucket *bucket, *old_bucket;
	u32 id = *(u32 *)key, trace_len;

	if (unlikely(id >= smap->n_buckets))
		return -ENOENT;

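	/*
	 * Temporarily take the bucket out of the map while copying it, so a
	 * concurrent update of the same slot cannot recycle the memory we
	 * are reading; it is put back below, and any bucket installed in the
	 * meantime is returned to the freelist.
	 */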
	bucket = xchg(&smap->buckets[id], NULL);
	if (!bucket)
		return -ENOENT;

	trace_len = bucket->nr * stack_map_data_size(map);
	memcpy(value, bucket->data, trace_len);
	memset(value + trace_len, 0, map->value_size - trace_len);

	old_bucket = xchg(&smap->buckets[id], bucket);
	if (old_bucket)
		pcpu_freelist_push(&smap->freelist, &old_bucket->fnode);
	return 0;
}

static int stack_map_get_next_key(struct bpf_map *map, void *key,
				  void *next_key)
{
	struct bpf_stack_map *smap = container_of(map,
						  struct bpf_stack_map, map);
	u32 id;

	WARN_ON_ONCE(!rcu_read_lock_held());

	if (!key) {
		id = 0;
	} else {
		id = *(u32 *)key;
		if (id >= smap->n_buckets || !smap->buckets[id])
			id = 0;
		else
			id++;
	}

	while (id < smap->n_buckets && !smap->buckets[id])
		id++;

	if (id >= smap->n_buckets)
		return -ENOENT;

	*(u32 *)next_key = id;
	return 0;
}

static int stack_map_update_elem(struct bpf_map *map, void *key, void *value,
				 u64 map_flags)
{
	return -EINVAL;
}

/* Called from syscall or from eBPF program */
static int stack_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);
	struct stack_map_bucket *old_bucket;
	u32 id = *(u32 *)key;

	if (unlikely(id >= smap->n_buckets))
		return -E2BIG;

	old_bucket = xchg(&smap->buckets[id], NULL);
	if (old_bucket) {
		pcpu_freelist_push(&smap->freelist, &old_bucket->fnode);
		return 0;
	} else {
		return -ENOENT;
	}
}

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void stack_map_free(struct bpf_map *map)
{
	struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);

	/* wait for bpf programs to complete before freeing stack map */
	synchronize_rcu();

	bpf_map_area_free(smap->elems);
	pcpu_freelist_destroy(&smap->freelist);
	bpf_map_area_free(smap);
	put_callchain_buffers();
}

const struct bpf_map_ops stack_trace_map_ops = {
	.map_alloc = stack_map_alloc,
	.map_free = stack_map_free,
	.map_get_next_key = stack_map_get_next_key,
	.map_lookup_elem = stack_map_lookup_elem,
	.map_update_elem = stack_map_update_elem,
	.map_delete_elem = stack_map_delete_elem,
	.map_check_btf = map_check_no_btf,
};

static int __init stack_map_init(void)
{
	int cpu;
	struct stack_map_irq_work *work;

	for_each_possible_cpu(cpu) {
		work = per_cpu_ptr(&up_read_work, cpu);
		init_irq_work(&work->irq_work, do_up_read);
	}
	return 0;
}
subsys_initcall(stack_map_init);