// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#include <linux/bpf.h>
#include <linux/rcupdate.h>
#include <linux/random.h>
#include <linux/smp.h>
#include <linux/topology.h>
#include <linux/ktime.h>
#include <linux/sched.h>
#include <linux/uidgid.h>
#include <linux/filter.h>
#include <linux/ctype.h>
#include <linux/jiffies.h>
#include <linux/pid_namespace.h>
#include <linux/proc_ns.h>
#include <linux/security.h>

#include "../../lib/kstrtox.h"

/* If a kernel subsystem allows eBPF programs to call this function,
 * it should return bpf_map_lookup_elem_proto from its own
 * verifier_ops->get_func_proto() callback so that the verifier can
 * properly check the arguments.
 *
 * Different map implementations rely on RCU in their
 * lookup/update/delete map methods, therefore eBPF programs must run
 * under the RCU read lock if they are allowed to access maps. Hence the
 * rcu_read_lock_held() check in all three functions.
 */
BPF_CALL_2(bpf_map_lookup_elem, struct bpf_map *, map, void *, key)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return (unsigned long) map->ops->map_lookup_elem(map, key);
}

const struct bpf_func_proto bpf_map_lookup_elem_proto = {
	.func		= bpf_map_lookup_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_KEY,
};
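/*
 * Illustrative sketch (not part of this file): from the BPF program side,
 * these map helpers are typically reached through the wrappers declared in
 * libbpf's bpf_helpers.h. The map and key names below are hypothetical:
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_HASH);
 *		__uint(max_entries, 1024);
 *		__type(key, u32);
 *		__type(value, u64);
 *	} my_counts SEC(".maps");
 *
 *	u32 key = 0;
 *	u64 *val = bpf_map_lookup_elem(&my_counts, &key);
 *	if (val)	// may be NULL: RET_PTR_TO_MAP_VALUE_OR_NULL
 *		__sync_fetch_and_add(val, 1);
 */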

BPF_CALL_4(bpf_map_update_elem, struct bpf_map *, map, void *, key,
	   void *, value, u64, flags)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return map->ops->map_update_elem(map, key, value, flags);
}

const struct bpf_func_proto bpf_map_update_elem_proto = {
	.func		= bpf_map_update_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_KEY,
	.arg3_type	= ARG_PTR_TO_MAP_VALUE,
	.arg4_type	= ARG_ANYTHING,
};

BPF_CALL_2(bpf_map_delete_elem, struct bpf_map *, map, void *, key)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return map->ops->map_delete_elem(map, key);
}

const struct bpf_func_proto bpf_map_delete_elem_proto = {
	.func		= bpf_map_delete_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_KEY,
};

BPF_CALL_3(bpf_map_push_elem, struct bpf_map *, map, void *, value, u64, flags)
{
	return map->ops->map_push_elem(map, value, flags);
}

const struct bpf_func_proto bpf_map_push_elem_proto = {
	.func		= bpf_map_push_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_VALUE,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_2(bpf_map_pop_elem, struct bpf_map *, map, void *, value)
{
	return map->ops->map_pop_elem(map, value);
}

const struct bpf_func_proto bpf_map_pop_elem_proto = {
	.func		= bpf_map_pop_elem,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_UNINIT_MAP_VALUE,
};

BPF_CALL_2(bpf_map_peek_elem, struct bpf_map *, map, void *, value)
{
	return map->ops->map_peek_elem(map, value);
}

const struct bpf_func_proto bpf_map_peek_elem_proto = {
	.func		= bpf_map_peek_elem,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_UNINIT_MAP_VALUE,
};

const struct bpf_func_proto bpf_get_prandom_u32_proto = {
	.func		= bpf_user_rnd_u32,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_get_smp_processor_id)
{
	return smp_processor_id();
}

const struct bpf_func_proto bpf_get_smp_processor_id_proto = {
	.func		= bpf_get_smp_processor_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_get_numa_node_id)
{
	return numa_node_id();
}

const struct bpf_func_proto bpf_get_numa_node_id_proto = {
	.func		= bpf_get_numa_node_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_ktime_get_ns)
{
	/* NMI safe access to clock monotonic */
	return ktime_get_mono_fast_ns();
}

const struct bpf_func_proto bpf_ktime_get_ns_proto = {
	.func		= bpf_ktime_get_ns,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_ktime_get_boot_ns)
{
	/* NMI safe access to clock boottime */
	return ktime_get_boot_fast_ns();
}

const struct bpf_func_proto bpf_ktime_get_boot_ns_proto = {
	.func		= bpf_ktime_get_boot_ns,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_get_current_pid_tgid)
{
	struct task_struct *task = current;

	if (unlikely(!task))
		return -EINVAL;

	return (u64) task->tgid << 32 | task->pid;
}

const struct bpf_func_proto bpf_get_current_pid_tgid_proto = {
	.func		= bpf_get_current_pid_tgid,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};
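/*
 * Illustrative sketch (not part of this file): a BPF program unpacks the
 * packed return value like so:
 *
 *	u64 id = bpf_get_current_pid_tgid();
 *	u32 tgid = id >> 32;	// the "PID" as seen by userspace
 *	u32 pid = (u32)id;	// the kernel task id ("TID" in userspace)
 */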

BPF_CALL_0(bpf_get_current_uid_gid)
{
	struct task_struct *task = current;
	kuid_t uid;
	kgid_t gid;

	if (unlikely(!task))
		return -EINVAL;

	current_uid_gid(&uid, &gid);
	return (u64) from_kgid(&init_user_ns, gid) << 32 |
		     from_kuid(&init_user_ns, uid);
}

const struct bpf_func_proto bpf_get_current_uid_gid_proto = {
	.func		= bpf_get_current_uid_gid,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_2(bpf_get_current_comm, char *, buf, u32, size)
{
	struct task_struct *task = current;

	if (unlikely(!task))
		goto err_clear;

	strncpy(buf, task->comm, size);

	/* Verifier guarantees that size > 0. For task->comm exceeding
	 * size, guarantee that buf is %NUL-terminated. Unconditionally
	 * done here to save the size test.
	 */
	buf[size - 1] = 0;
	return 0;
err_clear:
	memset(buf, 0, size);
	return -EINVAL;
}

const struct bpf_func_proto bpf_get_current_comm_proto = {
	.func		= bpf_get_current_comm,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE,
};
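/*
 * Illustrative sketch (not part of this file): typical BPF program usage,
 * with the buffer sized to TASK_COMM_LEN; bpf_printk() is the libbpf
 * convenience macro:
 *
 *	char comm[16];		// TASK_COMM_LEN
 *	if (bpf_get_current_comm(comm, sizeof(comm)) == 0)
 *		bpf_printk("comm=%s", comm);
 */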

#if defined(CONFIG_QUEUED_SPINLOCKS) || defined(CONFIG_BPF_ARCH_SPINLOCK)

static inline void __bpf_spin_lock(struct bpf_spin_lock *lock)
{
	arch_spinlock_t *l = (void *)lock;
	union {
		__u32 val;
		arch_spinlock_t lock;
	} u = { .lock = __ARCH_SPIN_LOCK_UNLOCKED };

	compiletime_assert(u.val == 0, "__ARCH_SPIN_LOCK_UNLOCKED not 0");
	BUILD_BUG_ON(sizeof(*l) != sizeof(__u32));
	BUILD_BUG_ON(sizeof(*lock) != sizeof(__u32));
	arch_spin_lock(l);
}

static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock)
{
	arch_spinlock_t *l = (void *)lock;

	arch_spin_unlock(l);
}

#else

static inline void __bpf_spin_lock(struct bpf_spin_lock *lock)
{
	atomic_t *l = (void *)lock;

	BUILD_BUG_ON(sizeof(*l) != sizeof(*lock));
	do {
		atomic_cond_read_relaxed(l, !VAL);
	} while (atomic_xchg(l, 1));
}

static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock)
{
	atomic_t *l = (void *)lock;

	atomic_set_release(l, 0);
}

#endif

static DEFINE_PER_CPU(unsigned long, irqsave_flags);

notrace BPF_CALL_1(bpf_spin_lock, struct bpf_spin_lock *, lock)
{
	unsigned long flags;

	local_irq_save(flags);
	__bpf_spin_lock(lock);
	__this_cpu_write(irqsave_flags, flags);
	return 0;
}

const struct bpf_func_proto bpf_spin_lock_proto = {
	.func		= bpf_spin_lock,
	.gpl_only	= false,
	.ret_type	= RET_VOID,
	.arg1_type	= ARG_PTR_TO_SPIN_LOCK,
};

notrace BPF_CALL_1(bpf_spin_unlock, struct bpf_spin_lock *, lock)
{
	unsigned long flags;

	flags = __this_cpu_read(irqsave_flags);
	__bpf_spin_unlock(lock);
	local_irq_restore(flags);
	return 0;
}

const struct bpf_func_proto bpf_spin_unlock_proto = {
	.func		= bpf_spin_unlock,
	.gpl_only	= false,
	.ret_type	= RET_VOID,
	.arg1_type	= ARG_PTR_TO_SPIN_LOCK,
};
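/*
 * Illustrative sketch (hypothetical map value layout, not part of this
 * file): a BPF program uses these helpers on a struct bpf_spin_lock
 * embedded in a map value:
 *
 *	struct val_t {
 *		struct bpf_spin_lock lock;
 *		u64 cnt;
 *	};
 *
 *	struct val_t *val = bpf_map_lookup_elem(&my_map, &key);
 *	if (val) {
 *		bpf_spin_lock(&val->lock);
 *		val->cnt++;
 *		bpf_spin_unlock(&val->lock);
 *	}
 */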

void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
			   bool lock_src)
{
	struct bpf_spin_lock *lock;

	if (lock_src)
		lock = src + map->spin_lock_off;
	else
		lock = dst + map->spin_lock_off;
	preempt_disable();
	____bpf_spin_lock(lock);
	copy_map_value(map, dst, src);
	____bpf_spin_unlock(lock);
	preempt_enable();
}

BPF_CALL_0(bpf_jiffies64)
{
	return get_jiffies_64();
}

const struct bpf_func_proto bpf_jiffies64_proto = {
	.func		= bpf_jiffies64,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};
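/*
 * Illustrative sketch (assumption, not part of this file): converting a
 * jiffies64 delta to milliseconds needs HZ, which a BPF program can obtain
 * e.g. via libbpf's __kconfig externs:
 *
 *	extern int CONFIG_HZ __kconfig;
 *
 *	u64 start = bpf_jiffies64();
 *	// ... work ...
 *	u64 elapsed_ms = (bpf_jiffies64() - start) * 1000 / CONFIG_HZ;
 */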

#ifdef CONFIG_CGROUPS
BPF_CALL_0(bpf_get_current_cgroup_id)
{
	struct cgroup *cgrp = task_dfl_cgroup(current);

	return cgroup_id(cgrp);
}

const struct bpf_func_proto bpf_get_current_cgroup_id_proto = {
	.func		= bpf_get_current_cgroup_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_1(bpf_get_current_ancestor_cgroup_id, int, ancestor_level)
{
	struct cgroup *cgrp = task_dfl_cgroup(current);
	struct cgroup *ancestor;

	ancestor = cgroup_ancestor(cgrp, ancestor_level);
	if (!ancestor)
		return 0;
	return cgroup_id(ancestor);
}

const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto = {
	.func		= bpf_get_current_ancestor_cgroup_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
};

#ifdef CONFIG_CGROUP_BPF
DECLARE_PER_CPU(struct bpf_cgroup_storage_info,
		bpf_cgroup_storage_info[BPF_CGROUP_STORAGE_NEST_MAX]);

BPF_CALL_2(bpf_get_local_storage, struct bpf_map *, map, u64, flags)
{
	/* The flags argument is not used now,
	 * but provides the ability to extend the API.
	 * The verifier checks that its value is correct.
	 */
	enum bpf_cgroup_storage_type stype = cgroup_storage_type(map);
	struct bpf_cgroup_storage *storage = NULL;
	void *ptr;
	int i;

	for (i = BPF_CGROUP_STORAGE_NEST_MAX - 1; i >= 0; i--) {
		if (likely(this_cpu_read(bpf_cgroup_storage_info[i].task) != current))
			continue;

		storage = this_cpu_read(bpf_cgroup_storage_info[i].storage[stype]);
		break;
	}

	if (stype == BPF_CGROUP_STORAGE_SHARED)
		ptr = &READ_ONCE(storage->buf)->data[0];
	else
		ptr = this_cpu_ptr(storage->percpu_buf);

	return (unsigned long)ptr;
}

const struct bpf_func_proto bpf_get_local_storage_proto = {
	.func		= bpf_get_local_storage,
	.gpl_only	= false,
	.ret_type	= RET_PTR_TO_MAP_VALUE,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
};
#endif

#define BPF_STRTOX_BASE_MASK 0x1F

static int __bpf_strtoull(const char *buf, size_t buf_len, u64 flags,
			  unsigned long long *res, bool *is_negative)
{
	unsigned int base = flags & BPF_STRTOX_BASE_MASK;
	const char *cur_buf = buf;
	size_t cur_len = buf_len;
	unsigned int consumed;
	size_t val_len;
	char str[64];

	if (!buf || !buf_len || !res || !is_negative)
		return -EINVAL;

	if (base != 0 && base != 8 && base != 10 && base != 16)
		return -EINVAL;

	if (flags & ~BPF_STRTOX_BASE_MASK)
		return -EINVAL;

	while (cur_buf < buf + buf_len && isspace(*cur_buf))
		++cur_buf;

	*is_negative = (cur_buf < buf + buf_len && *cur_buf == '-');
	if (*is_negative)
		++cur_buf;

	consumed = cur_buf - buf;
	cur_len -= consumed;
	if (!cur_len)
		return -EINVAL;

	cur_len = min(cur_len, sizeof(str) - 1);
	memcpy(str, cur_buf, cur_len);
	str[cur_len] = '\0';
	cur_buf = str;

	cur_buf = _parse_integer_fixup_radix(cur_buf, &base);
	val_len = _parse_integer(cur_buf, base, res);

	if (val_len & KSTRTOX_OVERFLOW)
		return -ERANGE;

	if (val_len == 0)
		return -EINVAL;

	cur_buf += val_len;
	consumed += cur_buf - str;

	return consumed;
}

static int __bpf_strtoll(const char *buf, size_t buf_len, u64 flags,
			 long long *res)
{
	unsigned long long _res;
	bool is_negative;
	int err;

	err = __bpf_strtoull(buf, buf_len, flags, &_res, &is_negative);
	if (err < 0)
		return err;
	if (is_negative) {
		if ((long long)-_res > 0)
			return -ERANGE;
		*res = -_res;
	} else {
		if ((long long)_res < 0)
			return -ERANGE;
		*res = _res;
	}
	return err;
}

BPF_CALL_4(bpf_strtol, const char *, buf, size_t, buf_len, u64, flags,
	   long *, res)
{
	long long _res;
	int err;

	err = __bpf_strtoll(buf, buf_len, flags, &_res);
	if (err < 0)
		return err;
	if (_res != (long)_res)
		return -ERANGE;
	*res = _res;
	return err;
}

const struct bpf_func_proto bpf_strtol_proto = {
	.func		= bpf_strtol,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM,
	.arg2_type	= ARG_CONST_SIZE,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_LONG,
};
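/*
 * Illustrative sketch (hypothetical buffer, not part of this file):
 * parsing a user-supplied decimal string from a BPF program, e.g. in a
 * sysctl hook:
 *
 *	long val;
 *	int n = bpf_strtol(buf, buf_len, 0, &val);	// flags=0: auto-detect base
 *	if (n < 0)
 *		return n;	// -EINVAL or -ERANGE
 *	// n is the number of bytes consumed, including leading spaces
 */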

BPF_CALL_4(bpf_strtoul, const char *, buf, size_t, buf_len, u64, flags,
	   unsigned long *, res)
{
	unsigned long long _res;
	bool is_negative;
	int err;

	err = __bpf_strtoull(buf, buf_len, flags, &_res, &is_negative);
	if (err < 0)
		return err;
	if (is_negative)
		return -EINVAL;
	if (_res != (unsigned long)_res)
		return -ERANGE;
	*res = _res;
	return err;
}

const struct bpf_func_proto bpf_strtoul_proto = {
	.func		= bpf_strtoul,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM,
	.arg2_type	= ARG_CONST_SIZE,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_LONG,
};
#endif

BPF_CALL_4(bpf_get_ns_current_pid_tgid, u64, dev, u64, ino,
	   struct bpf_pidns_info *, nsdata, u32, size)
{
	struct task_struct *task = current;
	struct pid_namespace *pidns;
	int err = -EINVAL;

	if (unlikely(size != sizeof(struct bpf_pidns_info)))
		goto clear;

	if (unlikely((u64)(dev_t)dev != dev))
		goto clear;

	if (unlikely(!task))
		goto clear;

	pidns = task_active_pid_ns(task);
	if (unlikely(!pidns)) {
		err = -ENOENT;
		goto clear;
	}

	if (!ns_match(&pidns->ns, (dev_t)dev, ino))
		goto clear;

	nsdata->pid = task_pid_nr_ns(task, pidns);
	nsdata->tgid = task_tgid_nr_ns(task, pidns);
	return 0;
clear:
	memset((void *)nsdata, 0, (size_t) size);
	return err;
}

const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto = {
	.func		= bpf_get_ns_current_pid_tgid,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg4_type	= ARG_CONST_SIZE,
};
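/*
 * Illustrative sketch (assumption, not part of this file): dev and ino
 * identify the target pid namespace; userspace typically obtains them by
 * stat(2)-ing /proc/self/ns/pid and passes them to the program, e.g. via a
 * map. ns_dev and ns_ino below are hypothetical names:
 *
 *	struct bpf_pidns_info nsd;
 *	if (bpf_get_ns_current_pid_tgid(ns_dev, ns_ino, &nsd,
 *					sizeof(nsd)) == 0)
 *		bpf_printk("ns pid=%u tgid=%u", nsd.pid, nsd.tgid);
 */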

static const struct bpf_func_proto bpf_get_raw_smp_processor_id_proto = {
	.func		= bpf_get_raw_cpu_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_5(bpf_event_output_data, void *, ctx, struct bpf_map *, map,
	   u64, flags, void *, data, u64, size)
{
	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
		return -EINVAL;

	return bpf_event_output(map, flags, data, size, NULL, 0, NULL);
}

const struct bpf_func_proto bpf_event_output_data_proto = {
	.func		= bpf_event_output_data,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_MEM,
	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
};

BPF_CALL_3(bpf_copy_from_user, void *, dst, u32, size,
	   const void __user *, user_ptr)
{
	int ret = copy_from_user(dst, user_ptr, size);

	if (unlikely(ret)) {
		memset(dst, 0, size);
		ret = -EFAULT;
	}

	return ret;
}

const struct bpf_func_proto bpf_copy_from_user_proto = {
	.func		= bpf_copy_from_user,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_2(bpf_per_cpu_ptr, const void *, ptr, u32, cpu)
{
	if (cpu >= nr_cpu_ids)
		return (unsigned long)NULL;

	return (unsigned long)per_cpu_ptr((const void __percpu *)ptr, cpu);
}

const struct bpf_func_proto bpf_per_cpu_ptr_proto = {
	.func		= bpf_per_cpu_ptr,
	.gpl_only	= false,
	.ret_type	= RET_PTR_TO_MEM_OR_BTF_ID_OR_NULL,
	.arg1_type	= ARG_PTR_TO_PERCPU_BTF_ID,
	.arg2_type	= ARG_ANYTHING,
};
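/*
 * Illustrative sketch (assumption, not part of this file): with BTF, a BPF
 * program can take a __ksym reference to a percpu kernel variable and
 * resolve a specific CPU's copy:
 *
 *	extern const struct rq runqueues __ksym;	// percpu symbol
 *
 *	const struct rq *rq = bpf_per_cpu_ptr(&runqueues, 0);
 *	if (rq)		// NULL if cpu >= nr_cpu_ids
 *		bpf_printk("nr_running=%u", rq->nr_running);
 */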

BPF_CALL_1(bpf_this_cpu_ptr, const void *, percpu_ptr)
{
	return (unsigned long)this_cpu_ptr((const void __percpu *)percpu_ptr);
}

const struct bpf_func_proto bpf_this_cpu_ptr_proto = {
	.func		= bpf_this_cpu_ptr,
	.gpl_only	= false,
	.ret_type	= RET_PTR_TO_MEM_OR_BTF_ID,
	.arg1_type	= ARG_PTR_TO_PERCPU_BTF_ID,
};

const struct bpf_func_proto bpf_get_current_task_proto __weak;
const struct bpf_func_proto bpf_probe_read_user_proto __weak;
const struct bpf_func_proto bpf_probe_read_user_str_proto __weak;
const struct bpf_func_proto bpf_probe_read_kernel_proto __weak;
const struct bpf_func_proto bpf_probe_read_kernel_str_proto __weak;

const struct bpf_func_proto *
bpf_base_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_map_lookup_elem:
		return &bpf_map_lookup_elem_proto;
	case BPF_FUNC_map_update_elem:
		return &bpf_map_update_elem_proto;
	case BPF_FUNC_map_delete_elem:
		return &bpf_map_delete_elem_proto;
	case BPF_FUNC_map_push_elem:
		return &bpf_map_push_elem_proto;
	case BPF_FUNC_map_pop_elem:
		return &bpf_map_pop_elem_proto;
	case BPF_FUNC_map_peek_elem:
		return &bpf_map_peek_elem_proto;
	case BPF_FUNC_get_prandom_u32:
		return &bpf_get_prandom_u32_proto;
	case BPF_FUNC_get_smp_processor_id:
		return &bpf_get_raw_smp_processor_id_proto;
	case BPF_FUNC_get_numa_node_id:
		return &bpf_get_numa_node_id_proto;
	case BPF_FUNC_tail_call:
		return &bpf_tail_call_proto;
	case BPF_FUNC_ktime_get_ns:
		return &bpf_ktime_get_ns_proto;
	case BPF_FUNC_ktime_get_boot_ns:
		return &bpf_ktime_get_boot_ns_proto;
	case BPF_FUNC_ringbuf_output:
		return &bpf_ringbuf_output_proto;
	case BPF_FUNC_ringbuf_reserve:
		return &bpf_ringbuf_reserve_proto;
	case BPF_FUNC_ringbuf_submit:
		return &bpf_ringbuf_submit_proto;
	case BPF_FUNC_ringbuf_discard:
		return &bpf_ringbuf_discard_proto;
	case BPF_FUNC_ringbuf_query:
		return &bpf_ringbuf_query_proto;
	default:
		break;
	}

	if (!bpf_capable())
		return NULL;

	switch (func_id) {
	case BPF_FUNC_spin_lock:
		return &bpf_spin_lock_proto;
	case BPF_FUNC_spin_unlock:
		return &bpf_spin_unlock_proto;
	case BPF_FUNC_jiffies64:
		return &bpf_jiffies64_proto;
	case BPF_FUNC_per_cpu_ptr:
		return &bpf_per_cpu_ptr_proto;
	case BPF_FUNC_this_cpu_ptr:
		return &bpf_this_cpu_ptr_proto;
	default:
		break;
	}

	if (!perfmon_capable())
		return NULL;

	switch (func_id) {
	case BPF_FUNC_trace_printk:
		return bpf_get_trace_printk_proto();
	case BPF_FUNC_get_current_task:
		return &bpf_get_current_task_proto;
	case BPF_FUNC_probe_read_user:
		return &bpf_probe_read_user_proto;
	case BPF_FUNC_probe_read_kernel:
		return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
		       NULL : &bpf_probe_read_kernel_proto;
	case BPF_FUNC_probe_read_user_str:
		return &bpf_probe_read_user_str_proto;
	case BPF_FUNC_probe_read_kernel_str:
		return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
		       NULL : &bpf_probe_read_kernel_str_proto;
	case BPF_FUNC_snprintf_btf:
		return &bpf_snprintf_btf_proto;
	default:
		return NULL;
	}
}
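/*
 * Illustrative sketch (hypothetical subsystem, not part of this file): as
 * described in the comment at the top of this file, a subsystem exposes
 * these helpers by returning the protos from its get_func_proto()
 * callback and falling back to bpf_base_func_proto():
 *
 *	static const struct bpf_func_proto *
 *	my_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 *	{
 *		switch (func_id) {
 *		case BPF_FUNC_my_subsys_helper:		// hypothetical helper
 *			return &bpf_my_subsys_helper_proto;
 *		default:
 *			return bpf_base_func_proto(func_id);
 *		}
 *	}
 */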