/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#ifndef _LINUX_BPF_VERIFIER_H
#define _LINUX_BPF_VERIFIER_H 1

#include <linux/bpf.h> /* for enum bpf_reg_type */
#include <linux/filter.h> /* for MAX_BPF_STACK */
#include <linux/tnum.h>

/* Maximum variable offset umax_value permitted when resolving memory accesses.
 * In practice this is far bigger than any realistic pointer offset; this limit
 * ensures that umax_value + (int)off + (int)size cannot overflow a u64.
 */
#define BPF_MAX_VAR_OFF	(1 << 29)
/* Maximum variable size permitted for ARG_CONST_SIZE[_OR_ZERO]. This ensures
 * that converting umax_value to int cannot overflow.
 */
#define BPF_MAX_VAR_SIZ	(1 << 29)
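/* Illustrative arithmetic for the limits above (a sketch, not part of
 * the original header): with umax_value below 1 << 29 and off and size
 * each bounded by a 32-bit int, the worst-case sum stays under 2^33,
 * nowhere near u64 overflow; and any size below 1 << 29 also survives
 * the cast to int unchanged.
 */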

/* Liveness marks, used for registers and spilled-regs (in stack slots).
 * Read marks propagate upwards until they find a write mark; they record that
 * "one of this state's descendants read this reg" (and therefore the reg is
 * relevant for states_equal() checks).
 * Write marks collect downwards and do not propagate; they record that "the
 * straight-line code that reached this state (from its parent) wrote this reg"
 * (and therefore that reads propagated from this state or its descendants
 * should not propagate to its parent).
 * A state with a write mark can receive read marks; it just won't propagate
 * them to its parent, since the write mark is a property, not of the state,
 * but of the link between it and its parent. See mark_reg_read() and
 * mark_stack_slot_read() in kernel/bpf/verifier.c.
 */
enum bpf_reg_liveness {
	REG_LIVE_NONE = 0, /* reg hasn't been read or written this branch */
	REG_LIVE_READ32 = 0x1, /* reg was read, so we're sensitive to initial value */
	REG_LIVE_READ64 = 0x2, /* likewise, but full 64-bit content matters */
	REG_LIVE_READ = REG_LIVE_READ32 | REG_LIVE_READ64,
	REG_LIVE_WRITTEN = 0x4, /* reg was written first, screening off later reads */
	REG_LIVE_DONE = 0x8, /* liveness won't be updating this register anymore */
};
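
/* A minimal sketch of how these marks combine (illustration only, not
 * part of the original header):
 *
 *	r1 = 1;		// r1 gets REG_LIVE_WRITTEN in this state
 *	r2 = r1;	// read of r1 is screened off by the write above,
 *			// so no read mark propagates to the parent state
 *	w0 = w3;	// r3 was not written on this path, so
 *			// REG_LIVE_READ32 propagates up the parent chain
 *
 * A full 64-bit use of r3 (e.g. "r0 = r3") would propagate
 * REG_LIVE_READ64 instead.
 */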

struct bpf_reg_state {
	/* Ordering of fields matters. See states_equal() */
	enum bpf_reg_type type;
	union {
		/* valid when type == PTR_TO_PACKET */
		u16 range;

		/* valid when type == CONST_PTR_TO_MAP | PTR_TO_MAP_VALUE |
		 *   PTR_TO_MAP_VALUE_OR_NULL
		 */
		struct bpf_map *map_ptr;

		u32 btf_id; /* for PTR_TO_BTF_ID */

		u32 mem_size; /* for PTR_TO_MEM | PTR_TO_MEM_OR_NULL */

		/* Max size from any of the above. */
		unsigned long raw;
	};
	/* Fixed part of pointer offset, pointer types only */
	s32 off;
	/* For PTR_TO_PACKET, used to find other pointers with the same variable
	 * offset, so they can share range knowledge.
	 * For PTR_TO_MAP_VALUE_OR_NULL this is used to share which map value we
	 * came from, when one is tested for != NULL.
	 * For PTR_TO_MEM_OR_NULL this is used to identify memory allocation
	 * for the purpose of tracking that it's freed.
	 * For PTR_TO_SOCKET this is used to share which pointers retain the
	 * same reference to the socket, to determine proper reference freeing.
	 */
	u32 id;
	/* PTR_TO_SOCKET and PTR_TO_TCP_SOCK could be a ptr returned
	 * from a pointer-cast helper, bpf_sk_fullsock() and
	 * bpf_tcp_sock().
	 *
	 * Consider the following where "sk" is a reference counted
	 * pointer returned from "sk = bpf_sk_lookup_tcp();":
	 *
	 * 1: sk = bpf_sk_lookup_tcp();
	 * 2: if (!sk) { return 0; }
	 * 3: fullsock = bpf_sk_fullsock(sk);
	 * 4: if (!fullsock) { bpf_sk_release(sk); return 0; }
	 * 5: tp = bpf_tcp_sock(fullsock);
	 * 6: if (!tp) { bpf_sk_release(sk); return 0; }
	 * 7: bpf_sk_release(sk);
	 * 8: snd_cwnd = tp->snd_cwnd;  // verifier will complain
	 *
	 * After bpf_sk_release(sk) at line 7, both "fullsock" ptr and
	 * "tp" ptr should be invalidated also. In order to do that,
	 * the regs holding "fullsock" and "sk" need to remember
	 * the original refcounted ptr id (i.e. sk_reg->id) in ref_obj_id
	 * such that the verifier can reset all regs which have
	 * ref_obj_id matching the sk_reg->id.
	 *
	 * sk_reg->ref_obj_id is set to sk_reg->id at line 1.
	 * sk_reg->id is kept for NULL-marking purposes only.
	 * After NULL-marking is done, sk_reg->id can be reset to 0.
	 *
	 * After "fullsock = bpf_sk_fullsock(sk);" at line 3,
	 * fullsock_reg->ref_obj_id is set to sk_reg->ref_obj_id.
	 *
	 * After "tp = bpf_tcp_sock(fullsock);" at line 5,
	 * tp_reg->ref_obj_id is set to fullsock_reg->ref_obj_id
	 * which is the same as sk_reg->ref_obj_id.
	 *
	 * From the verifier perspective, if sk, fullsock and tp
	 * are not NULL, they are the same ptr with different
	 * reg->type. In particular, bpf_sk_release(tp) is also
	 * allowed and has the same effect as bpf_sk_release(sk).
	 */
	u32 ref_obj_id;
	/* For scalar types (SCALAR_VALUE), this represents our knowledge of
	 * the actual value.
	 * For pointer types, this represents the variable part of the offset
	 * from the pointed-to object, and is shared with all bpf_reg_states
	 * with the same id as us.
	 */
	struct tnum var_off;
	/* Used to determine if any memory access using this register will
	 * result in a bad access.
	 * These refer to the same value as var_off, not necessarily the actual
	 * contents of the register.
	 */
	s64 smin_value; /* minimum possible (s64)value */
	s64 smax_value; /* maximum possible (s64)value */
	u64 umin_value; /* minimum possible (u64)value */
	u64 umax_value; /* maximum possible (u64)value */
	s32 s32_min_value; /* minimum possible (s32)value */
	s32 s32_max_value; /* maximum possible (s32)value */
	u32 u32_min_value; /* minimum possible (u32)value */
	u32 u32_max_value; /* maximum possible (u32)value */
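	/* Illustrative sketch (not part of the original header): after
	 * "r1 &= 0xff" on an unknown scalar, r1 would carry
	 * var_off = { .value = 0, .mask = 0xff } together with
	 * umin_value = 0, umax_value = 255, smin_value = 0,
	 * smax_value = 255 and matching 32-bit bounds: the tnum tracks
	 * known bits while the min/max fields bound the same unknown
	 * value as a range.
	 */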
	/* parentage chain for liveness checking */
	struct bpf_reg_state *parent;
	/* Inside the callee two registers can be both PTR_TO_STACK like
	 * R1=fp-8 and R2=fp-8, but one of them points to this function stack
	 * while another to the caller's stack. To differentiate them 'frameno'
	 * is used which is an index in bpf_verifier_state->frame[] array
	 * pointing to bpf_func_state.
	 */
	u32 frameno;
	/* Tracks subreg definition. The stored value is the insn_idx of the
	 * writing insn. This is safe because subreg_def is used before any insn
	 * patching which only happens after main verification finished.
	 */
	s32 subreg_def;
	enum bpf_reg_liveness live;
	/* if (!precise && SCALAR_VALUE) min/max/tnum don't affect safety */
	bool precise;
};

enum bpf_stack_slot_type {
	STACK_INVALID,    /* nothing was stored in this stack slot */
	STACK_SPILL,      /* register spilled into stack */
	STACK_MISC,	  /* BPF program wrote some data into this slot */
	STACK_ZERO,	  /* BPF program wrote constant zero */
};

#define BPF_REG_SIZE 8	/* size of eBPF register in bytes */

struct bpf_stack_state {
	struct bpf_reg_state spilled_ptr;
	u8 slot_type[BPF_REG_SIZE];
};
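
/* Illustrative sketch (not part of the original header): stack contents
 * are tracked per byte. Spilling a register with
 * "*(u64 *)(r10 - 8) = r1" marks all eight slot_type bytes of that slot
 * STACK_SPILL and records r1's full state in spilled_ptr, while a
 * narrower data store such as "*(u32 *)(r10 - 16) = r2" marks only the
 * written bytes STACK_MISC (or STACK_ZERO when a constant zero was
 * stored).
 */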

struct bpf_reference_state {
	/* Track each reference created with a unique id, even if the same
	 * instruction creates the reference multiple times (eg, via CALL).
	 */
	int id;
	/* Instruction where the allocation of this reference occurred. This
	 * is used purely to inform the user of a reference leak.
	 */
	int insn_idx;
};
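
/* Illustrative sketch (not part of the original header): a call such as
 * "sk = bpf_sk_lookup_tcp(...)" pushes a bpf_reference_state with a
 * fresh id and the call's insn_idx, and "bpf_sk_release(sk)" removes
 * it. A path that reaches bpf_exit with references still outstanding
 * is rejected, and insn_idx lets the error message point at the
 * instruction that acquired the leaked reference.
 */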

/* state of the program:
 * type of all registers and stack info
 */
struct bpf_func_state {
	struct bpf_reg_state regs[MAX_BPF_REG];
	/* index of call instruction that called into this func */
	int callsite;
	/* stack frame number of this function state from pov of
	 * enclosing bpf_verifier_state.
	 * 0 = main function, 1 = first callee.
	 */
	u32 frameno;
	/* subprog number == index within subprog_info
	 * zero == main subprog
	 */
	u32 subprogno;

	/* The following fields should be last. See copy_func_state() */
	int acquired_refs;
	struct bpf_reference_state *refs;
	int allocated_stack;
	struct bpf_stack_state *stack;
};

struct bpf_idx_pair {
	u32 prev_idx;
	u32 idx;
};

struct bpf_id_pair {
	u32 old;
	u32 cur;
};

/* Maximum number of register states that can exist at once */
#define BPF_ID_MAP_SIZE (MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE)
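/* For illustration (values as of this tree; not part of the original
 * header): with MAX_BPF_REG == 11 and MAX_BPF_STACK == 512 this works
 * out to 11 + 512 / 8 = 75 id pairs per state comparison.
 */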
#define MAX_CALL_FRAMES 8
struct bpf_verifier_state {
	/* call stack tracking */
	struct bpf_func_state *frame[MAX_CALL_FRAMES];
	struct bpf_verifier_state *parent;
	/*
	 * 'branches' field is the number of branches left to explore:
	 * 0 - all possible paths from this state reached bpf_exit or
	 *     were safely pruned
	 * 1 - at least one path is being explored.
	 *     This state hasn't reached bpf_exit
	 * 2 - at least two paths are being explored.
	 *     This state is an immediate parent of two children.
	 *     One is fallthrough branch with branches==1 and another
	 *     state is pushed into stack (to be explored later) also with
	 *     branches==1. The parent of this state has branches==1.
	 * The verifier state tree connected via 'parent' pointer looks like:
	 * 1
	 * 1
	 * 2 -> 1 (first 'if' pushed into stack)
	 * 1
	 * 2 -> 1 (second 'if' pushed into stack)
	 * 1
	 * 1
	 * 1 bpf_exit.
	 *
	 * Once do_check() reaches bpf_exit, it calls update_branch_counts()
	 * and the verifier state tree will look:
	 * 1
	 * 1
	 * 2 -> 1 (first 'if' pushed into stack)
	 * 1
	 * 1 -> 1 (second 'if' pushed into stack)
	 * 0
	 * 0
	 * 0 bpf_exit.
	 * After pop_stack() the do_check() will resume at second 'if'.
	 *
	 * If is_state_visited() sees a state with branches > 0 it means
	 * there is a loop. If such state is exactly equal to the current state
	 * it's an infinite loop. Note states_equal() checks for states
	 * equivalence, so two states being 'states_equal' does not mean
	 * infinite loop. The exact comparison is provided by the
	 * states_maybe_looping() function. It's a stronger pre-check and
	 * much faster than states_equal().
	 *
	 * This algorithm may not find all possible infinite loops, or the
	 * loop iteration count may be too high.
	 * In such cases BPF_COMPLEXITY_LIMIT_INSNS limit kicks in.
	 */
	u32 branches;
	u32 insn_idx;
	u32 curframe;
	u32 active_spin_lock;
	bool speculative;

	/* first and last insn idx of this verifier state */
	u32 first_insn_idx;
	u32 last_insn_idx;
	/* jmp history recorded from first to last.
	 * backtracking is using it to go from last to first.
	 * For most states jmp_history_cnt is [0-3].
	 * For loops can go up to ~40.
	 */
	struct bpf_idx_pair *jmp_history;
	u32 jmp_history_cnt;
};

#define bpf_get_spilled_reg(slot, frame)				\
	(((slot < frame->allocated_stack / BPF_REG_SIZE) &&		\
	  (frame->stack[slot].slot_type[0] == STACK_SPILL))		\
	 ? &frame->stack[slot].spilled_ptr : NULL)

/* Iterate over 'frame', setting 'reg' to either NULL or a spilled register. */
#define bpf_for_each_spilled_reg(iter, frame, reg)			\
	for (iter = 0, reg = bpf_get_spilled_reg(iter, frame);		\
	     iter < frame->allocated_stack / BPF_REG_SIZE;		\
	     iter++, reg = bpf_get_spilled_reg(iter, frame))
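
/* Illustrative usage sketch (hypothetical caller, not part of the
 * original header):
 *
 *	struct bpf_reg_state *reg;
 *	int i;
 *
 *	bpf_for_each_spilled_reg(i, frame, reg) {
 *		if (!reg)
 *			continue;	// slot i holds no spilled register
 *		// inspect reg->type, reg->id, ...
 *	}
 */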

/* linked list of verifier states used to prune search */
struct bpf_verifier_state_list {
	struct bpf_verifier_state state;
	struct bpf_verifier_state_list *next;
	int miss_cnt, hit_cnt;
};

/* Possible states for alu_state member. */
#define BPF_ALU_SANITIZE_SRC	(1U << 0)
#define BPF_ALU_SANITIZE_DST	(1U << 1)
#define BPF_ALU_NEG_VALUE	(1U << 2)
#define BPF_ALU_NON_POINTER	(1U << 3)
#define BPF_ALU_IMMEDIATE	(1U << 4)
#define BPF_ALU_SANITIZE	(BPF_ALU_SANITIZE_SRC | \
				 BPF_ALU_SANITIZE_DST)

struct bpf_insn_aux_data {
	union {
		enum bpf_reg_type ptr_type;	/* pointer type for load/store insns */
		unsigned long map_ptr_state;	/* pointer/poison value for maps */
		s32 call_imm;			/* saved imm field of call insn */
		u32 alu_limit;			/* limit for add/sub register with pointer */
		struct {
			u32 map_index;		/* index into used_maps[] */
			u32 map_off;		/* offset from value base address */
		};
		struct {
			enum bpf_reg_type reg_type;	/* type of pseudo_btf_id */
			union {
				u32 btf_id;	/* btf_id for struct typed var */
				u32 mem_size;	/* mem_size for non-struct typed var */
			};
		} btf_var;
	};
	u64 map_key_state; /* constant (32 bit) key tracking for maps */
	int ctx_field_size; /* the ctx field size for load insn, maybe 0 */
	u32 seen; /* this insn was processed by the verifier at env->pass_cnt */
	bool sanitize_stack_spill; /* subject to Spectre v4 sanitation */
	bool zext_dst; /* this insn zero extends dst reg */
	u8 alu_state; /* used in combination with alu_limit */

	/* below fields are initialized once */
	unsigned int orig_idx; /* original instruction index */
	bool prune_point;
};

#define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */

#define BPF_VERIFIER_TMP_LOG_SIZE	1024

struct bpf_verifier_log {
	u32 level;
	char kbuf[BPF_VERIFIER_TMP_LOG_SIZE];
	char __user *ubuf;
	u32 len_used;
	u32 len_total;
};

static inline bool bpf_verifier_log_full(const struct bpf_verifier_log *log)
{
	return log->len_used >= log->len_total - 1;
}

#define BPF_LOG_LEVEL1	1
#define BPF_LOG_LEVEL2	2
#define BPF_LOG_STATS	4
#define BPF_LOG_LEVEL	(BPF_LOG_LEVEL1 | BPF_LOG_LEVEL2)
#define BPF_LOG_MASK	(BPF_LOG_LEVEL | BPF_LOG_STATS)
#define BPF_LOG_KERNEL	(BPF_LOG_MASK + 1) /* kernel internal flag */

static inline bool bpf_verifier_log_needed(const struct bpf_verifier_log *log)
{
	return log &&
		((log->level && log->ubuf && !bpf_verifier_log_full(log)) ||
		 log->level == BPF_LOG_KERNEL);
}

static inline bool
bpf_verifier_log_attr_valid(const struct bpf_verifier_log *log)
{
	return log->len_total >= 128 && log->len_total <= UINT_MAX >> 2 &&
	       log->level && log->ubuf && !(log->level & ~BPF_LOG_MASK);
}
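
/* Illustrative sketch (not part of the original header): a load request
 * with log_level == 1 (BPF_LOG_LEVEL1) and a 64 KiB user buffer passes
 * this check; log_level == 0 with a buffer supplied, a buffer under 128
 * bytes, or a level bit outside BPF_LOG_MASK would all be rejected.
 */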

#define BPF_MAX_SUBPROGS 256

struct bpf_subprog_info {
	/* 'start' has to be the first field otherwise find_subprog() won't work */
	u32 start; /* insn idx of function entry point */
	u32 linfo_idx; /* The idx to the main_prog->aux->linfo */
	u16 stack_depth; /* max. stack depth used by this function */
	bool has_tail_call;
	bool tail_call_reachable;
	bool has_ld_abs;
};

/* single container for all structs
 * one verifier_env per bpf_check() call
 */
struct bpf_verifier_env {
	u32 insn_idx;
	u32 prev_insn_idx;
	struct bpf_prog *prog;		/* eBPF program being verified */
	const struct bpf_verifier_ops *ops;
	struct bpf_verifier_stack_elem *head; /* stack of verifier states to be processed */
	int stack_size;			/* number of states to be processed */
	bool strict_alignment;		/* perform strict pointer alignment checks */
	bool test_state_freq;		/* test verifier with different pruning frequency */
	struct bpf_verifier_state *cur_state; /* current verifier state */
	struct bpf_verifier_state_list **explored_states; /* search pruning optimization */
	struct bpf_verifier_state_list *free_list;
	struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of maps used by eBPF program */
	u32 used_map_cnt;		/* number of used maps */
	u32 id_gen;			/* used to generate unique reg IDs */
	bool explore_alu_limits;
	bool allow_ptr_leaks;
	bool allow_uninit_stack;
	bool allow_ptr_to_map_access;
	bool bpf_capable;
	bool bypass_spec_v1;
	bool bypass_spec_v4;
	bool seen_direct_write;
	struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */
	const struct bpf_line_info *prev_linfo;
	struct bpf_verifier_log log;
	struct bpf_subprog_info subprog_info[BPF_MAX_SUBPROGS + 1];
	struct bpf_id_pair idmap_scratch[BPF_ID_MAP_SIZE];
	struct {
		int *insn_state;
		int *insn_stack;
		int cur_stack;
	} cfg;
	u32 pass_cnt; /* number of times do_check() was called */
	u32 subprog_cnt;
	/* number of instructions analyzed by the verifier */
	u32 prev_insn_processed, insn_processed;
	/* number of jmps, calls, exits analyzed so far */
	u32 prev_jmps_processed, jmps_processed;
	/* total verification time */
	u64 verification_time;
	/* maximum number of verifier states kept in 'branching' instructions */
	u32 max_states_per_insn;
	/* total number of allocated verifier states */
	u32 total_states;
	/* some states are freed during program analysis.
	 * this is peak number of states. this number dominates kernel
	 * memory consumption during verification
	 */
	u32 peak_states;
	/* longest register parentage chain walked for liveness marking */
	u32 longest_mark_read_walk;
};

__printf(2, 0) void bpf_verifier_vlog(struct bpf_verifier_log *log,
				      const char *fmt, va_list args);
__printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
					   const char *fmt, ...);
__printf(2, 3) void bpf_log(struct bpf_verifier_log *log,
			    const char *fmt, ...);

static inline struct bpf_func_state *cur_func(struct bpf_verifier_env *env)
{
	struct bpf_verifier_state *cur = env->cur_state;

	return cur->frame[cur->curframe];
}

static inline struct bpf_reg_state *cur_regs(struct bpf_verifier_env *env)
{
	return cur_func(env)->regs;
}

int bpf_prog_offload_verifier_prep(struct bpf_prog *prog);
int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env,
				 int insn_idx, int prev_insn_idx);
int bpf_prog_offload_finalize(struct bpf_verifier_env *env);
void
bpf_prog_offload_replace_insn(struct bpf_verifier_env *env, u32 off,
			      struct bpf_insn *insn);
void
bpf_prog_offload_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt);

int check_ctx_reg(struct bpf_verifier_env *env,
		  const struct bpf_reg_state *reg, int regno);

/* this lives here instead of in bpf.h because it needs to dereference tgt_prog */
static inline u64 bpf_trampoline_compute_key(const struct bpf_prog *tgt_prog,
					     u32 btf_id)
{
	return tgt_prog ? (((u64)tgt_prog->aux->id) << 32 | btf_id) : btf_id;
}
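
/* Illustrative arithmetic (not part of the original header): for a
 * target prog with aux->id == 5 and btf_id == 42 the key is
 * (5ULL << 32) | 42 == 0x50000002a; with no target prog the key is
 * just btf_id, so keys with and without a target prog cannot collide
 * for nonzero prog ids.
 */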

int bpf_check_attach_target(struct bpf_verifier_log *log,
			    const struct bpf_prog *prog,
			    const struct bpf_prog *tgt_prog,
			    u32 btf_id,
			    struct bpf_attach_target_info *tgt_info);

#endif /* _LINUX_BPF_VERIFIER_H */