/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#ifndef _LINUX_BPF_VERIFIER_H
#define _LINUX_BPF_VERIFIER_H 1

#include <linux/bpf.h> /* for enum bpf_reg_type */
#include <linux/btf.h> /* for struct btf and btf_id() */
#include <linux/filter.h> /* for MAX_BPF_STACK */
#include <linux/tnum.h>

/* Maximum variable offset umax_value permitted when resolving memory accesses.
 * In practice this is far bigger than any realistic pointer offset; this limit
 * ensures that umax_value + (int)off + (int)size cannot overflow a u64.
 */
#define BPF_MAX_VAR_OFF	(1 << 29)
/* Maximum variable size permitted for ARG_CONST_SIZE[_OR_ZERO].  This ensures
 * that converting umax_value to int cannot overflow.
 */
#define BPF_MAX_VAR_SIZ	(1 << 29)
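/* For intuition: with umax_value <= BPF_MAX_VAR_OFF == 2^29 and
 * |(int)off|, |(int)size| each below 2^31, the sum
 * umax_value + (int)off + (int)size stays below 2^33, far under what
 * a u64 can hold, so the computation above cannot wrap.
 */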
/* size of type_str_buf in bpf_verifier. */
#define TYPE_STR_BUF_LEN 128

/* Liveness marks, used for registers and spilled-regs (in stack slots).
 * Read marks propagate upwards until they find a write mark; they record that
 * "one of this state's descendants read this reg" (and therefore the reg is
 * relevant for states_equal() checks).
 * Write marks collect downwards and do not propagate; they record that "the
 * straight-line code that reached this state (from its parent) wrote this reg"
 * (and therefore that reads propagated from this state or its descendants
 * should not propagate to its parent).
 * A state with a write mark can receive read marks; it just won't propagate
 * them to its parent, since the write mark is a property, not of the state,
 * but of the link between it and its parent.  See mark_reg_read() and
 * mark_stack_slot_read() in kernel/bpf/verifier.c.
 */
enum bpf_reg_liveness {
	REG_LIVE_NONE = 0, /* reg hasn't been read or written this branch */
	REG_LIVE_READ32 = 0x1, /* reg was read, so we're sensitive to initial value */
	REG_LIVE_READ64 = 0x2, /* likewise, but full 64-bit content matters */
	REG_LIVE_READ = REG_LIVE_READ32 | REG_LIVE_READ64,
	REG_LIVE_WRITTEN = 0x4, /* reg was written first, screening off later reads */
	REG_LIVE_DONE = 0x8, /* liveness won't be updating this register anymore */
};
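
/* Illustrative example (hypothetical fragment, not from this file):
 *
 *	r1 = 0;		// the state gets REG_LIVE_WRITTEN for r1
 *	r2 = r1;	// a full-width read: r1 gains REG_LIVE_READ64,
 *			// but the write mark screens it off from the
 *			// parent state
 *
 * A 32-bit read such as "w2 = w1" would set REG_LIVE_READ32 instead.
 */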

/* For every reg representing a map value or allocated object pointer,
 * we consider the tuple of (ptr, id) to be unique in the verifier
 * context and consider them to not alias each other for the purposes
 * of tracking lock state.
 */
struct bpf_active_lock {
	/* This can either be reg->map_ptr or reg->btf. If ptr is NULL,
	 * there's no active lock held, and other fields have no
	 * meaning. If non-NULL, it indicates that a lock is held and
	 * id member has the reg->id of the register which can be >= 0.
	 */
	void *ptr;
	/* This will be reg->id */
	u32 id;
};
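
/* Illustrative example: after a successful bpf_spin_lock() on a lock
 * in a map value, ptr would hold that register's reg->map_ptr and id
 * its reg->id; for a lock in an allocated object, ptr would hold
 * reg->btf instead.
 */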

struct bpf_reg_state {
	/* Ordering of fields matters.  See states_equal() */
	enum bpf_reg_type type;
	/* Fixed part of pointer offset, pointer types only */
	s32 off;
	union {
		/* valid when type == PTR_TO_PACKET */
		int range;

		/* valid when type == CONST_PTR_TO_MAP | PTR_TO_MAP_VALUE |
		 *   PTR_TO_MAP_VALUE_OR_NULL
		 */
		struct {
			struct bpf_map *map_ptr;
			/* To distinguish map lookups from outer map
			 * the map_uid is non-zero for registers
			 * pointing to inner maps.
			 */
			u32 map_uid;
		};

		/* for PTR_TO_BTF_ID */
		struct {
			struct btf *btf;
			u32 btf_id;
		};

		struct { /* for PTR_TO_MEM | PTR_TO_MEM_OR_NULL */
			u32 mem_size;
			u32 dynptr_id; /* for dynptr slices */
		};

		/* For dynptr stack slots */
		struct {
			enum bpf_dynptr_type type;
			/* A dynptr is 16 bytes so it takes up 2 stack slots.
			 * We need to track which slot is the first slot
			 * to protect against cases where the user may try to
			 * pass in an address starting at the second slot of the
			 * dynptr.
			 */
			bool first_slot;
		} dynptr;

		/* Max size from any of the above. */
		struct {
			unsigned long raw1;
			unsigned long raw2;
		} raw;

		u32 subprogno; /* for PTR_TO_FUNC */
	};
	/* For scalar types (SCALAR_VALUE), this represents our knowledge of
	 * the actual value.
	 * For pointer types, this represents the variable part of the offset
	 * from the pointed-to object, and is shared with all bpf_reg_states
	 * with the same id as us.
	 */
	struct tnum var_off;
	/* Used to determine if any memory access using this register will
	 * result in a bad access.
	 * These refer to the same value as var_off, not necessarily the actual
	 * contents of the register.
	 */
	s64 smin_value; /* minimum possible (s64)value */
	s64 smax_value; /* maximum possible (s64)value */
	u64 umin_value; /* minimum possible (u64)value */
	u64 umax_value; /* maximum possible (u64)value */
	s32 s32_min_value; /* minimum possible (s32)value */
	s32 s32_max_value; /* maximum possible (s32)value */
	u32 u32_min_value; /* minimum possible (u32)value */
	u32 u32_max_value; /* maximum possible (u32)value */
	/* For PTR_TO_PACKET, used to find other pointers with the same variable
	 * offset, so they can share range knowledge.
	 * For PTR_TO_MAP_VALUE_OR_NULL this is used to share which map value we
	 * came from, when one is tested for != NULL.
	 * For PTR_TO_MEM_OR_NULL this is used to identify memory allocation
	 * for the purpose of tracking that it's freed.
	 * For PTR_TO_SOCKET this is used to share which pointers retain the
	 * same reference to the socket, to determine proper reference freeing.
	 * For stack slots that are dynptrs, this is used to track references to
	 * the dynptr to determine proper reference freeing.
	 */
	u32 id;
	/* PTR_TO_SOCKET and PTR_TO_TCP_SOCK could be a ptr returned
	 * from a pointer-cast helper, bpf_sk_fullsock() and
	 * bpf_tcp_sock().
	 *
	 * Consider the following where "sk" is a reference counted
	 * pointer returned from "sk = bpf_sk_lookup_tcp();":
	 *
	 * 1: sk = bpf_sk_lookup_tcp();
	 * 2: if (!sk) { return 0; }
	 * 3: fullsock = bpf_sk_fullsock(sk);
	 * 4: if (!fullsock) { bpf_sk_release(sk); return 0; }
	 * 5: tp = bpf_tcp_sock(fullsock);
	 * 6: if (!tp) { bpf_sk_release(sk); return 0; }
	 * 7: bpf_sk_release(sk);
	 * 8: snd_cwnd = tp->snd_cwnd;  // verifier will complain
	 *
	 * After bpf_sk_release(sk) at line 7, both the "fullsock" ptr and
	 * the "tp" ptr should be invalidated as well.  In order to do that,
	 * the regs holding "fullsock" and "sk" need to remember
	 * the original refcounted ptr id (i.e. sk_reg->id) in ref_obj_id
	 * such that the verifier can reset all regs which have
	 * ref_obj_id matching the sk_reg->id.
	 *
	 * sk_reg->ref_obj_id is set to sk_reg->id at line 1.
	 * sk_reg->id is kept for NULL-marking purposes only.
	 * After NULL-marking is done, sk_reg->id can be reset to 0.
	 *
	 * After "fullsock = bpf_sk_fullsock(sk);" at line 3,
	 * fullsock_reg->ref_obj_id is set to sk_reg->ref_obj_id.
	 *
	 * After "tp = bpf_tcp_sock(fullsock);" at line 5,
	 * tp_reg->ref_obj_id is set to fullsock_reg->ref_obj_id
	 * which is the same as sk_reg->ref_obj_id.
	 *
	 * From the verifier perspective, if sk, fullsock and tp
	 * are not NULL, they are the same ptr with different
	 * reg->type.  In particular, bpf_sk_release(tp) is also
	 * allowed and has the same effect as bpf_sk_release(sk).
	 */
	u32 ref_obj_id;
	/* parentage chain for liveness checking */
	struct bpf_reg_state *parent;
	/* Inside the callee two registers can be both PTR_TO_STACK like
	 * R1=fp-8 and R2=fp-8, but one of them points to this function's stack
	 * while the other points to the caller's stack. To differentiate them,
	 * 'frameno' is used, which is an index in the
	 * bpf_verifier_state->frame[] array pointing to a bpf_func_state.
	 */
	u32 frameno;
	/* Tracks subreg definition. The stored value is the insn_idx of the
	 * writing insn. This is safe because subreg_def is used before any insn
	 * patching which only happens after main verification finished.
	 */
	s32 subreg_def;
	enum bpf_reg_liveness live;
	/* if (!precise && SCALAR_VALUE) min/max/tnum don't affect safety */
	bool precise;
};
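
/* Worked example of the bounds above: after "r1 &= 0xff" on an unknown
 * scalar, var_off becomes the tnum (value 0x0, mask 0xff), from which
 * umin_value = 0 and umax_value = 255 follow; the signed and 32-bit
 * bounds are derived the same way.
 */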

enum bpf_stack_slot_type {
	STACK_INVALID,    /* nothing was stored in this stack slot */
	STACK_SPILL,      /* register spilled into stack */
	STACK_MISC,	  /* BPF program wrote some data into this slot */
	STACK_ZERO,	  /* BPF program wrote constant zero */
	/* A dynptr is stored in this stack slot. The type of dynptr
	 * is stored in bpf_stack_state->spilled_ptr.dynptr.type
	 */
	STACK_DYNPTR,
};

#define BPF_REG_SIZE 8	/* size of eBPF register in bytes */
#define BPF_DYNPTR_SIZE		sizeof(struct bpf_dynptr_kern)
#define BPF_DYNPTR_NR_SLOTS		(BPF_DYNPTR_SIZE / BPF_REG_SIZE)
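/* Per the dynptr comment above, struct bpf_dynptr_kern is 16 bytes, so
 * BPF_DYNPTR_NR_SLOTS works out to 16 / 8 = 2 stack slots.
 */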

struct bpf_stack_state {
	struct bpf_reg_state spilled_ptr;
	u8 slot_type[BPF_REG_SIZE];
};

struct bpf_reference_state {
	/* Track each reference created with a unique id, even if the same
	 * instruction creates the reference multiple times (e.g., via CALL).
	 */
	int id;
	/* Instruction where the allocation of this reference occurred. This
	 * is used purely to inform the user of a reference leak.
	 */
	int insn_idx;
	/* There can be a case like:
	 * main (frame 0)
	 *  cb (frame 1)
	 *   func (frame 2)
	 *    cb (frame 3)
	 * Hence for frame 3, if callback_ref just stored a boolean, it would be
	 * impossible to distinguish nested callback refs. Hence store the
	 * frameno and compare that to callback_ref in check_reference_leak when
	 * exiting a callback function.
	 */
	int callback_ref;
};

/* state of the program:
 * type of all registers and stack info
 */
struct bpf_func_state {
	struct bpf_reg_state regs[MAX_BPF_REG];
	/* index of call instruction that called into this func */
	int callsite;
	/* stack frame number of this function state from pov of
	 * enclosing bpf_verifier_state.
	 * 0 = main function, 1 = first callee.
	 */
	u32 frameno;
	/* subprog number == index within subprog_info
	 * zero == main subprog
	 */
	u32 subprogno;
	/* Every bpf_timer_start will increment async_entry_cnt.
	 * It's used to distinguish:
	 * void foo(void) { for(;;); }
	 * void foo(void) { bpf_timer_set_callback(,foo); }
	 */
	u32 async_entry_cnt;
	bool in_callback_fn;
	struct tnum callback_ret_range;
	bool in_async_callback_fn;

	/* The following fields should be last. See copy_func_state() */
	int acquired_refs;
	struct bpf_reference_state *refs;
	int allocated_stack;
	struct bpf_stack_state *stack;
};

struct bpf_idx_pair {
	u32 prev_idx;
	u32 idx;
};

struct bpf_id_pair {
	u32 old;
	u32 cur;
};

#define MAX_CALL_FRAMES 8
/* Maximum number of register states that can exist at once */
#define BPF_ID_MAP_SIZE ((MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE) * MAX_CALL_FRAMES)
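/* With the current MAX_BPF_REG == 11 and MAX_BPF_STACK == 512, this
 * works out to (11 + 512 / 8) * 8 = 600 entries.
 */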
struct bpf_verifier_state {
	/* call stack tracking */
	struct bpf_func_state *frame[MAX_CALL_FRAMES];
	struct bpf_verifier_state *parent;
	/*
	 * 'branches' field is the number of branches left to explore:
	 * 0 - all possible paths from this state reached bpf_exit or
	 * were safely pruned
	 * 1 - at least one path is being explored.
	 * This state hasn't reached bpf_exit
	 * 2 - at least two paths are being explored.
	 * This state is an immediate parent of two children.
	 * One is a fallthrough branch with branches==1 and another
	 * state is pushed into stack (to be explored later) also with
	 * branches==1. The parent of this state has branches==1.
	 * The verifier state tree connected via 'parent' pointer looks like:
	 * 1
	 * 1
	 * 2 -> 1 (first 'if' pushed into stack)
	 * 1
	 * 2 -> 1 (second 'if' pushed into stack)
	 * 1
	 * 1
	 * 1 bpf_exit.
	 *
	 * Once do_check() reaches bpf_exit, it calls update_branch_counts()
	 * and the verifier state tree will look like:
	 * 1
	 * 1
	 * 2 -> 1 (first 'if' pushed into stack)
	 * 1
	 * 1 -> 1 (second 'if' pushed into stack)
	 * 0
	 * 0
	 * 0 bpf_exit.
	 * After pop_stack() the do_check() will resume at the second 'if'.
	 *
	 * If is_state_visited() sees a state with branches > 0 it means
	 * there is a loop. If such a state is exactly equal to the current
	 * state it's an infinite loop. Note states_equal() checks for states
	 * equivalency, so two states being 'states_equal' does not mean
	 * infinite loop. The exact comparison is provided by the
	 * states_maybe_looping() function. It's a stronger pre-check and
	 * much faster than states_equal().
	 *
	 * This algorithm may not find all possible infinite loops, or the
	 * loop iteration count may be too high.
	 * In such cases the BPF_COMPLEXITY_LIMIT_INSNS limit kicks in.
	 */
	u32 branches;
	u32 insn_idx;
	u32 curframe;

	struct bpf_active_lock active_lock;
	bool speculative;
	bool active_rcu_lock;

	/* first and last insn idx of this verifier state */
	u32 first_insn_idx;
	u32 last_insn_idx;
	/* jmp history recorded from first to last.
	 * backtracking uses it to go from last to first.
	 * For most states jmp_history_cnt is [0-3].
	 * For loops it can go up to ~40.
	 */
	struct bpf_idx_pair *jmp_history;
	u32 jmp_history_cnt;
};

#define bpf_get_spilled_reg(slot, frame)				\
	(((slot < frame->allocated_stack / BPF_REG_SIZE) &&		\
	  (frame->stack[slot].slot_type[0] == STACK_SPILL))		\
	 ? &frame->stack[slot].spilled_ptr : NULL)

/* Iterate over 'frame', setting 'reg' to either NULL or a spilled register. */
#define bpf_for_each_spilled_reg(iter, frame, reg)			\
	for (iter = 0, reg = bpf_get_spilled_reg(iter, frame);		\
	     iter < frame->allocated_stack / BPF_REG_SIZE;		\
	     iter++, reg = bpf_get_spilled_reg(iter, frame))

/* Invoke __expr over registers in __vst, setting __state and __reg */
#define bpf_for_each_reg_in_vstate(__vst, __state, __reg, __expr)   \
	({                                                               \
		struct bpf_verifier_state *___vstate = __vst;            \
		int ___i, ___j;                                          \
		for (___i = 0; ___i <= ___vstate->curframe; ___i++) {    \
			struct bpf_reg_state *___regs;                   \
			__state = ___vstate->frame[___i];                \
			___regs = __state->regs;                         \
			for (___j = 0; ___j < MAX_BPF_REG; ___j++) {     \
				__reg = &___regs[___j];                  \
				(void)(__expr);                          \
			}                                                \
			bpf_for_each_spilled_reg(___j, __state, __reg) { \
				if (!__reg)                              \
					continue;                        \
				(void)(__expr);                          \
			}                                                \
		}                                                        \
	})
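
/* Illustrative usage (hypothetical caller, mirroring the pattern used
 * in kernel/bpf/verifier.c):
 *
 *	struct bpf_func_state *state;
 *	struct bpf_reg_state *reg;
 *
 *	bpf_for_each_reg_in_vstate(env->cur_state, state, reg, ({
 *		if (reg->type == SCALAR_VALUE)
 *			reg->precise = true;
 *	}));
 *
 * This visits all MAX_BPF_REG registers of every frame, plus every
 * spilled register on each frame's stack.
 */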

/* linked list of verifier states used to prune search */
struct bpf_verifier_state_list {
	struct bpf_verifier_state state;
	struct bpf_verifier_state_list *next;
	int miss_cnt, hit_cnt;
};

struct bpf_loop_inline_state {
	unsigned int initialized:1; /* set to true upon first entry */
	unsigned int fit_for_inline:1; /* true if callback function is the same
					* at each call and flags are always zero
					*/
	u32 callback_subprogno; /* valid when fit_for_inline is true */
};

/* Possible states for alu_state member. */
#define BPF_ALU_SANITIZE_SRC		(1U << 0)
#define BPF_ALU_SANITIZE_DST		(1U << 1)
#define BPF_ALU_NEG_VALUE		(1U << 2)
#define BPF_ALU_NON_POINTER		(1U << 3)
#define BPF_ALU_IMMEDIATE		(1U << 4)
#define BPF_ALU_SANITIZE		(BPF_ALU_SANITIZE_SRC | \
					 BPF_ALU_SANITIZE_DST)

struct bpf_insn_aux_data {
	union {
		enum bpf_reg_type ptr_type;	/* pointer type for load/store insns */
		unsigned long map_ptr_state;	/* pointer/poison value for maps */
		s32 call_imm;			/* saved imm field of call insn */
		u32 alu_limit;			/* limit for add/sub register with pointer */
		struct {
			u32 map_index;		/* index into used_maps[] */
			u32 map_off;		/* offset from value base address */
		};
		struct {
			enum bpf_reg_type reg_type;	/* type of pseudo_btf_id */
			union {
				struct {
					struct btf *btf;
					u32 btf_id;	/* btf_id for struct typed var */
				};
				u32 mem_size;	/* mem_size for non-struct typed var */
			};
		} btf_var;
		/* if instruction is a call to bpf_loop, this field tracks
		 * the state of the relevant registers to make a decision about inlining
		 */
		struct bpf_loop_inline_state loop_inline_state;
	};
	u64 obj_new_size; /* remember the size of type passed to bpf_obj_new to rewrite R1 */
	struct btf_struct_meta *kptr_struct_meta;
	u64 map_key_state; /* constant (32 bit) key tracking for maps */
	int ctx_field_size; /* the ctx field size for load insn, maybe 0 */
	u32 seen; /* this insn was processed by the verifier at env->pass_cnt */
	bool sanitize_stack_spill; /* subject to Spectre v4 sanitation */
	bool zext_dst; /* this insn zero extends dst reg */
	bool storage_get_func_atomic; /* bpf_*_storage_get() with atomic memory alloc */
	u8 alu_state; /* used in combination with alu_limit */

	/* below fields are initialized once */
	unsigned int orig_idx; /* original instruction index */
	bool prune_point;
	bool jmp_point;
};

#define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */
#define MAX_USED_BTFS 64 /* max number of BTFs accessed by one BPF program */

#define BPF_VERIFIER_TMP_LOG_SIZE	1024

struct bpf_verifier_log {
	u32 level;
	char kbuf[BPF_VERIFIER_TMP_LOG_SIZE];
	char __user *ubuf;
	u32 len_used;
	u32 len_total;
};

static inline bool bpf_verifier_log_full(const struct bpf_verifier_log *log)
{
	/* the last byte of the user buffer is reserved for the terminating NUL */
	return log->len_used >= log->len_total - 1;
}

#define BPF_LOG_LEVEL1	1
#define BPF_LOG_LEVEL2	2
#define BPF_LOG_STATS	4
#define BPF_LOG_LEVEL	(BPF_LOG_LEVEL1 | BPF_LOG_LEVEL2)
#define BPF_LOG_MASK	(BPF_LOG_LEVEL | BPF_LOG_STATS)
#define BPF_LOG_KERNEL	(BPF_LOG_MASK + 1) /* kernel internal flag */
#define BPF_LOG_MIN_ALIGNMENT 8U
#define BPF_LOG_ALIGNMENT 40U

static inline bool bpf_verifier_log_needed(const struct bpf_verifier_log *log)
{
	return log &&
		((log->level && log->ubuf && !bpf_verifier_log_full(log)) ||
		 log->level == BPF_LOG_KERNEL);
}

static inline bool
bpf_verifier_log_attr_valid(const struct bpf_verifier_log *log)
{
	return log->len_total >= 128 && log->len_total <= UINT_MAX >> 2 &&
	       log->level && log->ubuf && !(log->level & ~BPF_LOG_MASK);
}

#define BPF_MAX_SUBPROGS 256

struct bpf_subprog_info {
	/* 'start' has to be the first field otherwise find_subprog() won't work */
	u32 start; /* insn idx of function entry point */
	u32 linfo_idx; /* The idx to the main_prog->aux->linfo */
	u16 stack_depth; /* max. stack depth used by this function */
	bool has_tail_call;
	bool tail_call_reachable;
	bool has_ld_abs;
	bool is_async_cb;
};

/* single container for all structs
 * one verifier_env per bpf_check() call
 */
struct bpf_verifier_env {
	u32 insn_idx;
	u32 prev_insn_idx;
	struct bpf_prog *prog;		/* eBPF program being verified */
	const struct bpf_verifier_ops *ops;
	struct bpf_verifier_stack_elem *head; /* stack of verifier states to be processed */
	int stack_size;			/* number of states to be processed */
	bool strict_alignment;		/* perform strict pointer alignment checks */
	bool test_state_freq;		/* test verifier with different pruning frequency */
	struct bpf_verifier_state *cur_state; /* current verifier state */
	struct bpf_verifier_state_list **explored_states; /* search pruning optimization */
	struct bpf_verifier_state_list *free_list;
	struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of maps used by eBPF program */
	struct btf_mod_pair used_btfs[MAX_USED_BTFS]; /* array of BTFs used by BPF program */
	u32 used_map_cnt;		/* number of used maps */
	u32 used_btf_cnt;		/* number of used BTF objects */
	u32 id_gen;			/* used to generate unique reg IDs */
	bool explore_alu_limits;
	bool allow_ptr_leaks;
	bool allow_uninit_stack;
	bool bpf_capable;
	bool bypass_spec_v1;
	bool bypass_spec_v4;
	bool seen_direct_write;
	bool rcu_tag_supported;
	struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */
	const struct bpf_line_info *prev_linfo;
	struct bpf_verifier_log log;
	struct bpf_subprog_info subprog_info[BPF_MAX_SUBPROGS + 1];
	struct bpf_id_pair idmap_scratch[BPF_ID_MAP_SIZE];
	struct {
		int *insn_state;
		int *insn_stack;
		int cur_stack;
	} cfg;
	u32 pass_cnt; /* number of times do_check() was called */
	u32 subprog_cnt;
	/* number of instructions analyzed by the verifier */
	u32 prev_insn_processed, insn_processed;
	/* number of jmps, calls, exits analyzed so far */
	u32 prev_jmps_processed, jmps_processed;
	/* total verification time */
	u64 verification_time;
	/* maximum number of verifier states kept in 'branching' instructions */
	u32 max_states_per_insn;
	/* total number of allocated verifier states */
	u32 total_states;
	/* some states are freed during program analysis.
	 * this is the peak number of states. this number dominates kernel
	 * memory consumption during verification
	 */
	u32 peak_states;
	/* longest register parentage chain walked for liveness marking */
	u32 longest_mark_read_walk;
	bpfptr_t fd_array;

	/* bit mask to keep track of whether a register has been accessed
	 * since the last time the function state was printed
	 */
	u32 scratched_regs;
	/* Same as scratched_regs but for stack slots */
	u64 scratched_stack_slots;
	u32 prev_log_len, prev_insn_print_len;
	/* buffer used in reg_type_str() to generate reg_type string */
	char type_str_buf[TYPE_STR_BUF_LEN];
};

__printf(2, 0) void bpf_verifier_vlog(struct bpf_verifier_log *log,
				      const char *fmt, va_list args);
__printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
					   const char *fmt, ...);
__printf(2, 3) void bpf_log(struct bpf_verifier_log *log,
			    const char *fmt, ...);

static inline struct bpf_func_state *cur_func(struct bpf_verifier_env *env)
{
	struct bpf_verifier_state *cur = env->cur_state;

	return cur->frame[cur->curframe];
}

static inline struct bpf_reg_state *cur_regs(struct bpf_verifier_env *env)
{
	return cur_func(env)->regs;
}

int bpf_prog_offload_verifier_prep(struct bpf_prog *prog);
int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env,
				 int insn_idx, int prev_insn_idx);
int bpf_prog_offload_finalize(struct bpf_verifier_env *env);
void
bpf_prog_offload_replace_insn(struct bpf_verifier_env *env, u32 off,
			      struct bpf_insn *insn);
void
bpf_prog_offload_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt);

int check_ptr_off_reg(struct bpf_verifier_env *env,
		      const struct bpf_reg_state *reg, int regno);
int check_func_arg_reg_off(struct bpf_verifier_env *env,
			   const struct bpf_reg_state *reg, int regno,
			   enum bpf_arg_type arg_type);
int check_mem_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
		   u32 regno, u32 mem_size);
struct bpf_call_arg_meta;
int process_dynptr_func(struct bpf_verifier_env *env, int regno,
			enum bpf_arg_type arg_type, struct bpf_call_arg_meta *meta);

/* this lives here instead of in bpf.h because it needs to dereference tgt_prog */
static inline u64 bpf_trampoline_compute_key(const struct bpf_prog *tgt_prog,
					     struct btf *btf, u32 btf_id)
{
	if (tgt_prog)
		return ((u64)tgt_prog->aux->id << 32) | btf_id;
	else
		return ((u64)btf_obj_id(btf) << 32) | 0x80000000 | btf_id;
}

/* unpack the IDs from the key as constructed above */
static inline void bpf_trampoline_unpack_key(u64 key, u32 *obj_id, u32 *btf_id)
{
	if (obj_id)
		*obj_id = key >> 32;
	if (btf_id)
		*btf_id = key & 0x7FFFFFFF;
}
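
/* For intuition: a key built from a raw BTF target has bits [63:32] ==
 * btf_obj_id(btf), bit 31 == the 0x80000000 marker and bits [30:0] ==
 * btf_id, so unpacking takes the high 32 bits for obj_id and masks
 * with 0x7FFFFFFF for btf_id, which also drops the marker bit.
 */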

int bpf_check_attach_target(struct bpf_verifier_log *log,
			    const struct bpf_prog *prog,
			    const struct bpf_prog *tgt_prog,
			    u32 btf_id,
			    struct bpf_attach_target_info *tgt_info);
void bpf_free_kfunc_btf_tab(struct bpf_kfunc_btf_tab *tab);

int mark_chain_precision(struct bpf_verifier_env *env, int regno);

#define BPF_BASE_TYPE_MASK	GENMASK(BPF_BASE_TYPE_BITS - 1, 0)

/* extract base type from bpf_{arg, return, reg}_type. */
static inline u32 base_type(u32 type)
{
	return type & BPF_BASE_TYPE_MASK;
}

/* extract flags from an extended type. See bpf_type_flag in bpf.h. */
static inline u32 type_flag(u32 type)
{
	return type & ~BPF_BASE_TYPE_MASK;
}
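
/* Illustrative example: a map-value pointer that may still be NULL has
 * type == (PTR_TO_MAP_VALUE | PTR_MAYBE_NULL); base_type() returns
 * PTR_TO_MAP_VALUE, while type_flag() exposes the PTR_MAYBE_NULL bit.
 */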

/* only use after check_attach_btf_id() */
static inline enum bpf_prog_type resolve_prog_type(const struct bpf_prog *prog)
{
	return prog->type == BPF_PROG_TYPE_EXT ?
		prog->aux->dst_prog->type : prog->type;
}

static inline bool bpf_prog_check_recur(const struct bpf_prog *prog)
{
	switch (resolve_prog_type(prog)) {
	case BPF_PROG_TYPE_TRACING:
		return prog->expected_attach_type != BPF_TRACE_ITER;
	case BPF_PROG_TYPE_STRUCT_OPS:
	case BPF_PROG_TYPE_LSM:
		return false;
	default:
		return true;
	}
}

#define BPF_REG_TRUSTED_MODIFIERS (MEM_ALLOC | PTR_TRUSTED)

static inline bool bpf_type_has_unsafe_modifiers(u32 type)
{
	return type_flag(type) & ~BPF_REG_TRUSTED_MODIFIERS;
}
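
/* Illustrative example: a PTR_TO_BTF_ID register tagged PTR_UNTRUSTED
 * carries a flag outside BPF_REG_TRUSTED_MODIFIERS, so this returns
 * true; MEM_ALLOC or PTR_TRUSTED alone would not make it unsafe.
 */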

#endif /* _LINUX_BPF_VERIFIER_H */