{
	"pointer/scalar confusion in state equality check (way 1)",
	.insns = {
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
	BPF_JMP_A(1),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
	BPF_JMP_A(0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_8b = { 3 },
	.result = ACCEPT,
	.retval = POINTER_VALUE,
	.result_unpriv = REJECT,
	.errstr_unpriv = "R0 leaks addr as return value"
},
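/* Way 1: both paths converge at the trailing BPF_JMP_A(0) with R0 holding
 * either a scalar loaded from the map value or the frame pointer, and state
 * pruning must not treat those two R0 states as equivalent. Roughly, in
 * pseudo-C (a sketch only, "map" being the fixed-up hash map):
 *
 *	u64 key = 0;
 *	u64 *val = bpf_map_lookup_elem(map, &key);
 *	return val ? *val : (u64)fp;	-- returns fp when the lookup misses
 *
 * Way 2 below is the same program with the branch sense inverted, so the
 * two paths are explored in the opposite order.
 */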
{
	"pointer/scalar confusion in state equality check (way 2)",
	.insns = {
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
	BPF_JMP_A(1),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_8b = { 3 },
	.result = ACCEPT,
	.retval = POINTER_VALUE,
	.result_unpriv = REJECT,
	.errstr_unpriv = "R0 leaks addr as return value"
},
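/* Unsigned r2 >= 0 always holds, so both writes to R0 can be jumped over
 * and BPF_EXIT_INSN() is reachable with R0 never initialized. Liveness-based
 * pruning has to screen out the R0 writes seen on the fallthrough paths
 * rather than declare the state safe, hence the expected "R0 !read_ok".
 */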
{
	"liveness pruning and write screening",
	.insns = {
	/* Get an unknown value */
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
	/* branch conditions teach us nothing about R2 */
	BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.errstr = "R0 !read_ok",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_LWT_IN,
},
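/* The BPF_JSGT check bounds the index from above only, while the path that
 * fails it zeroes the index, so a negative index survives into the pointer
 * arithmetic. Roughly, in pseudo-C (an illustrative sketch only):
 *
 *	u64 *val = bpf_map_lookup_elem(map, &key);
 *	if (val) {
 *		long idx = *(long *)val;
 *		if (!(MAX_ENTRIES > idx))	-- signed, no lower bound
 *			idx = 0;
 *		*(u64 *)((char *)val + (((u32)idx) << 2)) =
 *			offsetof(struct test_val, foo);
 *	}
 *
 * Pruning the unbounded path against the bounded one would hide the
 * "R0 unbounded memory access" error.
 */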
{
	"varlen_map_value_access pruning",
	.insns = {
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
	BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
	BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
	BPF_MOV32_IMM(BPF_REG_1, 0),
	BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
	BPF_JMP_IMM(BPF_JA, 0, 0, 0),
	BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_48b = { 3 },
	.errstr_unpriv = "R0 leaks addr",
	.errstr = "R0 unbounded memory access",
	.result_unpriv = REJECT,
	.result = REJECT,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
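/* r4 records whether the map value equals 0xbeef, gets spilled to fp[-16]
 * and is filled back as r5 after a helper call. The r5 != 0 path stores
 * through the scalar r6 = 0, so every branch combination must be walked to
 * catch "R6 invalid mem access 'scalar'"; pruning one side of the 0xbeef
 * branch against the other would miss it.
 */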
{
	"search pruning: all branches should be verified (nop operation)",
	.insns = {
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
		BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
		BPF_LD_MAP_FD(BPF_REG_1, 0),
		BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
		BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0xbeef, 2),
		BPF_MOV64_IMM(BPF_REG_4, 0),
		BPF_JMP_A(1),
		BPF_MOV64_IMM(BPF_REG_4, 1),
		BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -16),
		BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
		BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -16),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_5, 0, 2),
		BPF_MOV64_IMM(BPF_REG_6, 0),
		BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xdead),
		BPF_EXIT_INSN(),
	},
	.fixup_map_hash_8b = { 3 },
	.errstr = "R6 invalid mem access 'scalar'",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
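/* Same shape as above, except the two branches spill r4 to different slots
 * (fp[-16] vs fp[-24]) while the tail unconditionally reads fp[-16]. The
 * branch spilling to fp[-24] leaves fp[-16] uninitialized, so all branches
 * must be verified to catch the invalid stack read.
 */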
{
	"search pruning: all branches should be verified (invalid stack access)",
	.insns = {
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
		BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
		BPF_LD_MAP_FD(BPF_REG_1, 0),
		BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
		BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
		BPF_MOV64_IMM(BPF_REG_4, 0),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0xbeef, 2),
		BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -16),
		BPF_JMP_A(1),
		BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -24),
		BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
		BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -16),
		BPF_EXIT_INSN(),
	},
	.fixup_map_hash_8b = { 3 },
	.errstr = "invalid read from stack off -16+0 size 8",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
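/* r6 is 32 or 4 depending on a random value and is spilled/filled as a u32
 * before being added to a map value pointer of an 8-byte value. Precision
 * tracking must follow the 32-bit spill/fill so that the r6 = 32 path keeps
 * its exact value at the pruning point and the out-of-range access is
 * reported.
 */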
{
	"precision tracking for u32 spill/fill",
	.insns = {
		BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
		BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32),
		BPF_MOV32_IMM(BPF_REG_6, 32),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
		BPF_MOV32_IMM(BPF_REG_6, 4),
		/* Additional insns to introduce a pruning point. */
		BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32),
		BPF_MOV64_IMM(BPF_REG_3, 0),
		BPF_MOV64_IMM(BPF_REG_3, 0),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
		BPF_MOV64_IMM(BPF_REG_3, 0),
		/* u32 spill/fill */
		BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_6, -8),
		BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_10, -8),
		/* out-of-bound map value access for r6=32 */
		BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0),
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
		BPF_LD_MAP_FD(BPF_REG_1, 0),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
		BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
		BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	},
	.fixup_map_hash_8b = { 15 },
	.result = REJECT,
	.errstr = "R0 min value is outside of the allowed memory range",
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
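/* Two u32 spills (random r6 at fp[-4], constant 0xffffffff at fp[-8]) are
 * overlapped by a u64 fill of r8 from fp[-8], so r8 must be marked unknown.
 * If it is, both outcomes of the r8 tests stay reachable and verification
 * stops at the guarded div-by-zero, which is the expected rejection here.
 */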
{
	"precision tracking for u32 spills, u64 fill",
	.insns = {
		BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32),
		BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
		BPF_MOV32_IMM(BPF_REG_7, 0xffffffff),
		/* Additional insns to introduce a pruning point. */
		BPF_MOV64_IMM(BPF_REG_3, 1),
		BPF_MOV64_IMM(BPF_REG_3, 1),
		BPF_MOV64_IMM(BPF_REG_3, 1),
		BPF_MOV64_IMM(BPF_REG_3, 1),
		BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
		BPF_MOV64_IMM(BPF_REG_3, 1),
		BPF_ALU32_IMM(BPF_DIV, BPF_REG_3, 0),
		/* u32 spills, u64 fill */
		BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_6, -4),
		BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_7, -8),
		BPF_LDX_MEM(BPF_DW, BPF_REG_8, BPF_REG_10, -8),
		/* if r8 != X goto pc+1  r8 known in fallthrough branch */
		BPF_JMP_IMM(BPF_JNE, BPF_REG_8, 0xffffffff, 1),
		BPF_MOV64_IMM(BPF_REG_3, 1),
		/* if r8 == X goto pc+1  condition always true on first
		 * traversal, so starts backtracking to mark r8 as requiring
		 * precision. r7 marked as needing precision. r6 not marked
		 * since it's not tracked.
		 */
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_8, 0xffffffff, 1),
		/* fails if r8 correctly marked unknown after fill. */
		BPF_ALU32_IMM(BPF_DIV, BPF_REG_3, 0),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.errstr = "div by zero",
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
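/* Only one branch spills r6/r7 to fp[-8]/fp[-9] and fills them back, so the
 * two paths reach the trailing no-op jumps with different amounts of
 * allocated stack. The .insn_processed = 15 bound checks that the second
 * traversal is still pruned rather than re-verified.
 */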
{
	"allocated_stack",
	.insns = {
		BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
		BPF_ALU64_REG(BPF_MOV, BPF_REG_7, BPF_REG_0),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
		BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, -8),
		BPF_STX_MEM(BPF_B, BPF_REG_10, BPF_REG_7, -9),
		BPF_LDX_MEM(BPF_B, BPF_REG_7, BPF_REG_10, -9),
		BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 0),
		BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 0),
		BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 0),
		BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 0),
		BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.result_unpriv = ACCEPT,
	.insn_processed = 15,
},
/* The test performs a conditional 64-bit write to stack location fp[-8];
 * this is followed by an unconditional 8-bit write to fp[-8] and then a
 * read from fp[-8]. This sequence is unsafe.
 *
 * The test would be mistakenly marked as safe without dst register parent
 * preservation in the verifier.c:copy_register_state() function.
 *
 * Note the use of BPF_F_TEST_STATE_FREQ to force creation of the
 * checkpoint state after the conditional 64-bit assignment.
 */
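/* Roughly, in pseudo-C (a sketch of the unsafe sequence only):
 *
 *	if (!(ktime_get_ns() > r6))
 *		*(u64 *)(fp - 8) = 0xdeadbeef;	-- conditional, checkpointed
 *	*(u8 *)(fp - 8) = 42;			-- initializes one byte only
 *	r2 = *(u64 *)(fp - 8);			-- 7 bytes may be uninitialized
 */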
{
	"write tracking and register parent chain bug",
	.insns = {
	/* r6 = ktime_get_ns() */
	BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
	/* r0 = ktime_get_ns() */
	BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
	/* if r0 > r6 goto +1 */
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_6, 1),
	/* *(u64 *)(r10 - 8) = 0xdeadbeef */
	BPF_ST_MEM(BPF_DW, BPF_REG_FP, -8, 0xdeadbeef),
	/* r1 = 42 */
	BPF_MOV64_IMM(BPF_REG_1, 42),
	/* *(u8 *)(r10 - 8) = r1 */
	BPF_STX_MEM(BPF_B, BPF_REG_FP, BPF_REG_1, -8),
	/* r2 = *(u64 *)(r10 - 8) */
	BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_FP, -8),
	/* exit(0) */
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.flags = BPF_F_TEST_STATE_FREQ,
	.errstr = "invalid read from stack off -8+1 size 8",
	.result = REJECT,
},