// SPDX-License-Identifier: GPL-2.0
#include <bpf/btf.h>
#include <test_btf.h>
#include <linux/btf.h>
#include <test_progs.h>
#include <network_helpers.h>

#include "linked_list.skel.h"
#include "linked_list_fail.skel.h"
#include "linked_list_peek.skel.h"

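/* Shared 1 MiB buffer that captures the kernel verifier log for each load
 * attempt, so expected-failure tests can grep it for error messages.
 */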
static char log_buf[1024 * 1024];

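/* Table of programs from linked_list_fail.bpf.c that the verifier must
 * reject, each paired with a substring expected in the verifier log. The
 * TEST macros below stamp out the lock-related variants for every list
 * location (allocated object, global, map value, inner map value), with
 * the bpf_spin_lock offset or list operation substituted in.
 */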
static struct {
	const char *prog_name;
	const char *err_msg;
} linked_list_fail_tests[] = {
#define TEST(test, off) \
	{ #test "_missing_lock_push_front", \
	  "bpf_spin_lock at off=" #off " must be held for bpf_list_head" }, \
	{ #test "_missing_lock_push_back", \
	  "bpf_spin_lock at off=" #off " must be held for bpf_list_head" }, \
	{ #test "_missing_lock_pop_front", \
	  "bpf_spin_lock at off=" #off " must be held for bpf_list_head" }, \
	{ #test "_missing_lock_pop_back", \
	  "bpf_spin_lock at off=" #off " must be held for bpf_list_head" },
	TEST(kptr, 40)
	TEST(global, 16)
	TEST(map, 0)
	TEST(inner_map, 0)
#undef TEST
#define TEST(test, op) \
	{ #test "_kptr_incorrect_lock_" #op, \
	  "held lock and object are not in the same allocation\n" \
	  "bpf_spin_lock at off=40 must be held for bpf_list_head" }, \
	{ #test "_global_incorrect_lock_" #op, \
	  "held lock and object are not in the same allocation\n" \
	  "bpf_spin_lock at off=16 must be held for bpf_list_head" }, \
	{ #test "_map_incorrect_lock_" #op, \
	  "held lock and object are not in the same allocation\n" \
	  "bpf_spin_lock at off=0 must be held for bpf_list_head" }, \
	{ #test "_inner_map_incorrect_lock_" #op, \
	  "held lock and object are not in the same allocation\n" \
	  "bpf_spin_lock at off=0 must be held for bpf_list_head" },
	TEST(kptr, push_front)
	TEST(kptr, push_back)
	TEST(kptr, pop_front)
	TEST(kptr, pop_back)
	TEST(global, push_front)
	TEST(global, push_back)
	TEST(global, pop_front)
	TEST(global, pop_back)
	TEST(map, push_front)
	TEST(map, push_back)
	TEST(map, pop_front)
	TEST(map, pop_back)
	TEST(inner_map, push_front)
	TEST(inner_map, push_back)
	TEST(inner_map, pop_front)
	TEST(inner_map, pop_back)
#undef TEST
	{ "map_compat_kprobe", "tracing progs cannot use bpf_{list_head,rb_root} yet" },
	{ "map_compat_kretprobe", "tracing progs cannot use bpf_{list_head,rb_root} yet" },
	{ "map_compat_tp", "tracing progs cannot use bpf_{list_head,rb_root} yet" },
	{ "map_compat_perf", "tracing progs cannot use bpf_{list_head,rb_root} yet" },
	{ "map_compat_raw_tp", "tracing progs cannot use bpf_{list_head,rb_root} yet" },
	{ "map_compat_raw_tp_w", "tracing progs cannot use bpf_{list_head,rb_root} yet" },
	{ "obj_type_id_oor", "local type ID argument must be in range [0, U32_MAX]" },
	{ "obj_new_no_composite", "bpf_obj_new/bpf_percpu_obj_new type ID argument must be of a struct" },
	{ "obj_new_no_struct", "bpf_obj_new/bpf_percpu_obj_new type ID argument must be of a struct" },
	{ "obj_drop_non_zero_off", "R1 must have zero offset when passed to release func" },
	{ "new_null_ret", "R0 invalid mem access 'ptr_or_null_'" },
	{ "obj_new_acq", "Unreleased reference id=" },
	{ "use_after_drop", "invalid mem access 'scalar'" },
	{ "ptr_walk_scalar", "type=rdonly_untrusted_mem expected=percpu_ptr_" },
	{ "direct_read_lock", "direct access to bpf_spin_lock is disallowed" },
	{ "direct_write_lock", "direct access to bpf_spin_lock is disallowed" },
	{ "direct_read_head", "direct access to bpf_list_head is disallowed" },
	{ "direct_write_head", "direct access to bpf_list_head is disallowed" },
	{ "direct_read_node", "direct access to bpf_list_node is disallowed" },
	{ "direct_write_node", "direct access to bpf_list_node is disallowed" },
	{ "use_after_unlock_push_front", "invalid mem access 'scalar'" },
	{ "use_after_unlock_push_back", "invalid mem access 'scalar'" },
	{ "double_push_front", "arg#1 expected pointer to allocated object" },
	{ "double_push_back", "arg#1 expected pointer to allocated object" },
	{ "no_node_value_type", "bpf_list_node not found at offset=0" },
	{ "incorrect_value_type",
	  "operation on bpf_list_head expects arg#1 bpf_list_node at offset=48 in struct foo, "
	  "but arg is at offset=0 in struct bar" },
	{ "incorrect_node_var_off", "variable ptr_ access var_off=(0x0; 0xffffffff) disallowed" },
	{ "incorrect_node_off1", "bpf_list_node not found at offset=49" },
	{ "incorrect_node_off2", "arg#1 offset=0, but expected bpf_list_node at offset=48 in struct foo" },
	{ "no_head_type", "bpf_list_head not found at offset=0" },
	{ "incorrect_head_var_off1", "R1 doesn't have constant offset" },
	{ "incorrect_head_var_off2", "variable ptr_ access var_off=(0x0; 0xffffffff) disallowed" },
	{ "incorrect_head_off1", "bpf_list_head not found at offset=25" },
	{ "incorrect_head_off2", "bpf_list_head not found at offset=1" },
	{ "pop_front_off", "off 48 doesn't point to 'struct bpf_spin_lock' that is at 40" },
	{ "pop_back_off", "off 48 doesn't point to 'struct bpf_spin_lock' that is at 40" },
};

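/* Enable autoload for a single program in the fail skeleton, verify that
 * loading it is rejected, and check that the verifier log contains the
 * expected error message.
 */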
static void test_linked_list_fail_prog(const char *prog_name, const char *err_msg)
{
	LIBBPF_OPTS(bpf_object_open_opts, opts, .kernel_log_buf = log_buf,
		    .kernel_log_size = sizeof(log_buf),
		    .kernel_log_level = 1);
	struct linked_list_fail *skel;
	struct bpf_program *prog;
	int ret;

	skel = linked_list_fail__open_opts(&opts);
	if (!ASSERT_OK_PTR(skel, "linked_list_fail__open_opts"))
		return;

	prog = bpf_object__find_program_by_name(skel->obj, prog_name);
	if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name"))
		goto end;

	bpf_program__set_autoload(prog, true);

	ret = linked_list_fail__load(skel);
	if (!ASSERT_ERR(ret, "linked_list_fail__load must fail"))
		goto end;

	if (!ASSERT_OK_PTR(strstr(log_buf, err_msg), "expected error message")) {
		fprintf(stderr, "Expected: %s\n", err_msg);
		fprintf(stderr, "Verifier: %s\n", log_buf);
	}

end:
	linked_list_fail__destroy(skel);
}

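/* Overwrite the element so the kernel's check_and_free_fields() releases
 * any list entries still owned by the old value.
 */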
static void clear_fields(struct bpf_map *map)
{
	char buf[24];
	int key = 0;

	memset(buf, 0xff, sizeof(buf));
	ASSERT_OK(bpf_map__update_elem(map, &key, sizeof(key), buf, sizeof(buf), 0), "check_and_free_fields");
}

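/* Selects which subset of the success programs to run. */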
enum {
	TEST_ALL,
	PUSH_POP,
	PUSH_POP_MULT,
	LIST_IN_LIST,
};

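/* Run the success programs for the given mode; each must load, run, and
 * return 0 in retval. With leave_in_map set, elements are deliberately left
 * in the maps so that skeleton teardown (rather than an explicit update)
 * has to release the remaining list entries.
 */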
static void test_linked_list_success(int mode, bool leave_in_map)
{
	LIBBPF_OPTS(bpf_test_run_opts, opts,
		.data_in = &pkt_v4,
		.data_size_in = sizeof(pkt_v4),
		.repeat = 1,
	);
	struct linked_list *skel;
	int ret;

	skel = linked_list__open_and_load();
	if (!ASSERT_OK_PTR(skel, "linked_list__open_and_load"))
		return;

	if (mode == LIST_IN_LIST)
		goto lil;
	if (mode == PUSH_POP_MULT)
		goto ppm;

	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.map_list_push_pop), &opts);
	ASSERT_OK(ret, "map_list_push_pop");
	ASSERT_OK(opts.retval, "map_list_push_pop retval");
	if (!leave_in_map)
		clear_fields(skel->maps.array_map);

	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.inner_map_list_push_pop), &opts);
	ASSERT_OK(ret, "inner_map_list_push_pop");
	ASSERT_OK(opts.retval, "inner_map_list_push_pop retval");
	if (!leave_in_map)
		clear_fields(skel->maps.inner_map);

	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.global_list_push_pop), &opts);
	ASSERT_OK(ret, "global_list_push_pop");
	ASSERT_OK(opts.retval, "global_list_push_pop retval");
	if (!leave_in_map)
		clear_fields(skel->maps.bss_A);

	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.global_list_push_pop_nested), &opts);
	ASSERT_OK(ret, "global_list_push_pop_nested");
	ASSERT_OK(opts.retval, "global_list_push_pop_nested retval");
	if (!leave_in_map)
		clear_fields(skel->maps.bss_A);

	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.global_list_array_push_pop), &opts);
	ASSERT_OK(ret, "global_list_array_push_pop");
	ASSERT_OK(opts.retval, "global_list_array_push_pop retval");
	if (!leave_in_map)
		clear_fields(skel->maps.bss_A);

	if (mode == PUSH_POP)
		goto end;

ppm:
	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.map_list_push_pop_multiple), &opts);
	ASSERT_OK(ret, "map_list_push_pop_multiple");
	ASSERT_OK(opts.retval, "map_list_push_pop_multiple retval");
	if (!leave_in_map)
		clear_fields(skel->maps.array_map);

	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.inner_map_list_push_pop_multiple), &opts);
	ASSERT_OK(ret, "inner_map_list_push_pop_multiple");
	ASSERT_OK(opts.retval, "inner_map_list_push_pop_multiple retval");
	if (!leave_in_map)
		clear_fields(skel->maps.inner_map);

	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.global_list_push_pop_multiple), &opts);
	ASSERT_OK(ret, "global_list_push_pop_multiple");
	ASSERT_OK(opts.retval, "global_list_push_pop_multiple retval");
	if (!leave_in_map)
		clear_fields(skel->maps.bss_A);

	if (mode == PUSH_POP_MULT)
		goto end;

lil:
	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.map_list_in_list), &opts);
	ASSERT_OK(ret, "map_list_in_list");
	ASSERT_OK(opts.retval, "map_list_in_list retval");
	if (!leave_in_map)
		clear_fields(skel->maps.array_map);

	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.inner_map_list_in_list), &opts);
	ASSERT_OK(ret, "inner_map_list_in_list");
	ASSERT_OK(opts.retval, "inner_map_list_in_list retval");
	if (!leave_in_map)
		clear_fields(skel->maps.inner_map);

	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.global_list_in_list), &opts);
	ASSERT_OK(ret, "global_list_in_list");
	ASSERT_OK(opts.retval, "global_list_in_list retval");
	if (!leave_in_map)
		clear_fields(skel->maps.bss_A);
end:
	linked_list__destroy(skel);
}

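/* BTF type IDs as assigned by init_btf(): "int" gets ID 1, and the three
 * structs follow in order.
 */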
#define SPIN_LOCK 2
#define LIST_HEAD 3
#define LIST_NODE 4

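/* Build a minimal BTF with stand-ins for the kernel's special structs
 * (4-byte bpf_spin_lock, 16-byte bpf_list_head, 24-byte bpf_list_node).
 * Note that btf__add_field() takes offsets in bits, not bytes.
 */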
static struct btf *init_btf(void)
{
	int id, lid, hid, nid;
	struct btf *btf;

	btf = btf__new_empty();
	if (!ASSERT_OK_PTR(btf, "btf__new_empty"))
		return NULL;
	id = btf__add_int(btf, "int", 4, BTF_INT_SIGNED);
	if (!ASSERT_EQ(id, 1, "btf__add_int"))
		goto end;
	lid = btf__add_struct(btf, "bpf_spin_lock", 4);
	if (!ASSERT_EQ(lid, SPIN_LOCK, "btf__add_struct bpf_spin_lock"))
		goto end;
	hid = btf__add_struct(btf, "bpf_list_head", 16);
	if (!ASSERT_EQ(hid, LIST_HEAD, "btf__add_struct bpf_list_head"))
		goto end;
	nid = btf__add_struct(btf, "bpf_list_node", 24);
	if (!ASSERT_EQ(nid, LIST_NODE, "btf__add_struct bpf_list_node"))
		goto end;
	return btf;
end:
	btf__free(btf);
	return NULL;
}

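/* A struct containing both a bpf_list_node and a bpf_rb_node is only valid
 * when it also carries a bpf_refcount field; without one, loading the BTF
 * must fail with -EINVAL.
 */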
static void list_and_rb_node_same_struct(bool refcount_field)
{
	int bpf_rb_node_btf_id, bpf_refcount_btf_id = 0, foo_btf_id;
	struct btf *btf;
	int id, err;

	btf = init_btf();
	if (!ASSERT_OK_PTR(btf, "init_btf"))
		return;

	bpf_rb_node_btf_id = btf__add_struct(btf, "bpf_rb_node", 32);
	if (!ASSERT_GT(bpf_rb_node_btf_id, 0, "btf__add_struct bpf_rb_node"))
		return;

	if (refcount_field) {
		bpf_refcount_btf_id = btf__add_struct(btf, "bpf_refcount", 4);
		if (!ASSERT_GT(bpf_refcount_btf_id, 0, "btf__add_struct bpf_refcount"))
			return;
	}

	id = btf__add_struct(btf, "bar", refcount_field ? 60 : 56);
	if (!ASSERT_GT(id, 0, "btf__add_struct bar"))
		return;
	err = btf__add_field(btf, "a", LIST_NODE, 0, 0);
	if (!ASSERT_OK(err, "btf__add_field bar::a"))
		return;
	err = btf__add_field(btf, "c", bpf_rb_node_btf_id, 192, 0);
	if (!ASSERT_OK(err, "btf__add_field bar::c"))
		return;
	if (refcount_field) {
		err = btf__add_field(btf, "ref", bpf_refcount_btf_id, 448, 0);
		if (!ASSERT_OK(err, "btf__add_field bar::ref"))
			return;
	}

	foo_btf_id = btf__add_struct(btf, "foo", 20);
	if (!ASSERT_GT(foo_btf_id, 0, "btf__add_struct foo"))
		return;
	err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
	if (!ASSERT_OK(err, "btf__add_field foo::a"))
		return;
	err = btf__add_field(btf, "b", SPIN_LOCK, 128, 0);
	if (!ASSERT_OK(err, "btf__add_field foo::b"))
		return;
	id = btf__add_decl_tag(btf, "contains:bar:a", foo_btf_id, 0);
	if (!ASSERT_GT(id, 0, "btf__add_decl_tag contains:bar:a"))
		return;

	err = btf__load_into_kernel(btf);
	ASSERT_EQ(err, refcount_field ? 0 : -EINVAL, "check btf");
	btf__free(btf);
}

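/* Each subtest hand-crafts BTF for a particular combination of locks, list
 * heads, nodes, and "contains:" decl tags, loads it into the kernel, and
 * checks the expected return code.
 */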
static void test_btf(void)
{
	struct btf *btf = NULL;
	int id, err;

	while (test__start_subtest("btf: too many locks")) {
		btf = init_btf();
		if (!ASSERT_OK_PTR(btf, "init_btf"))
			break;
		id = btf__add_struct(btf, "foo", 24);
		if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
			break;
		err = btf__add_field(btf, "a", SPIN_LOCK, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::a"))
			break;
		err = btf__add_field(btf, "b", SPIN_LOCK, 32, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::b"))
			break;
		err = btf__add_field(btf, "c", LIST_HEAD, 64, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::c"))
			break;

		err = btf__load_into_kernel(btf);
		ASSERT_EQ(err, -E2BIG, "check btf");
		btf__free(btf);
		break;
	}

	while (test__start_subtest("btf: missing lock")) {
		btf = init_btf();
		if (!ASSERT_OK_PTR(btf, "init_btf"))
			break;
		id = btf__add_struct(btf, "foo", 16);
		if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
			break;
		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::a"))
			break;
		id = btf__add_decl_tag(btf, "contains:baz:a", 5, 0);
		if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:baz:a"))
			break;
		id = btf__add_struct(btf, "baz", 16);
		if (!ASSERT_EQ(id, 7, "btf__add_struct baz"))
			break;
		err = btf__add_field(btf, "a", LIST_NODE, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field baz::a"))
			break;

		err = btf__load_into_kernel(btf);
		ASSERT_EQ(err, -EINVAL, "check btf");
		btf__free(btf);
		break;
	}

	while (test__start_subtest("btf: bad offset")) {
		btf = init_btf();
		if (!ASSERT_OK_PTR(btf, "init_btf"))
			break;
		id = btf__add_struct(btf, "foo", 36);
		if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
			break;
		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::a"))
			break;
		err = btf__add_field(btf, "b", LIST_NODE, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::b"))
			break;
		err = btf__add_field(btf, "c", SPIN_LOCK, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::c"))
			break;
		id = btf__add_decl_tag(btf, "contains:foo:b", 5, 0);
		if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:foo:b"))
			break;

		err = btf__load_into_kernel(btf);
		ASSERT_EQ(err, -EEXIST, "check btf");
		btf__free(btf);
		break;
	}

	while (test__start_subtest("btf: missing contains:")) {
		btf = init_btf();
		if (!ASSERT_OK_PTR(btf, "init_btf"))
			break;
		id = btf__add_struct(btf, "foo", 24);
		if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
			break;
		err = btf__add_field(btf, "a", SPIN_LOCK, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::a"))
			break;
		err = btf__add_field(btf, "b", LIST_HEAD, 64, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::b"))
			break;

		err = btf__load_into_kernel(btf);
		ASSERT_EQ(err, -EINVAL, "check btf");
		btf__free(btf);
		break;
	}

	while (test__start_subtest("btf: missing struct")) {
		btf = init_btf();
		if (!ASSERT_OK_PTR(btf, "init_btf"))
			break;
		id = btf__add_struct(btf, "foo", 24);
		if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
			break;
		err = btf__add_field(btf, "a", SPIN_LOCK, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::a"))
			break;
		err = btf__add_field(btf, "b", LIST_HEAD, 64, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::b"))
			break;
		id = btf__add_decl_tag(btf, "contains:bar:bar", 5, 1);
		if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:bar:bar"))
			break;

		err = btf__load_into_kernel(btf);
		ASSERT_EQ(err, -ENOENT, "check btf");
		btf__free(btf);
		break;
	}

	while (test__start_subtest("btf: missing node")) {
		btf = init_btf();
		if (!ASSERT_OK_PTR(btf, "init_btf"))
			break;
		id = btf__add_struct(btf, "foo", 24);
		if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
			break;
		err = btf__add_field(btf, "a", SPIN_LOCK, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::a"))
			break;
		err = btf__add_field(btf, "b", LIST_HEAD, 64, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::b"))
			break;
		id = btf__add_decl_tag(btf, "contains:foo:c", 5, 1);
		if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:foo:c"))
			break;

		err = btf__load_into_kernel(btf);
		btf__free(btf);
		ASSERT_EQ(err, -ENOENT, "check btf");
		break;
	}

	while (test__start_subtest("btf: node incorrect type")) {
		btf = init_btf();
		if (!ASSERT_OK_PTR(btf, "init_btf"))
			break;
		id = btf__add_struct(btf, "foo", 20);
		if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
			break;
		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::a"))
			break;
		err = btf__add_field(btf, "b", SPIN_LOCK, 128, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::b"))
			break;
		id = btf__add_decl_tag(btf, "contains:bar:a", 5, 0);
		if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:bar:a"))
			break;
		id = btf__add_struct(btf, "bar", 4);
		if (!ASSERT_EQ(id, 7, "btf__add_struct bar"))
			break;
		err = btf__add_field(btf, "a", SPIN_LOCK, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field bar::a"))
			break;

		err = btf__load_into_kernel(btf);
		ASSERT_EQ(err, -EINVAL, "check btf");
		btf__free(btf);
		break;
	}

	while (test__start_subtest("btf: multiple bpf_list_node with name b")) {
		btf = init_btf();
		if (!ASSERT_OK_PTR(btf, "init_btf"))
			break;
		id = btf__add_struct(btf, "foo", 52);
		if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
			break;
		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::a"))
			break;
		err = btf__add_field(btf, "b", LIST_NODE, 128, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::b"))
			break;
		err = btf__add_field(btf, "b", LIST_NODE, 256, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::b (dup)"))
			break;
		err = btf__add_field(btf, "d", SPIN_LOCK, 384, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::d"))
			break;
		id = btf__add_decl_tag(btf, "contains:foo:b", 5, 0);
		if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:foo:b"))
			break;

		err = btf__load_into_kernel(btf);
		ASSERT_EQ(err, -EINVAL, "check btf");
		btf__free(btf);
		break;
	}

	while (test__start_subtest("btf: owning | owned AA cycle")) {
		btf = init_btf();
		if (!ASSERT_OK_PTR(btf, "init_btf"))
			break;
		id = btf__add_struct(btf, "foo", 44);
		if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
			break;
		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::a"))
			break;
		err = btf__add_field(btf, "b", LIST_NODE, 128, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::b"))
			break;
		err = btf__add_field(btf, "c", SPIN_LOCK, 320, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::c"))
			break;
		id = btf__add_decl_tag(btf, "contains:foo:b", 5, 0);
		if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:foo:b"))
			break;

		err = btf__load_into_kernel(btf);
		ASSERT_EQ(err, -ELOOP, "check btf");
		btf__free(btf);
		break;
	}

	while (test__start_subtest("btf: owning | owned ABA cycle")) {
		btf = init_btf();
		if (!ASSERT_OK_PTR(btf, "init_btf"))
			break;
		id = btf__add_struct(btf, "foo", 44);
		if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
			break;
		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::a"))
			break;
		err = btf__add_field(btf, "b", LIST_NODE, 128, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::b"))
			break;
		err = btf__add_field(btf, "c", SPIN_LOCK, 320, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::c"))
			break;
		id = btf__add_decl_tag(btf, "contains:bar:b", 5, 0);
		if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:bar:b"))
			break;
		id = btf__add_struct(btf, "bar", 44);
		if (!ASSERT_EQ(id, 7, "btf__add_struct bar"))
			break;
		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field bar::a"))
			break;
		err = btf__add_field(btf, "b", LIST_NODE, 128, 0);
		if (!ASSERT_OK(err, "btf__add_field bar::b"))
			break;
		err = btf__add_field(btf, "c", SPIN_LOCK, 320, 0);
		if (!ASSERT_OK(err, "btf__add_field bar::c"))
			break;
		id = btf__add_decl_tag(btf, "contains:foo:b", 7, 0);
		if (!ASSERT_EQ(id, 8, "btf__add_decl_tag contains:foo:b"))
			break;

		err = btf__load_into_kernel(btf);
		ASSERT_EQ(err, -ELOOP, "check btf");
		btf__free(btf);
		break;
	}

	while (test__start_subtest("btf: owning -> owned")) {
		btf = init_btf();
		if (!ASSERT_OK_PTR(btf, "init_btf"))
			break;
		id = btf__add_struct(btf, "foo", 28);
		if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
			break;
		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::a"))
			break;
		err = btf__add_field(btf, "b", SPIN_LOCK, 192, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::b"))
			break;
		id = btf__add_decl_tag(btf, "contains:bar:a", 5, 0);
		if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:bar:a"))
			break;
		id = btf__add_struct(btf, "bar", 24);
		if (!ASSERT_EQ(id, 7, "btf__add_struct bar"))
			break;
		err = btf__add_field(btf, "a", LIST_NODE, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field bar::a"))
			break;

		err = btf__load_into_kernel(btf);
		ASSERT_EQ(err, 0, "check btf");
		btf__free(btf);
		break;
	}

	while (test__start_subtest("btf: owning -> owning | owned -> owned")) {
		btf = init_btf();
		if (!ASSERT_OK_PTR(btf, "init_btf"))
			break;
		id = btf__add_struct(btf, "foo", 28);
		if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
			break;
		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::a"))
			break;
		err = btf__add_field(btf, "b", SPIN_LOCK, 192, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::b"))
			break;
		id = btf__add_decl_tag(btf, "contains:bar:b", 5, 0);
		if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:bar:b"))
			break;
		id = btf__add_struct(btf, "bar", 44);
		if (!ASSERT_EQ(id, 7, "btf__add_struct bar"))
			break;
		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field bar::a"))
			break;
		err = btf__add_field(btf, "b", LIST_NODE, 128, 0);
		if (!ASSERT_OK(err, "btf__add_field bar::b"))
			break;
		err = btf__add_field(btf, "c", SPIN_LOCK, 320, 0);
		if (!ASSERT_OK(err, "btf__add_field bar::c"))
			break;
		id = btf__add_decl_tag(btf, "contains:baz:a", 7, 0);
		if (!ASSERT_EQ(id, 8, "btf__add_decl_tag contains:baz:a"))
			break;
		id = btf__add_struct(btf, "baz", 24);
		if (!ASSERT_EQ(id, 9, "btf__add_struct baz"))
			break;
		err = btf__add_field(btf, "a", LIST_NODE, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field baz::a"))
			break;

		err = btf__load_into_kernel(btf);
		ASSERT_EQ(err, 0, "check btf");
		btf__free(btf);
		break;
	}

	while (test__start_subtest("btf: owning | owned -> owning | owned -> owned")) {
		btf = init_btf();
		if (!ASSERT_OK_PTR(btf, "init_btf"))
			break;
		id = btf__add_struct(btf, "foo", 44);
		if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
			break;
		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::a"))
			break;
		err = btf__add_field(btf, "b", LIST_NODE, 128, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::b"))
			break;
		err = btf__add_field(btf, "c", SPIN_LOCK, 320, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::c"))
			break;
		id = btf__add_decl_tag(btf, "contains:bar:b", 5, 0);
		if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:bar:b"))
			break;
		id = btf__add_struct(btf, "bar", 44);
		if (!ASSERT_EQ(id, 7, "btf__add_struct bar"))
			break;
		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field bar::a"))
			break;
		err = btf__add_field(btf, "b", LIST_NODE, 128, 0);
		if (!ASSERT_OK(err, "btf__add_field bar::b"))
			break;
		err = btf__add_field(btf, "c", SPIN_LOCK, 320, 0);
		if (!ASSERT_OK(err, "btf__add_field bar::c"))
			break;
		id = btf__add_decl_tag(btf, "contains:baz:a", 7, 0);
		if (!ASSERT_EQ(id, 8, "btf__add_decl_tag contains:baz:a"))
			break;
		id = btf__add_struct(btf, "baz", 24);
		if (!ASSERT_EQ(id, 9, "btf__add_struct baz"))
			break;
		err = btf__add_field(btf, "a", LIST_NODE, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field baz::a"))
			break;

		err = btf__load_into_kernel(btf);
		ASSERT_EQ(err, -ELOOP, "check btf");
		btf__free(btf);
		break;
	}

	while (test__start_subtest("btf: owning -> owning | owned -> owning | owned -> owned")) {
		btf = init_btf();
		if (!ASSERT_OK_PTR(btf, "init_btf"))
			break;
		id = btf__add_struct(btf, "foo", 20);
		if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
			break;
		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::a"))
			break;
		err = btf__add_field(btf, "b", SPIN_LOCK, 128, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::b"))
			break;
		id = btf__add_decl_tag(btf, "contains:bar:b", 5, 0);
		if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:bar:b"))
			break;
		id = btf__add_struct(btf, "bar", 44);
		if (!ASSERT_EQ(id, 7, "btf__add_struct bar"))
			break;
		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field bar::a"))
			break;
		err = btf__add_field(btf, "b", LIST_NODE, 128, 0);
		if (!ASSERT_OK(err, "btf__add_field bar::b"))
			break;
		err = btf__add_field(btf, "c", SPIN_LOCK, 320, 0);
		if (!ASSERT_OK(err, "btf__add_field bar::c"))
			break;
		id = btf__add_decl_tag(btf, "contains:baz:b", 7, 0);
		if (!ASSERT_EQ(id, 8, "btf__add_decl_tag contains:baz:b"))
			break;
		id = btf__add_struct(btf, "baz", 44);
		if (!ASSERT_EQ(id, 9, "btf__add_struct baz"))
			break;
		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field baz::a"))
			break;
		err = btf__add_field(btf, "b", LIST_NODE, 128, 0);
		if (!ASSERT_OK(err, "btf__add_field baz::b"))
			break;
		err = btf__add_field(btf, "c", SPIN_LOCK, 320, 0);
		if (!ASSERT_OK(err, "btf__add_field baz::c"))
			break;
		id = btf__add_decl_tag(btf, "contains:bam:a", 9, 0);
		if (!ASSERT_EQ(id, 10, "btf__add_decl_tag contains:bam:a"))
			break;
		id = btf__add_struct(btf, "bam", 24);
		if (!ASSERT_EQ(id, 11, "btf__add_struct bam"))
			break;
		err = btf__add_field(btf, "a", LIST_NODE, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field bam::a"))
			break;

		err = btf__load_into_kernel(btf);
		ASSERT_EQ(err, -ELOOP, "check btf");
		btf__free(btf);
		break;
	}

	while (test__start_subtest("btf: list_node and rb_node in same struct")) {
		list_and_rb_node_same_struct(true);
		break;
	}

	while (test__start_subtest("btf: list_node and rb_node in same struct, no bpf_refcount")) {
		list_and_rb_node_same_struct(false);
		break;
	}
}

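/* Entry point: run all expected-failure programs, the BTF validation
 * subtests, and the success programs in every mode.
 */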
void test_linked_list(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(linked_list_fail_tests); i++) {
		if (!test__start_subtest(linked_list_fail_tests[i].prog_name))
			continue;
		test_linked_list_fail_prog(linked_list_fail_tests[i].prog_name,
					   linked_list_fail_tests[i].err_msg);
	}
	test_btf();
	test_linked_list_success(PUSH_POP, false);
	test_linked_list_success(PUSH_POP, true);
	test_linked_list_success(PUSH_POP_MULT, false);
	test_linked_list_success(PUSH_POP_MULT, true);
	test_linked_list_success(LIST_IN_LIST, false);
	test_linked_list_success(LIST_IN_LIST, true);
	test_linked_list_success(TEST_ALL, false);
}

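/* RUN_TESTS loads and runs every program in linked_list_peek.bpf.c,
 * checking each against its annotated expectations.
 */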
void test_linked_list_peek(void)
{
	RUN_TESTS(linked_list_peek);
}