// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include <network_helpers.h>

#include "test_spin_lock.skel.h"
#include "test_spin_lock_fail.skel.h"

static char log_buf[1024 * 1024];

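/* Each entry pairs a program from test_spin_lock_fail.skel.h with the
 * verifier error message its load is expected to produce.
 */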
static struct {
	const char *prog_name;
	const char *err_msg;
} spin_lock_fail_tests[] = {
	{ "lock_id_kptr_preserve",
	  "5: (bf) r1 = r0                       ; R0_w=ptr_foo(id=2,ref_obj_id=2,off=0,imm=0) "
	  "R1_w=ptr_foo(id=2,ref_obj_id=2,off=0,imm=0) refs=2\n6: (85) call bpf_this_cpu_ptr#154\n"
	  "R1 type=ptr_ expected=percpu_ptr_" },
	{ "lock_id_global_zero",
	  "; R1_w=map_value(off=0,ks=4,vs=4,imm=0)\n2: (85) call bpf_this_cpu_ptr#154\n"
	  "R1 type=map_value expected=percpu_ptr_" },
	{ "lock_id_mapval_preserve",
	  "8: (bf) r1 = r0                       ; R0_w=map_value(id=1,off=0,ks=4,vs=8,imm=0) "
	  "R1_w=map_value(id=1,off=0,ks=4,vs=8,imm=0)\n9: (85) call bpf_this_cpu_ptr#154\n"
	  "R1 type=map_value expected=percpu_ptr_" },
	{ "lock_id_innermapval_preserve",
	  "13: (bf) r1 = r0                      ; R0=map_value(id=2,off=0,ks=4,vs=8,imm=0) "
	  "R1_w=map_value(id=2,off=0,ks=4,vs=8,imm=0)\n14: (85) call bpf_this_cpu_ptr#154\n"
	  "R1 type=map_value expected=percpu_ptr_" },
	{ "lock_id_mismatch_kptr_kptr", "bpf_spin_unlock of different lock" },
	{ "lock_id_mismatch_kptr_global", "bpf_spin_unlock of different lock" },
	{ "lock_id_mismatch_kptr_mapval", "bpf_spin_unlock of different lock" },
	{ "lock_id_mismatch_kptr_innermapval", "bpf_spin_unlock of different lock" },
	{ "lock_id_mismatch_global_global", "bpf_spin_unlock of different lock" },
	{ "lock_id_mismatch_global_kptr", "bpf_spin_unlock of different lock" },
	{ "lock_id_mismatch_global_mapval", "bpf_spin_unlock of different lock" },
	{ "lock_id_mismatch_global_innermapval", "bpf_spin_unlock of different lock" },
	{ "lock_id_mismatch_mapval_mapval", "bpf_spin_unlock of different lock" },
	{ "lock_id_mismatch_mapval_kptr", "bpf_spin_unlock of different lock" },
	{ "lock_id_mismatch_mapval_global", "bpf_spin_unlock of different lock" },
	{ "lock_id_mismatch_mapval_innermapval", "bpf_spin_unlock of different lock" },
	{ "lock_id_mismatch_innermapval_innermapval1", "bpf_spin_unlock of different lock" },
	{ "lock_id_mismatch_innermapval_innermapval2", "bpf_spin_unlock of different lock" },
	{ "lock_id_mismatch_innermapval_kptr", "bpf_spin_unlock of different lock" },
	{ "lock_id_mismatch_innermapval_global", "bpf_spin_unlock of different lock" },
	{ "lock_id_mismatch_innermapval_mapval", "bpf_spin_unlock of different lock" },
};

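/* Load one failure-mode program at a time and check that the verifier
 * rejects it with the expected message, captured in log_buf.
 */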
static void test_spin_lock_fail_prog(const char *prog_name, const char *err_msg)
{
	LIBBPF_OPTS(bpf_object_open_opts, opts, .kernel_log_buf = log_buf,
						.kernel_log_size = sizeof(log_buf),
						.kernel_log_level = 1);
	struct test_spin_lock_fail *skel;
	struct bpf_program *prog;
	int ret;

	skel = test_spin_lock_fail__open_opts(&opts);
	if (!ASSERT_OK_PTR(skel, "test_spin_lock_fail__open_opts"))
		return;

	prog = bpf_object__find_program_by_name(skel->obj, prog_name);
	if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name"))
		goto end;

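	/* Make sure the program under test is part of the load attempt. */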
	bpf_program__set_autoload(prog, true);

	ret = test_spin_lock_fail__load(skel);
	if (!ASSERT_ERR(ret, "test_spin_lock_fail__load must fail"))
		goto end;

	/* Skip check if JIT does not support kfuncs */
	if (strstr(log_buf, "JIT does not support calling kernel function")) {
		test__skip();
		goto end;
	}

	if (!ASSERT_OK_PTR(strstr(log_buf, err_msg), "expected error message")) {
		fprintf(stderr, "Expected: %s\n", err_msg);
		fprintf(stderr, "Verifier: %s\n", log_buf);
	}

end:
	test_spin_lock_fail__destroy(skel);
}

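/* Worker for the success test: run the spin lock program repeatedly and
 * hand the argument back through pthread_exit() so the joiner can check it.
 */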
static void *spin_lock_thread(void *arg)
{
	int err, prog_fd = *(u32 *) arg;
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = &pkt_v4,
		.data_size_in = sizeof(pkt_v4),
		.repeat = 10000,
	);

	err = bpf_prog_test_run_opts(prog_fd, &topts);
	ASSERT_OK(err, "test_run");
	ASSERT_OK(topts.retval, "test_run retval");
	pthread_exit(arg);
}

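/* Positive test: run the spin lock program concurrently from four threads
 * and verify that every run succeeds.
 */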
void test_spin_lock_success(void)
{
	struct test_spin_lock *skel;
	pthread_t thread_id[4];
	int prog_fd, i;
	void *ret;

	skel = test_spin_lock__open_and_load();
	if (!ASSERT_OK_PTR(skel, "test_spin_lock__open_and_load"))
		return;
	prog_fd = bpf_program__fd(skel->progs.bpf_spin_lock_test);
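	/* Run the program from four threads in parallel, 10000 iterations each. */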
	for (i = 0; i < 4; i++) {
		int err;

		err = pthread_create(&thread_id[i], NULL, &spin_lock_thread, &prog_fd);
		if (!ASSERT_OK(err, "pthread_create"))
			goto end;
	}

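	/* Each thread exits with its argument, so pthread_join() should hand
	 * back the prog_fd pointer we passed in.
	 */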
	for (i = 0; i < 4; i++) {
		if (!ASSERT_OK(pthread_join(thread_id[i], &ret), "pthread_join"))
			goto end;
		if (!ASSERT_EQ(ret, &prog_fd, "ret == prog_fd"))
			goto end;
	}
end:
	test_spin_lock__destroy(skel);
}

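/* Entry point: run the success test, then each expected-failure program as
 * its own subtest.
 */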
void test_spin_lock(void)
{
	int i;

	test_spin_lock_success();

	for (i = 0; i < ARRAY_SIZE(spin_lock_fail_tests); i++) {
		if (!test__start_subtest(spin_lock_fail_tests[i].prog_name))
			continue;
		test_spin_lock_fail_prog(spin_lock_fail_tests[i].prog_name,
					 spin_lock_fail_tests[i].err_msg);
	}
}