// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */

#define _GNU_SOURCE /* See feature_test_macros(7) */
#include <unistd.h>
#include <sched.h>
#include <pthread.h>
#include <sys/syscall.h> /* For SYS_xxx definitions */
#include <sys/types.h>
#include <test_progs.h>
#include "task_local_storage_helpers.h"
#include "task_local_storage.skel.h"
#include "task_local_storage_exit_creds.skel.h"
#include "task_ls_recursion.skel.h"
#include "task_storage_nodeadlock.skel.h"

static void test_sys_enter_exit(void)
{
	struct task_local_storage *skel;
	int err;

	skel = task_local_storage__open_and_load();
	if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
		return;

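	/* Filter the progs to this thread. The raw SYS_gettid syscall is
	 * used because glibc only gained a gettid() wrapper in 2.30.
	 */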
	skel->bss->target_pid = syscall(SYS_gettid);

	err = task_local_storage__attach(skel);
	if (!ASSERT_OK(err, "skel_attach"))
		goto out;

	syscall(SYS_gettid);
	syscall(SYS_gettid);

	/* 3x syscalls: 1x attach and 2x gettid */
	ASSERT_EQ(skel->bss->enter_cnt, 3, "enter_cnt");
	ASSERT_EQ(skel->bss->exit_cnt, 3, "exit_cnt");
	ASSERT_EQ(skel->bss->mismatch_cnt, 0, "mismatch_cnt");
out:
	task_local_storage__destroy(skel);
}

static void test_exit_creds(void)
{
	struct task_local_storage_exit_creds *skel;
	int err, run_count, sync_rcu_calls = 0;
	const int MAX_SYNC_RCU_CALLS = 1000;

	skel = task_local_storage_exit_creds__open_and_load();
	if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
		return;

	err = task_local_storage_exit_creds__attach(skel);
	if (!ASSERT_OK(err, "skel_attach"))
		goto out;

	/* trigger at least one exit_creds() */
	if (CHECK_FAIL(system("ls > /dev/null")))
		goto out;

	/* kern_sync_rcu is not enough on its own as the read section we want
	 * to wait for may start after we enter synchronize_rcu, so our call
	 * won't wait for the section to finish. Loop on the run counter
	 * as well to ensure the program has run.
	 */
	do {
		kern_sync_rcu();
		run_count = __atomic_load_n(&skel->bss->run_count, __ATOMIC_SEQ_CST);
	} while (run_count == 0 && ++sync_rcu_calls < MAX_SYNC_RCU_CALLS);

	ASSERT_NEQ(sync_rcu_calls, MAX_SYNC_RCU_CALLS,
		   "sync_rcu count too high");
	ASSERT_NEQ(run_count, 0, "run_count");
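	/* The prog runs from exit_creds() of the exiting child, after the
	 * task's storage has been torn down, so its bpf_task_storage_get()
	 * should only observe NULL (null_ptr_count) and never a stale
	 * pointer (valid_ptr_count).
	 */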
	ASSERT_EQ(skel->bss->valid_ptr_count, 0, "valid_ptr_count");
	ASSERT_NEQ(skel->bss->null_ptr_count, 0, "null_ptr_count");
out:
	task_local_storage_exit_creds__destroy(skel);
}

static void test_recursion(void)
{
	int err, map_fd, prog_fd, task_fd;
	struct task_ls_recursion *skel;
	struct bpf_prog_info info;
	__u32 info_len = sizeof(info);
	long value;

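	/* sys_pidfd_open() (task_local_storage_helpers.h) wraps the
	 * pidfd_open(2) syscall; the resulting pidfd is the userspace
	 * key for the task storage map lookups below.
	 */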
	task_fd = sys_pidfd_open(getpid(), 0);
	if (!ASSERT_NEQ(task_fd, -1, "sys_pidfd_open"))
		return;

	skel = task_ls_recursion__open_and_load();
	if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
		goto out;

	err = task_ls_recursion__attach(skel);
	if (!ASSERT_OK(err, "skel_attach"))
		goto out;

	/* trigger sys_enter, make sure it does not cause deadlock */
	skel->bss->test_pid = getpid();
	syscall(SYS_gettid);
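	/* Quiesce the progs (clear the pid filter, then detach) before
	 * reading the results so later syscalls cannot perturb the maps.
	 */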
	skel->bss->test_pid = 0;
	task_ls_recursion__detach(skel);

	/* Refer to the comment in BPF_PROG(on_update) for
	 * the explanation of the values 201 and 100.
	 */
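	/* Roughly: on_enter seeds map_a with 200 and map_b with 100; the
	 * storage updates recursively fire on_update, which bumps map_a to
	 * 201 and whose recursive bpf_task_storage_delete() is expected to
	 * fail exactly once with -EBUSY (nr_del_errs).
	 */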
	map_fd = bpf_map__fd(skel->maps.map_a);
	err = bpf_map_lookup_elem(map_fd, &task_fd, &value);
	ASSERT_OK(err, "lookup map_a");
	ASSERT_EQ(value, 201, "map_a value");
	ASSERT_EQ(skel->bss->nr_del_errs, 1, "bpf_task_storage_delete busy");

	map_fd = bpf_map__fd(skel->maps.map_b);
	err = bpf_map_lookup_elem(map_fd, &task_fd, &value);
	ASSERT_OK(err, "lookup map_b");
	ASSERT_EQ(value, 100, "map_b value");

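	/* recursion_misses counts invocations skipped because the prog was
	 * already active on the CPU. Only on_lookup is expected to have
	 * been blocked: its bpf_task_storage_delete() re-enters
	 * bpf_local_storage_lookup, the function it is attached to.
	 */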
	prog_fd = bpf_program__fd(skel->progs.on_lookup);
	memset(&info, 0, sizeof(info));
	err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len);
	ASSERT_OK(err, "get prog info");
	ASSERT_GT(info.recursion_misses, 0, "on_lookup prog recursion");

	prog_fd = bpf_program__fd(skel->progs.on_update);
	memset(&info, 0, sizeof(info));
	err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len);
	ASSERT_OK(err, "get prog info");
	ASSERT_EQ(info.recursion_misses, 0, "on_update prog recursion");

	prog_fd = bpf_program__fd(skel->progs.on_enter);
	memset(&info, 0, sizeof(info));
	err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len);
	ASSERT_OK(err, "get prog info");
	ASSERT_EQ(info.recursion_misses, 0, "on_enter prog recursion");

out:
	close(task_fd);
	task_ls_recursion__destroy(skel);
}

static bool stop;

static void waitall(const pthread_t *tids, int nr)
{
	int i;

	stop = true;
	for (i = 0; i < nr; i++)
		pthread_join(tids[i], NULL);
}

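/* Each socket()/close() pair triggers the sleepable socket_post_create
 * LSM prog attached by the skeleton.
 */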
static void *sock_create_loop(void *arg)
{
	struct task_storage_nodeadlock *skel = arg;
	int fd;

	while (!stop) {
		fd = socket(AF_INET, SOCK_STREAM, 0);
		close(fd);
		if (skel->bss->nr_get_errs || skel->bss->nr_del_errs)
			stop = true;
	}

	return NULL;
}

static void test_nodeadlock(void)
{
	struct task_storage_nodeadlock *skel;
	struct bpf_prog_info info = {};
	__u32 info_len = sizeof(info);
	const int nr_threads = 32;
	pthread_t tids[nr_threads];
	int i, prog_fd, err;
	cpu_set_t old, new;

	/* Pin all threads to one cpu to increase the chance of preemption
	 * in a sleepable bpf prog.
	 */
	CPU_ZERO(&new);
	CPU_SET(0, &new);
	err = sched_getaffinity(getpid(), sizeof(old), &old);
	if (!ASSERT_OK(err, "getaffinity"))
		return;
	err = sched_setaffinity(getpid(), sizeof(new), &new);
	if (!ASSERT_OK(err, "setaffinity"))
		return;

	skel = task_storage_nodeadlock__open_and_load();
	if (!ASSERT_OK_PTR(skel, "open_and_load"))
		goto done;

	/* The spurious recursion and deadlock detection this test guards
	 * against are only reproducible on a preemptible kernel.
	 */
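	/* CONFIG_PREEMPT is a __kconfig extern in the BPF prog; libbpf
	 * fills it in from the running kernel's config at load time.
	 */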
	if (!skel->kconfig->CONFIG_PREEMPT) {
		test__skip();
		goto done;
	}

	err = task_storage_nodeadlock__attach(skel);
	ASSERT_OK(err, "attach prog");

	for (i = 0; i < nr_threads; i++) {
		err = pthread_create(&tids[i], NULL, sock_create_loop, skel);
		if (err) {
			/* Only assert once here to avoid excessive
			 * PASS printing during test failure.
			 */
			ASSERT_OK(err, "pthread_create");
			waitall(tids, i);
			goto done;
		}
	}

	/* With 32 threads, 1s is enough to reproduce the issue */
	sleep(1);
	waitall(tids, nr_threads);

	info_len = sizeof(info);
	prog_fd = bpf_program__fd(skel->progs.socket_post_create);
	err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len);
	ASSERT_OK(err, "get prog info");
	ASSERT_EQ(info.recursion_misses, 0, "prog recursion");

	ASSERT_EQ(skel->bss->nr_get_errs, 0, "bpf_task_storage_get busy");
	ASSERT_EQ(skel->bss->nr_del_errs, 0, "bpf_task_storage_delete busy");

done:
	task_storage_nodeadlock__destroy(skel);
	sched_setaffinity(getpid(), sizeof(old), &old);
}

void test_task_local_storage(void)
{
	if (test__start_subtest("sys_enter_exit"))
		test_sys_enter_exit();
	if (test__start_subtest("exit_creds"))
		test_exit_creds();
	if (test__start_subtest("recursion"))
		test_recursion();
	if (test__start_subtest("nodeadlock"))
		test_nodeadlock();
}