1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2017 Facebook
3 */
4 #define _GNU_SOURCE
5 #include "test_progs.h"
6 #include "testing_helpers.h"
7 #include "cgroup_helpers.h"
8 #include <argp.h>
9 #include <pthread.h>
10 #include <sched.h>
11 #include <signal.h>
12 #include <string.h>
13 #include <execinfo.h> /* backtrace */
14 #include <linux/membarrier.h>
15 #include <sys/sysinfo.h> /* get_nprocs */
16 #include <netinet/in.h>
17 #include <sys/select.h>
18 #include <sys/socket.h>
19 #include <sys/un.h>
20 #include <bpf/btf.h>
21
22 static bool verbose(void)
23 {
24 return env.verbosity > VERBOSE_NONE;
25 }
26
27 static void stdio_hijack_init(char **log_buf, size_t *log_cnt)
28 {
29 #ifdef __GLIBC__
30 if (verbose() && env.worker_id == -1) {
31 /* nothing to do, output to stdout by default */
32 return;
33 }
34
35 fflush(stdout);
36 fflush(stderr);
37
38 stdout = open_memstream(log_buf, log_cnt);
39 if (!stdout) {
40 stdout = env.stdout;
41 perror("open_memstream");
42 return;
43 }
44
45 if (env.subtest_state)
46 env.subtest_state->stdout = stdout;
47 else
48 env.test_state->stdout = stdout;
49
50 stderr = stdout;
51 #endif
52 }
53
54 static void stdio_hijack(char **log_buf, size_t *log_cnt)
55 {
56 #ifdef __GLIBC__
57 if (verbose() && env.worker_id == -1) {
58 /* nothing to do, output to stdout by default */
59 return;
60 }
61
62 env.stdout = stdout;
63 env.stderr = stderr;
64
65 stdio_hijack_init(log_buf, log_cnt);
66 #endif
67 }
68
69 static void stdio_restore_cleanup(void)
70 {
71 #ifdef __GLIBC__
72 if (verbose() && env.worker_id == -1) {
73 /* nothing to do, output to stdout by default */
74 return;
75 }
76
77 fflush(stdout);
78
79 if (env.subtest_state) {
80 fclose(env.subtest_state->stdout);
81 env.subtest_state->stdout = NULL;
82 stdout = env.test_state->stdout;
83 stderr = env.test_state->stdout;
84 } else {
85 fclose(env.test_state->stdout);
86 env.test_state->stdout = NULL;
87 }
88 #endif
89 }
90
91 static void stdio_restore(void)
92 {
93 #ifdef __GLIBC__
94 if (verbose() && env.worker_id == -1) {
95 /* nothing to do, output to stdout by default */
96 return;
97 }
98
99 if (stdout == env.stdout)
100 return;
101
102 stdio_restore_cleanup();
103
104 stdout = env.stdout;
105 stderr = env.stderr;
106 #endif
107 }
108
109 /* Adapted from perf/util/string.c */
110 static bool glob_match(const char *str, const char *pat)
111 {
112 while (*str && *pat && *pat != '*') {
113 if (*str != *pat)
114 return false;
115 str++;
116 pat++;
117 }
118 /* Check wild card */
119 if (*pat == '*') {
120 while (*pat == '*')
121 pat++;
122 if (!*pat) /* Tail wild card matches all */
123 return true;
124 while (*str)
125 if (glob_match(str++, pat))
126 return true;
127 }
128 return !*str && !*pat;
129 }
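/* Illustrative sketch (not part of the runner): how the glob matching above
 * behaves for the -a/-d name filters, which support a '*' wildcard. The test
 * names below are hypothetical.
 *
 *   glob_match("send_signal_tracepoint", "send_signal*")  -> true
 *   glob_match("send_signal_tracepoint", "*tracepoint")   -> true
 *   glob_match("send_signal_tracepoint", "send_signal")   -> false
 *                                        (no '*', so a full match is required)
 */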
130
131 #define EXIT_NO_TEST 2
132 #define EXIT_ERR_SETUP_INFRA 3
133
134 /* defined in test_progs.h */
135 struct test_env env = {};
136
137 struct prog_test_def {
138 const char *test_name;
139 int test_num;
140 void (*run_test)(void);
141 void (*run_serial_test)(void);
142 bool should_run;
143 bool need_cgroup_cleanup;
144 };
145
146 /* Override C runtime library's usleep() implementation to ensure nanosleep()
147 * is always called. usleep() is frequently used in selftests as a way to
148 * trigger kprobes and tracepoints.
149 */
150 int usleep(useconds_t usec)
151 {
152 struct timespec ts = {
153 .tv_sec = usec / 1000000,
154 .tv_nsec = (usec % 1000000) * 1000,
155 };
156
157 return syscall(__NR_nanosleep, &ts, NULL);
158 }
159
160 static bool should_run(struct test_selector *sel, int num, const char *name)
161 {
162 int i;
163
164 for (i = 0; i < sel->blacklist.cnt; i++) {
165 if (glob_match(name, sel->blacklist.tests[i].name) &&
166 !sel->blacklist.tests[i].subtest_cnt)
167 return false;
168 }
169
170 for (i = 0; i < sel->whitelist.cnt; i++) {
171 if (glob_match(name, sel->whitelist.tests[i].name))
172 return true;
173 }
174
175 if (!sel->whitelist.cnt && !sel->num_set)
176 return true;
177
178 return num < sel->num_set_len && sel->num_set[num];
179 }
180
181 static bool should_run_subtest(struct test_selector *sel,
182 struct test_selector *subtest_sel,
183 int subtest_num,
184 const char *test_name,
185 const char *subtest_name)
186 {
187 int i, j;
188
189 for (i = 0; i < sel->blacklist.cnt; i++) {
190 if (glob_match(test_name, sel->blacklist.tests[i].name)) {
191 if (!sel->blacklist.tests[i].subtest_cnt)
192 return false;
193
194 for (j = 0; j < sel->blacklist.tests[i].subtest_cnt; j++) {
195 if (glob_match(subtest_name,
196 sel->blacklist.tests[i].subtests[j]))
197 return false;
198 }
199 }
200 }
201
202 for (i = 0; i < sel->whitelist.cnt; i++) {
203 if (glob_match(test_name, sel->whitelist.tests[i].name)) {
204 if (!sel->whitelist.tests[i].subtest_cnt)
205 return true;
206
207 for (j = 0; j < sel->whitelist.tests[i].subtest_cnt; j++) {
208 if (glob_match(subtest_name,
209 sel->whitelist.tests[i].subtests[j]))
210 return true;
211 }
212 }
213 }
214
215 if (!sel->whitelist.cnt && !subtest_sel->num_set)
216 return true;
217
218 return subtest_num < subtest_sel->num_set_len && subtest_sel->num_set[subtest_num];
219 }
220
221 static char *test_result(bool failed, bool skipped)
222 {
223 return failed ? "FAIL" : (skipped ? "SKIP" : "OK");
224 }
225
226 #define TEST_NUM_WIDTH 7
227
228 static void print_test_result(const struct prog_test_def *test, const struct test_state *test_state)
229 {
230 int skipped_cnt = test_state->skip_cnt;
231 int subtests_cnt = test_state->subtest_num;
232
233 fprintf(env.stdout, "#%-*d %s:", TEST_NUM_WIDTH, test->test_num, test->test_name);
234 if (test_state->error_cnt)
235 fprintf(env.stdout, "FAIL");
236 else if (!skipped_cnt)
237 fprintf(env.stdout, "OK");
238 else if (skipped_cnt == subtests_cnt || !subtests_cnt)
239 fprintf(env.stdout, "SKIP");
240 else
241 fprintf(env.stdout, "OK (SKIP: %d/%d)", skipped_cnt, subtests_cnt);
242
243 fprintf(env.stdout, "\n");
244 }
245
246 static void print_test_log(char *log_buf, size_t log_cnt)
247 {
248 log_buf[log_cnt] = '\0';
249 fprintf(env.stdout, "%s", log_buf);
250 if (log_buf[log_cnt - 1] != '\n')
251 fprintf(env.stdout, "\n");
252 }
253
254 static void print_subtest_name(int test_num, int subtest_num,
255 const char *test_name, char *subtest_name,
256 char *result)
257 {
258 char test_num_str[TEST_NUM_WIDTH + 1];
259
260 snprintf(test_num_str, sizeof(test_num_str), "%d/%d", test_num, subtest_num);
261
262 fprintf(env.stdout, "#%-*s %s/%s",
263 TEST_NUM_WIDTH, test_num_str,
264 test_name, subtest_name);
265
266 if (result)
267 fprintf(env.stdout, ":%s", result);
268
269 fprintf(env.stdout, "\n");
270 }
271
272 static void dump_test_log(const struct prog_test_def *test,
273 const struct test_state *test_state,
274 bool skip_ok_subtests,
275 bool par_exec_result)
276 {
277 bool test_failed = test_state->error_cnt > 0;
278 bool force_log = test_state->force_log;
279 bool print_test = verbose() || force_log || test_failed;
280 int i;
281 struct subtest_state *subtest_state;
282 bool subtest_failed;
283 bool subtest_filtered;
284 bool print_subtest;
285
286 /* we do not print anything in the worker thread */
287 if (env.worker_id != -1)
288 return;
289
290 /* there is nothing to print when verbose log is used and execution
291 * is not in parallel mode
292 */
293 if (verbose() && !par_exec_result)
294 return;
295
296 if (test_state->log_cnt && print_test)
297 print_test_log(test_state->log_buf, test_state->log_cnt);
298
299 for (i = 0; i < test_state->subtest_num; i++) {
300 subtest_state = &test_state->subtest_states[i];
301 subtest_failed = subtest_state->error_cnt;
302 subtest_filtered = subtest_state->filtered;
303 print_subtest = verbose() || force_log || subtest_failed;
304
305 if ((skip_ok_subtests && !subtest_failed) || subtest_filtered)
306 continue;
307
308 if (subtest_state->log_cnt && print_subtest) {
309 print_test_log(subtest_state->log_buf,
310 subtest_state->log_cnt);
311 }
312
313 print_subtest_name(test->test_num, i + 1,
314 test->test_name, subtest_state->name,
315 test_result(subtest_state->error_cnt,
316 subtest_state->skipped));
317 }
318
319 print_test_result(test, test_state);
320 }
321
322 static void stdio_restore(void);
323
324 /* A bunch of tests set custom affinity per-thread and/or per-process. Reset
325 * it after each test/sub-test.
326 */
327 static void reset_affinity(void)
328 {
329 cpu_set_t cpuset;
330 int i, err;
331
332 CPU_ZERO(&cpuset);
333 for (i = 0; i < env.nr_cpus; i++)
334 CPU_SET(i, &cpuset);
335
336 err = sched_setaffinity(0, sizeof(cpuset), &cpuset);
337 if (err < 0) {
338 stdio_restore();
339 fprintf(stderr, "Failed to reset process affinity: %d!\n", err);
340 exit(EXIT_ERR_SETUP_INFRA);
341 }
342 err = pthread_setaffinity_np(pthread_self(), sizeof(cpuset), &cpuset);
343 if (err < 0) {
344 stdio_restore();
345 fprintf(stderr, "Failed to reset thread affinity: %d!\n", err);
346 exit(EXIT_ERR_SETUP_INFRA);
347 }
348 }
349
350 static void save_netns(void)
351 {
352 env.saved_netns_fd = open("/proc/self/ns/net", O_RDONLY);
353 if (env.saved_netns_fd == -1) {
354 perror("open(/proc/self/ns/net)");
355 exit(EXIT_ERR_SETUP_INFRA);
356 }
357 }
358
359 static void restore_netns(void)
360 {
361 if (setns(env.saved_netns_fd, CLONE_NEWNET) == -1) {
362 stdio_restore();
363 perror("setns(CLONE_NEWNET)");
364 exit(EXIT_ERR_SETUP_INFRA);
365 }
366 }
367
368 void test__end_subtest(void)
369 {
370 struct prog_test_def *test = env.test;
371 struct test_state *test_state = env.test_state;
372 struct subtest_state *subtest_state = env.subtest_state;
373
374 if (subtest_state->error_cnt) {
375 test_state->error_cnt++;
376 } else {
377 if (!subtest_state->skipped)
378 test_state->sub_succ_cnt++;
379 else
380 test_state->skip_cnt++;
381 }
382
383 if (verbose() && !env.workers)
384 print_subtest_name(test->test_num, test_state->subtest_num,
385 test->test_name, subtest_state->name,
386 test_result(subtest_state->error_cnt,
387 subtest_state->skipped));
388
389 stdio_restore_cleanup();
390 env.subtest_state = NULL;
391 }
392
393 bool test__start_subtest(const char *subtest_name)
394 {
395 struct prog_test_def *test = env.test;
396 struct test_state *state = env.test_state;
397 struct subtest_state *subtest_state;
398 size_t sub_state_size = sizeof(*subtest_state);
399
400 if (env.subtest_state)
401 test__end_subtest();
402
403 state->subtest_num++;
404 state->subtest_states =
405 realloc(state->subtest_states,
406 state->subtest_num * sub_state_size);
407 if (!state->subtest_states) {
408 fprintf(stderr, "Not enough memory to allocate subtest result\n");
409 return false;
410 }
411
412 subtest_state = &state->subtest_states[state->subtest_num - 1];
413
414 memset(subtest_state, 0, sub_state_size);
415
416 if (!subtest_name || !subtest_name[0]) {
417 fprintf(env.stderr,
418 "Subtest #%d didn't provide sub-test name!\n",
419 state->subtest_num);
420 return false;
421 }
422
423 subtest_state->name = strdup(subtest_name);
424 if (!subtest_state->name) {
425 fprintf(env.stderr,
426 "Subtest #%d: failed to copy subtest name!\n",
427 state->subtest_num);
428 return false;
429 }
430
431 if (!should_run_subtest(&env.test_selector,
432 &env.subtest_selector,
433 state->subtest_num,
434 test->test_name,
435 subtest_name)) {
436 subtest_state->filtered = true;
437 return false;
438 }
439
440 env.subtest_state = subtest_state;
441 stdio_hijack_init(&subtest_state->log_buf, &subtest_state->log_cnt);
442
443 return true;
444 }
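/* Typical usage from a test body (illustrative only; "foo" is a hypothetical
 * subtest name):
 *
 *   if (test__start_subtest("foo"))
 *           run_foo_checks();
 *
 * A false return means the subtest was filtered out or could not be set up.
 * The subtest is finalized either by the next test__start_subtest() call or
 * by run_one_test() calling test__end_subtest() at the end of the test.
 */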
445
446 void test__force_log(void)
447 {
448 env.test_state->force_log = true;
449 }
450
451 void test__skip(void)
452 {
453 if (env.subtest_state)
454 env.subtest_state->skipped = true;
455 else
456 env.test_state->skip_cnt++;
457 }
458
459 void test__fail(void)
460 {
461 if (env.subtest_state)
462 env.subtest_state->error_cnt++;
463 else
464 env.test_state->error_cnt++;
465 }
466
467 int test__join_cgroup(const char *path)
468 {
469 int fd;
470
471 if (!env.test->need_cgroup_cleanup) {
472 if (setup_cgroup_environment()) {
473 fprintf(stderr,
474 "#%d %s: Failed to setup cgroup environment\n",
475 env.test->test_num, env.test->test_name);
476 return -1;
477 }
478
479 env.test->need_cgroup_cleanup = true;
480 }
481
482 fd = create_and_get_cgroup(path);
483 if (fd < 0) {
484 fprintf(stderr,
485 "#%d %s: Failed to create cgroup '%s' (errno=%d)\n",
486 env.test->test_num, env.test->test_name, path, errno);
487 return fd;
488 }
489
490 if (join_cgroup(path)) {
491 fprintf(stderr,
492 "#%d %s: Failed to join cgroup '%s' (errno=%d)\n",
493 env.test->test_num, env.test->test_name, path, errno);
494 return -1;
495 }
496
497 return fd;
498 }
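/* Illustrative usage (hypothetical cgroup path): a test joins a fresh cgroup
 * and keeps the returned fd for cgroup BPF attach calls. The cgroup
 * environment itself is torn down by run_one_test() once need_cgroup_cleanup
 * has been set above.
 *
 *   int cg_fd = test__join_cgroup("/my_test_cg");
 *
 *   if (!ASSERT_GE(cg_fd, 0, "join_cgroup"))
 *           return;
 *   ...
 *   close(cg_fd);
 */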
499
500 int bpf_find_map(const char *test, struct bpf_object *obj, const char *name)
501 {
502 struct bpf_map *map;
503
504 map = bpf_object__find_map_by_name(obj, name);
505 if (!map) {
506 fprintf(stdout, "%s:FAIL:map '%s' not found\n", test, name);
507 test__fail();
508 return -1;
509 }
510 return bpf_map__fd(map);
511 }
512
513 static bool is_jit_enabled(void)
514 {
515 const char *jit_sysctl = "/proc/sys/net/core/bpf_jit_enable";
516 bool enabled = false;
517 int sysctl_fd;
518
519 sysctl_fd = open(jit_sysctl, O_RDONLY);
520 if (sysctl_fd != -1) {
521 char tmpc;
522
523 if (read(sysctl_fd, &tmpc, sizeof(tmpc)) == 1)
524 enabled = (tmpc != '0');
525 close(sysctl_fd);
526 }
527
528 return enabled;
529 }
530
531 int compare_map_keys(int map1_fd, int map2_fd)
532 {
533 __u32 key, next_key;
534 char val_buf[PERF_MAX_STACK_DEPTH *
535 sizeof(struct bpf_stack_build_id)];
536 int err;
537
538 err = bpf_map_get_next_key(map1_fd, NULL, &key);
539 if (err)
540 return err;
541 err = bpf_map_lookup_elem(map2_fd, &key, val_buf);
542 if (err)
543 return err;
544
545 while (bpf_map_get_next_key(map1_fd, &key, &next_key) == 0) {
546 err = bpf_map_lookup_elem(map2_fd, &next_key, val_buf);
547 if (err)
548 return err;
549
550 key = next_key;
551 }
552 if (errno != ENOENT)
553 return -1;
554
555 return 0;
556 }
557
558 int compare_stack_ips(int smap_fd, int amap_fd, int stack_trace_len)
559 {
560 __u32 key, next_key, *cur_key_p, *next_key_p;
561 char *val_buf1, *val_buf2;
562 int i, err = 0;
563
564 val_buf1 = malloc(stack_trace_len);
565 val_buf2 = malloc(stack_trace_len);
566 cur_key_p = NULL;
567 next_key_p = &key;
568 while (bpf_map_get_next_key(smap_fd, cur_key_p, next_key_p) == 0) {
569 err = bpf_map_lookup_elem(smap_fd, next_key_p, val_buf1);
570 if (err)
571 goto out;
572 err = bpf_map_lookup_elem(amap_fd, next_key_p, val_buf2);
573 if (err)
574 goto out;
575 for (i = 0; i < stack_trace_len; i++) {
576 if (val_buf1[i] != val_buf2[i]) {
577 err = -1;
578 goto out;
579 }
580 }
581 key = *next_key_p;
582 cur_key_p = &key;
583 next_key_p = &next_key;
584 }
585 if (errno != ENOENT)
586 err = -1;
587
588 out:
589 free(val_buf1);
590 free(val_buf2);
591 return err;
592 }
593
594 int extract_build_id(char *build_id, size_t size)
595 {
596 FILE *fp;
597 char *line = NULL;
598 size_t len = 0;
599
600 fp = popen("readelf -n ./urandom_read | grep 'Build ID'", "r");
601 if (fp == NULL)
602 return -1;
603
604 if (getline(&line, &len, fp) == -1)
605 goto err;
606 pclose(fp);
607
608 if (len > size - 1)
609 len = size - 1;
610 memcpy(build_id, line, len);
611 build_id[len] = '\0';
612 free(line);
613 return 0;
614 err:
615 pclose(fp);
616 return -1;
617 }
618
619 static int finit_module(int fd, const char *param_values, int flags)
620 {
621 return syscall(__NR_finit_module, fd, param_values, flags);
622 }
623
624 static int delete_module(const char *name, int flags)
625 {
626 return syscall(__NR_delete_module, name, flags);
627 }
628
629 /*
630 * Trigger synchronize_rcu() in kernel.
631 */
632 int kern_sync_rcu(void)
633 {
634 return syscall(__NR_membarrier, MEMBARRIER_CMD_SHARED, 0, 0);
635 }
636
637 static void unload_bpf_testmod(void)
638 {
639 if (kern_sync_rcu())
640 fprintf(env.stderr, "Failed to trigger kernel-side RCU sync!\n");
641 if (delete_module("bpf_testmod", 0)) {
642 if (errno == ENOENT) {
643 if (verbose())
644 fprintf(stdout, "bpf_testmod.ko is already unloaded.\n");
645 return;
646 }
647 fprintf(env.stderr, "Failed to unload bpf_testmod.ko from kernel: %d\n", -errno);
648 return;
649 }
650 if (verbose())
651 fprintf(stdout, "Successfully unloaded bpf_testmod.ko.\n");
652 }
653
654 static int load_bpf_testmod(void)
655 {
656 int fd;
657
658 /* ensure previous instance of the module is unloaded */
659 unload_bpf_testmod();
660
661 if (verbose())
662 fprintf(stdout, "Loading bpf_testmod.ko...\n");
663
664 fd = open("bpf_testmod.ko", O_RDONLY);
665 if (fd < 0) {
666 fprintf(env.stderr, "Can't find bpf_testmod.ko kernel module: %d\n", -errno);
667 return -ENOENT;
668 }
669 if (finit_module(fd, "", 0)) {
670 fprintf(env.stderr, "Failed to load bpf_testmod.ko into the kernel: %d\n", -errno);
671 close(fd);
672 return -EINVAL;
673 }
674 close(fd);
675
676 if (verbose())
677 fprintf(stdout, "Successfully loaded bpf_testmod.ko.\n");
678 return 0;
679 }
680
681 /* extern declarations for test funcs */
682 #define DEFINE_TEST(name) \
683 extern void test_##name(void) __weak; \
684 extern void serial_test_##name(void) __weak;
685 #include <prog_tests/tests.h>
686 #undef DEFINE_TEST
687
688 static struct prog_test_def prog_test_defs[] = {
689 #define DEFINE_TEST(name) { \
690 .test_name = #name, \
691 .run_test = &test_##name, \
692 .run_serial_test = &serial_test_##name, \
693 },
694 #include <prog_tests/tests.h>
695 #undef DEFINE_TEST
696 };
697
698 static const int prog_test_cnt = ARRAY_SIZE(prog_test_defs);
699
700 static struct test_state test_states[ARRAY_SIZE(prog_test_defs)];
701
702 const char *argp_program_version = "test_progs 0.1";
703 const char *argp_program_bug_address = "<bpf@vger.kernel.org>";
704 static const char argp_program_doc[] = "BPF selftests test runner";
705
706 enum ARG_KEYS {
707 ARG_TEST_NUM = 'n',
708 ARG_TEST_NAME = 't',
709 ARG_TEST_NAME_BLACKLIST = 'b',
710 ARG_VERIFIER_STATS = 's',
711 ARG_VERBOSE = 'v',
712 ARG_GET_TEST_CNT = 'c',
713 ARG_LIST_TEST_NAMES = 'l',
714 ARG_TEST_NAME_GLOB_ALLOWLIST = 'a',
715 ARG_TEST_NAME_GLOB_DENYLIST = 'd',
716 ARG_NUM_WORKERS = 'j',
717 ARG_DEBUG = -1,
718 };
719
720 static const struct argp_option opts[] = {
721 { "num", ARG_TEST_NUM, "NUM", 0,
722 "Run test number NUM only " },
723 { "name", ARG_TEST_NAME, "NAMES", 0,
724 "Run tests with names containing any string from NAMES list" },
725 { "name-blacklist", ARG_TEST_NAME_BLACKLIST, "NAMES", 0,
726 "Don't run tests with names containing any string from NAMES list" },
727 { "verifier-stats", ARG_VERIFIER_STATS, NULL, 0,
728 "Output verifier statistics", },
729 { "verbose", ARG_VERBOSE, "LEVEL", OPTION_ARG_OPTIONAL,
730 "Verbose output (use -vv or -vvv for progressively verbose output)" },
731 { "count", ARG_GET_TEST_CNT, NULL, 0,
732 "Get number of selected top-level tests " },
733 { "list", ARG_LIST_TEST_NAMES, NULL, 0,
734 "List test names that would run (without running them) " },
735 { "allow", ARG_TEST_NAME_GLOB_ALLOWLIST, "NAMES", 0,
736 "Run tests with name matching the pattern (supports '*' wildcard)." },
737 { "deny", ARG_TEST_NAME_GLOB_DENYLIST, "NAMES", 0,
738 "Don't run tests with name matching the pattern (supports '*' wildcard)." },
739 { "workers", ARG_NUM_WORKERS, "WORKERS", OPTION_ARG_OPTIONAL,
740 "Number of workers to run in parallel, defaults to the number of CPUs." },
741 { "debug", ARG_DEBUG, NULL, 0,
742 "print extra debug information for test_progs." },
743 {},
744 };
745
746 static int libbpf_print_fn(enum libbpf_print_level level,
747 const char *format, va_list args)
748 {
749 if (env.verbosity < VERBOSE_VERY && level == LIBBPF_DEBUG)
750 return 0;
751 vfprintf(stdout, format, args);
752 return 0;
753 }
754
755 static void free_test_filter_set(const struct test_filter_set *set)
756 {
757 int i, j;
758
759 if (!set)
760 return;
761
762 for (i = 0; i < set->cnt; i++) {
763 free((void *)set->tests[i].name);
764 for (j = 0; j < set->tests[i].subtest_cnt; j++)
765 free((void *)set->tests[i].subtests[j]);
766
767 free((void *)set->tests[i].subtests);
768 }
769
770 free((void *)set->tests);
771 }
772
773 static void free_test_selector(struct test_selector *test_selector)
774 {
775 free_test_filter_set(&test_selector->blacklist);
776 free_test_filter_set(&test_selector->whitelist);
777 free(test_selector->num_set);
778 }
779
780 extern int extra_prog_load_log_flags;
781
782 static error_t parse_arg(int key, char *arg, struct argp_state *state)
783 {
784 struct test_env *env = state->input;
785
786 switch (key) {
787 case ARG_TEST_NUM: {
788 char *subtest_str = strchr(arg, '/');
789
790 if (subtest_str) {
791 *subtest_str = '\0';
792 if (parse_num_list(subtest_str + 1,
793 &env->subtest_selector.num_set,
794 &env->subtest_selector.num_set_len)) {
795 fprintf(stderr,
796 "Failed to parse subtest numbers.\n");
797 return -EINVAL;
798 }
799 }
800 if (parse_num_list(arg, &env->test_selector.num_set,
801 &env->test_selector.num_set_len)) {
802 fprintf(stderr, "Failed to parse test numbers.\n");
803 return -EINVAL;
804 }
805 break;
806 }
807 case ARG_TEST_NAME_GLOB_ALLOWLIST:
808 case ARG_TEST_NAME: {
809 if (parse_test_list(arg,
810 &env->test_selector.whitelist,
811 key == ARG_TEST_NAME_GLOB_ALLOWLIST))
812 return -ENOMEM;
813 break;
814 }
815 case ARG_TEST_NAME_GLOB_DENYLIST:
816 case ARG_TEST_NAME_BLACKLIST: {
817 if (parse_test_list(arg,
818 &env->test_selector.blacklist,
819 key == ARG_TEST_NAME_GLOB_DENYLIST))
820 return -ENOMEM;
821 break;
822 }
823 case ARG_VERIFIER_STATS:
824 env->verifier_stats = true;
825 break;
826 case ARG_VERBOSE:
827 env->verbosity = VERBOSE_NORMAL;
828 if (arg) {
829 if (strcmp(arg, "v") == 0) {
830 env->verbosity = VERBOSE_VERY;
831 extra_prog_load_log_flags = 1;
832 } else if (strcmp(arg, "vv") == 0) {
833 env->verbosity = VERBOSE_SUPER;
834 extra_prog_load_log_flags = 2;
835 } else {
836 fprintf(stderr,
837 "Unrecognized verbosity setting ('%s'), only -v and -vv are supported\n",
838 arg);
839 return -EINVAL;
840 }
841 }
842
843 if (verbose()) {
844 if (setenv("SELFTESTS_VERBOSE", "1", 1) == -1) {
845 fprintf(stderr,
846 "Unable to setenv SELFTESTS_VERBOSE=1 (errno=%d)",
847 errno);
848 return -EINVAL;
849 }
850 }
851
852 break;
853 case ARG_GET_TEST_CNT:
854 env->get_test_cnt = true;
855 break;
856 case ARG_LIST_TEST_NAMES:
857 env->list_test_names = true;
858 break;
859 case ARG_NUM_WORKERS:
860 if (arg) {
861 env->workers = atoi(arg);
862 if (!env->workers) {
863 fprintf(stderr, "Invalid number of workers: %s.", arg);
864 return -EINVAL;
865 }
866 } else {
867 env->workers = get_nprocs();
868 }
869 break;
870 case ARG_DEBUG:
871 env->debug = true;
872 break;
873 case ARGP_KEY_ARG:
874 argp_usage(state);
875 break;
876 case ARGP_KEY_END:
877 break;
878 default:
879 return ARGP_ERR_UNKNOWN;
880 }
881 return 0;
882 }
883
884 /*
885 * Determine if test_progs is running as a "flavored" test runner and switch
886 * into corresponding sub-directory to load correct BPF objects.
887 *
888 * This is done by looking at executable name. If it contains "-flavor"
889 * suffix, then we are running as a flavored test runner.
890 */
891 int cd_flavor_subdir(const char *exec_name)
892 {
893 /* General form of argv[0] passed here is:
894 * some/path/to/test_progs[-flavor], where -flavor part is optional.
895 * First cut out "test_progs[-flavor]" part, then extract "flavor"
896 * part, if it's there.
897 */
898 const char *flavor = strrchr(exec_name, '/');
899
900 if (!flavor)
901 flavor = exec_name;
902 else
903 flavor++;
904
905 flavor = strrchr(flavor, '-');
906 if (!flavor)
907 return 0;
908 flavor++;
909 if (verbose())
910 fprintf(stdout, "Switching to flavor '%s' subdirectory...\n", flavor);
911
912 return chdir(flavor);
913 }
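/* Example (illustrative): when the binary is invoked as
 * "./test_progs-no_alu32", the code above extracts "no_alu32" and chdir()s
 * into that subdirectory so the flavored BPF object files are loaded. A plain
 * "./test_progs" has no '-' suffix and stays in the current directory.
 */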
914
915 int trigger_module_test_read(int read_sz)
916 {
917 int fd, err;
918
919 fd = open(BPF_TESTMOD_TEST_FILE, O_RDONLY);
920 err = -errno;
921 if (!ASSERT_GE(fd, 0, "testmod_file_open"))
922 return err;
923
924 read(fd, NULL, read_sz);
925 close(fd);
926
927 return 0;
928 }
929
930 int trigger_module_test_write(int write_sz)
931 {
932 int fd, err;
933 char *buf = malloc(write_sz);
934
935 if (!buf)
936 return -ENOMEM;
937
938 memset(buf, 'a', write_sz);
939 buf[write_sz-1] = '\0';
940
941 fd = open(BPF_TESTMOD_TEST_FILE, O_WRONLY);
942 err = -errno;
943 if (!ASSERT_GE(fd, 0, "testmod_file_open")) {
944 free(buf);
945 return err;
946 }
947
948 write(fd, buf, write_sz);
949 close(fd);
950 free(buf);
951 return 0;
952 }
953
954 int write_sysctl(const char *sysctl, const char *value)
955 {
956 int fd, err, len;
957
958 fd = open(sysctl, O_WRONLY);
959 if (!ASSERT_NEQ(fd, -1, "open sysctl"))
960 return -1;
961
962 len = strlen(value);
963 err = write(fd, value, len);
964 close(fd);
965 if (!ASSERT_EQ(err, len, "write sysctl"))
966 return -1;
967
968 return 0;
969 }
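/* Illustrative usage (hypothetical value): tests typically flip a sysctl and
 * rely on the ASSERT_*() calls above to record a failure on error.
 *
 *   write_sysctl("/proc/sys/net/ipv4/tcp_syncookies", "2");
 */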
970
971 int get_bpf_max_tramp_links_from(struct btf *btf)
972 {
973 const struct btf_enum *e;
974 const struct btf_type *t;
975 __u32 i, type_cnt;
976 const char *name;
977 __u16 j, vlen;
978
979 for (i = 1, type_cnt = btf__type_cnt(btf); i < type_cnt; i++) {
980 t = btf__type_by_id(btf, i);
981 if (!t || !btf_is_enum(t) || t->name_off)
982 continue;
983 e = btf_enum(t);
984 for (j = 0, vlen = btf_vlen(t); j < vlen; j++, e++) {
985 name = btf__str_by_offset(btf, e->name_off);
986 if (name && !strcmp(name, "BPF_MAX_TRAMP_LINKS"))
987 return e->val;
988 }
989 }
990
991 return -1;
992 }
993
994 int get_bpf_max_tramp_links(void)
995 {
996 struct btf *vmlinux_btf;
997 int ret;
998
999 vmlinux_btf = btf__load_vmlinux_btf();
1000 if (!ASSERT_OK_PTR(vmlinux_btf, "vmlinux btf"))
1001 return -1;
1002 ret = get_bpf_max_tramp_links_from(vmlinux_btf);
1003 btf__free(vmlinux_btf);
1004
1005 return ret;
1006 }
1007
1008 #define MAX_BACKTRACE_SZ 128
1009 void crash_handler(int signum)
1010 {
1011 void *bt[MAX_BACKTRACE_SZ];
1012 size_t sz;
1013
1014 sz = backtrace(bt, ARRAY_SIZE(bt));
1015
1016 if (env.stdout)
1017 stdio_restore();
1018 if (env.test) {
1019 env.test_state->error_cnt++;
1020 dump_test_log(env.test, env.test_state, true, false);
1021 }
1022 if (env.worker_id != -1)
1023 fprintf(stderr, "[%d]: ", env.worker_id);
1024 fprintf(stderr, "Caught signal #%d!\nStack trace:\n", signum);
1025 backtrace_symbols_fd(bt, sz, STDERR_FILENO);
1026 }
1027
1028 static void sigint_handler(int signum)
1029 {
1030 int i;
1031
1032 for (i = 0; i < env.workers; i++)
1033 if (env.worker_socks[i] > 0)
1034 close(env.worker_socks[i]);
1035 }
1036
1037 static int current_test_idx;
1038 static pthread_mutex_t current_test_lock;
1039 static pthread_mutex_t stdout_output_lock;
1040
1041 static inline const char *str_msg(const struct msg *msg, char *buf)
1042 {
1043 switch (msg->type) {
1044 case MSG_DO_TEST:
1045 sprintf(buf, "MSG_DO_TEST %d", msg->do_test.num);
1046 break;
1047 case MSG_TEST_DONE:
1048 sprintf(buf, "MSG_TEST_DONE %d (log: %d)",
1049 msg->test_done.num,
1050 msg->test_done.have_log);
1051 break;
1052 case MSG_SUBTEST_DONE:
1053 sprintf(buf, "MSG_SUBTEST_DONE %d (log: %d)",
1054 msg->subtest_done.num,
1055 msg->subtest_done.have_log);
1056 break;
1057 case MSG_TEST_LOG:
1058 sprintf(buf, "MSG_TEST_LOG (cnt: %zu, last: %d)",
1059 strlen(msg->test_log.log_buf),
1060 msg->test_log.is_last);
1061 break;
1062 case MSG_EXIT:
1063 sprintf(buf, "MSG_EXIT");
1064 break;
1065 default:
1066 sprintf(buf, "UNKNOWN");
1067 break;
1068 }
1069
1070 return buf;
1071 }
1072
1073 static int send_message(int sock, const struct msg *msg)
1074 {
1075 char buf[256];
1076
1077 if (env.debug)
1078 fprintf(stderr, "Sending msg: %s\n", str_msg(msg, buf));
1079 return send(sock, msg, sizeof(*msg), 0);
1080 }
1081
1082 static int recv_message(int sock, struct msg *msg)
1083 {
1084 int ret;
1085 char buf[256];
1086
1087 memset(msg, 0, sizeof(*msg));
1088 ret = recv(sock, msg, sizeof(*msg), 0);
1089 if (ret >= 0) {
1090 if (env.debug)
1091 fprintf(stderr, "Received msg: %s\n", str_msg(msg, buf));
1092 }
1093 return ret;
1094 }
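/* Sketch of the message flow between a dispatcher thread in the main process
 * and a worker process, derived from dispatch_thread() and worker_main()
 * below:
 *
 *   dispatcher                      worker
 *   ----------                      ------
 *   MSG_DO_TEST(num)       ->
 *                          <-       MSG_TEST_DONE(num, counters, have_log)
 *                          <-       MSG_TEST_LOG chunks (if have_log; the
 *                                   last chunk has is_last set)
 *                          <-       MSG_SUBTEST_DONE (+ MSG_TEST_LOG chunks)
 *                                   once per subtest, if any
 *   MSG_EXIT               ->
 */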
1095
1096 static void run_one_test(int test_num)
1097 {
1098 struct prog_test_def *test = &prog_test_defs[test_num];
1099 struct test_state *state = &test_states[test_num];
1100
1101 env.test = test;
1102 env.test_state = state;
1103
1104 stdio_hijack(&state->log_buf, &state->log_cnt);
1105
1106 if (test->run_test)
1107 test->run_test();
1108 else if (test->run_serial_test)
1109 test->run_serial_test();
1110
1111 /* ensure last sub-test is finalized properly */
1112 if (env.subtest_state)
1113 test__end_subtest();
1114
1115 state->tested = true;
1116
1117 if (verbose() && env.worker_id == -1)
1118 print_test_result(test, state);
1119
1120 reset_affinity();
1121 restore_netns();
1122 if (test->need_cgroup_cleanup)
1123 cleanup_cgroup_environment();
1124
1125 stdio_restore();
1126
1127 dump_test_log(test, state, false, false);
1128 }
1129
1130 struct dispatch_data {
1131 int worker_id;
1132 int sock_fd;
1133 };
1134
1135 static int read_prog_test_msg(int sock_fd, struct msg *msg, enum msg_type type)
1136 {
1137 if (recv_message(sock_fd, msg) < 0)
1138 return 1;
1139
1140 if (msg->type != type) {
1141 printf("%s: unexpected message type %d. expected %d\n", __func__, msg->type, type);
1142 return 1;
1143 }
1144
1145 return 0;
1146 }
1147
1148 static int dispatch_thread_read_log(int sock_fd, char **log_buf, size_t *log_cnt)
1149 {
1150 FILE *log_fp = NULL;
1151 int result = 0;
1152
1153 log_fp = open_memstream(log_buf, log_cnt);
1154 if (!log_fp)
1155 return 1;
1156
1157 while (true) {
1158 struct msg msg;
1159
1160 if (read_prog_test_msg(sock_fd, &msg, MSG_TEST_LOG)) {
1161 result = 1;
1162 goto out;
1163 }
1164
1165 fprintf(log_fp, "%s", msg.test_log.log_buf);
1166 if (msg.test_log.is_last)
1167 break;
1168 }
1169
1170 out:
1171 fclose(log_fp);
1172 log_fp = NULL;
1173 return result;
1174 }
1175
1176 static int dispatch_thread_send_subtests(int sock_fd, struct test_state *state)
1177 {
1178 struct msg msg;
1179 struct subtest_state *subtest_state;
1180 int subtest_num = state->subtest_num;
1181
1182 state->subtest_states = malloc(subtest_num * sizeof(*subtest_state));
1183
1184 for (int i = 0; i < subtest_num; i++) {
1185 subtest_state = &state->subtest_states[i];
1186
1187 memset(subtest_state, 0, sizeof(*subtest_state));
1188
1189 if (read_prog_test_msg(sock_fd, &msg, MSG_SUBTEST_DONE))
1190 return 1;
1191
1192 subtest_state->name = strdup(msg.subtest_done.name);
1193 subtest_state->error_cnt = msg.subtest_done.error_cnt;
1194 subtest_state->skipped = msg.subtest_done.skipped;
1195 subtest_state->filtered = msg.subtest_done.filtered;
1196
1197 /* collect all logs */
1198 if (msg.subtest_done.have_log)
1199 if (dispatch_thread_read_log(sock_fd,
1200 &subtest_state->log_buf,
1201 &subtest_state->log_cnt))
1202 return 1;
1203 }
1204
1205 return 0;
1206 }
1207
1208 static void *dispatch_thread(void *ctx)
1209 {
1210 struct dispatch_data *data = ctx;
1211 int sock_fd;
1212
1213 sock_fd = data->sock_fd;
1214
1215 while (true) {
1216 int test_to_run = -1;
1217 struct prog_test_def *test;
1218 struct test_state *state;
1219
1220 /* grab a test */
1221 {
1222 pthread_mutex_lock(&current_test_lock);
1223
1224 if (current_test_idx >= prog_test_cnt) {
1225 pthread_mutex_unlock(&current_test_lock);
1226 goto done;
1227 }
1228
1229 test = &prog_test_defs[current_test_idx];
1230 test_to_run = current_test_idx;
1231 current_test_idx++;
1232
1233 pthread_mutex_unlock(&current_test_lock);
1234 }
1235
1236 if (!test->should_run || test->run_serial_test)
1237 continue;
1238
1239 /* run test through worker */
1240 {
1241 struct msg msg_do_test;
1242
1243 memset(&msg_do_test, 0, sizeof(msg_do_test));
1244 msg_do_test.type = MSG_DO_TEST;
1245 msg_do_test.do_test.num = test_to_run;
1246 if (send_message(sock_fd, &msg_do_test) < 0) {
1247 perror("Fail to send command");
1248 goto done;
1249 }
1250 env.worker_current_test[data->worker_id] = test_to_run;
1251 }
1252
1253 /* wait for test done */
1254 do {
1255 struct msg msg;
1256
1257 if (read_prog_test_msg(sock_fd, &msg, MSG_TEST_DONE))
1258 goto error;
1259 if (test_to_run != msg.test_done.num)
1260 goto error;
1261
1262 state = &test_states[test_to_run];
1263 state->tested = true;
1264 state->error_cnt = msg.test_done.error_cnt;
1265 state->skip_cnt = msg.test_done.skip_cnt;
1266 state->sub_succ_cnt = msg.test_done.sub_succ_cnt;
1267 state->subtest_num = msg.test_done.subtest_num;
1268
1269 /* collect all logs */
1270 if (msg.test_done.have_log) {
1271 if (dispatch_thread_read_log(sock_fd,
1272 &state->log_buf,
1273 &state->log_cnt))
1274 goto error;
1275 }
1276
1277 /* collect all subtests and subtest logs */
1278 if (!state->subtest_num)
1279 break;
1280
1281 if (dispatch_thread_send_subtests(sock_fd, state))
1282 goto error;
1283 } while (false);
1284
1285 pthread_mutex_lock(&stdout_output_lock);
1286 dump_test_log(test, state, false, true);
1287 pthread_mutex_unlock(&stdout_output_lock);
1288 } /* while (true) */
1289 error:
1290 if (env.debug)
1291 fprintf(stderr, "[%d]: Protocol/IO error: %s.\n", data->worker_id, strerror(errno));
1292
1293 done:
1294 {
1295 struct msg msg_exit;
1296
1297 msg_exit.type = MSG_EXIT;
1298 if (send_message(sock_fd, &msg_exit) < 0) {
1299 if (env.debug)
1300 fprintf(stderr, "[%d]: send_message msg_exit: %s.\n",
1301 data->worker_id, strerror(errno));
1302 }
1303 }
1304 return NULL;
1305 }
1306
1307 static void calculate_summary_and_print_errors(struct test_env *env)
1308 {
1309 int i;
1310 int succ_cnt = 0, fail_cnt = 0, sub_succ_cnt = 0, skip_cnt = 0;
1311
1312 for (i = 0; i < prog_test_cnt; i++) {
1313 struct test_state *state = &test_states[i];
1314
1315 if (!state->tested)
1316 continue;
1317
1318 sub_succ_cnt += state->sub_succ_cnt;
1319 skip_cnt += state->skip_cnt;
1320
1321 if (state->error_cnt)
1322 fail_cnt++;
1323 else
1324 succ_cnt++;
1325 }
1326
1327 /*
1328 * We only print the error log summary when there are failed tests and
1329 * verbose mode is not enabled. Otherwise, results may be inconsistent.
1330 *
1331 */
1332 if (!verbose() && fail_cnt) {
1333 printf("\nAll error logs:\n");
1334
1335 /* print error logs again */
1336 for (i = 0; i < prog_test_cnt; i++) {
1337 struct prog_test_def *test = &prog_test_defs[i];
1338 struct test_state *state = &test_states[i];
1339
1340 if (!state->tested || !state->error_cnt)
1341 continue;
1342
1343 dump_test_log(test, state, true, true);
1344 }
1345 }
1346
1347 printf("Summary: %d/%d PASSED, %d SKIPPED, %d FAILED\n",
1348 succ_cnt, sub_succ_cnt, skip_cnt, fail_cnt);
1349
1350 env->succ_cnt = succ_cnt;
1351 env->sub_succ_cnt = sub_succ_cnt;
1352 env->fail_cnt = fail_cnt;
1353 env->skip_cnt = skip_cnt;
1354 }
1355
1356 static void server_main(void)
1357 {
1358 pthread_t *dispatcher_threads;
1359 struct dispatch_data *data;
1360 struct sigaction sigact_int = {
1361 .sa_handler = sigint_handler,
1362 .sa_flags = SA_RESETHAND,
1363 };
1364 int i;
1365
1366 sigaction(SIGINT, &sigact_int, NULL);
1367
1368 dispatcher_threads = calloc(sizeof(pthread_t), env.workers);
1369 data = calloc(sizeof(struct dispatch_data), env.workers);
1370
1371 env.worker_current_test = calloc(sizeof(int), env.workers);
1372 for (i = 0; i < env.workers; i++) {
1373 int rc;
1374
1375 data[i].worker_id = i;
1376 data[i].sock_fd = env.worker_socks[i];
1377 rc = pthread_create(&dispatcher_threads[i], NULL, dispatch_thread, &data[i]);
1378 if (rc < 0) {
1379 perror("Failed to launch dispatcher thread");
1380 exit(EXIT_ERR_SETUP_INFRA);
1381 }
1382 }
1383
1384 /* wait for all dispatchers to finish */
1385 for (i = 0; i < env.workers; i++) {
1386 while (true) {
1387 int ret = pthread_tryjoin_np(dispatcher_threads[i], NULL);
1388
1389 if (!ret) {
1390 break;
1391 } else if (ret == EBUSY) {
1392 if (env.debug)
1393 fprintf(stderr, "Still waiting for thread %d (test %d).\n",
1394 i, env.worker_current_test[i] + 1);
1395 usleep(1000 * 1000);
1396 continue;
1397 } else {
1398 fprintf(stderr, "Unexpected error joining dispatcher thread: %d", ret);
1399 break;
1400 }
1401 }
1402 }
1403 free(dispatcher_threads);
1404 free(env.worker_current_test);
1405 free(data);
1406
1407 /* run serial tests */
1408 save_netns();
1409
1410 for (int i = 0; i < prog_test_cnt; i++) {
1411 struct prog_test_def *test = &prog_test_defs[i];
1412
1413 if (!test->should_run || !test->run_serial_test)
1414 continue;
1415
1416 run_one_test(i);
1417 }
1418
1419 /* generate summary */
1420 fflush(stderr);
1421 fflush(stdout);
1422
1423 calculate_summary_and_print_errors(&env);
1424
1425 /* reap all workers */
1426 for (i = 0; i < env.workers; i++) {
1427 int wstatus, pid;
1428
1429 pid = waitpid(env.worker_pids[i], &wstatus, 0);
1430 if (pid != env.worker_pids[i])
1431 perror("Unable to reap worker");
1432 }
1433 }
1434
1435 static void worker_main_send_log(int sock, char *log_buf, size_t log_cnt)
1436 {
1437 char *src;
1438 size_t slen;
1439
1440 src = log_buf;
1441 slen = log_cnt;
1442 while (slen) {
1443 struct msg msg_log;
1444 char *dest;
1445 size_t len;
1446
1447 memset(&msg_log, 0, sizeof(msg_log));
1448 msg_log.type = MSG_TEST_LOG;
1449 dest = msg_log.test_log.log_buf;
1450 len = slen >= MAX_LOG_TRUNK_SIZE ? MAX_LOG_TRUNK_SIZE : slen;
1451 memcpy(dest, src, len);
1452
1453 src += len;
1454 slen -= len;
1455 if (!slen)
1456 msg_log.test_log.is_last = true;
1457
1458 assert(send_message(sock, &msg_log) >= 0);
1459 }
1460 }
1461
1462 static void free_subtest_state(struct subtest_state *state)
1463 {
1464 if (state->log_buf) {
1465 free(state->log_buf);
1466 state->log_buf = NULL;
1467 state->log_cnt = 0;
1468 }
1469 free(state->name);
1470 state->name = NULL;
1471 }
1472
1473 static int worker_main_send_subtests(int sock, struct test_state *state)
1474 {
1475 int i, result = 0;
1476 struct msg msg;
1477 struct subtest_state *subtest_state;
1478
1479 memset(&msg, 0, sizeof(msg));
1480 msg.type = MSG_SUBTEST_DONE;
1481
1482 for (i = 0; i < state->subtest_num; i++) {
1483 subtest_state = &state->subtest_states[i];
1484
1485 msg.subtest_done.num = i;
1486
1487 strncpy(msg.subtest_done.name, subtest_state->name, MAX_SUBTEST_NAME);
1488
1489 msg.subtest_done.error_cnt = subtest_state->error_cnt;
1490 msg.subtest_done.skipped = subtest_state->skipped;
1491 msg.subtest_done.filtered = subtest_state->filtered;
1492 msg.subtest_done.have_log = false;
1493
1494 if (verbose() || state->force_log || subtest_state->error_cnt) {
1495 if (subtest_state->log_cnt)
1496 msg.subtest_done.have_log = true;
1497 }
1498
1499 if (send_message(sock, &msg) < 0) {
1500 perror("Fail to send message done");
1501 result = 1;
1502 goto out;
1503 }
1504
1505 /* send logs */
1506 if (msg.subtest_done.have_log)
1507 worker_main_send_log(sock, subtest_state->log_buf, subtest_state->log_cnt);
1508
1509 free_subtest_state(subtest_state);
1510 free(subtest_state->name);
1511 }
1512
1513 out:
1514 for (; i < state->subtest_num; i++)
1515 free_subtest_state(&state->subtest_states[i]);
1516 free(state->subtest_states);
1517 return result;
1518 }
1519
1520 static int worker_main(int sock)
1521 {
1522 save_netns();
1523
1524 while (true) {
1525 /* receive command */
1526 struct msg msg;
1527
1528 if (recv_message(sock, &msg) < 0)
1529 goto out;
1530
1531 switch (msg.type) {
1532 case MSG_EXIT:
1533 if (env.debug)
1534 fprintf(stderr, "[%d]: worker exit.\n",
1535 env.worker_id);
1536 goto out;
1537 case MSG_DO_TEST: {
1538 int test_to_run = msg.do_test.num;
1539 struct prog_test_def *test = &prog_test_defs[test_to_run];
1540 struct test_state *state = &test_states[test_to_run];
1541 struct msg msg;
1542
1543 if (env.debug)
1544 fprintf(stderr, "[%d]: #%d:%s running.\n",
1545 env.worker_id,
1546 test_to_run + 1,
1547 test->test_name);
1548
1549 run_one_test(test_to_run);
1550
1551 memset(&msg, 0, sizeof(msg));
1552 msg.type = MSG_TEST_DONE;
1553 msg.test_done.num = test_to_run;
1554 msg.test_done.error_cnt = state->error_cnt;
1555 msg.test_done.skip_cnt = state->skip_cnt;
1556 msg.test_done.sub_succ_cnt = state->sub_succ_cnt;
1557 msg.test_done.subtest_num = state->subtest_num;
1558 msg.test_done.have_log = false;
1559
1560 if (verbose() || state->force_log || state->error_cnt) {
1561 if (state->log_cnt)
1562 msg.test_done.have_log = true;
1563 }
1564 if (send_message(sock, &msg) < 0) {
1565 perror("Fail to send message done");
1566 goto out;
1567 }
1568
1569 /* send logs */
1570 if (msg.test_done.have_log)
1571 worker_main_send_log(sock, state->log_buf, state->log_cnt);
1572
1573 if (state->log_buf) {
1574 free(state->log_buf);
1575 state->log_buf = NULL;
1576 state->log_cnt = 0;
1577 }
1578
1579 if (state->subtest_num)
1580 if (worker_main_send_subtests(sock, state))
1581 goto out;
1582
1583 if (env.debug)
1584 fprintf(stderr, "[%d]: #%d:%s done.\n",
1585 env.worker_id,
1586 test_to_run + 1,
1587 test->test_name);
1588 break;
1589 } /* case MSG_DO_TEST */
1590 default:
1591 if (env.debug)
1592 fprintf(stderr, "[%d]: unknown message.\n", env.worker_id);
1593 return -1;
1594 }
1595 }
1596 out:
1597 return 0;
1598 }
1599
1600 static void free_test_states(void)
1601 {
1602 int i, j;
1603
1604 for (i = 0; i < ARRAY_SIZE(prog_test_defs); i++) {
1605 struct test_state *test_state = &test_states[i];
1606
1607 for (j = 0; j < test_state->subtest_num; j++)
1608 free_subtest_state(&test_state->subtest_states[j]);
1609
1610 free(test_state->subtest_states);
1611 free(test_state->log_buf);
1612 test_state->subtest_states = NULL;
1613 test_state->log_buf = NULL;
1614 }
1615 }
1616
1617 int main(int argc, char **argv)
1618 {
1619 static const struct argp argp = {
1620 .options = opts,
1621 .parser = parse_arg,
1622 .doc = argp_program_doc,
1623 };
1624 struct sigaction sigact = {
1625 .sa_handler = crash_handler,
1626 .sa_flags = SA_RESETHAND,
1627 };
1628 int err, i;
1629
1630 sigaction(SIGSEGV, &sigact, NULL);
1631
1632 err = argp_parse(&argp, argc, argv, 0, NULL, &env);
1633 if (err)
1634 return err;
1635
1636 err = cd_flavor_subdir(argv[0]);
1637 if (err)
1638 return err;
1639
1640 /* Use libbpf 1.0 API mode */
1641 libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
1642 libbpf_set_print(libbpf_print_fn);
1643
1644 srand(time(NULL));
1645
1646 env.jit_enabled = is_jit_enabled();
1647 env.nr_cpus = libbpf_num_possible_cpus();
1648 if (env.nr_cpus < 0) {
1649 fprintf(stderr, "Failed to get number of CPUs: %d!\n",
1650 env.nr_cpus);
1651 return -1;
1652 }
1653
1654 env.stdout = stdout;
1655 env.stderr = stderr;
1656
1657 env.has_testmod = true;
1658 if (!env.list_test_names && load_bpf_testmod()) {
1659 fprintf(env.stderr, "WARNING! Selftests relying on bpf_testmod.ko will be skipped.\n");
1660 env.has_testmod = false;
1661 }
1662
1663 /* initializing tests */
1664 for (i = 0; i < prog_test_cnt; i++) {
1665 struct prog_test_def *test = &prog_test_defs[i];
1666
1667 test->test_num = i + 1;
1668 test->should_run = should_run(&env.test_selector,
1669 test->test_num, test->test_name);
1670
1671 if ((test->run_test == NULL && test->run_serial_test == NULL) ||
1672 (test->run_test != NULL && test->run_serial_test != NULL)) {
1673 fprintf(stderr, "Test %d:%s must have either test_%s() or serial_test_%s() defined.\n",
1674 test->test_num, test->test_name, test->test_name, test->test_name);
1675 exit(EXIT_ERR_SETUP_INFRA);
1676 }
1677 }
1678
1679 /* ignore workers if we are just listing */
1680 if (env.get_test_cnt || env.list_test_names)
1681 env.workers = 0;
1682
1683 /* launch workers if requested */
1684 env.worker_id = -1; /* main process */
1685 if (env.workers) {
1686 env.worker_pids = calloc(sizeof(__pid_t), env.workers);
1687 env.worker_socks = calloc(sizeof(int), env.workers);
1688 if (env.debug)
1689 fprintf(stdout, "Launching %d workers.\n", env.workers);
1690 for (i = 0; i < env.workers; i++) {
1691 int sv[2];
1692 pid_t pid;
1693
1694 if (socketpair(AF_UNIX, SOCK_SEQPACKET | SOCK_CLOEXEC, 0, sv) < 0) {
1695 perror("Fail to create worker socket");
1696 return -1;
1697 }
1698 pid = fork();
1699 if (pid < 0) {
1700 perror("Failed to fork worker");
1701 return -1;
1702 } else if (pid != 0) { /* main process */
1703 close(sv[1]);
1704 env.worker_pids[i] = pid;
1705 env.worker_socks[i] = sv[0];
1706 } else { /* inside each worker process */
1707 close(sv[0]);
1708 env.worker_id = i;
1709 return worker_main(sv[1]);
1710 }
1711 }
1712
1713 if (env.worker_id == -1) {
1714 server_main();
1715 goto out;
1716 }
1717 }
1718
1719 /* The rest of the main process */
1720
1721 /* on single mode */
1722 save_netns();
1723
1724 for (i = 0; i < prog_test_cnt; i++) {
1725 struct prog_test_def *test = &prog_test_defs[i];
1726
1727 if (!test->should_run)
1728 continue;
1729
1730 if (env.get_test_cnt) {
1731 env.succ_cnt++;
1732 continue;
1733 }
1734
1735 if (env.list_test_names) {
1736 fprintf(env.stdout, "%s\n", test->test_name);
1737 env.succ_cnt++;
1738 continue;
1739 }
1740
1741 run_one_test(i);
1742 }
1743
1744 if (env.get_test_cnt) {
1745 printf("%d\n", env.succ_cnt);
1746 goto out;
1747 }
1748
1749 if (env.list_test_names)
1750 goto out;
1751
1752 calculate_summary_and_print_errors(&env);
1753
1754 close(env.saved_netns_fd);
1755 out:
1756 if (!env.list_test_names && env.has_testmod)
1757 unload_bpf_testmod();
1758
1759 free_test_selector(&env.test_selector);
1760 free_test_selector(&env.subtest_selector);
1761 free_test_states();
1762
1763 if (env.succ_cnt + env.fail_cnt + env.skip_cnt == 0)
1764 return EXIT_NO_TEST;
1765
1766 return env.fail_cnt ? EXIT_FAILURE : EXIT_SUCCESS;
1767 }
1768