// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
#include <test_progs.h>
#include "cgroup_helpers.h"
#include "cgroup_mprog.skel.h"

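/*
 * Query how many programs are currently attached at @atype on cgroup fd @cg
 * and check that the reported count matches @expected.
 */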
static void assert_mprog_count(int cg, int atype, int expected)
{
	__u32 count = 0, attach_flags = 0;
	int err;

	err = bpf_prog_query(cg, atype, 0, &attach_flags,
			     NULL, &count);
	ASSERT_EQ(count, expected, "count");
	ASSERT_EQ(err, 0, "prog_query");
}

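/*
 * Attach four getsockopt programs with BPF_PROG_ATTACH using the mprog
 * ordering flags (BPF_F_BEFORE/BPF_F_AFTER plus relative_fd) and
 * expected_revision, verify the resulting order via BPF_PROG_QUERY,
 * then detach them again.
 */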
static void test_prog_attach_detach(int atype)
{
	LIBBPF_OPTS(bpf_prog_attach_opts, opta);
	LIBBPF_OPTS(bpf_prog_detach_opts, optd);
	LIBBPF_OPTS(bpf_prog_query_opts, optq);
	__u32 fd1, fd2, fd3, fd4, id1, id2, id3, id4;
	struct cgroup_mprog *skel;
	__u32 prog_ids[10];
	int cg, err;

	cg = test__join_cgroup("/prog_attach_detach");
	if (!ASSERT_GE(cg, 0, "join_cgroup /prog_attach_detach"))
		return;

	skel = cgroup_mprog__open_and_load();
	if (!ASSERT_OK_PTR(skel, "skel_load"))
		goto cleanup;

	fd1 = bpf_program__fd(skel->progs.getsockopt_1);
	fd2 = bpf_program__fd(skel->progs.getsockopt_2);
	fd3 = bpf_program__fd(skel->progs.getsockopt_3);
	fd4 = bpf_program__fd(skel->progs.getsockopt_4);

	id1 = id_from_prog_fd(fd1);
	id2 = id_from_prog_fd(fd2);
	id3 = id_from_prog_fd(fd3);
	id4 = id_from_prog_fd(fd4);

	assert_mprog_count(cg, atype, 0);

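	/*
	 * BPF_F_BEFORE | BPF_F_AFTER with no relative target is accepted here
	 * because the list is still empty; expected_revision == 1 additionally
	 * asserts that nothing has been attached or detached yet.
	 */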
	LIBBPF_OPTS_RESET(opta,
		.flags = BPF_F_ALLOW_MULTI | BPF_F_BEFORE | BPF_F_AFTER,
		.expected_revision = 1,
	);

	/* ordering: [fd1] */
	err = bpf_prog_attach_opts(fd1, cg, atype, &opta);
	if (!ASSERT_EQ(err, 0, "prog_attach"))
		goto cleanup;

	assert_mprog_count(cg, atype, 1);

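	/*
	 * BPF_F_BEFORE without a relative target inserts at the head of the
	 * list; expected_revision == 2 reflects the one successful attach so far.
	 */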
	LIBBPF_OPTS_RESET(opta,
		.flags = BPF_F_ALLOW_MULTI | BPF_F_BEFORE,
		.expected_revision = 2,
	);

	/* ordering: [fd2, fd1] */
	err = bpf_prog_attach_opts(fd2, cg, atype, &opta);
	if (!ASSERT_EQ(err, 0, "prog_attach"))
		goto cleanup1;

	assert_mprog_count(cg, atype, 2);

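	/*
	 * BPF_F_AFTER with relative_fd places the new program right after the
	 * referenced one (fd2), i.e. between fd2 and fd1.
	 */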
	LIBBPF_OPTS_RESET(opta,
		.flags = BPF_F_ALLOW_MULTI | BPF_F_AFTER,
		.relative_fd = fd2,
		.expected_revision = 3,
	);

	/* ordering: [fd2, fd3, fd1] */
	err = bpf_prog_attach_opts(fd3, cg, atype, &opta);
	if (!ASSERT_EQ(err, 0, "prog_attach"))
		goto cleanup2;

	assert_mprog_count(cg, atype, 3);

	LIBBPF_OPTS_RESET(opta,
		.flags = BPF_F_ALLOW_MULTI,
		.expected_revision = 4,
	);

	/* ordering: [fd2, fd3, fd1, fd4] */
	err = bpf_prog_attach_opts(fd4, cg, atype, &opta);
	if (!ASSERT_EQ(err, 0, "prog_attach"))
		goto cleanup3;

	assert_mprog_count(cg, atype, 4);

	/* retrieve optq.prog_cnt */
	err = bpf_prog_query_opts(cg, atype, &optq);
	if (!ASSERT_OK(err, "prog_query"))
		goto cleanup4;

	/* optq.prog_cnt will be used in below query */
	memset(prog_ids, 0, sizeof(prog_ids));
	optq.prog_ids = prog_ids;
	err = bpf_prog_query_opts(cg, atype, &optq);
	if (!ASSERT_OK(err, "prog_query"))
		goto cleanup4;

	ASSERT_EQ(optq.count, 4, "count");
	ASSERT_EQ(optq.revision, 5, "revision");
	ASSERT_EQ(optq.prog_ids[0], id2, "prog_ids[0]");
	ASSERT_EQ(optq.prog_ids[1], id3, "prog_ids[1]");
	ASSERT_EQ(optq.prog_ids[2], id1, "prog_ids[2]");
	ASSERT_EQ(optq.prog_ids[3], id4, "prog_ids[3]");
	ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]");
	ASSERT_EQ(optq.link_ids, NULL, "link_ids");

cleanup4:
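	/*
	 * Detach also honors expected_revision; after four successful attaches
	 * the revision is 5, matching the value asserted by the query above.
	 */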
	optd.expected_revision = 5;
	err = bpf_prog_detach_opts(fd4, cg, atype, &optd);
	ASSERT_OK(err, "prog_detach");
	assert_mprog_count(cg, atype, 3);

cleanup3:
	LIBBPF_OPTS_RESET(optd);
	err = bpf_prog_detach_opts(fd3, cg, atype, &optd);
	ASSERT_OK(err, "prog_detach");
	assert_mprog_count(cg, atype, 2);

	/* Check revision after two detach operations */
	err = bpf_prog_query_opts(cg, atype, &optq);
	ASSERT_OK(err, "prog_query");
	ASSERT_EQ(optq.revision, 7, "revision");

cleanup2:
	err = bpf_prog_detach_opts(fd2, cg, atype, &optd);
	ASSERT_OK(err, "prog_detach");
	assert_mprog_count(cg, atype, 1);

cleanup1:
	err = bpf_prog_detach_opts(fd1, cg, atype, &optd);
	ASSERT_OK(err, "prog_detach");
	assert_mprog_count(cg, atype, 0);

cleanup:
	cgroup_mprog__destroy(skel);
	close(cg);
}

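/*
 * Same ordering scenario as above, but using bpf_link-based attachment
 * (bpf_program__attach_cgroup_opts) with BPF_F_LINK-relative anchors;
 * detaching happens by destroying the links.
 */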
static void test_link_attach_detach(int atype)
{
	LIBBPF_OPTS(bpf_cgroup_opts, opta);
	LIBBPF_OPTS(bpf_cgroup_opts, optd);
	LIBBPF_OPTS(bpf_prog_query_opts, optq);
	struct bpf_link *link1, *link2, *link3, *link4;
	__u32 fd1, fd2, fd3, fd4, id1, id2, id3, id4;
	struct cgroup_mprog *skel;
	__u32 prog_ids[10];
	int cg, err;

	cg = test__join_cgroup("/link_attach_detach");
	if (!ASSERT_GE(cg, 0, "join_cgroup /link_attach_detach"))
		return;

	skel = cgroup_mprog__open_and_load();
	if (!ASSERT_OK_PTR(skel, "skel_load"))
		goto cleanup;

	fd1 = bpf_program__fd(skel->progs.getsockopt_1);
	fd2 = bpf_program__fd(skel->progs.getsockopt_2);
	fd3 = bpf_program__fd(skel->progs.getsockopt_3);
	fd4 = bpf_program__fd(skel->progs.getsockopt_4);

	id1 = id_from_prog_fd(fd1);
	id2 = id_from_prog_fd(fd2);
	id3 = id_from_prog_fd(fd3);
	id4 = id_from_prog_fd(fd4);

	assert_mprog_count(cg, atype, 0);

	LIBBPF_OPTS_RESET(opta,
		.expected_revision = 1,
	);

	/* ordering: [fd1] */
	link1 = bpf_program__attach_cgroup_opts(skel->progs.getsockopt_1, cg, &opta);
	if (!ASSERT_OK_PTR(link1, "link_attach"))
		goto cleanup;

	assert_mprog_count(cg, atype, 1);

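	/*
	 * With BPF_F_LINK the relative anchor refers to a link rather than a
	 * program: either relative_id (here via id_from_link_fd()) or the
	 * relative_fd of the link.
	 */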
	LIBBPF_OPTS_RESET(opta,
		.flags = BPF_F_BEFORE | BPF_F_LINK,
		.relative_id = id_from_link_fd(bpf_link__fd(link1)),
		.expected_revision = 2,
	);

	/* ordering: [fd2, fd1] */
	link2 = bpf_program__attach_cgroup_opts(skel->progs.getsockopt_2, cg, &opta);
	if (!ASSERT_OK_PTR(link2, "link_attach"))
		goto cleanup1;

	assert_mprog_count(cg, atype, 2);

	LIBBPF_OPTS_RESET(opta,
		.flags = BPF_F_AFTER | BPF_F_LINK,
		.relative_fd = bpf_link__fd(link2),
		.expected_revision = 3,
	);

	/* ordering: [fd2, fd3, fd1] */
	link3 = bpf_program__attach_cgroup_opts(skel->progs.getsockopt_3, cg, &opta);
	if (!ASSERT_OK_PTR(link3, "link_attach"))
		goto cleanup2;

	assert_mprog_count(cg, atype, 3);

	LIBBPF_OPTS_RESET(opta,
		.expected_revision = 4,
	);

	/* ordering: [fd2, fd3, fd1, fd4] */
	link4 = bpf_program__attach_cgroup_opts(skel->progs.getsockopt_4, cg, &opta);
	if (!ASSERT_OK_PTR(link4, "link_attach"))
		goto cleanup3;

	assert_mprog_count(cg, atype, 4);

	/* retrieve optq.prog_cnt */
	err = bpf_prog_query_opts(cg, atype, &optq);
	if (!ASSERT_OK(err, "prog_query"))
		goto cleanup4;

	/* optq.prog_cnt will be used in below query */
	memset(prog_ids, 0, sizeof(prog_ids));
	optq.prog_ids = prog_ids;
	err = bpf_prog_query_opts(cg, atype, &optq);
	if (!ASSERT_OK(err, "prog_query"))
		goto cleanup4;

	ASSERT_EQ(optq.count, 4, "count");
	ASSERT_EQ(optq.revision, 5, "revision");
	ASSERT_EQ(optq.prog_ids[0], id2, "prog_ids[0]");
	ASSERT_EQ(optq.prog_ids[1], id3, "prog_ids[1]");
	ASSERT_EQ(optq.prog_ids[2], id1, "prog_ids[2]");
	ASSERT_EQ(optq.prog_ids[3], id4, "prog_ids[3]");
	ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]");
	ASSERT_EQ(optq.link_ids, NULL, "link_ids");

cleanup4:
	bpf_link__destroy(link4);
	assert_mprog_count(cg, atype, 3);

cleanup3:
	bpf_link__destroy(link3);
	assert_mprog_count(cg, atype, 2);

	/* Check revision after two detach operations */
	err = bpf_prog_query_opts(cg, atype, &optq);
	ASSERT_OK(err, "prog_query");
	ASSERT_EQ(optq.revision, 7, "revision");

cleanup2:
	bpf_link__destroy(link2);
	assert_mprog_count(cg, atype, 1);

cleanup1:
	bpf_link__destroy(link1);
	assert_mprog_count(cg, atype, 0);

cleanup:
	cgroup_mprog__destroy(skel);
	close(cg);
}

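/*
 * Exercise BPF_F_PREORDER, which requests pre-order execution in the cgroup
 * hierarchy (ancestor programs before descendant ones) instead of the default
 * post-order. The test also checks that relative ordering against a preorder
 * program requires the new program to be preorder as well.
 */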
static void test_preorder_prog_attach_detach(int atype)
{
	LIBBPF_OPTS(bpf_prog_attach_opts, opta);
	LIBBPF_OPTS(bpf_prog_detach_opts, optd);
	__u32 fd1, fd2, fd3, fd4;
	struct cgroup_mprog *skel;
	int cg, err;

	cg = test__join_cgroup("/preorder_prog_attach_detach");
	if (!ASSERT_GE(cg, 0, "join_cgroup /preorder_prog_attach_detach"))
		return;

	skel = cgroup_mprog__open_and_load();
	if (!ASSERT_OK_PTR(skel, "skel_load"))
		goto cleanup;

	fd1 = bpf_program__fd(skel->progs.getsockopt_1);
	fd2 = bpf_program__fd(skel->progs.getsockopt_2);
	fd3 = bpf_program__fd(skel->progs.getsockopt_3);
	fd4 = bpf_program__fd(skel->progs.getsockopt_4);

	assert_mprog_count(cg, atype, 0);

	LIBBPF_OPTS_RESET(opta,
		.flags = BPF_F_ALLOW_MULTI,
		.expected_revision = 1,
	);

	/* ordering: [fd1] */
	err = bpf_prog_attach_opts(fd1, cg, atype, &opta);
	if (!ASSERT_EQ(err, 0, "prog_attach"))
		goto cleanup;

	assert_mprog_count(cg, atype, 1);

	LIBBPF_OPTS_RESET(opta,
		.flags = BPF_F_ALLOW_MULTI | BPF_F_PREORDER,
		.expected_revision = 2,
	);

	/* ordering: [fd1, fd2] */
	err = bpf_prog_attach_opts(fd2, cg, atype, &opta);
	if (!ASSERT_EQ(err, 0, "prog_attach"))
		goto cleanup1;

	assert_mprog_count(cg, atype, 2);

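	/*
	 * fd2 was attached with BPF_F_PREORDER; anchoring a non-preorder
	 * program relative to it is expected to fail with -EINVAL and leave
	 * the count unchanged.
	 */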
	LIBBPF_OPTS_RESET(opta,
		.flags = BPF_F_ALLOW_MULTI | BPF_F_AFTER,
		.relative_fd = fd2,
		.expected_revision = 3,
	);

	err = bpf_prog_attach_opts(fd3, cg, atype, &opta);
	if (!ASSERT_EQ(err, -EINVAL, "prog_attach"))
		goto cleanup2;

	assert_mprog_count(cg, atype, 2);

	LIBBPF_OPTS_RESET(opta,
		.flags = BPF_F_ALLOW_MULTI | BPF_F_AFTER | BPF_F_PREORDER,
		.relative_fd = fd2,
		.expected_revision = 3,
	);

	/* ordering: [fd1, fd2, fd3] */
	err = bpf_prog_attach_opts(fd3, cg, atype, &opta);
	if (!ASSERT_EQ(err, 0, "prog_attach"))
		goto cleanup2;

	assert_mprog_count(cg, atype, 3);

	LIBBPF_OPTS_RESET(opta,
		.flags = BPF_F_ALLOW_MULTI,
		.expected_revision = 4,
	);

	/* ordering: [fd2, fd3, fd1, fd4] */
	err = bpf_prog_attach_opts(fd4, cg, atype, &opta);
	if (!ASSERT_EQ(err, 0, "prog_attach"))
		goto cleanup3;

	assert_mprog_count(cg, atype, 4);

	err = bpf_prog_detach_opts(fd4, cg, atype, &optd);
	ASSERT_OK(err, "prog_detach");
	assert_mprog_count(cg, atype, 3);

cleanup3:
	err = bpf_prog_detach_opts(fd3, cg, atype, &optd);
	ASSERT_OK(err, "prog_detach");
	assert_mprog_count(cg, atype, 2);

cleanup2:
	err = bpf_prog_detach_opts(fd2, cg, atype, &optd);
	ASSERT_OK(err, "prog_detach");
	assert_mprog_count(cg, atype, 1);

cleanup1:
	err = bpf_prog_detach_opts(fd1, cg, atype, &optd);
	ASSERT_OK(err, "prog_detach");
	assert_mprog_count(cg, atype, 0);

cleanup:
	cgroup_mprog__destroy(skel);
	close(cg);
}

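/*
 * Same BPF_F_PREORDER checks as above, but through link-based attachment,
 * including a BPF_F_LINK-relative anchor for the preorder insertion.
 */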
static void test_preorder_link_attach_detach(int atype)
{
	LIBBPF_OPTS(bpf_cgroup_opts, opta);
	struct bpf_link *link1, *link2, *link3, *link4;
	struct cgroup_mprog *skel;
	__u32 fd2;
	int cg;

	cg = test__join_cgroup("/preorder_link_attach_detach");
	if (!ASSERT_GE(cg, 0, "join_cgroup /preorder_link_attach_detach"))
		return;

	skel = cgroup_mprog__open_and_load();
	if (!ASSERT_OK_PTR(skel, "skel_load"))
		goto cleanup;

	fd2 = bpf_program__fd(skel->progs.getsockopt_2);

	assert_mprog_count(cg, atype, 0);

	LIBBPF_OPTS_RESET(opta,
		.expected_revision = 1,
	);

	/* ordering: [fd1] */
	link1 = bpf_program__attach_cgroup_opts(skel->progs.getsockopt_1, cg, &opta);
	if (!ASSERT_OK_PTR(link1, "link_attach"))
		goto cleanup;

	assert_mprog_count(cg, atype, 1);

	LIBBPF_OPTS_RESET(opta,
		.flags = BPF_F_PREORDER,
		.expected_revision = 2,
	);

	/* ordering: [fd1, fd2] */
	link2 = bpf_program__attach_cgroup_opts(skel->progs.getsockopt_2, cg, &opta);
	if (!ASSERT_OK_PTR(link2, "link_attach"))
		goto cleanup1;

	assert_mprog_count(cg, atype, 2);

	LIBBPF_OPTS_RESET(opta,
		.flags = BPF_F_AFTER,
		.relative_fd = fd2,
		.expected_revision = 3,
	);

	link3 = bpf_program__attach_cgroup_opts(skel->progs.getsockopt_3, cg, &opta);
	if (!ASSERT_ERR_PTR(link3, "link_attach"))
		goto cleanup2;

	assert_mprog_count(cg, atype, 2);

	LIBBPF_OPTS_RESET(opta,
		.flags = BPF_F_AFTER | BPF_F_PREORDER | BPF_F_LINK,
		.relative_fd = bpf_link__fd(link2),
		.expected_revision = 3,
	);

	/* ordering: [fd1, fd2, fd3] */
	link3 = bpf_program__attach_cgroup_opts(skel->progs.getsockopt_3, cg, &opta);
	if (!ASSERT_OK_PTR(link3, "link_attach"))
		goto cleanup2;

	assert_mprog_count(cg, atype, 3);

	LIBBPF_OPTS_RESET(opta,
		.expected_revision = 4,
	);

	/* ordering: [fd2, fd3, fd1, fd4] */
	link4 = bpf_program__attach_cgroup_opts(skel->progs.getsockopt_4, cg, &opta);
	if (!ASSERT_OK_PTR(link4, "prog_attach"))
		goto cleanup3;

	assert_mprog_count(cg, atype, 4);

	bpf_link__destroy(link4);
	assert_mprog_count(cg, atype, 3);

cleanup3:
	bpf_link__destroy(link3);
	assert_mprog_count(cg, atype, 2);

cleanup2:
	bpf_link__destroy(link2);
	assert_mprog_count(cg, atype, 1);

cleanup1:
	bpf_link__destroy(link1);
	assert_mprog_count(cg, atype, 0);

cleanup:
	cgroup_mprog__destroy(skel);
	close(cg);
}

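/*
 * Walk through invalid flag and relative-target combinations for the
 * prog-based attach API, check the expected error codes, and verify that
 * the attached-program count stays consistent throughout.
 */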
static void test_invalid_attach_detach(int atype)
{
	LIBBPF_OPTS(bpf_prog_attach_opts, opta);
	__u32 fd1, fd2, id2;
	struct cgroup_mprog *skel;
	int cg, err;

	cg = test__join_cgroup("/invalid_attach_detach");
	if (!ASSERT_GE(cg, 0, "join_cgroup /invalid_attach_detach"))
		return;

	skel = cgroup_mprog__open_and_load();
	if (!ASSERT_OK_PTR(skel, "skel_load"))
		goto cleanup;

	fd1 = bpf_program__fd(skel->progs.getsockopt_1);
	fd2 = bpf_program__fd(skel->progs.getsockopt_2);

	id2 = id_from_prog_fd(fd2);

	assert_mprog_count(cg, atype, 0);

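	/*
	 * BPF_F_BEFORE | BPF_F_AFTER together with a relative target is
	 * expected to be rejected with -EINVAL.
	 */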
	LIBBPF_OPTS_RESET(opta,
		.flags = BPF_F_ALLOW_MULTI | BPF_F_BEFORE | BPF_F_AFTER,
		.relative_id = id2,
	);

	err = bpf_prog_attach_opts(fd1, cg, atype, &opta);
	ASSERT_EQ(err, -EINVAL, "prog_attach");
	assert_mprog_count(cg, atype, 0);

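	/*
	 * BPF_F_ID selects an ID-based relative target, but relative_id is
	 * left at 0 here, so the target cannot be resolved; both the
	 * BPF_F_BEFORE and BPF_F_AFTER variants below expect -ENOENT.
	 */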
	LIBBPF_OPTS_RESET(opta,
		.flags = BPF_F_ALLOW_MULTI | BPF_F_BEFORE | BPF_F_ID,
	);

	err = bpf_prog_attach_opts(fd1, cg, atype, &opta);
	ASSERT_EQ(err, -ENOENT, "prog_attach");
	assert_mprog_count(cg, atype, 0);

	LIBBPF_OPTS_RESET(opta,
		.flags = BPF_F_ALLOW_MULTI | BPF_F_AFTER | BPF_F_ID,
	);

	err = bpf_prog_attach_opts(fd1, cg, atype, &opta);
	ASSERT_EQ(err, -ENOENT, "prog_attach");
	assert_mprog_count(cg, atype, 0);

	LIBBPF_OPTS_RESET(opta,
		.flags = BPF_F_ALLOW_MULTI | BPF_F_BEFORE | BPF_F_AFTER,
		.relative_id = id2,
	);

	err = bpf_prog_attach_opts(fd1, cg, atype, &opta);
	ASSERT_EQ(err, -EINVAL, "prog_attach");
	assert_mprog_count(cg, atype, 0);

	LIBBPF_OPTS_RESET(opta,
		.flags = BPF_F_ALLOW_MULTI | BPF_F_LINK,
		.relative_id = id2,
	);

	err = bpf_prog_attach_opts(fd1, cg, atype, &opta);
	ASSERT_EQ(err, -EINVAL, "prog_attach");
	assert_mprog_count(cg, atype, 0);

	LIBBPF_OPTS_RESET(opta,
		.flags = BPF_F_ALLOW_MULTI,
		.relative_id = id2,
	);

	err = bpf_prog_attach_opts(fd1, cg, atype, &opta);
	ASSERT_EQ(err, -EINVAL, "prog_attach");
	assert_mprog_count(cg, atype, 0);

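	/* fd1 is not attached yet, so anchoring relative to it fails with -ENOENT */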
	LIBBPF_OPTS_RESET(opta,
		.flags = BPF_F_ALLOW_MULTI | BPF_F_BEFORE,
		.relative_fd = fd1,
	);

	err = bpf_prog_attach_opts(fd1, cg, atype, &opta);
	ASSERT_EQ(err, -ENOENT, "prog_attach");
	assert_mprog_count(cg, atype, 0);

	LIBBPF_OPTS_RESET(opta,
		.flags = BPF_F_ALLOW_MULTI | BPF_F_AFTER,
		.relative_fd = fd1,
	);

	err = bpf_prog_attach_opts(fd1, cg, atype, &opta);
	ASSERT_EQ(err, -ENOENT, "prog_attach");
	assert_mprog_count(cg, atype, 0);

	LIBBPF_OPTS_RESET(opta,
		.flags = BPF_F_ALLOW_MULTI,
	);

	err = bpf_prog_attach_opts(fd1, cg, atype, &opta);
	if (!ASSERT_EQ(err, 0, "prog_attach"))
		goto cleanup;
	assert_mprog_count(cg, atype, 1);

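	/* fd1 is attached now; attaching the same program again is expected to fail */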
	LIBBPF_OPTS_RESET(opta,
		.flags = BPF_F_ALLOW_MULTI | BPF_F_AFTER,
	);

	err = bpf_prog_attach_opts(fd1, cg, atype, &opta);
	ASSERT_EQ(err, -EINVAL, "prog_attach");
	assert_mprog_count(cg, atype, 1);

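	/* likewise, BPF_F_REPLACE combined with BPF_F_AFTER on the already
	 * attached fd1 is expected to be rejected
	 */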
	LIBBPF_OPTS_RESET(opta,
		.flags = BPF_F_ALLOW_MULTI | BPF_F_REPLACE | BPF_F_AFTER,
		.replace_prog_fd = fd1,
	);

	err = bpf_prog_attach_opts(fd1, cg, atype, &opta);
	ASSERT_EQ(err, -EINVAL, "prog_attach");
	assert_mprog_count(cg, atype, 1);
cleanup:
	cgroup_mprog__destroy(skel);
	close(cg);
}

void test_cgroup_mprog_opts(void)
{
	if (test__start_subtest("prog_attach_detach"))
		test_prog_attach_detach(BPF_CGROUP_GETSOCKOPT);
	if (test__start_subtest("link_attach_detach"))
		test_link_attach_detach(BPF_CGROUP_GETSOCKOPT);
	if (test__start_subtest("preorder_prog_attach_detach"))
		test_preorder_prog_attach_detach(BPF_CGROUP_GETSOCKOPT);
	if (test__start_subtest("preorder_link_attach_detach"))
		test_preorder_link_attach_detach(BPF_CGROUP_GETSOCKOPT);
	if (test__start_subtest("invalid_attach_detach"))
		test_invalid_attach_detach(BPF_CGROUP_GETSOCKOPT);
}