/tools/testing/selftests/bpf/prog_tests/
  verifier_log.c
      102  opts.log_size = 25;  in verif_log_subtest()
      131  opts.log_size = i;  in verif_log_subtest()
      189  opts.log_buf = NULL;  in verif_log_subtest()
      191  opts.log_size = 0;  in verif_log_subtest()
      211  opts.log_size = 0;  in verif_log_subtest()
      316  opts.log_size = 25;  in verif_btf_log_subtest()
      335  opts.log_size = i;  in verif_btf_log_subtest()
      375  opts.log_size = 0;  in verif_btf_log_subtest()
      383  opts.log_level = 1;  in verif_btf_log_subtest()
      394  opts.log_level = 1;  in verif_btf_log_subtest()
      [all …]
  local_kptr_stash.c
       11  LIBBPF_OPTS(bpf_test_run_opts, opts,  in test_local_kptr_stash_simple()
       25  ASSERT_OK(opts.retval, "local_kptr_stash_add_nodes retval");  in test_local_kptr_stash_simple()
       32  LIBBPF_OPTS(bpf_test_run_opts, opts,  in test_local_kptr_stash_plain()
       46  ASSERT_OK(opts.retval, "local_kptr_stash_add_plain retval");  in test_local_kptr_stash_plain()
       53  LIBBPF_OPTS(bpf_test_run_opts, opts,  in test_local_kptr_stash_local_with_root()
       74  LIBBPF_OPTS(bpf_test_run_opts, opts,  in test_local_kptr_stash_unstash()
       88  ASSERT_OK(opts.retval, "local_kptr_stash_add_nodes retval");  in test_local_kptr_stash_unstash()
       99  LIBBPF_OPTS(bpf_test_run_opts, opts,  in test_refcount_acquire_without_unstash()
      112  &opts);  in test_refcount_acquire_without_unstash()
      118  ASSERT_OK(opts.retval, "stash_refcounted_node retval");  in test_refcount_acquire_without_unstash()
      [all …]
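local_kptr_stash.c above, and map_kptr.c, rbtree.c and pe_preserve_elems.c further down, all use the same libbpf pattern: declare a bpf_test_run_opts with LIBBPF_OPTS(), run the program once with bpf_prog_test_run_opts(), then check opts.retval. A minimal sketch of that pattern, assuming libbpf >= 1.0; prog_fd is a placeholder for an already-loaded program (e.g. obtained with bpf_program__fd() from a loaded skeleton):

#include <bpf/bpf.h>
#include <bpf/libbpf.h>

/* Sketch only: prog_fd must already refer to a loaded BPF program. */
static int run_prog_once(int prog_fd)
{
	char data_in[64] = {};			/* dummy input buffer */
	LIBBPF_OPTS(bpf_test_run_opts, opts,
		.data_in = data_in,
		.data_size_in = sizeof(data_in),
		.repeat = 1,
	);
	int err;

	err = bpf_prog_test_run_opts(prog_fd, &opts);
	if (err)
		return err;			/* syscall-level failure */

	return opts.retval;			/* return value of the BPF program itself */
}

DECLARE_LIBBPF_OPTS(), seen in pe_preserve_elems.c below, is the older spelling of the same LIBBPF_OPTS() macro.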
  raw_tp_test_run.c
       16  LIBBPF_OPTS(bpf_test_run_opts, opts,  in test_raw_tp_test_run()
       46  opts.ctx_in = args;  in test_raw_tp_test_run()
       47  opts.ctx_size_in = sizeof(__u64);  in test_raw_tp_test_run()
       49  err = bpf_prog_test_run_opts(prog_fd, &opts);  in test_raw_tp_test_run()
       52  opts.ctx_size_in = sizeof(args);  in test_raw_tp_test_run()
       53  err = bpf_prog_test_run_opts(prog_fd, &opts);  in test_raw_tp_test_run()
       61  opts.cpu = i;  in test_raw_tp_test_run()
       62  opts.retval = 0;  in test_raw_tp_test_run()
       70  opts.cpu = 0xffffffff;  in test_raw_tp_test_run()
       76  opts.cpu = 1;  in test_raw_tp_test_run()
      [all …]
  log_buf.c
      107  opts.kernel_log_size = 0;  in obj_load_log_buf()
      108  opts.kernel_log_level = 1;  in obj_load_log_buf()
      164  opts.log_buf = log_buf;  in bpf_prog_load_log_buf()
      165  opts.log_size = log_buf_sz;  in bpf_prog_load_log_buf()
      169  opts.log_level = 0;  in bpf_prog_load_log_buf()
      179  opts.log_level = 2;  in bpf_prog_load_log_buf()
      189  opts.log_level = 0;  in bpf_prog_load_log_buf()
      223  opts.log_buf = log_buf;  in bpf_btf_load_log_buf()
      228  opts.log_level = 0;  in bpf_btf_load_log_buf()
      238  opts.log_level = 2;  in bpf_btf_load_log_buf()
      [all …]
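verifier_log.c and log_buf.c exercise the verifier-log knobs of bpf_prog_load_opts (log_buf, log_size, log_level); log_buf.c additionally covers the object-load variants kernel_log_level/kernel_log_size and the BTF-load path. A hedged sketch of the caller side, assuming libbpf >= 1.0; the two hand-rolled instructions just encode "r0 = 0; exit":

#include <bpf/bpf.h>
#include <linux/bpf.h>
#include <stdio.h>

static int load_with_log(void)
{
	/* Trivial placeholder program: r0 = 0; exit. */
	struct bpf_insn insns[] = {
		{ .code = BPF_ALU64 | BPF_MOV | BPF_K, .dst_reg = BPF_REG_0, .imm = 0 },
		{ .code = BPF_JMP | BPF_EXIT },
	};
	static char log_buf[64 * 1024];
	LIBBPF_OPTS(bpf_prog_load_opts, opts,
		.log_buf = log_buf,
		.log_size = sizeof(log_buf),
		.log_level = 1,		/* 1 = basic verifier log, 2 = verbose */
	);
	int fd;

	fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, "sketch", "GPL",
			   insns, sizeof(insns) / sizeof(insns[0]), &opts);
	if (fd < 0)
		fprintf(stderr, "verifier says:\n%s\n", log_buf);
	return fd;
}

The selftests above flip log_level between 0, 1 and 2 so that each of those paths gets covered.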
  kprobe_multi_test.c
      155  if (opts) {  in test_attach_api()
      212  opts.syms = syms;  in test_attach_api_syms()
      254  opts.syms = syms;  in test_attach_api_fails()
      269  opts.syms = NULL;  in test_attach_api_fails()
      283  opts.addrs = NULL;  in test_attach_api_fails()
      284  opts.syms = NULL;  in test_attach_api_fails()
      299  opts.syms = NULL;  in test_attach_api_fails()
      300  opts.cnt = 0;  in test_attach_api_fails()
      314  opts.syms = NULL;  in test_attach_api_fails()
      681  opts.cnt = cnt;  in test_kprobe_multi_bench_attach()
      [all …]
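kprobe_multi_test.c (and kprobe_multi_testmod_test.c below) drive bpf_kprobe_multi_opts: the caller fills either syms or addrs plus cnt, optionally sets retprobe, and attaches with bpf_program__attach_kprobe_multi_opts(). A hedged sketch, assuming libbpf >= 1.0 and a loaded kprobe.multi program; the two symbol names are only examples:

#include <bpf/libbpf.h>

static struct bpf_link *attach_two_kprobes(struct bpf_program *prog)
{
	const char *syms[] = { "schedule_timeout", "do_nanosleep" };
	LIBBPF_OPTS(bpf_kprobe_multi_opts, opts,
		.syms = syms,
		.cnt = sizeof(syms) / sizeof(syms[0]),
		.retprobe = false,	/* true attaches as kretprobe.multi */
	);

	/* A NULL pattern means "use the syms/addrs arrays carried in opts". */
	return bpf_program__attach_kprobe_multi_opts(prog, NULL, &opts);
}

The test_attach_api_fails lines above deliberately pass inconsistent combinations (syms and addrs both NULL, cnt == 0, and so on) and expect the attach to be rejected.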
  map_kptr.c
       12  LIBBPF_OPTS(bpf_test_run_opts, opts,  in test_map_kptr_success()
       27  ASSERT_OK(opts.retval, "test_map_kptr_ref1 retval");  in test_map_kptr_success()
       30  ASSERT_OK(opts.retval, "test_map_kptr_ref2 retval");  in test_map_kptr_success()
       57  ASSERT_OK(opts.retval, "test_map_kptr_ref3 retval");  in test_map_kptr_success()
       65  ASSERT_OK(opts.retval, "test_map_kptr_ref3 retval");  in test_map_kptr_success()
       72  ASSERT_OK(opts.retval, "test_map_kptr_ref3 retval");  in test_map_kptr_success()
       79  ASSERT_OK(opts.retval, "test_map_kptr_ref3 retval");  in test_map_kptr_success()
       86  ASSERT_OK(opts.retval, "test_map_kptr_ref3 retval");  in test_map_kptr_success()
      122  LIBBPF_OPTS(bpf_test_run_opts, opts);  in kern_sync_rcu_tasks_trace()
      125  &opts), "do_call_rcu_tasks_trace"))  in kern_sync_rcu_tasks_trace()
      [all …]
  kprobe_multi_testmod_test.c
       32  NULL, opts);  in test_testmod_attach_api()
       36  opts->retprobe = true;  in test_testmod_attach_api()
       39  NULL, opts);  in test_testmod_attach_api()
       52  LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);  in test_testmod_attach_api_addrs()
       62  opts.addrs = (const unsigned long *) addrs;  in test_testmod_attach_api_addrs()
       63  opts.cnt = ARRAY_SIZE(addrs);  in test_testmod_attach_api_addrs()
       65  test_testmod_attach_api(&opts);  in test_testmod_attach_api_addrs()
       70  LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);  in test_testmod_attach_api_syms()
       77  opts.syms = syms;  in test_testmod_attach_api_syms()
       78  opts.cnt = ARRAY_SIZE(syms);  in test_testmod_attach_api_syms()
      [all …]
  bpf_cookie.c
       29  opts.bpf_cookie = 0x1;  in kprobe_subtest()
       30  opts.retprobe = false;  in kprobe_subtest()
       36  opts.bpf_cookie = 0x2;  in kprobe_subtest()
       37  opts.retprobe = false;  in kprobe_subtest()
       44  opts.bpf_cookie = 0x10;  in kprobe_subtest()
       45  opts.retprobe = true;  in kprobe_subtest()
       51  opts.bpf_cookie = 0x20;  in kprobe_subtest()
       52  opts.retprobe = true;  in kprobe_subtest()
      211  opts.syms = syms;  in kprobe_multi_attach_api_subtest()
      229  opts.retprobe = true;  in kprobe_multi_attach_api_subtest()
      [all …]
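The kprobe_subtest lines above pair each attachment with a distinct cookie via bpf_kprobe_opts, so one program can tell its attach points apart at run time (the BPF side reads the value back with bpf_get_attach_cookie()). A hedged sketch; "func" stands in for whatever kernel symbol the test traces:

#include <bpf/libbpf.h>

/* Attach the same kprobe program twice with different cookies. */
static int attach_with_cookies(struct bpf_program *prog, const char *func,
			       struct bpf_link **l1, struct bpf_link **l2)
{
	LIBBPF_OPTS(bpf_kprobe_opts, opts, .bpf_cookie = 0x1, .retprobe = false);

	*l1 = bpf_program__attach_kprobe_opts(prog, func, &opts);
	if (!*l1)
		return -1;

	opts.bpf_cookie = 0x2;	/* the program sees this via bpf_get_attach_cookie() */
	*l2 = bpf_program__attach_kprobe_opts(prog, func, &opts);
	if (!*l2) {
		bpf_link__destroy(*l1);
		return -1;
	}
	return 0;
}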
  rbtree.c
       15  LIBBPF_OPTS(bpf_test_run_opts, opts,  in test_rbtree_add_nodes()
       29  ASSERT_OK(opts.retval, "rbtree_add_nodes retval");  in test_rbtree_add_nodes()
       37  LIBBPF_OPTS(bpf_test_run_opts, opts,  in test_rbtree_add_nodes_nested()
       51  ASSERT_OK(opts.retval, "rbtree_add_nodes_nested retval");  in test_rbtree_add_nodes_nested()
       59  LIBBPF_OPTS(bpf_test_run_opts, opts,  in test_rbtree_add_and_remove()
       73  ASSERT_OK(opts.retval, "rbtree_add_and_remove retval");  in test_rbtree_add_and_remove()
       81  LIBBPF_OPTS(bpf_test_run_opts, opts,  in test_rbtree_add_and_remove_array()
       95  ASSERT_OK(opts.retval, "rbtree_add_and_remove_array retval");  in test_rbtree_add_and_remove_array()
      102  LIBBPF_OPTS(bpf_test_run_opts, opts,  in test_rbtree_first_and_remove()
      116  ASSERT_OK(opts.retval, "rbtree_first_and_remove retval");  in test_rbtree_first_and_remove()
      [all …]
  pe_preserve_elems.c
       13  DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts);  in test_one_map()
       30  err = bpf_prog_test_run_opts(bpf_program__fd(prog), &opts);  in test_one_map()
       33  if (CHECK(opts.retval != 0, "bpf_perf_event_read_value",  in test_one_map()
       34  "failed with %d\n", opts.retval))  in test_one_map()
       40  err = bpf_prog_test_run_opts(bpf_program__fd(prog), &opts);  in test_one_map()
       45  CHECK(opts.retval != 0, "bpf_perf_event_read_value",  in test_one_map()
       46  "failed with %d\n", opts.retval);  in test_one_map()
       48  CHECK(opts.retval != -ENOENT, "bpf_perf_event_read_value",  in test_one_map()
       50  opts.retval);  in test_one_map()
  uprobe_multi_test.c
      277  opts->retprobe = false;  in __test_attach_api()
      283  opts->retprobe = true;  in __test_attach_api()
      289  opts->retprobe = false;  in __test_attach_api()
      295  opts->retprobe = true;  in __test_attach_api()
      301  opts->retprobe = false;  in __test_attach_api()
      367  opts.syms = syms;  in test_attach_api_syms()
      397  LIBBPF_OPTS_RESET(opts,  in test_attach_api_fails()
      421  LIBBPF_OPTS_RESET(opts,  in test_attach_api_fails()
      433  LIBBPF_OPTS_RESET(opts,  in test_attach_api_fails()
      572  opts.syms = syms;  in attach_uprobe_fail_trap()
      [all …]
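uprobe_multi_test.c is the user-space counterpart, built on bpf_uprobe_multi_opts and bpf_program__attach_uprobe_multi() (libbpf >= 1.3); the LIBBPF_OPTS_RESET() calls above simply re-initialise the same opts variable between negative-test attempts. A hedged sketch; the binary path and symbol names are placeholders:

#include <bpf/libbpf.h>

static struct bpf_link *attach_uprobes(struct bpf_program *prog)
{
	const char *syms[] = { "uprobe_multi_func_1", "uprobe_multi_func_2" };
	LIBBPF_OPTS(bpf_uprobe_multi_opts, opts,
		.syms = syms,
		.cnt = sizeof(syms) / sizeof(syms[0]),
		.retprobe = false,
	);

	/* pid -1 = all processes; a NULL pattern means "use opts.syms". */
	return bpf_program__attach_uprobe_multi(prog, -1, "./uprobe_multi", NULL, &opts);
}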
/tools/objtool/
  builtin-check.c
       23  struct opts opts;  (variable)
      141  if (opts.mnop && !opts.mcount) {  in opts_valid()
      146  if (opts.noinstr && !opts.link) {  in opts_valid()
      151  if (opts.ibt && !opts.link) {  in opts_valid()
      156  if (opts.unret && !opts.link) {  in opts_valid()
      163  opts.ibt ||  in opts_valid()
      166  opts.orc ||  in opts_valid()
      169  opts.sls ||  in opts_valid()
      181  if (opts.dump_orc)  in opts_valid()
      251  if (opts.output || opts.dryrun)  in print_args()
      [all …]
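builtin-check.c keeps a single global struct opts for objtool's command-line switches and rejects meaningless combinations up front in opts_valid(). A standalone illustration of that dependent-flag pattern only; the struct is reduced and the messages are invented, so this is not objtool's actual code:

#include <stdbool.h>
#include <stdio.h>

struct opts {
	bool mcount, mnop, link, noinstr;
};

static struct opts opts;

static bool opts_valid(void)
{
	if (opts.mnop && !opts.mcount) {
		fprintf(stderr, "--mnop requires --mcount\n");
		return false;
	}
	if (opts.noinstr && !opts.link) {
		fprintf(stderr, "--noinstr requires --link\n");
		return false;
	}
	return true;
}

int main(void)
{
	opts.mnop = true;	/* --mnop without --mcount: rejected */
	return opts_valid() ? 0 : 1;
}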
/tools/testing/selftests/bpf/progs/
  test_bpf_nf_fail.c
       31  struct bpf_ct_opts___local opts = {};  in alloc_release()  (local)
       35  ct = bpf_skb_ct_alloc(ctx, &tup, sizeof(tup.ipv4), &opts, sizeof(opts));  in alloc_release()
       45  struct bpf_ct_opts___local opts = {};  in insert_insert()  (local)
       49  ct = bpf_skb_ct_alloc(ctx, &tup, sizeof(tup.ipv4), &opts, sizeof(opts));  in insert_insert()
       62  struct bpf_ct_opts___local opts = {};  in lookup_insert()  (local)
       66  ct = bpf_skb_ct_lookup(ctx, &tup, sizeof(tup.ipv4), &opts, sizeof(opts));  in lookup_insert()
       80  ct = bpf_skb_ct_lookup(ctx, &tup, sizeof(tup.ipv4), &opts, sizeof(opts));  in write_not_allowlisted_field()
       94  ct = bpf_skb_ct_alloc(ctx, &tup, sizeof(tup.ipv4), &opts, sizeof(opts));  in set_timeout_after_insert()
      111  ct = bpf_skb_ct_alloc(ctx, &tup, sizeof(tup.ipv4), &opts, sizeof(opts));  in set_status_after_insert()
      128  ct = bpf_skb_ct_alloc(ctx, &tup, sizeof(tup.ipv4), &opts, sizeof(opts));  in change_timeout_after_alloc()
      [all …]
/tools/lib/bpf/
  bpf.c
      573  count, opts);  in bpf_map_lookup_and_delete_batch()
      821  if (!OPTS_ZEROED(opts, tcx))  in bpf_link_create()
      837  if (!OPTS_ZEROED(opts, netkit))  in bpf_link_create()
      881  if (!OPTS_ZEROED(opts, cgroup))  in bpf_link_create()
      885  if (!OPTS_ZEROED(opts, flags))  in bpf_link_create()
      905  if (!OPTS_ZEROED(opts, sz))  in bpf_link_create()
      947  if (OPTS_GET(opts, old_prog_fd, 0) && OPTS_GET(opts, old_map_fd, 0))  in bpf_link_update()
     1011  opts.query_flags = query_flags;  in bpf_prog_query()
     1012  opts.prog_ids = prog_ids;  in bpf_prog_query()
     1013  opts.prog_cnt = *prog_cnt;  in bpf_prog_query()
      [all …]
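The bpf.c lines above are libbpf's internal validation of caller-supplied opts: OPTS_ZEROED()/OPTS_GET() make sure fields that do not apply to the chosen attach type were left zeroed. A hedged caller-side sketch of one of those paths, assuming prog_fd is a loaded fentry program whose attach target was fixed at load time:

#include <bpf/bpf.h>
#include <linux/bpf.h>

static int create_fentry_link(int prog_fd)
{
	LIBBPF_OPTS(bpf_link_create_opts, opts,
		.tracing.cookie = 0xdeadbeefULL,
	);

	/* target_fd is 0: the traced kernel function's BTF id was already
	 * bound to the program when it was loaded. */
	return bpf_link_create(prog_fd, 0, BPF_TRACE_FENTRY, &opts);
}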
  libbpf_probes.c
      134  opts.log_buf = buf;  in probe_prog_load()
      136  opts.log_level = 1;  in probe_prog_load()
      141  opts.attach_btf_id = 1;  in probe_prog_load()
      147  opts.log_buf = buf;  in probe_prog_load()
      149  opts.log_level = 1;  in probe_prog_load()
      150  opts.attach_btf_id = 1;  in probe_prog_load()
      214  if (opts)  in libbpf_probe_bpf_prog_type()
      383  opts.btf_fd = btf_fd;  in probe_map_create()
      409  if (opts)  in libbpf_probe_bpf_map_type()
      417  const void *opts)  in libbpf_probe_bpf_helper()  (argument)
      [all …]
/tools/perf/util/
  record.c
      103  opts->no_inherit = true;  in evlist__config()
      117  if (opts->full_auxtrace || opts->sample_identifier) {  in evlist__config()
      162  opts->default_interval = opts->user_interval;  in record_opts__config_freq()
      164  opts->freq = opts->user_freq;  in record_opts__config_freq()
      170  opts->freq = 0;  in record_opts__config_freq()
      171  else if (opts->freq) {  in record_opts__config_freq()
      172  opts->default_interval = opts->freq;  in record_opts__config_freq()
      185  if (opts->strict_freq) {  in record_opts__config_freq()
      198  opts->freq = max_rate;  in record_opts__config_freq()
      210  opts->freq = max_rate;  in record_opts__config_freq()
      [all …]
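record_opts__config_freq() above resolves perf record's sampling configuration: an explicit user period or frequency overrides the built-in defaults, and a fixed period switches frequency mode off. A standalone sketch of that precedence with a reduced struct, assuming the UINT_MAX/ULLONG_MAX "not set" sentinels perf uses:

#include <limits.h>
#include <stdio.h>

struct record_opts {				/* reduced: only the fields used here */
	unsigned long long user_interval;	/* ULLONG_MAX = not given on the command line */
	unsigned long long default_interval;
	unsigned int user_freq;			/* UINT_MAX = not given on the command line */
	unsigned int freq;
};

static int config_freq(struct record_opts *opts)
{
	/* an explicit -c/--count or -F/--freq from the user wins over the defaults */
	if (opts->user_interval != ULLONG_MAX)
		opts->default_interval = opts->user_interval;
	if (opts->user_freq != UINT_MAX)
		opts->freq = opts->user_freq;

	/* a fixed sample period disables frequency-based sampling */
	if (opts->default_interval)
		opts->freq = 0;
	else if (!opts->freq)
		return -1;			/* neither a period nor a frequency is set */
	return 0;
}

int main(void)
{
	struct record_opts opts = {
		.user_interval = ULLONG_MAX,	/* no -c given */
		.user_freq = 4000,		/* -F 4000 */
		.freq = 1000,			/* built-in default */
	};

	if (config_freq(&opts))
		return 1;
	printf("freq=%u period=%llu\n", opts.freq, opts.default_interval);
	return 0;
}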
  clockid.c
       70  struct record_opts *opts = (struct record_opts *)opt->value;  in parse_clockid()  (local)
       75  opts->use_clockid = 0;  in parse_clockid()
       84  if (opts->use_clockid)  in parse_clockid()
       87  opts->use_clockid = true;  in parse_clockid()
       90  if (sscanf(str, "%d", &opts->clockid) == 1)  in parse_clockid()
       91  return get_clockid_res(opts->clockid, &opts->clockid_res_ns);  in parse_clockid()
       99  opts->clockid = cm->clockid;  in parse_clockid()
      100  return get_clockid_res(opts->clockid,  in parse_clockid()
      101  &opts->clockid_res_ns);  in parse_clockid()
      105  opts->use_clockid = false;  in parse_clockid()
/tools/testing/vsock/
  vsock_test.c
      135  fd = vsock_stream_connect(opts->peer_cid, opts->peer_port);  in test_stream_client_close_client()
      170  fd = vsock_stream_connect(opts->peer_cid, opts->peer_port);  in test_stream_server_close_client()
      212  fds[i] = vsock_stream_connect(opts->peer_cid, opts->peer_port);  in test_stream_multiconn_client()
      264  fd = vsock_seqpacket_connect(opts->peer_cid, opts->peer_port);  in test_msg_peek_client()
      266  fd = vsock_stream_connect(opts->peer_cid, opts->peer_port);  in test_msg_peek_client()
      360  fd = vsock_seqpacket_connect(opts->peer_cid, opts->peer_port);  in test_seqpacket_msg_bounds_client()
      838  fd = vsock_stream_connect(opts->peer_cid, opts->peer_port);  in test_stream_poll_rcvlowat_client()
      982  fd = vsock_stream_connect(opts->peer_cid, opts->peer_port);  in test_stream_virtio_skb_merge_client()
     1134  fd = vsock_stream_connect(opts->peer_cid, opts->peer_port);  in test_stream_shutwr_client()
     1287  fd = vsock_connect(opts->peer_cid, opts->peer_port, type);  in test_unsent_bytes_client()
      [all …]
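Nearly every client-side vsock test above starts by connecting to the CID and port carried in struct test_opts through the suite's vsock_stream_connect()/vsock_seqpacket_connect() helpers. A standalone sketch of what such a connect boils down to (the suite's real helpers also add timeout handling on top):

#include <sys/socket.h>
#include <linux/vm_sockets.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

/* Roughly what vsock_stream_connect(opts->peer_cid, opts->peer_port) does. */
static int connect_to_peer(unsigned int peer_cid, unsigned int peer_port)
{
	struct sockaddr_vm addr = {
		.svm_family = AF_VSOCK,
		.svm_cid = peer_cid,
		.svm_port = peer_port,
	};
	int fd;

	fd = socket(AF_VSOCK, SOCK_STREAM, 0);
	if (fd < 0) {
		perror("socket");
		exit(EXIT_FAILURE);
	}
	if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		perror("connect");
		close(fd);
		exit(EXIT_FAILURE);
	}
	return fd;
}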
  vsock_uring_test.c
       69  fd = vsock_stream_connect(opts->peer_cid, opts->peer_port);  in vsock_io_uring_client()
      289  struct test_opts opts = {  in main()  (local)
      309  opts.mode = TEST_MODE_CLIENT;  in main()
      311  opts.mode = TEST_MODE_SERVER;  in main()
      318  opts.peer_cid = parse_cid(optarg);  in main()
      321  opts.peer_port = parse_port(optarg);  in main()
      334  if (opts.mode == TEST_MODE_UNSET)  in main()
      336  if (opts.peer_cid == VMADDR_CID_ANY)  in main()
      340  if (opts.mode != TEST_MODE_SERVER)  in main()
      346  opts.mode == TEST_MODE_SERVER);  in main()
      [all …]
  vsock_test_zerocopy.h
        6  void test_stream_msgzcopy_client(const struct test_opts *opts);
        7  void test_stream_msgzcopy_server(const struct test_opts *opts);
        9  void test_seqpacket_msgzcopy_client(const struct test_opts *opts);
       10  void test_seqpacket_msgzcopy_server(const struct test_opts *opts);
       12  void test_stream_msgzcopy_empty_errq_client(const struct test_opts *opts);
       13  void test_stream_msgzcopy_empty_errq_server(const struct test_opts *opts);
  vsock_test_zerocopy.c
      144  static void test_client(const struct test_opts *opts,  in test_client()  (argument)
      155  fd = vsock_seqpacket_connect(opts->peer_cid, opts->peer_port);  in test_client()
      157  fd = vsock_stream_connect(opts->peer_cid, opts->peer_port);  in test_client()
      219  void test_stream_msgzcopy_client(const struct test_opts *opts)  in test_stream_msgzcopy_client()  (argument)
      224  test_client(opts, &test_data_array[i], false);  in test_stream_msgzcopy_client()
      235  test_client(opts, &test_data_array[i], true);  in test_seqpacket_msgzcopy_client()
      239  static void test_server(const struct test_opts *opts,  in test_server()  (argument)
      299  void test_stream_msgzcopy_server(const struct test_opts *opts)  in test_stream_msgzcopy_server()  (argument)
      304  test_server(opts, &test_data_array[i], false);  in test_stream_msgzcopy_server()
      315  test_server(opts, &test_data_array[i], true);  in test_seqpacket_msgzcopy_server()
      [all …]
/tools/perf/arch/x86/util/
  intel-bts.c
      116  if (opts->auxtrace_sample_mode) {  in intel_bts_recording_options()
      134  opts->full_auxtrace = true;  in intel_bts_recording_options()
      138  if (opts->auxtrace_snapshot_mode && !opts->full_auxtrace) {  in intel_bts_recording_options()
      143  if (!opts->full_auxtrace)  in intel_bts_recording_options()
      153  if (!opts->auxtrace_snapshot_size && !opts->auxtrace_mmap_pages) {  in intel_bts_recording_options()
      166  opts->auxtrace_snapshot_size =  in intel_bts_recording_options()
      181  if (!opts->auxtrace_snapshot_size || !opts->auxtrace_mmap_pages) {  in intel_bts_recording_options()
      190  if (opts->full_auxtrace && !opts->auxtrace_mmap_pages) {  in intel_bts_recording_options()
      201  if (opts->auxtrace_mmap_pages) {  in intel_bts_recording_options()
      232  if (opts->full_auxtrace) {  in intel_bts_recording_options()
      [all …]
/tools/testing/selftests/net/netfilter/
  nf_queue.c
       29  static struct options opts;  (variable)
       90  if (opts.verbose > 0)  in queue_cb()
       99  if (opts.verbose > 0) {  in queue_cb()
      113  if (opts.count_packets)  in queue_cb()
      313  if (opts.delay_ms)  in mainloop()
      316  nlh = nfq_build_verdict(buf, id, opts.queue_num, opts.verdict);  in mainloop()
      344  opts.queue_num = 0;  in parse_opts()
      353  opts.verdict <<= 16;  in parse_opts()
      370  opts.verbose++;  in parse_opts()
      375  if (opts.verdict != NF_ACCEPT && (opts.verdict >> 16 == opts.queue_num)) {  in parse_opts()
      [all …]
/tools/perf/arch/arm64/util/
  arm-spe.c
      201  if (!opts->auxtrace_snapshot_size && !opts->auxtrace_mmap_pages) {  in arm_spe_snapshot_resolve_auxtrace_defaults()
      209  } else if (!opts->auxtrace_mmap_pages && !privileged && opts->mmap_pages == UINT_MAX) {  in arm_spe_snapshot_resolve_auxtrace_defaults()
      218  opts->auxtrace_snapshot_size = opts->auxtrace_mmap_pages * (size_t)page_size;  in arm_spe_snapshot_resolve_auxtrace_defaults()
      302  if (!opts->auxtrace_snapshot_size || !opts->auxtrace_mmap_pages)  in arm_spe_setup_aux_buffer()
      308  if (opts->auxtrace_snapshot_size > opts->auxtrace_mmap_pages * (size_t)page_size) {  in arm_spe_setup_aux_buffer()
      318  if (!opts->auxtrace_snapshot_size || !opts->auxtrace_mmap_pages) {  in arm_spe_setup_aux_buffer()
      339  if (opts->auxtrace_mmap_pages) {  in arm_spe_setup_aux_buffer()
      354  struct record_opts *opts)  in arm_spe_setup_tracking_event()  (argument)
      404  opts->full_auxtrace = true;  in arm_spe_recording_options()
      416  if (!opts->full_auxtrace)  in arm_spe_recording_options()
      [all …]
/tools/testing/selftests/net/
  reuseaddr_ports_exhausted.c
       83  struct reuse_opts *opts;  in TEST()  (local)
       87  opts = &unreusable_opts[i];  in TEST()
       90  fd[j] = bind_port(_metadata, opts->reuseaddr[j], opts->reuseport[j]);  in TEST()
      103  struct reuse_opts *opts;  in TEST()  (local)
      107  opts = &reusable_opts[i];  in TEST()
      110  fd[j] = bind_port(_metadata, opts->reuseaddr[j], opts->reuseport[j]);  in TEST()
      114  if (opts->reuseport[0] && opts->reuseport[1]) {  in TEST()
      128  struct reuse_opts *opts;  in TEST()  (local)
      133  opts = &reusable_opts[i];  in TEST()
      139  fd[j] = bind_port(_metadata, opts->reuseaddr[j], opts->reuseport[j]);  in TEST()
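reuseaddr_ports_exhausted.c walks tables of reuse_opts and binds two sockets per combination of SO_REUSEADDR/SO_REUSEPORT to see which pairs may share a port. A standalone sketch of the bind_port()-style helper that pattern relies on; the address family and port number here are illustrative only:

#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <unistd.h>

/* Open a TCP socket, apply the two options, bind to a fixed port.
 * Returns the fd, or -1 on failure (expected when ports are exhausted). */
static int bind_port_sketch(int reuseaddr, int reuseport, unsigned short port)
{
	struct sockaddr_in addr = {
		.sin_family = AF_INET,
		.sin_port = htons(port),
		.sin_addr.s_addr = htonl(INADDR_ANY),
	};
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (fd < 0)
		return -1;
	if (reuseaddr)
		setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &reuseaddr, sizeof(reuseaddr));
	if (reuseport)
		setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &reuseport, sizeof(reuseport));
	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}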