/samples/bpf/
xdp2skb_meta_kern.c
    32  int _xdp_mark(struct xdp_md *ctx)  in _xdp_mark() argument
    41  ret = bpf_xdp_adjust_meta(ctx, -(int)sizeof(*meta));  in _xdp_mark()
    50  data = (void *)(unsigned long)ctx->data;  in _xdp_mark()
    53  meta = (void *)(unsigned long)ctx->data_meta;  in _xdp_mark()
    63  int _tc_mark(struct __sk_buff *ctx)  in _tc_mark() argument
    65  void *data = (void *)(unsigned long)ctx->data;  in _tc_mark()
    66  void *data_meta = (void *)(unsigned long)ctx->data_meta;  in _tc_mark()
    71  ctx->mark = 41;  in _tc_mark()
    77  ctx->mark = meta->mark; /* Transfer XDP-mark to SKB-mark */  in _tc_mark()
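
Taken together, these hits outline the xdp2skb_meta pattern: the XDP program grows the data_meta area in front of the packet, stashes a small struct there, and the TC program later reads it back into skb->mark. A minimal sketch of that flow, assuming a one-field metadata struct and generic "xdp"/"tc" section names rather than the sample's exact sections:

#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>

struct meta_info {
    __u32 mark;
};

SEC("xdp")
int _xdp_mark(struct xdp_md *ctx)
{
    struct meta_info *meta;
    void *data;

    /* Reserve sizeof(*meta) bytes of metadata in front of the packet. */
    if (bpf_xdp_adjust_meta(ctx, -(int)sizeof(*meta)))
        return XDP_ABORTED;

    /* Pointers must be re-read after the adjust call. */
    data = (void *)(unsigned long)ctx->data;
    meta = (void *)(unsigned long)ctx->data_meta;
    if ((void *)(meta + 1) > data)   /* verifier-mandated bounds check */
        return XDP_ABORTED;

    meta->mark = 42;
    return XDP_PASS;
}

SEC("tc")
int _tc_mark(struct __sk_buff *ctx)
{
    void *data = (void *)(unsigned long)ctx->data;
    struct meta_info *meta = (void *)(unsigned long)ctx->data_meta;

    if ((void *)(meta + 1) > data)
        ctx->mark = 41;              /* no metadata: fall back to a default mark */
    else
        ctx->mark = meta->mark;      /* transfer the XDP mark to the SKB mark */
    return TC_ACT_OK;
}

char _license[] SEC("license") = "GPL";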
|
tracex5.bpf.c
    30  int bpf_prog1(struct pt_regs *ctx)  in bpf_prog1() argument
    32  int sc_nr = (int)PT_REGS_PARM1(ctx);  in bpf_prog1()
    35  bpf_tail_call(ctx, &progs, sc_nr);  in bpf_prog1()
    46  PROG(SYS__NR_write)(struct pt_regs *ctx)  in PROG()
    50  bpf_core_read(&sd, sizeof(sd), (void *)PT_REGS_PARM2(ctx));  in PROG()
    59  PROG(SYS__NR_read)(struct pt_regs *ctx)  in PROG()
    63  bpf_core_read(&sd, sizeof(sd), (void *)PT_REGS_PARM2(ctx));  in PROG()
    73  PROG(SYS__NR_mmap2)(struct pt_regs *ctx)  in PROG()
    83  PROG(SYS__NR_mmap)(struct pt_regs *ctx)  in PROG()
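
The tracex5 hits show the classic tail-call dispatcher: a kprobe program reads the syscall number from the first argument and bpf_tail_call()s into a BPF_MAP_TYPE_PROG_ARRAY indexed by it. A reduced sketch of that dispatch, assuming a vmlinux.h-based build with the target arch defined; the sample's PROG() convenience macro, its attach point, and the map sizing are paraphrased here:

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h>

struct {
    __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
    __uint(max_entries, 1024);
    __uint(key_size, sizeof(u32));
    __uint(value_size, sizeof(u32));
} progs SEC(".maps");

SEC("kprobe/__seccomp_filter")
int bpf_prog1(struct pt_regs *ctx)
{
    int sc_nr = (int)PT_REGS_PARM1(ctx);

    /* Jump to the program stored at index sc_nr; falls through if empty. */
    bpf_tail_call(ctx, &progs, sc_nr);

    /* Only reached when no per-syscall program is installed. */
    return 0;
}

/* One per-syscall program; the sample generates these with a PROG() macro
 * keyed by SYS__NR_*, and the loader places them into the progs map. */
SEC("kprobe/SYS__NR_write")
int trace_sys_write(struct pt_regs *ctx)
{
    struct seccomp_data sd;

    bpf_core_read(&sd, sizeof(sd), (void *)PT_REGS_PARM2(ctx));
    return 0;
}

char _license[] SEC("license") = "GPL";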
|
trace_event_kern.c
    38  int bpf_prog1(struct bpf_perf_event_data *ctx)  in bpf_prog1() argument
    50  if (ctx->sample_period < 10000)  in bpf_prog1()
    54  key.kernstack = bpf_get_stackid(ctx, &stackmap, KERN_STACKID_FLAGS);  in bpf_prog1()
    55  key.userstack = bpf_get_stackid(ctx, &stackmap, USER_STACKID_FLAGS);  in bpf_prog1()
    57  bpf_trace_printk(fmt, sizeof(fmt), cpu, ctx->sample_period,  in bpf_prog1()
    58  PT_REGS_IP(&ctx->regs));  in bpf_prog1()
    62  ret = bpf_perf_prog_read_value(ctx, (void *)&value_buf, sizeof(struct bpf_perf_event_value));  in bpf_prog1()
    68  if (ctx->addr != 0)  in bpf_prog1()
    69  bpf_trace_printk(addr_fmt, sizeof(addr_fmt), ctx->addr);  in bpf_prog1()
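
trace_event_kern.c is a perf_event-attached program: it receives a struct bpf_perf_event_data, skips samples taken while the period is still very small, and records kernel plus user stack ids from the overflow context. A trimmed sketch along those lines; the map sizing and the stack-id flag values are simplified relative to the sample:

#include <linux/bpf.h>
#include <linux/bpf_perf_event.h>
#include <linux/perf_event.h>
#include <bpf/bpf_helpers.h>

struct {
    __uint(type, BPF_MAP_TYPE_STACK_TRACE);
    __uint(max_entries, 10000);
    __uint(key_size, sizeof(__u32));
    __uint(value_size, PERF_MAX_STACK_DEPTH * sizeof(__u64));
} stackmap SEC(".maps");

SEC("perf_event")
int bpf_prog1(struct bpf_perf_event_data *ctx)
{
    int kernstack, userstack;

    if (ctx->sample_period < 10000)      /* ignore warm-up samples */
        return 0;

    kernstack = bpf_get_stackid(ctx, &stackmap, 0);
    userstack = bpf_get_stackid(ctx, &stackmap, BPF_F_USER_STACK);

    bpf_printk("kern=%d user=%d period=%llu",
               kernstack, userstack, ctx->sample_period);
    return 0;
}

char _license[] SEC("license") = "GPL";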
|
tracex4.bpf.c
    28  int bpf_prog1(struct pt_regs *ctx)  in bpf_prog1() argument
    30  long ptr = PT_REGS_PARM2(ctx);  in bpf_prog1()
    37  int bpf_prog2(struct pt_regs *ctx)  in bpf_prog2() argument
    39  long ptr = PT_REGS_RC(ctx);  in bpf_prog2()
    43  BPF_KRETPROBE_READ_RET_IP(ip, ctx);  in bpf_prog2()
|
syscall_tp_kern.c
    60  int trace_enter_open(struct syscalls_enter_open_args *ctx)  in trace_enter_open() argument
    68  int trace_enter_open_at(struct syscalls_enter_open_at_args *ctx)  in trace_enter_open_at() argument
    75  int trace_enter_open_at2(struct syscalls_enter_open_at_args *ctx)  in trace_enter_open_at2() argument
    83  int trace_enter_exit(struct syscalls_exit_open_args *ctx)  in trace_enter_exit() argument
    91  int trace_enter_exit_at(struct syscalls_exit_open_args *ctx)  in trace_enter_exit_at() argument
    98  int trace_enter_exit_at2(struct syscalls_exit_open_args *ctx)  in trace_enter_exit_at2() argument
|
offwaketime.bpf.c
    63  int waker(struct pt_regs *ctx)  in waker() argument
    65  struct task_struct *p = (void *)PT_REGS_PARM1_CORE(ctx);  in waker()
    70  woke.ret = bpf_get_stackid(ctx, &stackmap, STACKID_FLAGS);  in waker()
    76  static inline int update_counts(void *ctx, u32 pid, u64 delta)  in update_counts() argument
    84  key.tret = bpf_get_stackid(ctx, &stackmap, STACKID_FLAGS);  in update_counts()
   108  int oncpu(struct trace_event_raw_sched_switch *ctx)  argument
   111  u32 pid = ctx->prev_pid;
   114  int oncpu(struct pt_regs *ctx)
   116  struct task_struct *p = (void *)PT_REGS_PARM1_CORE(ctx);
   138  return update_counts(ctx, pid, delta);
|
xdp_fwd_kern.c
    43  static __always_inline int xdp_fwd_flags(struct xdp_md *ctx, u32 flags)  in xdp_fwd_flags() argument
    45  void *data_end = (void *)(long)ctx->data_end;  in xdp_fwd_flags()
    46  void *data = (void *)(long)ctx->data;  in xdp_fwd_flags()
   102  fib_params.ifindex = ctx->ingress_ifindex;  in xdp_fwd_flags()
   104  rc = bpf_fib_lookup(ctx, &fib_params, sizeof(fib_params), flags);  in xdp_fwd_flags()
   147  int xdp_fwd_prog(struct xdp_md *ctx)  in xdp_fwd_prog() argument
   149  return xdp_fwd_flags(ctx, 0);  in xdp_fwd_prog()
   153  int xdp_fwd_direct_prog(struct xdp_md *ctx)  in xdp_fwd_direct_prog() argument
   155  return xdp_fwd_flags(ctx, BPF_FIB_LOOKUP_DIRECT);  in xdp_fwd_direct_prog()
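
xdp_fwd consults the kernel FIB directly from XDP: it fills a struct bpf_fib_lookup from the received headers, passes ctx->ingress_ifindex, and forwards when the lookup succeeds. A condensed sketch of the IPv4 path; header parsing is minimal here, and the MAC rewrite plus bpf_redirect_map() step of the real program is only noted in a comment:

#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>

#ifndef AF_INET
#define AF_INET 2    /* value from <linux/socket.h>, avoids pulling in socket headers */
#endif

static __always_inline int xdp_fwd_flags(struct xdp_md *ctx, __u32 flags)
{
    void *data_end = (void *)(long)ctx->data_end;
    void *data = (void *)(long)ctx->data;
    struct bpf_fib_lookup fib_params = {};
    struct ethhdr *eth = data;
    struct iphdr *iph;
    int rc;

    if (data + sizeof(*eth) + sizeof(*iph) > data_end)
        return XDP_PASS;
    if (eth->h_proto != bpf_htons(ETH_P_IP))
        return XDP_PASS;

    iph = data + sizeof(*eth);
    fib_params.family      = AF_INET;
    fib_params.tos         = iph->tos;
    fib_params.l4_protocol = iph->protocol;
    fib_params.tot_len     = bpf_ntohs(iph->tot_len);
    fib_params.ipv4_src    = iph->saddr;
    fib_params.ipv4_dst    = iph->daddr;
    fib_params.ifindex     = ctx->ingress_ifindex;

    rc = bpf_fib_lookup(ctx, &fib_params, sizeof(fib_params), flags);
    if (rc != BPF_FIB_LKUP_RET_SUCCESS)
        return XDP_PASS;

    /* On success fib_params.ifindex holds the egress device; the real
     * sample rewrites the MAC addresses and bpf_redirect_map()s there. */
    return XDP_PASS;
}

SEC("xdp")
int xdp_fwd_prog(struct xdp_md *ctx)
{
    return xdp_fwd_flags(ctx, 0);
}

char _license[] SEC("license") = "GPL";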
|
tracex3.bpf.c
    27  int bpf_prog1(struct trace_event_raw_block_rq *ctx)  in bpf_prog1() argument
    31  .dev = ctx->dev,  in bpf_prog1()
    32  .sector = ctx->sector  in bpf_prog1()
    59  int bpf_prog2(struct trace_event_raw_block_rq *ctx)  in bpf_prog2() argument
    62  .dev = ctx->dev,  in bpf_prog2()
    63  .sector = ctx->sector  in bpf_prog2()
|
tcp_dumpstats_kern.c
    26  int _sockops(struct bpf_sock_ops *ctx)  in _sockops() argument
    33  switch (ctx->op) {  in _sockops()
    35  bpf_sock_ops_cb_flags_set(ctx, BPF_SOCK_OPS_RTT_CB_FLAG);  in _sockops()
    43  sk = ctx->sk;  in _sockops()
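
tcp_dumpstats_kern.c is a sockops program: on connection establishment it enables the RTT callback with bpf_sock_ops_cb_flags_set(), and on each BPF_SOCK_OPS_RTT_CB invocation it can take the socket pointer from ctx->sk (the real sample then rate-limits and dumps TCP stats via a socket-storage map, elided here). A small sketch of that op dispatch:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("sockops")
int _sockops(struct bpf_sock_ops *ctx)
{
    struct bpf_sock *sk;

    switch (ctx->op) {
    case BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB:
    case BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB:
        /* Ask for BPF_SOCK_OPS_RTT_CB invocations on this socket. */
        bpf_sock_ops_cb_flags_set(ctx, BPF_SOCK_OPS_RTT_CB_FLAG);
        break;
    case BPF_SOCK_OPS_RTT_CB:
        sk = ctx->sk;
        if (!sk)
            break;
        /* The real sample looks up per-socket state here and periodically
         * prints statistics for this connection. */
        break;
    }
    return 1;
}

char _license[] SEC("license") = "GPL";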
|
ibumad_kern.c
    78  int on_ib_umad_read_recv(struct ib_umad_rw_args *ctx)  in on_ib_umad_read_recv() argument
    81  u8 class = ctx->mgmt_class;  in on_ib_umad_read_recv()
    98  int on_ib_umad_read_send(struct ib_umad_rw_args *ctx)  in on_ib_umad_read_send() argument
   101  u8 class = ctx->mgmt_class;  in on_ib_umad_read_send()
   118  int on_ib_umad_write(struct ib_umad_rw_args *ctx)  in on_ib_umad_write() argument
   121  u8 class = ctx->mgmt_class;  in on_ib_umad_write()
|
task_fd_query_kern.c
     8  int bpf_prog1(struct pt_regs *ctx)  in bpf_prog1() argument
    14  int bpf_prog2(struct pt_regs *ctx)  in bpf_prog2() argument
|
cpustat_kern.c
   104  int bpf_prog1(struct cpu_args *ctx)  in bpf_prog1() argument
   110  if (ctx->cpu_id > MAX_CPU)  in bpf_prog1()
   113  cpu = ctx->cpu_id;  in bpf_prog1()
   136  *cstate = ctx->state;  in bpf_prog1()
   163  if (ctx->state != (u32)-1) {  in bpf_prog1()
   212  int bpf_prog2(struct cpu_args *ctx)  in bpf_prog2() argument
   218  cpu = ctx->cpu_id;  in bpf_prog2()
   235  *pstate = ctx->state;  in bpf_prog2()
|
spintest.bpf.c
    37  int foo(struct pt_regs *ctx) \
    39  long v = PT_REGS_IP(ctx), *val; \
    45  bpf_get_stackid(ctx, &stackmap, BPF_F_REUSE_STACKID); \
|
trace_output.bpf.c
    13  int bpf_prog1(struct pt_regs *ctx)  in bpf_prog1() argument
    23  bpf_perf_event_output(ctx, &my_map, 0, &data, sizeof(data));  in bpf_prog1()
|
sampleip_kern.c
    23  int do_sample(struct bpf_perf_event_data *ctx)  in do_sample() argument
    28  ip = PT_REGS_IP(&ctx->regs);  in do_sample()
|
tracex1.bpf.c
    20  int bpf_prog1(struct pt_regs *ctx)  in bpf_prog1() argument
    31  bpf_core_read(&skb, sizeof(skb), (void *)PT_REGS_PARM1(ctx));  in bpf_prog1()
|
sockex2_kern.c
    45  static inline int ip_is_fragment(struct __sk_buff *ctx, __u64 nhoff)  in ip_is_fragment() argument
    47  return load_half(ctx, nhoff + offsetof(struct iphdr, frag_off))  in ip_is_fragment()
    51  static inline __u32 ipv6_addr_hash(struct __sk_buff *ctx, __u64 off)  in ipv6_addr_hash() argument
    53  __u64 w0 = load_word(ctx, off);  in ipv6_addr_hash()
    54  __u64 w1 = load_word(ctx, off + 4);  in ipv6_addr_hash()
    55  __u64 w2 = load_word(ctx, off + 8);  in ipv6_addr_hash()
    56  __u64 w3 = load_word(ctx, off + 12);  in ipv6_addr_hash()
|
parse_ldabs.c
    21  static inline int ip_is_fragment(struct __sk_buff *ctx, __u64 nhoff)  in ip_is_fragment() argument
    23  return load_half(ctx, nhoff + offsetof(struct iphdr, frag_off))  in ip_is_fragment()
|
sockex3_kern.c
    42  static inline int ip_is_fragment(struct __sk_buff *ctx, __u64 nhoff)  in ip_is_fragment() argument
    44  return load_half(ctx, nhoff + offsetof(struct iphdr, frag_off))  in ip_is_fragment()
    48  static inline __u32 ipv6_addr_hash(struct __sk_buff *ctx, __u64 off)  in ipv6_addr_hash() argument
    50  __u64 w0 = load_word(ctx, off);  in ipv6_addr_hash()
    51  __u64 w1 = load_word(ctx, off + 4);  in ipv6_addr_hash()
    52  __u64 w2 = load_word(ctx, off + 8);  in ipv6_addr_hash()
    53  __u64 w3 = load_word(ctx, off + 12);  in ipv6_addr_hash()
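
sockex2_kern.c, sockex3_kern.c and parse_ldabs.c above all share the legacy LD_ABS-style packet accessors: load_half()/load_word() read from the skb at an absolute byte offset instead of going through the data/data_end pointers. Reconstructed from the hits; the fragment mask and the final XOR fold are filled in from the upstream sources, and the helpers themselves come from samples/bpf/bpf_legacy.h:

#include <stddef.h>
#include <linux/bpf.h>
#include <linux/ip.h>

#ifndef IP_MF
#define IP_MF     0x2000    /* "more fragments" flag */
#define IP_OFFSET 0x1fff    /* fragment-offset mask */
#endif

/* load_half()/load_word() are declared in samples/bpf/bpf_legacy.h and
 * compile down to the legacy BPF_LD_ABS packet loads. */
static inline int ip_is_fragment(struct __sk_buff *ctx, __u64 nhoff)
{
    return load_half(ctx, nhoff + offsetof(struct iphdr, frag_off))
        & (IP_MF | IP_OFFSET);
}

static inline __u32 ipv6_addr_hash(struct __sk_buff *ctx, __u64 off)
{
    /* XOR-fold the 16-byte IPv6 address into one 32-bit flow-hash input. */
    __u64 w0 = load_word(ctx, off);
    __u64 w1 = load_word(ctx, off + 4);
    __u64 w2 = load_word(ctx, off + 8);
    __u64 w3 = load_word(ctx, off + 12);

    return (__u32)(w0 ^ w1 ^ w2 ^ w3);
}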
|
/samples/damon/
prcl.c
    34  static struct damon_ctx *ctx;  variable
    68  ctx = damon_new_ctx();  in damon_sample_prcl_start()
    69  if (!ctx)  in damon_sample_prcl_start()
    72  damon_destroy_ctx(ctx);  in damon_sample_prcl_start()
    78  damon_destroy_ctx(ctx);  in damon_sample_prcl_start()
    81  damon_add_target(ctx, target);  in damon_sample_prcl_start()
    84  damon_destroy_ctx(ctx);  in damon_sample_prcl_start()
   103  damon_destroy_ctx(ctx);  in damon_sample_prcl_start()
   119  if (ctx) {  in damon_sample_prcl_stop()
   120  damon_stop(&ctx, 1);  in damon_sample_prcl_stop()
    [all …]
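
The prcl, wsse and mtier entries here and below all follow the same in-kernel DAMON context lifecycle: allocate with damon_new_ctx(), attach a target with damon_add_target(), start monitoring, and tear down with damon_destroy_ctx()/damon_stop() on failure or module stop. A schematic module fragment built from the calls visible in these hits plus damon_new_target() and damon_start(); the exact signatures should be checked against include/linux/damon.h, and the samples' ops selection and attribute setup are elided:

#include <linux/damon.h>
#include <linux/errno.h>

static struct damon_ctx *ctx;

static int damon_sample_start(void)
{
    struct damon_target *target;
    int err;

    ctx = damon_new_ctx();
    if (!ctx)
        return -ENOMEM;

    /* The samples also select the monitoring ops and set attributes here;
     * any failure destroys the context and bails out. */
    target = damon_new_target();
    if (!target) {
        damon_destroy_ctx(ctx);
        return -ENOMEM;
    }
    damon_add_target(ctx, target);

    err = damon_start(&ctx, 1, true);
    if (err)
        damon_destroy_ctx(ctx);
    return err;
}

static void damon_sample_stop(void)
{
    if (ctx) {
        damon_stop(&ctx, 1);
        damon_destroy_ctx(ctx);
    }
}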
|
wsse.c
    35  static struct damon_ctx *ctx;  variable
    68  ctx = damon_new_ctx();  in damon_sample_wsse_start()
    69  if (!ctx)  in damon_sample_wsse_start()
    72  damon_destroy_ctx(ctx);  in damon_sample_wsse_start()
    78  damon_destroy_ctx(ctx);  in damon_sample_wsse_start()
    81  damon_add_target(ctx, target);  in damon_sample_wsse_start()
    84  damon_destroy_ctx(ctx);  in damon_sample_wsse_start()
    92  repeat_call_control.data = ctx;  in damon_sample_wsse_start()
    99  if (ctx) {  in damon_sample_wsse_stop()
   100  damon_stop(&ctx, 1);  in damon_sample_wsse_stop()
    [all …]
|
mtier.c
    75  struct damon_ctx *ctx;  in damon_sample_mtier_build_ctx() local
    85  ctx = damon_new_ctx();  in damon_sample_mtier_build_ctx()
    86  if (!ctx)  in damon_sample_mtier_build_ctx()
   112  damon_add_target(ctx, target);  in damon_sample_mtier_build_ctx()
   166  return ctx;  in damon_sample_mtier_build_ctx()
   168  damon_destroy_ctx(ctx);  in damon_sample_mtier_build_ctx()
   174  struct damon_ctx *ctx;  in damon_sample_mtier_start() local
   177  if (!ctx)  in damon_sample_mtier_start()
   179  ctxs[0] = ctx;  in damon_sample_mtier_start()
   181  if (!ctx) {  in damon_sample_mtier_start()
    [all …]
|
/samples/hid/
hid_surface_dial.bpf.c
    49  struct hid_bpf_ctx *ctx;  in set_haptic() local
    57  ctx = hid_bpf_allocate_context(args->hid);  in set_haptic()
    58  if (!ctx)  in set_haptic()
    63  ret = hid_bpf_hw_request(ctx, haptic_data, size, HID_FEATURE_REPORT, HID_REQ_GET_REPORT);  in set_haptic()
    92  ret = hid_bpf_hw_request(ctx, haptic_data, size, HID_FEATURE_REPORT, HID_REQ_SET_REPORT);  in set_haptic()
    98  hid_bpf_release_context(ctx);  in set_haptic()
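
set_haptic() demonstrates the HID-BPF request sequence declared by hid_bpf_helpers.h in the next entry: allocate a context for a HID device id, perform GET/SET feature-report transfers through it, and release it. A stripped-down sketch of just that sequence; the report id, buffer size and the surrounding program wrapper are placeholders, not the sample's real haptics data:

/* Uses the __ksym kfunc declarations from hid_bpf_helpers.h (next entry). */
static int set_haptic_sketch(unsigned int hid_id)
{
    __u8 haptic_data[8] = { 1 };     /* hypothetical feature-report id */
    struct hid_bpf_ctx *ctx;
    int ret;

    ctx = hid_bpf_allocate_context(hid_id);
    if (!ctx)
        return -1;                   /* device went away */

    /* Read the current feature report... */
    ret = hid_bpf_hw_request(ctx, haptic_data, sizeof(haptic_data),
                             HID_FEATURE_REPORT, HID_REQ_GET_REPORT);
    if (ret < 0)
        goto out;

    /* ...modify it, then write it back. */
    ret = hid_bpf_hw_request(ctx, haptic_data, sizeof(haptic_data),
                             HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
out:
    hid_bpf_release_context(ctx);
    return ret;
}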
|
hid_bpf_helpers.h
     9  extern __u8 *hid_bpf_get_data(struct hid_bpf_ctx *ctx,
    14  extern void hid_bpf_release_context(struct hid_bpf_ctx *ctx) __ksym;
    15  extern int hid_bpf_hw_request(struct hid_bpf_ctx *ctx,
|
/samples/seccomp/
bpf-direct.c
    64  ucontext_t *ctx = (ucontext_t *)(void_context);  in emulator() local
    71  if (!ctx)  in emulator()
    73  syscall = ctx->uc_mcontext.gregs[REG_SYSCALL];  in emulator()
    74  buf = (char *) ctx->uc_mcontext.gregs[REG_ARG1];  in emulator()
    75  len = (size_t) ctx->uc_mcontext.gregs[REG_ARG2];  in emulator()
    79  if (ctx->uc_mcontext.gregs[REG_ARG0] != STDERR_FILENO)  in emulator()
    82  ctx->uc_mcontext.gregs[REG_RESULT] = -1;  in emulator()
    85  ctx->uc_mcontext.gregs[REG_RESULT] = bytes;  in emulator()
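
emulator() in bpf-direct.c is a SIGSYS handler: the classic-BPF seccomp filter returns SECCOMP_RET_TRAP for certain write(2) calls, and the handler emulates the syscall by pulling its arguments out of the trapped thread's ucontext registers and writing the result register back. A compressed sketch for x86-64 glibc; the REG_* index macros mirror the per-arch definitions in the sample, and the substituted output behavior is illustrative rather than the sample's exact logic:

#define _GNU_SOURCE
#include <signal.h>
#include <stddef.h>
#include <sys/syscall.h>
#include <ucontext.h>
#include <unistd.h>

#ifndef SYS_SECCOMP
#define SYS_SECCOMP 1            /* si_code for seccomp-raised SIGSYS */
#endif

/* gregs[] indexes for x86-64; the sample defines these per architecture. */
#define REG_SYSCALL REG_RAX
#define REG_ARG0    REG_RDI
#define REG_ARG1    REG_RSI
#define REG_ARG2    REG_RDX
#define REG_RESULT  REG_RAX

static void emulator(int nr, siginfo_t *info, void *void_context)
{
    ucontext_t *ctx = (ucontext_t *)(void_context);
    long syscall_nr;
    char *buf;
    size_t len;
    ssize_t bytes;

    if (info->si_code != SYS_SECCOMP || !ctx)
        return;

    syscall_nr = ctx->uc_mcontext.gregs[REG_SYSCALL];
    buf = (char *)ctx->uc_mcontext.gregs[REG_ARG1];
    len = (size_t)ctx->uc_mcontext.gregs[REG_ARG2];

    if (syscall_nr != __NR_write)
        return;
    if (ctx->uc_mcontext.gregs[REG_ARG0] != STDERR_FILENO) {
        ctx->uc_mcontext.gregs[REG_RESULT] = -1;   /* refuse other fds */
        return;
    }

    /* Redirect the trapped stderr write to stdout and report its result
     * back to the interrupted thread via the return-value register. */
    bytes = write(STDOUT_FILENO, buf, len);
    ctx->uc_mcontext.gregs[REG_RESULT] = bytes;
}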
|