Lines matching refs: flags
228 struct perf_callchain_entry *trace, u64 flags) in __bpf_get_stackid() argument
232 u32 skip = flags & BPF_F_SKIP_FIELD_MASK; in __bpf_get_stackid()
234 bool user = flags & BPF_F_USER_STACK; in __bpf_get_stackid()
251 if (hash_matches && flags & BPF_F_FAST_STACK_CMP) in __bpf_get_stackid()
273 if (bucket && !(flags & BPF_F_REUSE_STACKID)) { in __bpf_get_stackid()
281 if (bucket && !(flags & BPF_F_REUSE_STACKID)) in __bpf_get_stackid()
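The matches at lines 228-281 come from __bpf_get_stackid() in the BPF stack map code (kernel/bpf/stackmap.c), which unpacks the flags word bit by bit. For reference, the flag bits it tests, with values reproduced from include/uapi/linux/bpf.h for illustration:

/* Flag bits consumed by the stack helpers; values as defined in
 * include/uapi/linux/bpf.h (reproduced here for illustration). */
#define BPF_F_SKIP_FIELD_MASK	0xffULL		/* low 8 bits: number of frames to skip */
#define BPF_F_USER_STACK	(1ULL << 8)	/* collect the user-space stack */
#define BPF_F_FAST_STACK_CMP	(1ULL << 9)	/* stackid only: compare stacks by hash */
#define BPF_F_REUSE_STACKID	(1ULL << 10)	/* stackid only: overwrite a colliding bucket */
#define BPF_F_USER_BUILD_ID	(1ULL << 11)	/* get_stack only: emit build id + offset */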
301 u64, flags) in BPF_CALL_3() argument
304 u32 skip = flags & BPF_F_SKIP_FIELD_MASK; in BPF_CALL_3()
305 bool user = flags & BPF_F_USER_STACK; in BPF_CALL_3()
309 if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK | in BPF_CALL_3()
324 return __bpf_get_stackid(map, trace, flags); in BPF_CALL_3()
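Lines 301-324 are the bpf_get_stackid() helper itself: line 309 rejects any flag bits outside the allowed mask before delegating to __bpf_get_stackid(). Below is a minimal sketch of a BPF program passing these flags; it uses libbpf-style BPF C, and the map sizing and kprobe attach point are illustrative, not taken from the listing.

#include <linux/bpf.h>
#include <linux/perf_event.h>		/* PERF_MAX_STACK_DEPTH */
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_STACK_TRACE);
	__uint(max_entries, 1024);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, PERF_MAX_STACK_DEPTH * sizeof(__u64));
} stack_traces SEC(".maps");

SEC("kprobe/try_to_wake_up")
int collect_stackid(void *ctx)
{
	/* skip 2 frames, take the user stack, match existing entries by hash */
	long id = bpf_get_stackid(ctx, &stack_traces,
				  2 | BPF_F_USER_STACK | BPF_F_FAST_STACK_CMP);

	if (id < 0)		/* e.g. -EEXIST on a hash collision, -EFAULT */
		return 0;
	/* 'id' is the key under which the stack was stored in stack_traces */
	return 0;
}

char LICENSE[] SEC("license") = "GPL";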
349 struct bpf_map *, map, u64, flags) in BPF_CALL_3() argument
360 (unsigned long) map, flags, 0, 0); in BPF_CALL_3()
362 if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK | in BPF_CALL_3()
366 user = flags & BPF_F_USER_STACK; in BPF_CALL_3()
379 ret = __bpf_get_stackid(map, trace, flags); in BPF_CALL_3()
384 u64 skip = flags & BPF_F_SKIP_FIELD_MASK; in BPF_CALL_3()
390 flags = (flags & ~BPF_F_SKIP_FIELD_MASK) | skip; in BPF_CALL_3()
391 ret = __bpf_get_stackid(map, trace, flags); in BPF_CALL_3()
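Lines 349-391 are the perf-event flavour, bpf_get_stackid_pe(): when the user half of a mixed callchain is requested, lines 384-391 fold the kernel frame count back into the low byte of flags before re-calling __bpf_get_stackid(). A sketch reconstructing that surrounding logic (nr_kernel stands in for the count taken from the perf callchain; the exact source may differ slightly):

/* Extra frames to drop are folded back into the 8-bit skip field of flags. */
u64 skip = flags & BPF_F_SKIP_FIELD_MASK;

skip += nr_kernel;			/* nr_kernel: kernel frames at the top of the callchain */
if (skip > BPF_F_SKIP_FIELD_MASK)	/* would overflow the 8-bit skip field */
	return -EFAULT;

flags = (flags & ~BPF_F_SKIP_FIELD_MASK) | skip;
ret = __bpf_get_stackid(map, trace, flags);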
407 void *buf, u32 size, u64 flags, bool may_fault) in __bpf_get_stack() argument
410 bool user_build_id = flags & BPF_F_USER_BUILD_ID; in __bpf_get_stack()
412 u32 skip = flags & BPF_F_SKIP_FIELD_MASK; in __bpf_get_stack()
413 bool user = flags & BPF_F_USER_STACK; in __bpf_get_stack()
419 if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK | in __bpf_get_stack()
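Lines 407-419 open __bpf_get_stack(): the same decomposition, plus BPF_F_USER_BUILD_ID, which swaps the output element from a raw instruction pointer to a struct bpf_stack_build_id. A sketch of the size arithmetic that flag implies (the helper validates the caller's size against this element size; exact source may differ slightly):

/* Element size depends on whether build-id translation was requested. */
u32 elem_size = user_build_id ? sizeof(struct bpf_stack_build_id)
			      : sizeof(u64);
u32 num_elem = size / elem_size;	/* frames that fit in the caller's buffer */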
497 u64, flags) in BPF_CALL_4() argument
499 return __bpf_get_stack(regs, NULL, NULL, buf, size, flags, false /* !may_fault */); in BPF_CALL_4()
513 u64, flags) in BPF_CALL_4() argument
515 return __bpf_get_stack(regs, NULL, NULL, buf, size, flags, true /* may_fault */); in BPF_CALL_4()
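Lines 497-515 are bpf_get_stack() and bpf_get_stack_sleepable(); the only difference visible in the matches is the may_fault argument, which the sleepable variant sets so the user-stack walk is allowed to fault. A small caller fragment, meant to sit inside a BPF program like the earlier sketch (depth and skip count are illustrative):

/* Capture raw user-space instruction pointers into a local buffer. */
__u64 ips[32];
long n;

n = bpf_get_stack(ctx, ips, sizeof(ips), 3 /* skip 3 frames */ | BPF_F_USER_STACK);
if (n < 0)
	return 0;
/* n is the number of bytes written; n / sizeof(__u64) user frames were captured.
 * With BPF_F_USER_BUILD_ID the buffer would hold struct bpf_stack_build_id
 * entries instead. */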
529 u64 flags, bool may_fault) in __bpf_get_task_stack() argument
539 res = __bpf_get_stack(regs, task, NULL, buf, size, flags, may_fault); in __bpf_get_task_stack()
546 u32, size, u64, flags) in BPF_CALL_4() argument
548 return __bpf_get_task_stack(task, buf, size, flags, false /* !may_fault */); in BPF_CALL_4()
563 u32, size, u64, flags) in BPF_CALL_4() argument
565 return __bpf_get_task_stack(task, buf, size, flags, true /* may_fault */); in BPF_CALL_4()
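Lines 529-565 cover __bpf_get_task_stack() and the task-stack helpers, which take an explicit task instead of the program's context. A sketch of a caller from a task iterator program (assumes vmlinux.h and libbpf's bpf_helpers.h; the depth and the printed output are illustrative):

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

char LICENSE[] SEC("license") = "GPL";

SEC("iter/task")
int dump_task_stack(struct bpf_iter__task *ctx)
{
	struct task_struct *task = ctx->task;
	__u64 ips[32];
	long n;

	if (!task)
		return 0;

	/* flags = 0: kernel stack of the target task, skip no frames */
	n = bpf_get_task_stack(task, ips, sizeof(ips), 0);
	if (n < 0)
		return 0;

	bpf_printk("pid %d: %ld bytes of stack", task->pid, n);
	return 0;
}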
580 void *, buf, u32, size, u64, flags) in BPF_CALL_4() argument
590 return __bpf_get_stack(regs, NULL, NULL, buf, size, flags, false /* !may_fault */); in BPF_CALL_4()
592 if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK | in BPF_CALL_4()
596 user = flags & BPF_F_USER_STACK; in BPF_CALL_4()
610 err = __bpf_get_stack(regs, NULL, trace, buf, size, flags, false /* !may_fault */); in BPF_CALL_4()
615 u64 skip = flags & BPF_F_SKIP_FIELD_MASK; in BPF_CALL_4()
621 flags = (flags & ~BPF_F_SKIP_FIELD_MASK) | skip; in BPF_CALL_4()
622 err = __bpf_get_stack(regs, NULL, trace, buf, size, flags, false /* !may_fault */); in BPF_CALL_4()
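Lines 580-622 are bpf_get_stack_pe(), which repeats the pattern of bpf_get_stackid_pe(): validate the flag mask (line 592), then fold the kernel frame count into the skip field (lines 615-622) before the final __bpf_get_stack() call. As a worked example with illustrative numbers: flags = 3 | BPF_F_USER_STACK is 0x103, so skip = 3; with five kernel frames to drop, skip becomes 8 and the re-encoded value is (0x103 & ~0xff) | 8 = 0x108, so the final call skips eight frames of the mixed callchain.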