Lines matching refs:flags in kernel/bpf/ringbuf.c

92 const gfp_t flags = GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL | in bpf_ringbuf_area_alloc() local
125 page = alloc_pages_node(numa_node, flags, 0); in bpf_ringbuf_area_alloc()
246 u64 flags) in ringbuf_map_update_elem() argument
407 unsigned long cons_pos, prod_pos, new_prod_pos, pend_pos, flags; in __bpf_ringbuf_reserve() local
420 if (raw_res_spin_lock_irqsave(&rb->spinlock, flags)) in __bpf_ringbuf_reserve()
446 raw_res_spin_unlock_irqrestore(&rb->spinlock, flags); in __bpf_ringbuf_reserve()
458 raw_res_spin_unlock_irqrestore(&rb->spinlock, flags); in __bpf_ringbuf_reserve()
463 BPF_CALL_3(bpf_ringbuf_reserve, struct bpf_map *, map, u64, size, u64, flags) in BPF_CALL_3() argument
467 if (unlikely(flags)) in BPF_CALL_3()
482 static void bpf_ringbuf_commit(void *sample, u64 flags, bool discard) in bpf_ringbuf_commit() argument
504 if (flags & BPF_RB_FORCE_WAKEUP) in bpf_ringbuf_commit()
506 else if (cons_pos == rec_pos && !(flags & BPF_RB_NO_WAKEUP)) in bpf_ringbuf_commit()
510 BPF_CALL_2(bpf_ringbuf_submit, void *, sample, u64, flags) in BPF_CALL_2() argument
512 bpf_ringbuf_commit(sample, flags, false /* discard */); in BPF_CALL_2()
523 BPF_CALL_2(bpf_ringbuf_discard, void *, sample, u64, flags) in BPF_CALL_2() argument
525 bpf_ringbuf_commit(sample, flags, true /* discard */); in BPF_CALL_2()
537 u64, flags) in BPF_CALL_4() argument
542 if (unlikely(flags & ~(BPF_RB_NO_WAKEUP | BPF_RB_FORCE_WAKEUP))) in BPF_CALL_4()
551 bpf_ringbuf_commit(rec, flags, false /* discard */); in BPF_CALL_4()
564 BPF_CALL_2(bpf_ringbuf_query, struct bpf_map *, map, u64, flags) in BPF_CALL_2() argument
570 switch (flags) { in BPF_CALL_2()
591 BPF_CALL_4(bpf_ringbuf_reserve_dynptr, struct bpf_map *, map, u32, size, u64, flags, in BPF_CALL_4() argument
598 if (unlikely(flags)) { in BPF_CALL_4()
631 BPF_CALL_2(bpf_ringbuf_submit_dynptr, struct bpf_dynptr_kern *, ptr, u64, flags) in BPF_CALL_2() argument
636 bpf_ringbuf_commit(ptr->data, flags, false /* discard */); in BPF_CALL_2()
650 BPF_CALL_2(bpf_ringbuf_discard_dynptr, struct bpf_dynptr_kern *, ptr, u64, flags) in BPF_CALL_2() argument
655 bpf_ringbuf_commit(ptr->data, flags, true /* discard */); in BPF_CALL_2()
672 u32 hdr_len, sample_len, total_len, flags, *hdr; in __bpf_user_ringbuf_peek() local
688 flags = hdr_len & (BPF_RINGBUF_BUSY_BIT | BPF_RINGBUF_DISCARD_BIT); in __bpf_user_ringbuf_peek()
689 sample_len = hdr_len & ~flags; in __bpf_user_ringbuf_peek()
705 if (flags & BPF_RINGBUF_DISCARD_BIT) { in __bpf_user_ringbuf_peek()
715 if (flags & BPF_RINGBUF_BUSY_BIT) in __bpf_user_ringbuf_peek()
724 static void __bpf_user_ringbuf_sample_release(struct bpf_ringbuf *rb, size_t size, u64 flags) in __bpf_user_ringbuf_sample_release() argument
739 void *, callback_fn, void *, callback_ctx, u64, flags) in BPF_CALL_4() argument
747 if (unlikely(flags & ~wakeup_flags)) in BPF_CALL_4()
777 __bpf_user_ringbuf_sample_release(rb, size, flags); in BPF_CALL_4()
787 if (flags & BPF_RB_FORCE_WAKEUP) in BPF_CALL_4()
789 else if (!(flags & BPF_RB_NO_WAKEUP) && samples > 0) in BPF_CALL_4()
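
For orientation, below is a minimal sketch of how the flags in this listing surface to a BPF program: bpf_ringbuf_reserve() must be called with flags == 0 (any non-zero value is rejected at line 467), bpf_ringbuf_submit()/bpf_ringbuf_discard() accept BPF_RB_NO_WAKEUP or BPF_RB_FORCE_WAKEUP (consumed by bpf_ringbuf_commit(), lines 504-506), and bpf_ringbuf_query() treats flags as a counter selector (line 570). The map name, event layout and tracepoint used here are illustrative assumptions, not taken from ringbuf.c.

// SPDX-License-Identifier: GPL-2.0
/* Illustrative BPF-side sketch: the helpers and BPF_RB_* flags are the ones
 * the listing above refers to; everything else (map name, event struct,
 * tracepoint) is assumed for the example. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_RINGBUF);
	__uint(max_entries, 256 * 1024);   /* power-of-2 multiple of PAGE_SIZE */
} rb SEC(".maps");

struct event {
	__u32 pid;
	__u64 backlog;
};

SEC("tracepoint/syscalls/sys_enter_execve")
int handle_execve(void *ctx)
{
	struct event *e;

	/* reserve: any non-zero flags value is rejected (line 467) */
	e = bpf_ringbuf_reserve(&rb, sizeof(*e), 0);
	if (!e)
		return 0;

	e->pid = bpf_get_current_pid_tgid() >> 32;

	/* query: flags selects which counter to read (line 570) */
	e->backlog = bpf_ringbuf_query(&rb, BPF_RB_AVAIL_DATA);

	/* submit/discard: flags may be 0, BPF_RB_NO_WAKEUP or BPF_RB_FORCE_WAKEUP;
	 * bpf_ringbuf_commit() (lines 504-506) turns this into an epoll wakeup
	 * decision for the consumer. */
	bpf_ringbuf_submit(e, 0);

	return 0;
}

char LICENSE[] SEC("license") = "GPL";

With flags == 0 on submit, bpf_ringbuf_commit() notifies the consumer only when it has already caught up to this record (line 506); BPF_RB_NO_WAKEUP suppresses the notification entirely and BPF_RB_FORCE_WAKEUP always sends it (line 504).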