/linux-6.3-rc2/tools/testing/selftests/bpf/progs/

linked_list.c
     24: bpf_spin_lock(lock);  in list_push_pop()
     33: bpf_spin_lock(lock);  in list_push_pop()
     43: bpf_spin_lock(lock);  in list_push_pop()
     49: bpf_spin_lock(lock);  in list_push_pop()
     60: bpf_spin_lock(lock);  in list_push_pop()
     64: bpf_spin_lock(lock);  in list_push_pop()
     76: bpf_spin_lock(lock);  in list_push_pop()
     84: bpf_spin_lock(lock);  in list_push_pop()
    121: bpf_spin_lock(lock);  in list_push_pop_multiple()
    159: bpf_spin_lock(lock);  in list_push_pop_multiple()
    [all …]
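Every hit in linked_list.c follows the same pattern: the spin lock associated with the list is taken before any push or pop and released right after, because the verifier rejects list operations performed outside the lock's critical section (linked_list_fail.c below probes exactly those rejections). A minimal sketch of the pattern, assuming the selftests' bpf_experimental.h kfunc declarations and their private()/__contains() conventions; the type and function names here are illustrative, not taken from the selftest:

    #include <vmlinux.h>
    #include <bpf/bpf_helpers.h>
    #include "bpf_experimental.h"

    /* Illustrative node type; any struct embedding a bpf_list_node works. */
    struct elem {
        int value;
        struct bpf_list_node node;
    };

    /* Lock and list head grouped in one .data.A map, as the selftests do,
     * so the verifier can associate glock with ghead. */
    #define private(name) SEC(".data." #name) __hidden __attribute__((aligned(8)))
    private(A) struct bpf_spin_lock glock;
    private(A) struct bpf_list_head ghead __contains(elem, node);

    SEC("tc")
    int list_push_pop_sketch(void *ctx)
    {
        struct bpf_list_node *n;
        struct elem *e;

        e = bpf_obj_new(typeof(*e));
        if (!e)
            return 0;
        e->value = 1;

        bpf_spin_lock(&glock);                  /* must be held around list ops */
        bpf_list_push_front(&ghead, &e->node);  /* ownership moves to the list */
        n = bpf_list_pop_front(&ghead);         /* may return NULL */
        bpf_spin_unlock(&glock);

        if (n)  /* popped node is owned again: free it (or re-add it) */
            bpf_obj_drop(container_of(n, struct elem, node));
        return 0;
    }

    char _license[] SEC("license") = "GPL";

Keeping glock and ghead in the same private(A) section mirrors linked_list.h below; the failure selftests deliberately use a second private(B) lock to check that a mismatched lock is rejected.
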
rbtree_fail.c
     55: bpf_spin_lock(&glock);  in rbtree_api_nolock_remove()
     88: bpf_spin_lock(&glock);  in rbtree_api_remove_unadded_node()
    114: bpf_spin_lock(&glock);  in rbtree_api_remove_no_drop()
    142: bpf_spin_lock(&glock);  in rbtree_api_add_to_multiple_trees()
    161: bpf_spin_lock(&glock);  in rbtree_api_add_release_unlock_escape()
    165: bpf_spin_lock(&glock);  in rbtree_api_add_release_unlock_escape()
    186: bpf_spin_lock(&glock);  in rbtree_api_release_aliasing()
    190: bpf_spin_lock(&glock);  in rbtree_api_release_aliasing()
    233: bpf_spin_lock(&glock);  in rbtree_api_first_release_unlock_escape()
    239: bpf_spin_lock(&glock);  in rbtree_api_first_release_unlock_escape()
    [all …]

test_spin_lock.c
      9: struct bpf_spin_lock lock;
     21: struct bpf_spin_lock lock;
     32: struct bpf_spin_lock lock;
     69: bpf_spin_lock(&val->lock);  in bpf_spin_lock_test()
     83: bpf_spin_lock(&q->lock);  in bpf_spin_lock_test()
     94: bpf_spin_lock(&cls->lock);  in bpf_spin_lock_test()
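test_spin_lock.c and test_map_lock.c exercise the original, map-value form of the API: a struct bpf_spin_lock embedded in the map value and taken around updates to the neighbouring fields, while test_spin_lock_fail.c below checks that taking one value's lock while touching another is rejected (the lock_id_mismatch_* cases). A minimal sketch of the map-value pattern; the map name, value layout, and program section are illustrative:

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    /* Map value embedding the lock next to the data it protects. */
    struct hmap_elem {
        struct bpf_spin_lock lock;
        int cnt;
    };

    struct {
        __uint(type, BPF_MAP_TYPE_HASH);
        __uint(max_entries, 1);
        __type(key, int);
        __type(value, struct hmap_elem);
    } hmap SEC(".maps");

    SEC("tc")
    int spin_lock_sketch(struct __sk_buff *skb)
    {
        struct hmap_elem *val;
        int key = 0;

        val = bpf_map_lookup_elem(&hmap, &key);
        if (!val)
            return 0;

        bpf_spin_lock(&val->lock);   /* keep the critical section short; the
                                      * verifier rejects other helper calls
                                      * while this lock is held */
        val->cnt++;
        bpf_spin_unlock(&val->lock);
        return 0;
    }

    char _license[] SEC("license") = "GPL";

The value type has to be visible to the kernel through BTF (which a BTF-defined map like the one above provides) so the verifier knows where the lock sits inside the value.
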
linked_list_fail.c
    366: bpf_spin_lock(&glock);  in use_after_unlock()
    394: bpf_spin_lock(&glock);  in list_double_add()
    422: bpf_spin_lock(&glock);  in no_node_value_type()
    437: bpf_spin_lock(&glock);  in incorrect_value_type()
    452: bpf_spin_lock(&glock);  in incorrect_node_var_off()
    467: bpf_spin_lock(&glock);  in incorrect_node_off1()
    482: bpf_spin_lock(&glock);  in incorrect_node_off2()
    497: bpf_spin_lock(&glock);  in no_head_type()
    499: bpf_spin_lock(&glock);  in no_head_type()
    512: bpf_spin_lock(&glock);  in incorrect_head_var_off1()
    [all …]

rbtree.c
     21: private(A) struct bpf_spin_lock glock;
     36: static long __add_three(struct bpf_rb_root *root, struct bpf_spin_lock *lock)  in __add_three()
     52: bpf_spin_lock(&glock);  in __add_three()
     62: bpf_spin_lock(&glock);  in __add_three()
     90: bpf_spin_lock(&glock);  in rbtree_add_and_remove()
    134: bpf_spin_lock(&glock);  in rbtree_first_and_remove()
    156: bpf_spin_lock(&glock);  in rbtree_first_and_remove()
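rbtree.c drives the rbtree kfuncs, which the verifier only accepts while the bpf_spin_lock associated with the tree is held; rbtree_fail.c above covers the rejected variants (no lock, adding to multiple trees, nodes escaping the critical section, and so on). A minimal sketch of the add/first/remove flow, again assuming the selftests' bpf_experimental.h declarations; names are illustrative:

    #include <vmlinux.h>
    #include <bpf/bpf_helpers.h>
    #include "bpf_experimental.h"

    struct node_data {
        long key;
        struct bpf_rb_node node;
    };

    #define private(name) SEC(".data." #name) __hidden __attribute__((aligned(8)))
    private(A) struct bpf_spin_lock glock;
    private(A) struct bpf_rb_root groot __contains(node_data, node);

    /* Ordering callback passed to bpf_rbtree_add(); runs with glock held. */
    static bool less(struct bpf_rb_node *a, const struct bpf_rb_node *b)
    {
        struct node_data *na = container_of(a, struct node_data, node);
        struct node_data *nb = container_of(b, struct node_data, node);

        return na->key < nb->key;
    }

    SEC("tc")
    int rbtree_add_first_remove_sketch(void *ctx)
    {
        struct bpf_rb_node *res;
        struct node_data *n;

        n = bpf_obj_new(typeof(*n));
        if (!n)
            return 0;
        n->key = 42;

        bpf_spin_lock(&glock);           /* required around all rbtree kfuncs */
        bpf_rbtree_add(&groot, &n->node, less);
        res = bpf_rbtree_first(&groot);  /* non-owning ref, only valid under the lock */
        if (res)
            res = bpf_rbtree_remove(&groot, res);
        bpf_spin_unlock(&glock);

        if (res)  /* removed node is owned again: free it */
            bpf_obj_drop(container_of(res, struct node_data, node));
        return 0;
    }

    char _license[] SEC("license") = "GPL";
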
test_spin_lock_fail.c
      8: struct bpf_spin_lock lock;
     31: SEC(".data.A") struct bpf_spin_lock lockA;
     32: SEC(".data.B") struct bpf_spin_lock lockB;
    108: bpf_spin_lock(A); \
    136: bpf_spin_lock(&f1->lock);  in lock_id_mismatch_mapval_mapval()
    164: bpf_spin_lock(&f1->lock);  in lock_id_mismatch_innermapval_innermapval1()
    191: bpf_spin_lock(&f1->lock);  in lock_id_mismatch_innermapval_innermapval2()

linked_list.h
     17: struct bpf_spin_lock lock;
     23: struct bpf_spin_lock lock;
     52: private(A) struct bpf_spin_lock glock;
     54: private(B) struct bpf_spin_lock glock2;

test_map_lock.c
     10: struct bpf_spin_lock lock;
     22: struct bpf_spin_lock lock;
     45: bpf_spin_lock(&val->lock);  in bpf_map_lock_test()
     54: bpf_spin_lock(&q->lock);  in bpf_map_lock_test()

freplace_attach_probe.c
     12: struct bpf_spin_lock lock;
     33: bpf_spin_lock(&val->lock);  in new_handle_kprobe()

rbtree_btf_fail__wrong_node_type.c
     31: private(A) struct bpf_spin_lock glock;
     43: bpf_spin_lock(&glock);  in rbtree_api_add__wrong_node_type()

rbtree_btf_fail__add_wrong_type.c
     34: private(A) struct bpf_spin_lock glock;
     46: bpf_spin_lock(&glock);  in rbtree_api_add__add_wrong_type()

test_helper_restricted.c
     11: struct bpf_spin_lock l;
     54: bpf_spin_lock(&lock->l);  in spin_lock_work()

htab_reuse.c
      9: struct bpf_spin_lock lock;

timer_crash.c
      9: struct bpf_spin_lock lock;

test_sock_fields.c
     27: struct bpf_spin_lock lock;
    210: bpf_spin_lock(&pkt_out_cnt10->lock);  in egress_read_sock_fields()

timer.c
     13: struct bpf_spin_lock lock; /* unused */
/linux-6.3-rc2/kernel/bpf/

helpers.c
    277: static inline void __bpf_spin_lock(struct bpf_spin_lock *lock)  in __bpf_spin_lock()
    291: static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock)  in __bpf_spin_unlock()
    300: static inline void __bpf_spin_lock(struct bpf_spin_lock *lock)  in __bpf_spin_lock()
    310: static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock)  in __bpf_spin_unlock()
    330: notrace BPF_CALL_1(bpf_spin_lock, struct bpf_spin_lock *, lock)  in BPF_CALL_1() argument
    337: .func = bpf_spin_lock,
    353: notrace BPF_CALL_1(bpf_spin_unlock, struct bpf_spin_lock *, lock)  in BPF_CALL_1() argument
    370: struct bpf_spin_lock *lock;  in copy_map_value_locked()
   1106: struct bpf_spin_lock lock;
   1735: struct bpf_spin_lock *spin_lock)  in bpf_list_head_free()
    [all …]
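On the kernel side, the paired definitions of __bpf_spin_lock()/__bpf_spin_unlock() are the two implementations the helper can fall back on (roughly, an arch spinlock where one fits in the 4-byte field, otherwise an atomic test-and-set loop), and copy_map_value_locked() is what backs the BPF_F_LOCK flag, which lets user space read or update a lock-protected value as one unit without racing the program's critical section. A minimal user-space sketch using libbpf; the helper name and value layout are illustrative, and obtaining map_fd from the loaded object is elided:

    #include <bpf/bpf.h>
    #include <linux/bpf.h>   /* BPF_F_LOCK, struct bpf_spin_lock */

    /* Must mirror the BPF-side value; the lock field itself is never
     * copied in either direction. */
    struct hmap_elem {
        struct bpf_spin_lock lock;
        int cnt;
    };

    /* Hypothetical helper: bump cnt for 'key' without racing the BPF
     * program's critical section. */
    static int bump_locked(int map_fd, int key)
    {
        struct hmap_elem val = {};
        int err;

        err = bpf_map_lookup_elem_flags(map_fd, &key, &val, BPF_F_LOCK);
        if (err)
            return err;

        val.cnt++;
        return bpf_map_update_elem(map_fd, &key, &val, BPF_F_LOCK);
    }

BPF_F_LOCK is only honoured when the map's value type carries BTF describing where the bpf_spin_lock lives, which is the same requirement the map_hash.rst and map_array.rst excerpts below refer to.
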
/linux-6.3-rc2/samples/bpf/

hbm.h
     12: struct bpf_spin_lock lock;

hbm_edt_kern.c
     92: bpf_spin_lock(&qdp->lock);  in _hbm_out_cg()

hbm_out_kern.c
     94: bpf_spin_lock(&qdp->lock);  in _hbm_out_cg()
/linux-6.3-rc2/Documentation/bpf/

graph_ds_impl.rst
     70: struct bpf_spin_lock glock;
     74: which also contains a ``bpf_spin_lock`` - in the above example both global
     92: bpf_spin_lock(&lock);
    125: bpf_spin_lock(&lock);
    136: * Graph data structure APIs can only be used when the ``bpf_spin_lock``
    146: Because the associated ``bpf_spin_lock`` must be held by any program adding
    217: bpf_spin_lock(&lock);

bpf_design_QA.rst
    314: Q: Users are allowed to embed bpf_spin_lock, bpf_timer fields in their BPF map
    320: A: It depends. For bpf_spin_lock, bpf_timer: YES, for kptr and everything else:
    323: For struct types that have been added already, like bpf_spin_lock and bpf_timer,

map_hash.rst
    105: infrastructure provides ``struct bpf_spin_lock`` to synchronise access.

map_array.rst
     88: Since kernel version 5.1, the BPF infrastructure provides ``struct bpf_spin_lock``

/linux-6.3-rc2/tools/testing/selftests/bpf/prog_tests/

sock_fields.c
     30: struct bpf_spin_lock lock;