Searched refs:bkt (Results 1 – 25 of 66) sorted by relevance

/linux/include/linux/
hashtable.h
126 #define hash_for_each(name, bkt, obj, member) \ argument
127 for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\
128 (bkt)++)\
129 hlist_for_each_entry(obj, &name[bkt], member)
138 #define hash_for_each_rcu(name, bkt, obj, member) \ argument
139 for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\
140 (bkt)++)\
141 hlist_for_each_entry_rcu(obj, &name[bkt], member)
153 for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\
154 (bkt)++)\
[all …]
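In the hashtable.h hits above, bkt is simply the caller-supplied integer cursor that hash_for_each() and hash_for_each_rcu() advance through every bucket of the fixed-size table. A minimal usage sketch, assuming a hypothetical my_entry/my_table pair that does not appear in any of the files listed:

    #include <linux/hashtable.h>
    #include <linux/printk.h>

    struct my_entry {                            /* hypothetical example struct */
            int key;
            struct hlist_node node;
    };

    static DEFINE_HASHTABLE(my_table, 4);        /* 2^4 = 16 buckets */

    static void my_dump(void)
    {
            struct my_entry *e;
            int bkt;

            /* visits every entry in every bucket; bkt tracks the bucket index */
            hash_for_each(my_table, bkt, e, node)
                    pr_info("bucket %d: key %d\n", bkt, e->key);
    }

Entries would be added beforehand with hash_add(my_table, &e->node, e->key).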
rhashtable.h
376 return __rht_ptr(rcu_dereference(*bkt), bkt); in rht_ptr_rcu()
384 return __rht_ptr(rht_dereference_bucket(*bkt, tbl, hash), bkt); in rht_ptr()
390 return __rht_ptr(rcu_dereference_protected(*bkt, 1), bkt); in rht_ptr_exclusive()
605 bkt = rht_bucket(tbl, hash); in __rhashtable_lookup()
729 if (!bkt) in __rhashtable_insert_fast()
732 flags = rht_lock(tbl, bkt); in __rhashtable_insert_fast()
809 rht_unlock(tbl, bkt, flags); in __rhashtable_insert_fast()
1009 if (!bkt) in __rhashtable_remove_fast_one()
1012 flags = rht_lock(tbl, bkt); in __rhashtable_remove_fast_one()
1168 if (!bkt) in __rhashtable_replace_fast()
[all …]
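Unlike the fixed-size hashtable.h above, in the rhashtable hits bkt is a pointer to the bucket's head/lock word rather than an integer index, and it is handled entirely inside the lookup/insert/remove fast paths. Callers only see the public API; a rough sketch under assumed struct and field names (my_obj, linkage, my_params are illustrative, not from the files listed):

    #include <linux/rhashtable.h>

    struct my_obj {                              /* hypothetical example struct */
            u32 key;
            struct rhash_head linkage;
    };

    static const struct rhashtable_params my_params = {
            .key_len     = sizeof(u32),
            .key_offset  = offsetof(struct my_obj, key),
            .head_offset = offsetof(struct my_obj, linkage),
    };

    static int my_insert_lookup_remove(struct rhashtable *ht,
                                       struct my_obj *obj, u32 key)
    {
            int err;

            err = rhashtable_insert_fast(ht, &obj->linkage, my_params);
            if (err)
                    return err;

            /* lookup takes the RCU read lock itself; bucket locking stays internal */
            obj = rhashtable_lookup_fast(ht, &key, my_params);
            if (obj)
                    err = rhashtable_remove_fast(ht, &obj->linkage, my_params);
            return err;
    }

The table itself would be set up once with rhashtable_init(ht, &my_params) and torn down with rhashtable_destroy(ht).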
/linux/fs/smb/client/
compress.c
72 p = bkt[i].count; in has_low_entropy()
105 sum += bkt[i].count; in calc_byte_distribution()
111 sum += bkt[i].count; in calc_byte_distribution()
131 if (bkt[i].count > 0) in is_mostly_ascii()
226 struct bucket *bkt = NULL; in is_compressible() local
261 bkt = kcalloc(bkt_size, sizeof(*bkt), GFP_KERNEL); in is_compressible()
262 if (!bkt) { in is_compressible()
270 bkt[sample[i]].count++; in is_compressible()
272 if (is_mostly_ascii(bkt)) in is_compressible()
276 sort(bkt, bkt_size, sizeof(*bkt), cmp_bkt, NULL); in is_compressible()
[all …]
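Here bkt is an array of per-byte-value buckets used by the smb client's compressibility heuristic: the sample's byte frequencies are counted and then sorted by count before the entropy and ASCII checks run. A rough paraphrase of that sampling step (bucket layout and helper names are assumed, not copied from compress.c):

    #include <linux/types.h>
    #include <linux/slab.h>
    #include <linux/sort.h>

    struct byte_bucket {                         /* assumed layout, mirroring bkt[i].count above */
            u8  value;
            u32 count;
    };

    static int cmp_count_desc(const void *a, const void *b)
    {
            const struct byte_bucket *ba = a, *bb = b;

            /* most frequent byte value first */
            return (bb->count > ba->count) - (bb->count < ba->count);
    }

    /* Build a frequency histogram of the sampled bytes, sorted by count. */
    static struct byte_bucket *byte_histogram(const u8 *sample, size_t len)
    {
            struct byte_bucket *bkt;
            size_t i;

            bkt = kcalloc(256, sizeof(*bkt), GFP_KERNEL);
            if (!bkt)
                    return NULL;

            for (i = 0; i < 256; i++)
                    bkt[i].value = i;
            for (i = 0; i < len; i++)
                    bkt[sample[i]].count++;

            sort(bkt, 256, sizeof(*bkt), cmp_count_desc, NULL);
            return bkt;
    }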
/linux/tools/include/linux/
hashtable.h
105 #define hash_for_each(name, bkt, obj, member) \ argument
106 for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\
107 (bkt)++)\
108 hlist_for_each_entry(obj, &name[bkt], member)
119 #define hash_for_each_safe(name, bkt, tmp, obj, member) \ argument
120 for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\
121 (bkt)++)\
122 hlist_for_each_entry_safe(obj, tmp, &name[bkt], member)
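This is the userspace copy of the same hashtable macros under tools/. The _safe variant adds a tmp hlist_node cursor so the current entry can be unlinked while bkt keeps walking the buckets. A small sketch, with the same assumed my_entry/my_table names as in the earlier hashtable sketch and entries assumed to have been malloc()ed and hash_add()ed beforehand:

    #include <stdlib.h>
    #include <linux/hashtable.h>         /* the tools/include copy */

    struct my_entry {
            int key;
            struct hlist_node node;
    };

    static DEFINE_HASHTABLE(my_table, 4);

    static void my_flush(void)
    {
            struct my_entry *e;
            struct hlist_node *tmp;
            int bkt;

            /* tmp keeps the walk valid while the current entry is unlinked */
            hash_for_each_safe(my_table, bkt, tmp, e, node) {
                    hash_del(&e->node);
                    free(e);
            }
    }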
/linux/tools/lib/bpf/
hashmap.h
168 #define hashmap__for_each_entry(map, cur, bkt) \ argument
169 for (bkt = 0; bkt < map->cap; bkt++) \
170 for (cur = map->buckets[bkt]; cur; cur = cur->next)
180 #define hashmap__for_each_entry_safe(map, cur, tmp, bkt) \ argument
181 for (bkt = 0; bkt < map->cap; bkt++) \
182 for (cur = map->buckets[bkt]; \
hashmap.c
66 size_t bkt; in hashmap__clear() local
68 hashmap__for_each_entry_safe(map, cur, tmp, bkt) { in hashmap__clear()
106 size_t h, bkt; in hashmap_grow() local
117 hashmap__for_each_entry_safe(map, cur, tmp, bkt) { in hashmap_grow()
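libbpf's hashmap is a separate, resizable chained table: bkt is a size_t index over map->cap, and hashmap__for_each_entry() walks each bucket's chain. A sketch of typical iteration, assuming the current long-based key/value API (older trees use const void * keys and values) and with error handling elided; id_hash, id_equal and dump_map are illustrative names:

    #include <stdio.h>
    #include "hashmap.h"                 /* tools/lib/bpf/hashmap.h */

    static size_t id_hash(long key, void *ctx)        { return key; }
    static bool   id_equal(long a, long b, void *ctx) { return a == b; }

    static void dump_map(void)
    {
            struct hashmap *map = hashmap__new(id_hash, id_equal, NULL);
            struct hashmap_entry *cur;
            size_t bkt;

            hashmap__add(map, 1, 100);
            hashmap__add(map, 2, 200);

            /* bkt walks the bucket array; cur walks each bucket's chain */
            hashmap__for_each_entry(map, cur, bkt)
                    printf("key %ld -> value %ld\n", cur->key, cur->value);

            hashmap__free(map);
    }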
/linux/tools/perf/util/
hashmap.h
168 #define hashmap__for_each_entry(map, cur, bkt) \ argument
169 for (bkt = 0; bkt < map->cap; bkt++) \
170 for (cur = map->buckets[bkt]; cur; cur = cur->next)
180 #define hashmap__for_each_entry_safe(map, cur, tmp, bkt) \ argument
181 for (bkt = 0; bkt < map->cap; bkt++) \
182 for (cur = map->buckets[bkt]; \
expr.c
82 size_t bkt; in ids__free() local
87 hashmap__for_each_entry(ids, cur, bkt) { in ids__free()
111 size_t bkt; in ids__union() local
129 hashmap__for_each_entry(ids2, cur, bkt) { in ids__union()
228 size_t bkt; in expr__subset_of_ids() local
231 hashmap__for_each_entry(needles->ids, cur, bkt) { in expr__subset_of_ids()
310 size_t bkt; in expr__ctx_clear() local
312 hashmap__for_each_entry(ctx->ids, cur, bkt) { in expr__ctx_clear()
322 size_t bkt; in expr__ctx_free() local
328 hashmap__for_each_entry(ctx->ids, cur, bkt) { in expr__ctx_free()
threads.c
140 size_t bkt; in threads__remove_all_threads() local
144 hashmap__for_each_entry_safe((&table->shard), cur, tmp, bkt) { in threads__remove_all_threads()
175 size_t bkt; in threads__for_each_thread() local
178 hashmap__for_each_entry((&table->shard), cur, bkt) { in threads__for_each_thread()
hashmap.c
66 size_t bkt; in hashmap__clear() local
68 hashmap__for_each_entry_safe(map, cur, tmp, bkt) { in hashmap__clear()
106 size_t h, bkt; in hashmap_grow() local
117 hashmap__for_each_entry_safe(map, cur, tmp, bkt) { in hashmap_grow()
/linux/drivers/infiniband/ulp/opa_vnic/
opa_vnic_internal.h
280 #define vnic_hash_for_each_safe(name, bkt, tmp, obj, member) \ argument
281 for ((bkt) = 0, obj = NULL; \
282 !obj && (bkt) < OPA_VNIC_MAC_TBL_SIZE; (bkt)++) \
283 hlist_for_each_entry_safe(obj, tmp, &name[bkt], member)
289 #define vnic_hash_for_each(name, bkt, obj, member) \ argument
290 for ((bkt) = 0, obj = NULL; \
291 !obj && (bkt) < OPA_VNIC_MAC_TBL_SIZE; (bkt)++) \
292 hlist_for_each_entry(obj, &name[bkt], member)
opa_vnic_encap.c
107 int bkt; in opa_vnic_free_mac_tbl() local
112 vnic_hash_for_each_safe(mactbl, bkt, tmp, node, hlist) { in opa_vnic_free_mac_tbl()
157 int bkt; in opa_vnic_query_mac_tbl() local
168 vnic_hash_for_each(mactbl, bkt, node, hlist) { in opa_vnic_query_mac_tbl()
209 int i, bkt, rc = 0; in opa_vnic_update_mac_tbl() local
263 vnic_hash_for_each(old_mactbl, bkt, node, hlist) { in opa_vnic_update_mac_tbl()
/linux/lib/
hashtable_test.c
68 int bkt; in hashtable_test_hash_add() local
80 hash_for_each(hash, bkt, x, node) { in hashtable_test_hash_add()
127 int bkt, i, j, count; in hashtable_test_hash_for_each() local
139 hash_for_each(hash, bkt, x, node) { in hashtable_test_hash_for_each()
157 int bkt, i, j, count; in hashtable_test_hash_for_each_safe() local
190 int bkt, i, j, count; in hashtable_test_hash_for_each_possible() local
220 hash_for_each(hash, bkt, y, node) { in hashtable_test_hash_for_each_possible()
223 buckets[y->key] = bkt; in hashtable_test_hash_for_each_possible()
245 int bkt, i, j, count; in hashtable_test_hash_for_each_possible_safe() local
278 hash_for_each(hash, bkt, y, node) { in hashtable_test_hash_for_each_possible_safe()
[all …]
rhashtable.c
274 rht_assign_locked(bkt, next); in rhashtable_rehash_one()
288 if (!bkt) in rhashtable_rehash_chain()
290 flags = rht_lock(old_tbl, bkt); in rhashtable_rehash_chain()
297 rht_unlock(old_tbl, bkt, flags); in rhashtable_rehash_chain()
534 rht_assign_locked(bkt, obj); in rhashtable_lookup_one()
572 head = rht_ptr(bkt, tbl, hash); in rhashtable_insert_one()
585 rht_assign_locked(bkt, obj); in rhashtable_insert_one()
611 bkt = rht_bucket_var(tbl, hash); in rhashtable_try_insert()
614 if (bkt == NULL) { in rhashtable_try_insert()
618 flags = rht_lock(tbl, bkt); in rhashtable_try_insert()
[all …]
/linux/tools/testing/selftests/bpf/prog_tests/
hashmap.c
47 int err, bkt, found_cnt, i; in test_hashmap_generic() local
92 hashmap__for_each_entry(map, entry, bkt) { in test_hashmap_generic()
140 hashmap__for_each_entry_safe(map, entry, tmp, bkt) { in test_hashmap_generic()
230 hashmap__for_each_entry(map, entry, bkt) { in test_hashmap_generic()
238 hashmap__for_each_entry(map, entry, bkt) { in test_hashmap_generic()
265 int err, i, bkt; in test_hashmap_ptr_iface() local
319 hashmap__for_each_entry(map, cur, bkt) { in test_hashmap_ptr_iface()
345 int err, bkt; in test_hashmap_multimap() local
382 hashmap__for_each_entry(map, entry, bkt) { in test_hashmap_multimap()
414 int bkt; in test_hashmap_empty() local
[all …]
/linux/drivers/s390/crypto/
ap_card.c
80 int bkt; in request_count_store() local
85 hash_for_each(ap_queues, bkt, aq, hnode) in request_count_store()
99 int bkt; in requestq_count_show() local
106 hash_for_each(ap_queues, bkt, aq, hnode) in requestq_count_show()
118 int bkt; in pendingq_count_show() local
125 hash_for_each(ap_queues, bkt, aq, hnode) in pendingq_count_show()
/linux/net/ipv6/
calipso.c
203 u32 bkt; in calipso_cache_check() local
212 bkt = hash & (CALIPSO_CACHE_BUCKETS - 1); in calipso_cache_check()
213 spin_lock_bh(&calipso_cache[bkt].lock); in calipso_cache_check()
224 spin_unlock_bh(&calipso_cache[bkt].lock); in calipso_cache_check()
239 spin_unlock_bh(&calipso_cache[bkt].lock); in calipso_cache_check()
244 spin_unlock_bh(&calipso_cache[bkt].lock); in calipso_cache_check()
268 u32 bkt; in calipso_cache_add() local
291 bkt = entry->hash & (CALIPSO_CACHE_BUCKETS - 1); in calipso_cache_add()
292 spin_lock_bh(&calipso_cache[bkt].lock); in calipso_cache_add()
295 calipso_cache[bkt].size += 1; in calipso_cache_add()
[all …]
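In calipso.c (and its IPv4 twin cipso_ipv4.c further down) bkt is not fed to an iterator at all: it is a cache bucket picked by masking the hash with a power-of-two bucket count, and each bucket carries its own spinlock. A minimal sketch of that pattern with assumed names (lock initialization and the real add/lookup bodies are elided):

    #include <linux/types.h>
    #include <linux/spinlock.h>
    #include <linux/list.h>

    #define MY_CACHE_BUCKETS 32                      /* must be a power of two */

    struct my_cache_bucket {                         /* assumed layout */
            spinlock_t lock;                         /* spin_lock_init()ed at setup */
            u32 size;
            struct list_head list;
    };

    static struct my_cache_bucket my_cache[MY_CACHE_BUCKETS];

    static void my_cache_bump(u32 hash)
    {
            u32 bkt = hash & (MY_CACHE_BUCKETS - 1); /* cheap modulo for power-of-two sizes */

            spin_lock_bh(&my_cache[bkt].lock);
            my_cache[bkt].size += 1;                 /* stand-in for the real add/lookup work */
            spin_unlock_bh(&my_cache[bkt].lock);
    }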
/linux/drivers/net/ethernet/intel/ice/
ice_vf_lib.h
197 #define ice_for_each_vf(pf, bkt, vf) \ argument
198 hash_for_each((pf)->vfs.table, (bkt), (vf), entry)
214 #define ice_for_each_vf_rcu(pf, bkt, vf) \ argument
215 hash_for_each_rcu((pf)->vfs.table, (bkt), (vf), entry)
ice_sriov.c
29 unsigned int bkt; in ice_free_vf_entries() local
156 unsigned int bkt; in ice_free_vfs() local
175 ice_for_each_vf(pf, bkt, vf) { in ice_free_vfs()
584 unsigned int bkt, it_cnt; in ice_start_vfs() local
1010 int to_remap = 0, bkt; in ice_sriov_remap_vectors() local
1215 unsigned int bkt; in ice_process_vflr_event() local
1252 unsigned int bkt; in ice_get_vf_from_pfq() local
1604 unsigned int bkt; in ice_calc_all_vfs_min_tx_rate() local
1916 unsigned int bkt; in ice_print_vfs_mdd_events() local
1957 u32 bkt; in ice_restore_all_vfs_msi_state() local
[all …]
ice_vf_lib.c
108 unsigned int bkt; in ice_get_num_vfs() local
112 ice_for_each_vf_rcu(pf, bkt, vf) in ice_get_num_vfs()
579 unsigned int bkt; in ice_is_any_vf_in_unicast_promisc() local
582 ice_for_each_vf_rcu(pf, bkt, vf) { in ice_is_any_vf_in_unicast_promisc()
735 unsigned int bkt; in ice_reset_all_vfs() local
744 ice_for_each_vf(pf, bkt, vf) in ice_reset_all_vfs()
754 ice_for_each_vf(pf, bkt, vf) in ice_reset_all_vfs()
761 ice_for_each_vf(pf, bkt, vf) { in ice_reset_all_vfs()
773 ice_for_each_vf(pf, bkt, vf) { in ice_reset_all_vfs()
1363 unsigned int bkt; in ice_get_vf_ctrl_vsi() local
[all …]
/linux/net/ipv4/
cipso_ipv4.c
237 u32 bkt; in cipso_v4_cache_check() local
246 bkt = hash & (CIPSO_V4_CACHE_BUCKETS - 1); in cipso_v4_cache_check()
247 spin_lock_bh(&cipso_v4_cache[bkt].lock); in cipso_v4_cache_check()
258 spin_unlock_bh(&cipso_v4_cache[bkt].lock); in cipso_v4_cache_check()
273 spin_unlock_bh(&cipso_v4_cache[bkt].lock); in cipso_v4_cache_check()
278 spin_unlock_bh(&cipso_v4_cache[bkt].lock); in cipso_v4_cache_check()
301 u32 bkt; in cipso_v4_cache_add() local
325 spin_lock_bh(&cipso_v4_cache[bkt].lock); in cipso_v4_cache_add()
326 if (cipso_v4_cache[bkt].size < bkt_size) { in cipso_v4_cache_add()
328 cipso_v4_cache[bkt].size += 1; in cipso_v4_cache_add()
[all …]
/linux/net/sched/
cls_route.c
55 struct route4_bucket *bkt; member
328 b = f->bkt; in route4_delete()
465 f->bkt = b; in route4_set_parms()
524 f->bkt = fold->bkt; in route4_change()
534 fp = &f->bkt->ht[h]; in route4_change()
/linux/drivers/net/ethernet/mellanox/mlx5/core/lib/
vxlan.c
183 int bkt; in mlx5_vxlan_reset_to_default() local
188 hash_for_each_safe(vxlan->htable, bkt, tmp, vxlanp, hlist) { in mlx5_vxlan_reset_to_default()
/linux/drivers/net/ethernet/mellanox/mlx5/core/en/
htb.c
37 int bkt, err; in mlx5e_htb_enumerate_leaves() local
39 hash_for_each(htb->qos_tc2node, bkt, node, hnode) { in mlx5e_htb_enumerate_leaves()
434 int bkt; in mlx5e_htb_node_find_by_qid() local
436 hash_for_each(htb->qos_tc2node, bkt, node, hnode) in mlx5e_htb_node_find_by_qid()
627 int bkt; in mlx5e_htb_update_children() local
629 hash_for_each(htb->qos_tc2node, bkt, child, hnode) { in mlx5e_htb_update_children()
/linux/arch/powerpc/kvm/
book3s_hv_uvmem.c
470 int srcu_idx, bkt; in kvmppc_h_svm_init_start() local
489 kvm_for_each_memslot(memslot, bkt, slots) { in kvmppc_h_svm_init_start()
497 kvm_for_each_memslot(m, bkt, slots) { in kvmppc_h_svm_init_start()
661 int srcu_idx, bkt; in kvmppc_h_svm_init_abort() local
676 kvm_for_each_memslot(memslot, bkt, kvm_memslots(kvm)) in kvmppc_h_svm_init_abort()
834 int srcu_idx, bkt; in kvmppc_h_svm_init_done() local
843 kvm_for_each_memslot(memslot, bkt, slots) { in kvmppc_h_svm_init_done()

Completed in 53 milliseconds
