/linux/fs/bcachefs/

movinggc.c
    151  move_buckets *buckets)  in bch2_copygc_get_buckets()  [argument]
    188  ret2 = darray_push(buckets, b);  in bch2_copygc_get_buckets()
    194  ret2 = buckets->nr >= nr_to_get;  in bch2_copygc_get_buckets()
    216  move_buckets buckets = { 0 };  in bch2_copygc()  [local]
    225  darray_for_each(buckets, i) {  in bch2_copygc()
    248  darray_exit(&buckets);  in bch2_copygc()
    326  struct buckets_in_flight *buckets;  in bch2_copygc_thread()  [local]
    331  if (!buckets)  in bch2_copygc_thread()
    336  kfree(buckets);  in bch2_copygc_thread()
    400  rhashtable_destroy(&buckets->table);  in bch2_copygc_thread()
    [all …]

journal_sb.c
     38  b[i] = le64_to_cpu(journal->buckets[i]);  in bch2_sb_journal_validate()
     79  prt_printf(out, " %llu", le64_to_cpu(journal->buckets[i]));  in bch2_sb_journal_to_text()
    192  u64 *buckets, unsigned nr)  in bch2_journal_buckets_to_sb()  [argument]
    207  if (buckets[i] + 1 != buckets[i + 1])  in bch2_journal_buckets_to_sb()
    217  j->d[dst].start = cpu_to_le64(buckets[0]);  in bch2_journal_buckets_to_sb()
    221  if (buckets[i] == buckets[i - 1] + 1) {  in bch2_journal_buckets_to_sb()
    225  j->d[dst].start = cpu_to_le64(buckets[i]);  in bch2_journal_buckets_to_sb()

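The bch2_journal_buckets_to_sb() hits above show how bcachefs packs a sorted list of journal bucket numbers into (start, nr) ranges: a run is extended while buckets[i] == buckets[i - 1] + 1, and a new range opens at each gap (lines 207 and 221). A minimal standalone sketch of that encoding; struct extent and buckets_to_extents() are illustrative names, not the on-disk superblock layout:

```c
#include <stdint.h>
#include <stdio.h>

struct extent { uint64_t start; uint64_t nr; };

/* Collapse a sorted array of bucket numbers into contiguous ranges.
 * Returns the number of extents written to 'out' (worst case: nr). */
static unsigned buckets_to_extents(const uint64_t *buckets, unsigned nr,
                                   struct extent *out)
{
	unsigned dst = 0;

	if (!nr)
		return 0;

	out[dst].start = buckets[0];
	out[dst].nr = 1;

	for (unsigned i = 1; i < nr; i++) {
		if (buckets[i] == buckets[i - 1] + 1) {
			out[dst].nr++;          /* extends the current run */
		} else {
			dst++;                  /* gap: open a new extent */
			out[dst].start = buckets[i];
			out[dst].nr = 1;
		}
	}
	return dst + 1;
}

int main(void)
{
	uint64_t b[] = { 10, 11, 12, 40, 41, 100 };
	struct extent e[6];
	unsigned n = buckets_to_extents(b, 6, e);

	for (unsigned i = 0; i < n; i++)        /* prints 10+3, 40+2, 100+1 */
		printf("%llu+%llu\n", (unsigned long long)e[i].start,
		       (unsigned long long)e[i].nr);
	return 0;
}
```
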
/linux/drivers/s390/scsi/

zfcp_reqlist.h
     26  struct list_head buckets[ZFCP_REQ_LIST_BUCKETS];  [member]
     52  INIT_LIST_HEAD(&rl->buckets[i]);  in zfcp_reqlist_alloc()
     68  if (!list_empty(&rl->buckets[i]))  in zfcp_reqlist_isempty()
     92  list_for_each_entry(req, &rl->buckets[i], list)  in _zfcp_reqlist_find()
    165  list_add_tail(&req->list, &rl->buckets[i]);  in zfcp_reqlist_add()
    182  list_splice_init(&rl->buckets[i], list);  in zfcp_reqlist_move()
    209  list_for_each_entry(req, &rl->buckets[i], list)  in zfcp_reqlist_apply_for_all()

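zfcp_reqlist is the simplest bucket pattern in this list: a fixed array of list heads, with every operation first selecting one bucket and then touching only that chain (lines 92 and 165 above). A userspace sketch of the same shape, assuming for illustration that requests hash by id modulo the bucket count (zfcp's real hash may differ):

```c
#include <stdlib.h>
#include <stdbool.h>

#define NBUCKETS 32	/* stand-in for ZFCP_REQ_LIST_BUCKETS */

struct req {
	unsigned long id;
	struct req *next;
};

struct reqlist {
	struct req *buckets[NBUCKETS];	/* one chain per bucket */
};

static unsigned bucket_of(unsigned long id)
{
	return id % NBUCKETS;	/* assumed hash, for illustration only */
}

static void reqlist_add(struct reqlist *rl, struct req *req)
{
	unsigned i = bucket_of(req->id);

	req->next = rl->buckets[i];	/* push onto the chain head */
	rl->buckets[i] = req;
}

static struct req *reqlist_find(struct reqlist *rl, unsigned long id)
{
	/* Only the one chain that can contain 'id' is scanned. */
	for (struct req *r = rl->buckets[bucket_of(id)]; r; r = r->next)
		if (r->id == id)
			return r;
	return NULL;
}

static bool reqlist_isempty(const struct reqlist *rl)
{
	/* Empty means every bucket's chain is empty, as on line 68. */
	for (unsigned i = 0; i < NBUCKETS; i++)
		if (rl->buckets[i])
			return false;
	return true;
}
```
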
/linux/tools/testing/selftests/bpf/

bpf_arena_htab.h
     14  htab_bucket_t *buckets;  [member]
     21  htab_bucket_t *b = htab->buckets;  in __select_bucket()
     95  void __arena *buckets = bpf_arena_alloc_pages(&arena, NULL, 2, NUMA_NO_NODE, 0);  in htab_init()  [local]
     97  cast_user(buckets);  in htab_init()
     98  htab->buckets = buckets;  in htab_init()

/linux/Documentation/networking/

nexthop-group-resilient.rst
     56  the buckets that held it are simply reassigned to other next hops::
     70  choose a subset of buckets that are currently not used for forwarding
     80  certain number of buckets, according to its weight and the number of
     86  Next hops that have fewer buckets than their wants count, are called
     98  buckets:
    112  of balance until idle buckets do appear, possibly never. If set to a
    117  item. Thus buckets:
    133  created and buckets populated for the first time.
    247  buckets 8 idle_timer 60 unbalanced_timer 300
    259  This can be confirmed by looking at individual buckets::
    [all …]

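The fragments above describe resilient hashing's invariant: each next hop "wants" a share of the buckets proportional to its weight, hops holding fewer than their wants count are underweight, and only buckets that have gone idle are migrated to them. A toy sketch of that rebalance step; all names here are hypothetical illustrations of the idea the document describes, not the kernel's implementation:

```c
#include <stdio.h>

#define NHOPS 3
#define NBUCKETS 8

/* Per-bucket state: which next hop it maps to, and whether traffic
 * has used it recently (only idle buckets are safe to reassign). */
struct bucket { int nhop; int idle; };

int main(void)
{
	int weight[NHOPS] = { 2, 1, 1 };
	struct bucket b[NBUCKETS];
	int have[NHOPS] = { 0 };
	int wants[NHOPS];
	int total = 0;

	for (int i = 0; i < NHOPS; i++)
		total += weight[i];

	/* Each hop wants buckets in proportion to its weight. */
	for (int i = 0; i < NHOPS; i++)
		wants[i] = NBUCKETS * weight[i] / total;

	/* Start unbalanced: everything on hop 0, half the buckets idle. */
	for (int i = 0; i < NBUCKETS; i++) {
		b[i].nhop = 0;
		b[i].idle = (i % 2 == 0);
		have[0]++;
	}

	/* Migrate idle buckets from overweight hops to underweight ones;
	 * busy buckets stay put, so flows keep their next hop. */
	for (int i = 0; i < NBUCKETS; i++) {
		if (!b[i].idle || have[b[i].nhop] <= wants[b[i].nhop])
			continue;
		for (int h = 0; h < NHOPS; h++) {
			if (have[h] < wants[h]) {
				have[b[i].nhop]--;
				b[i].nhop = h;
				have[h]++;
				break;
			}
		}
	}

	for (int i = 0; i < NBUCKETS; i++)
		printf("bucket %d -> nhop %d\n", i, b[i].nhop);
	return 0;
}
```

If no bucket ever goes idle, the group stays out of balance, which is exactly the situation the unbalanced_timer fragment (line 112) addresses.
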
/linux/tools/lib/bpf/

hashmap.h
     77  struct hashmap_entry **buckets;  [member]
    170  for (cur = map->buckets[bkt]; cur; cur = cur->next)
    182  for (cur = map->buckets[bkt]; \
    193  for (cur = map->buckets \
    194  ? map->buckets[hash_bits(map->hash_fn((_key), map->ctx), map->cap_bits)] \
    201  for (cur = map->buckets \
    202  ? map->buckets[hash_bits(map->hash_fn((_key), map->ctx), map->cap_bits)] \

hashmap.c
     45  map->buckets = NULL;  in hashmap__init()
     71  free(map->buckets);  in hashmap__clear()
     72  map->buckets = NULL;  in hashmap__clear()
    124  free(map->buckets);  in hashmap_grow()
    125  map->buckets = new_buckets;  in hashmap_grow()
    137  if (!map->buckets)  in hashmap_find_entry()
    140  for (prev_ptr = &map->buckets[hash], cur = *prev_ptr;  in hashmap_find_entry()
    200  hashmap_add_entry(&map->buckets[h], entry);  in hashmap_insert()

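Both this copy and its tools/perf/util mirror below chain entries off a bucket array indexed by hash_bits(), and hashmap_grow() (lines 124-125) replaces the bucket array wholesale, relinking every entry under the larger size. A simplified sketch of that rehash step with standalone types, not libbpf's actual struct hashmap API; the multiplier is the 64-bit Fibonacci constant that hash_bits() in this header also uses:

```c
#include <stdlib.h>

struct entry {
	unsigned long key;
	struct entry *next;
};

/* Keep the requested number of top bits of a multiplicative hash.
 * Assumes bits >= 1 (a shift by 64 would be undefined). */
static size_t hash_bits(unsigned long long h, unsigned bits)
{
	return (size_t)((h * 11400714819323198485llu) >> (64 - bits));
}

/* Double the bucket array and relink every entry into its new chain.
 * Returns 0 on success, -1 if allocation fails (old table untouched). */
static int grow(struct entry ***bucketsp, unsigned *cap_bitsp)
{
	unsigned new_bits = *cap_bitsp + 1;
	size_t old_cap = (size_t)1 << *cap_bitsp;
	size_t new_cap = (size_t)1 << new_bits;
	struct entry **old_tbl = *bucketsp;
	struct entry **new_tbl = calloc(new_cap, sizeof(*new_tbl));

	if (!new_tbl)
		return -1;

	for (size_t i = 0; i < old_cap; i++) {
		struct entry *cur = old_tbl[i];

		while (cur) {
			struct entry *next = cur->next; /* save before relinking */
			size_t h = hash_bits(cur->key, new_bits);

			cur->next = new_tbl[h];         /* push onto new chain */
			new_tbl[h] = cur;
			cur = next;
		}
	}

	free(old_tbl);          /* mirrors lines 124-125 above */
	*bucketsp = new_tbl;
	*cap_bitsp = new_bits;
	return 0;
}
```

After a grow, lookups still scan only one chain, as on line 140 above.
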
/linux/tools/perf/util/

hashmap.h
     77  struct hashmap_entry **buckets;  [member]
    170  for (cur = map->buckets[bkt]; cur; cur = cur->next)
    182  for (cur = map->buckets[bkt]; \
    193  for (cur = map->buckets \
    194  ? map->buckets[hash_bits(map->hash_fn((_key), map->ctx), map->cap_bits)] \
    201  for (cur = map->buckets \
    202  ? map->buckets[hash_bits(map->hash_fn((_key), map->ctx), map->cap_bits)] \

hashmap.c
     45  map->buckets = NULL;  in hashmap__init()
     71  free(map->buckets);  in hashmap__clear()
     72  map->buckets = NULL;  in hashmap__clear()
    124  free(map->buckets);  in hashmap_grow()
    125  map->buckets = new_buckets;  in hashmap_grow()
    137  if (!map->buckets)  in hashmap_find_entry()
    140  for (prev_ptr = &map->buckets[hash], cur = *prev_ptr;  in hashmap_find_entry()
    200  hashmap_add_entry(&map->buckets[h], entry);  in hashmap_insert()

/linux/drivers/md/dm-vdo/

priority-table.c
     44  struct bucket buckets[];  [member]
     69  struct bucket *bucket = &table->buckets[priority];  in vdo_make_priority_table()
    116  list_del_init(&table->buckets[priority].queue);  in vdo_reset_priority_table()
    134  list_move_tail(entry, &table->buckets[priority].queue);  in vdo_priority_table_enqueue()
    173  bucket = &table->buckets[top_priority];  in vdo_priority_table_dequeue()

int-map.c
    102  struct bucket *buckets;  [member]
    168  "struct int_map buckets", &map->buckets);  in allocate_buckets()
    220  vdo_free(vdo_forget(map->buckets));  in vdo_int_map_free()
    307  return &map->buckets[(hash * map->capacity) >> 32];  in select_bucket()
    394  struct bucket *entry = &old_map.buckets[i];  in resize_buckets()
    402  vdo_free(vdo_forget(map->buckets));  in resize_buckets()
    409  vdo_free(vdo_forget(old_map.buckets));  in resize_buckets()
    432  ptrdiff_t remaining = &map->buckets[map->bucket_count] - bucket;  in find_empty_bucket()

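select_bucket() (line 307) is the noteworthy line here: instead of hash % capacity, int-map scales a full-range 32-bit hash by the capacity and keeps the top 32 bits of the 64-bit product, mapping uniformly onto [0, capacity) with one multiply and one shift. A standalone sketch of the trick:

```c
#include <stdint.h>
#include <assert.h>

/* Scale a full-range 32-bit hash to an index in [0, capacity)
 * without division: treat hash / 2^32 as a fraction of the table. */
static uint32_t select_bucket(uint32_t hash, uint32_t capacity)
{
	return (uint32_t)(((uint64_t)hash * capacity) >> 32);
}

int main(void)
{
	assert(select_bucket(0, 1024) == 0);
	assert(select_bucket(UINT32_MAX, 1024) == 1023);        /* never out of range */
	assert(select_bucket(UINT32_C(1) << 31, 1024) == 512);  /* midpoint -> middle */
	return 0;
}
```

This only distributes well when the input genuinely spans the full 32-bit range, which is why the map hashes its keys before selecting a bucket.
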
/linux/block/

blk-stat.c
     83  for (bucket = 0; bucket < cb->buckets; bucket++)  in blk_stat_timer_fn()
     90  for (bucket = 0; bucket < cb->buckets; bucket++) {  in blk_stat_timer_fn()
    102  unsigned int buckets, void *data)  in blk_stat_alloc_callback()  [argument]
    110  cb->stat = kmalloc_array(buckets, sizeof(struct blk_rq_stat),  in blk_stat_alloc_callback()
    116  cb->cpu_stat = __alloc_percpu(buckets * sizeof(struct blk_rq_stat),  in blk_stat_alloc_callback()
    127  cb->buckets = buckets;  in blk_stat_alloc_callback()
    144  for (bucket = 0; bucket < cb->buckets; bucket++)  in blk_stat_add_callback()

/linux/tools/testing/selftests/drivers/net/netdevsim/

nexthop.sh
    213  $IP nexthop add id 10 group 1/2 type resilient buckets 4
    325  $IP nexthop add id 10 group 1/2 type resilient buckets 6
    353  $IP nexthop add id 10 group 1/2 type resilient buckets 6
    434  type resilient buckets 8 idle_timer 6
    535  type resilient buckets 8 $timer 4
    581  buckets 8 idle_timer 6 unbalanced_timer 10
    613  buckets 8 idle_timer 6 unbalanced_timer 10
    648  $IP nexthop add id 10 group 1/2 type resilient buckets 8
    672  buckets 8 idle_timer 120 unbalanced_timer 4
    707  buckets 8 idle_timer 120
    [all …]

/linux/lib/

hashtable_test.c
    189  int buckets[2];  in hashtable_test_hash_for_each_possible()  [local]
    223  buckets[y->key] = bkt;  in hashtable_test_hash_for_each_possible()
    230  if (buckets[0] == buckets[1]) {  in hashtable_test_hash_for_each_possible()
    244  int buckets[2];  in hashtable_test_hash_for_each_possible_safe()  [local]
    281  buckets[y->key] = bkt;  in hashtable_test_hash_for_each_possible_safe()
    288  if (buckets[0] == buckets[1]) {  in hashtable_test_hash_for_each_possible_safe()

/linux/net/ceph/crush/

crush.c
    111  if (map->buckets) {  in crush_destroy()
    114  if (map->buckets[b] == NULL)  in crush_destroy()
    116  crush_destroy_bucket(map->buckets[b]);  in crush_destroy()
    118  kfree(map->buckets);  in crush_destroy()

mapper.c
    531  itemtype = map->buckets[-1-item]->type;  in crush_choose_firstn()
    544  in = map->buckets[-1-item];  in crush_choose_firstn()
    568  map->buckets[-1-item],  in crush_choose_firstn()
    744  itemtype = map->buckets[-1-item]->type;  in crush_choose_indep()
    761  in = map->buckets[-1-item];  in crush_choose_indep()
    781  map->buckets[-1-item],  in crush_choose_indep()
    868  if (!map->buckets[b])  in crush_init_workspace()
    872  switch (map->buckets[b]->alg) {  in crush_init_workspace()
    951  map->buckets[-1-curstep->arg1])) {  in crush_do_rule()
   1035  map->buckets[bno],  in crush_do_rule()
    [all …]

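Every crush_choose_*() hit above indexes with map->buckets[-1-item]: in a CRUSH map, non-negative item ids denote devices (leaves) and negative ids denote interior buckets, and bucket id b is stored at array index -1-b, so ids -1, -2, -3, ... occupy slots 0, 1, 2, ... with no wasted entries. A two-helper sketch of the encoding (the helper names are mine, not CRUSH's):

```c
/* Map a negative CRUSH bucket id to its slot in the buckets array. */
static inline int crush_bucket_index(int id)
{
	return -1 - id;         /* id -1 -> slot 0, id -2 -> slot 1, ... */
}

/* Recover the bucket id from an array slot. */
static inline int crush_bucket_id(int index)
{
	return -1 - index;      /* the transform is its own inverse */
}
```

The sign of an item id is therefore enough to decide whether a choose step descends into another bucket or has reached a device.
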
/linux/net/netfilter/ipvs/

ip_vs_sh.c
     70  struct ip_vs_sh_bucket buckets[IP_VS_SH_TAB_SIZE];  [member]
    108  struct ip_vs_dest *dest = rcu_dereference(s->buckets[hash].dest);  in ip_vs_sh_get()
    130  dest = rcu_dereference(s->buckets[ihash].dest);  in ip_vs_sh_get_fallback()
    145  dest = rcu_dereference(s->buckets[hash].dest);  in ip_vs_sh_get_fallback()
    172  b = &s->buckets[0];  in ip_vs_sh_reassign()
    216  b = &s->buckets[0];  in ip_vs_sh_flush()

ip_vs_dh.c
     64  struct ip_vs_dh_bucket buckets[IP_VS_DH_TAB_SIZE];  [member]
     90  return rcu_dereference(s->buckets[ip_vs_dh_hashkey(af, addr)].dest);  in ip_vs_dh_get()
    106  b = &s->buckets[0];  in ip_vs_dh_reassign()
    140  b = &s->buckets[0];  in ip_vs_dh_flush()

/linux/tools/testing/selftests/bpf/prog_tests/

arena_htab.c
     20  printf("htab %p buckets %p n_buckets %d\n", htab, htab->buckets, htab->n_buckets);  in test_arena_htab_common()
     21  ASSERT_OK_PTR(htab->buckets, "htab->buckets shouldn't be NULL");  in test_arena_htab_common()
     22  for (i = 0; htab->buckets && i < 16; i += 4) {  in test_arena_htab_common()

/linux/drivers/net/ethernet/mellanox/mlx5/core/ipoib/

ipoib_vlan.c
     45  struct hlist_head buckets[1 << MLX5I_MAX_LOG_PKEY_SUP];  [member]
     71  static struct qpn_to_netdev *mlx5i_find_qpn_to_netdev_node(struct hlist_head *buckets,  in mlx5i_find_qpn_to_netdev_node()  [argument]
     74  struct hlist_head *h = &buckets[hash_32(qpn, MLX5I_MAX_LOG_PKEY_SUP)];  in mlx5i_find_qpn_to_netdev_node()
     99  hlist_add_head(&new_node->hlist, &ht->buckets[key]);  in mlx5i_pkey_add_qpn()
    112  node = mlx5i_find_qpn_to_netdev_node(ht->buckets, qpn);  in mlx5i_pkey_del_qpn()
    131  node = mlx5i_find_qpn_to_netdev_node(ipriv->qpn_htbl->buckets, qpn);  in mlx5i_pkey_get_netdev()

/linux/drivers/md/

dm-region-hash.c
     73  struct list_head *buckets;  [member]
    209  rh->buckets = vmalloc(array_size(nr_buckets, sizeof(*rh->buckets)));  in dm_region_hash_create()
    210  if (!rh->buckets) {  in dm_region_hash_create()
    217  INIT_LIST_HEAD(rh->buckets + i);  in dm_region_hash_create()
    231  vfree(rh->buckets);  in dm_region_hash_create()
    247  list_for_each_entry_safe(reg, nreg, rh->buckets + h,  in dm_region_hash_destroy()
    258  vfree(rh->buckets);  in dm_region_hash_destroy()
    277  struct list_head *bucket = rh->buckets + rh_hash(rh, region);  in __rh_lookup()
    288  list_add(&reg->hash_list, rh->buckets + rh_hash(rh, reg->key));  in __rh_insert()

/linux/tools/perf/

builtin-ftrace.c
    786  buckets[i]++;  in make_histogram()
    806  total += buckets[i];  in display_histogram()
    816  bar_len = buckets[0] * bar_total / total;  in display_histogram()
    830  bar_len = buckets[i] * bar_total / total;  in display_histogram()
    832  start, stop, unit, buckets[i], bar_len, bar,  in display_histogram()
    836  bar_len = buckets[NUM_BUCKET - 1] * bar_total / total;  in display_histogram()
    908  return perf_ftrace__latency_read_bpf(ftrace, buckets);  in read_func_latency()
    930  int buckets[NUM_BUCKET] = { };  in __cmd_latency()  [local]
    971  make_histogram(buckets, buf, n, line, ftrace->use_nsec);  in __cmd_latency()
    974  read_func_latency(ftrace, buckets);  in __cmd_latency()
    [all …]

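These builtin-ftrace.c hits are perf's function-latency histogram: make_histogram() increments one of NUM_BUCKET counters per sample, and display_histogram() scales each count to a bar length relative to the total (lines 816-836). A simplified sketch of that pair, assuming log2-width buckets; NUM_BUCKET here is a stand-in value, and perf's real code also special-cases the first and last rows:

```c
#include <stdio.h>

#define NUM_BUCKET 22	/* stand-in; the idea, not necessarily perf's value */

/* Log2 bucketing: bucket 0 is < 1 unit, bucket i covers [2^(i-1), 2^i),
 * and the last bucket catches everything larger. */
static void account(int *buckets, unsigned long long delta)
{
	int i = 0;

	while (delta >= (1ULL << i) && i < NUM_BUCKET - 1)
		i++;
	buckets[i]++;
}

/* Render each bucket as a bar proportional to its share of all samples.
 * bar_total must be <= 40 (the length of the '#' string below). */
static void display(const int *buckets, int bar_total)
{
	long long total = 0;

	for (int i = 0; i < NUM_BUCKET; i++)
		total += buckets[i];
	if (!total)
		return;

	for (int i = 0; i < NUM_BUCKET; i++) {
		int bar_len = (int)((long long)buckets[i] * bar_total / total);

		/* The last row's upper bound is nominal: it is a catch-all. */
		printf("%10llu - %-10llu: %8d |%.*s|\n",
		       i ? 1ULL << (i - 1) : 0, 1ULL << i, buckets[i],
		       bar_len,
		       "########################################");
	}
}

int main(void)
{
	int buckets[NUM_BUCKET] = { 0 };

	account(buckets, 3);	/* lands in the [2, 4) bucket */
	account(buckets, 3);
	account(buckets, 100);
	display(buckets, 40);
	return 0;
}
```
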
/linux/fs/nfs/

pnfs_nfs.c
    102  p = kmalloc(struct_size(p, buckets, n), gfp_flags);  in pnfs_alloc_commit_array()
    109  for (b = &p->buckets[0]; n != 0; b++, n--) {  in pnfs_alloc_commit_array()
    259  struct pnfs_commit_bucket *buckets,  in pnfs_bucket_scan_array()  [argument]
    288  cnt = pnfs_bucket_scan_array(cinfo, array->buckets,  in pnfs_generic_scan_commit_lists()
    304  struct pnfs_commit_bucket *buckets,  in pnfs_bucket_recover_commit_reqs()  [argument]
    314  for (i = 0, b = buckets; i < nbuckets; i++, b++) {  in pnfs_bucket_recover_commit_reqs()
    343  array->buckets,  in pnfs_generic_recover_commit_reqs()
    392  for (bucket = buckets; idx < nbuckets; bucket++, idx++) {  in pnfs_generic_retry_commit()
    405  struct pnfs_commit_bucket *buckets,  in pnfs_bucket_alloc_ds_commits()  [argument]
    432  pnfs_generic_retry_commit(buckets, nbuckets, cinfo, i);  in pnfs_bucket_alloc_ds_commits()
    [all …]

nfs42xattr.c
     70  struct nfs4_xattr_bucket buckets[NFS4_XATTR_HASH_SIZE];  [member]
    111  INIT_HLIST_HEAD(&cache->buckets[i].hlist);  in nfs4_xattr_hash_init()
    112  spin_lock_init(&cache->buckets[i].lock);  in nfs4_xattr_hash_init()
    113  cache->buckets[i].cache = cache;  in nfs4_xattr_hash_init()
    114  cache->buckets[i].draining = false;  in nfs4_xattr_hash_init()
    276  if (WARN_ON(!hlist_empty(&cache->buckets[i].hlist)))  in nfs4_xattr_free_cache_cb()
    278  cache->buckets[i].draining = false;  in nfs4_xattr_free_cache_cb()
    394  bucket = &cache->buckets[i];  in nfs4_xattr_discard_cache()
    506  return &cache->buckets[jhash(name, strlen(name), 0) &  in nfs4_xattr_hash_bucket()
    507  (ARRAY_SIZE(cache->buckets) - 1)];  in nfs4_xattr_hash_bucket()
    [all …]

/linux/kernel/bpf/

bpf_local_storage.c
     24  return &smap->buckets[hash_ptr(selem, smap->bucket_log)];  in select_bucket()
    751  usage += sizeof(*smap->buckets) * (1ULL << smap->bucket_log);  in bpf_local_storage_map_mem_usage()
    785  smap->buckets = bpf_map_kvcalloc(&smap->map, nbuckets,  in bpf_local_storage_map_alloc()
    786  sizeof(*smap->buckets), GFP_USER | __GFP_NOWARN);  in bpf_local_storage_map_alloc()
    787  if (!smap->buckets) {  in bpf_local_storage_map_alloc()
    793  INIT_HLIST_HEAD(&smap->buckets[i].list);  in bpf_local_storage_map_alloc()
    794  raw_spin_lock_init(&smap->buckets[i].lock);  in bpf_local_storage_map_alloc()
    817  kvfree(smap->buckets);  in bpf_local_storage_map_alloc()
    850  b = &smap->buckets[i];  in bpf_local_storage_map_free()
    889  kvfree(smap->buckets);  in bpf_local_storage_map_free()