Lines matching refs:tbl — every reference to the bucket-table pointer tbl in the kernel's resizable hash table code, lib/rhashtable.c. The leading number is the line in that file; "argument" and "local" mark where tbl is declared as a parameter or local variable of the named function.
38 const struct bucket_table *tbl, in head_hashfn() argument
41 return rht_head_hashfn(ht, tbl, he, ht->p); in head_hashfn()
53 int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash) in lockdep_rht_bucket_is_held() argument
57 if (unlikely(tbl->nest)) in lockdep_rht_bucket_is_held()
59 return bit_spin_is_locked(0, (unsigned long *)&tbl->buckets[hash]); in lockdep_rht_bucket_is_held()
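The matches in head_hashfn() and lockdep_rht_bucket_is_held() above cover the two things every bucket operation needs: the bucket index derived from the object's key, and the per-bucket lock, which lives in bit 0 of the bucket pointer itself. A minimal sketch of that relationship, written against the public struct bucket_table from include/linux/rhashtable.h; the example_* helpers are hypothetical, and the real rht_lock() additionally disables BHs and tells lockdep about the acquisition:

#include <linux/bit_spinlock.h>
#include <linux/jhash.h>
#include <linux/rhashtable.h>

/* Roughly what rht_key_hashfn()/rht_bucket_index() boil down to when no
 * custom hashfn is supplied: hash the key with the per-table random seed,
 * then mask by the table size, which is always a power of two.
 */
static inline unsigned int example_bucket_index(const struct bucket_table *tbl,
                                                const void *key, u32 key_len)
{
        u32 hash = jhash(key, key_len, tbl->hash_rnd);

        return hash & (tbl->size - 1);
}

/* Bit 0 of the bucket pointer doubles as the per-bucket spinlock; this is
 * the bit that lockdep_rht_bucket_is_held() tests above.
 */
static inline void example_lock_bucket(struct bucket_table *tbl, unsigned int hash)
{
        bit_spin_lock(0, (unsigned long *)&tbl->buckets[hash]);
}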
67 const struct bucket_table *tbl) in nested_table_top() argument
72 return (void *)rcu_dereference_protected(tbl->buckets[0], 1); in nested_table_top()
94 static void nested_bucket_table_free(const struct bucket_table *tbl) in nested_bucket_table_free() argument
96 unsigned int size = tbl->size >> tbl->nest; in nested_bucket_table_free()
97 unsigned int len = 1 << tbl->nest; in nested_bucket_table_free()
101 ntbl = nested_table_top(tbl); in nested_bucket_table_free()
109 static void bucket_table_free(const struct bucket_table *tbl) in bucket_table_free() argument
111 if (tbl->nest) in bucket_table_free()
112 nested_bucket_table_free(tbl); in bucket_table_free()
114 kvfree(tbl); in bucket_table_free()
153 struct bucket_table *tbl; in nested_bucket_table_alloc() local
159 size = sizeof(*tbl) + sizeof(tbl->buckets[0]); in nested_bucket_table_alloc()
161 tbl = alloc_hooks_tag(ht->alloc_tag, in nested_bucket_table_alloc()
163 if (!tbl) in nested_bucket_table_alloc()
166 if (!nested_table_alloc(ht, (union nested_table __rcu **)tbl->buckets, in nested_bucket_table_alloc()
168 kfree(tbl); in nested_bucket_table_alloc()
172 tbl->nest = (ilog2(nbuckets) - 1) % shift + 1; in nested_bucket_table_alloc()
174 return tbl; in nested_bucket_table_alloc()
181 struct bucket_table *tbl = NULL; in bucket_table_alloc() local
186 tbl = alloc_hooks_tag(ht->alloc_tag, in bucket_table_alloc()
187 kvmalloc_node_noprof(struct_size(tbl, buckets, nbuckets), in bucket_table_alloc()
192 if (tbl == NULL && !gfpflags_allow_blocking(gfp)) { in bucket_table_alloc()
193 tbl = nested_bucket_table_alloc(ht, nbuckets, gfp); in bucket_table_alloc()
197 if (tbl == NULL) in bucket_table_alloc()
200 lockdep_init_map(&tbl->dep_map, "rhashtable_bucket", &__key, 0); in bucket_table_alloc()
202 tbl->size = size; in bucket_table_alloc()
204 rcu_head_init(&tbl->rcu); in bucket_table_alloc()
205 INIT_LIST_HEAD(&tbl->walkers); in bucket_table_alloc()
207 tbl->hash_rnd = get_random_u32(); in bucket_table_alloc()
210 INIT_RHT_NULLS_HEAD(tbl->buckets[i]); in bucket_table_alloc()
212 return tbl; in bucket_table_alloc()
216 struct bucket_table *tbl) in rhashtable_last_table() argument
221 new_tbl = tbl; in rhashtable_last_table()
222 tbl = rht_dereference_rcu(tbl->future_tbl, ht); in rhashtable_last_table()
223 } while (tbl); in rhashtable_last_table()
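The rhashtable_last_table() matches above are the heart of how an in-progress rehash is followed: every table points at its successor through future_tbl, so the newest table is found by chasing that chain to the end. Reconstructed from the fragments above, the loop reads roughly:

static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,
                                                  struct bucket_table *tbl)
{
        struct bucket_table *new_tbl;

        /* Follow the future_tbl chain until no newer table exists. */
        do {
                new_tbl = tbl;
                tbl = rht_dereference_rcu(tbl->future_tbl, ht);
        } while (tbl);

        return new_tbl;
}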
232 struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht); in rhashtable_rehash_one()
283 struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht); in rhashtable_rehash_chain()
321 struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht); in rhashtable_rehash_table()
339 rcu_assign_pointer(ht->tbl, new_tbl); in rhashtable_rehash_table()
343 walker->tbl = NULL; in rhashtable_rehash_table()
396 struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht); in rhashtable_shrink()
417 struct bucket_table *tbl; in rht_deferred_worker() local
423 tbl = rht_dereference(ht->tbl, ht); in rht_deferred_worker()
424 tbl = rhashtable_last_table(ht, tbl); in rht_deferred_worker()
426 if (rht_grow_above_75(ht, tbl)) in rht_deferred_worker()
427 err = rhashtable_rehash_alloc(ht, tbl, tbl->size * 2); in rht_deferred_worker()
428 else if (ht->p.automatic_shrinking && rht_shrink_below_30(ht, tbl)) in rht_deferred_worker()
430 else if (tbl->nest) in rht_deferred_worker()
431 err = rhashtable_rehash_alloc(ht, tbl, tbl->size); in rht_deferred_worker()
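rht_deferred_worker() above drives all resizing from a workqueue: it doubles the newest table when occupancy passes 75%, shrinks when automatic_shrinking is set and occupancy drops below 30%, and rehashes a nested table back into a flat one of the same size. From the caller's point of view the only knobs involved are rhashtable_params fields; a hedged example of setting them (struct my_obj and its key/node members are made up for this sketch):

#include <linux/rhashtable.h>

struct my_obj {
        u32                     key;
        struct rhash_head       node;   /* hash chain linkage */
};

static const struct rhashtable_params my_params = {
        .key_len                = sizeof(u32),
        .key_offset             = offsetof(struct my_obj, key),
        .head_offset            = offsetof(struct my_obj, node),
        .automatic_shrinking    = true,         /* enables the 30% shrink path above */
        .min_size               = 16,           /* shrinking never goes below this */
        .max_size               = 1 << 16,      /* bounds growth from the 75% path */
};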
447 struct bucket_table *tbl) in rhashtable_insert_rehash() argument
454 old_tbl = rht_dereference_rcu(ht->tbl, ht); in rhashtable_insert_rehash()
456 size = tbl->size; in rhashtable_insert_rehash()
460 if (rht_grow_above_75(ht, tbl)) in rhashtable_insert_rehash()
463 else if (old_tbl != tbl) in rhashtable_insert_rehash()
472 err = rhashtable_rehash_attach(ht, tbl, new_tbl); in rhashtable_insert_rehash()
484 if (likely(rcu_access_pointer(tbl->future_tbl))) in rhashtable_insert_rehash()
496 struct bucket_table *tbl, unsigned int hash, in rhashtable_lookup_one() argument
508 rht_for_each_from(head, rht_ptr(bkt, tbl, hash), tbl, hash) { in rhashtable_lookup_one()
528 head = rht_dereference_bucket(head->next, tbl, hash); in rhashtable_lookup_one()
547 struct bucket_table *tbl, unsigned int hash, struct rhash_head *obj, in rhashtable_insert_one() argument
559 new_tbl = rht_dereference_rcu(tbl->future_tbl, ht); in rhashtable_insert_one()
566 if (unlikely(rht_grow_above_max(ht, tbl))) in rhashtable_insert_one()
569 if (unlikely(rht_grow_above_100(ht, tbl))) in rhashtable_insert_one()
572 head = rht_ptr(bkt, tbl, hash); in rhashtable_insert_one()
588 if (rht_grow_above_75(ht, tbl)) in rhashtable_insert_one()
598 struct bucket_table *tbl; in rhashtable_try_insert() local
604 new_tbl = rcu_dereference(ht->tbl); in rhashtable_try_insert()
607 tbl = new_tbl; in rhashtable_try_insert()
608 hash = rht_head_hashfn(ht, tbl, obj, ht->p); in rhashtable_try_insert()
609 if (rcu_access_pointer(tbl->future_tbl)) in rhashtable_try_insert()
611 bkt = rht_bucket_var(tbl, hash); in rhashtable_try_insert()
613 bkt = rht_bucket_insert(ht, tbl, hash); in rhashtable_try_insert()
615 new_tbl = rht_dereference_rcu(tbl->future_tbl, ht); in rhashtable_try_insert()
618 flags = rht_lock(tbl, bkt); in rhashtable_try_insert()
619 data = rhashtable_lookup_one(ht, bkt, tbl, in rhashtable_try_insert()
621 new_tbl = rhashtable_insert_one(ht, bkt, tbl, in rhashtable_try_insert()
626 rht_unlock(tbl, bkt, flags); in rhashtable_try_insert()
631 data = ERR_PTR(rhashtable_insert_rehash(ht, tbl) ?: in rhashtable_try_insert()
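rhashtable_try_insert() above is the slow path the public insert helpers funnel into: pick the current table, lock one bucket, check for an existing entry, link the new one, and fall back to rhashtable_insert_rehash() when a chain overflows or a new table is needed. Callers never touch any of that directly; a usage sketch of the public wrappers, reusing the hypothetical my_obj/my_params from the previous sketch:

/* Returns 0 on success or a negative errno (e.g. -ENOMEM, or -E2BIG once
 * the table cannot grow past max_size).  rhashtable_lookup_insert_fast()
 * is the variant that also rejects duplicate keys with -EEXIST.
 */
static int example_insert(struct rhashtable *ht, struct my_obj *obj)
{
        return rhashtable_insert_fast(ht, &obj->node, my_params);
}

/* rhashtable_lookup() requires the caller to hold rcu_read_lock(); the
 * object is only guaranteed to stay alive inside that section.
 * (rhashtable_lookup_fast() is the variant that takes the RCU lock itself.)
 */
static bool example_key_present(struct rhashtable *ht, u32 key)
{
        struct my_obj *obj;
        bool found;

        rcu_read_lock();
        obj = rhashtable_lookup(ht, &key, my_params);
        found = obj != NULL;
        rcu_read_unlock();

        return found;
}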
682 iter->walker.tbl = in rhashtable_walk_enter()
683 rcu_dereference_protected(ht->tbl, lockdep_is_held(&ht->lock)); in rhashtable_walk_enter()
684 list_add(&iter->walker.list, &iter->walker.tbl->walkers); in rhashtable_walk_enter()
698 if (iter->walker.tbl) in rhashtable_walk_exit()
731 if (iter->walker.tbl) in rhashtable_walk_start_check()
737 if (!iter->walker.tbl) { in rhashtable_walk_start_check()
738 iter->walker.tbl = rht_dereference_rcu(ht->tbl, ht); in rhashtable_walk_start_check()
751 rht_for_each_rcu(p, iter->walker.tbl, iter->slot) { in rhashtable_walk_start_check()
766 rht_for_each_rcu(p, iter->walker.tbl, iter->slot) { in rhashtable_walk_start_check()
797 struct bucket_table *tbl = iter->walker.tbl; in __rhashtable_walk_find_next() local
803 if (!tbl) in __rhashtable_walk_find_next()
806 for (; iter->slot < tbl->size; iter->slot++) { in __rhashtable_walk_find_next()
809 rht_for_each_rcu(p, tbl, iter->slot) { in __rhashtable_walk_find_next()
843 iter->walker.tbl = rht_dereference_rcu(tbl->future_tbl, ht); in __rhashtable_walk_find_next()
844 if (iter->walker.tbl) { in __rhashtable_walk_find_next()
942 struct bucket_table *tbl = iter->walker.tbl; in rhashtable_walk_stop() local
944 if (!tbl) in rhashtable_walk_stop()
950 if (rcu_head_after_call_rcu(&tbl->rcu, bucket_table_free_rcu)) in rhashtable_walk_stop()
952 iter->walker.tbl = NULL; in rhashtable_walk_stop()
954 list_add(&iter->walker.list, &tbl->walkers); in rhashtable_walk_stop()
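The walker matches above (rhashtable_walk_enter/exit/start_check, __rhashtable_walk_find_next, rhashtable_walk_stop) show why iter->walker.tbl is tracked so carefully: a walk can straddle a rehash, in which case the iterator moves on to future_tbl and the caller sees -EAGAIN and possibly duplicate objects. A sketch of how the walk API is meant to be driven, again using the hypothetical my_obj:

static void example_walk(struct rhashtable *ht)
{
        struct rhashtable_iter iter;
        struct my_obj *obj;

        rhashtable_walk_enter(ht, &iter);       /* links iter onto tbl->walkers */

        rhashtable_walk_start(&iter);           /* enters an RCU read-side section */
        while ((obj = rhashtable_walk_next(&iter)) != NULL) {
                if (IS_ERR(obj)) {
                        /* -EAGAIN: a rehash moved the walk to future_tbl;
                         * entries may be seen twice, so keep going. */
                        if (PTR_ERR(obj) == -EAGAIN)
                                continue;
                        break;
                }
                /* ... use obj ... */
        }
        rhashtable_walk_stop(&iter);            /* leaves RCU, re-links the walker */

        rhashtable_walk_exit(&iter);
}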
1026 struct bucket_table *tbl; in rhashtable_init_noprof() local
1071 tbl = bucket_table_alloc(ht, size, GFP_KERNEL); in rhashtable_init_noprof()
1072 if (unlikely(tbl == NULL)) { in rhashtable_init_noprof()
1074 tbl = bucket_table_alloc(ht, size, GFP_KERNEL | __GFP_NOFAIL); in rhashtable_init_noprof()
1079 RCU_INIT_POINTER(ht->tbl, tbl); in rhashtable_init_noprof()
1144 struct bucket_table *tbl, *next_tbl; in rhashtable_free_and_destroy() local
1150 tbl = rht_dereference(ht->tbl, ht); in rhashtable_free_and_destroy()
1153 for (i = 0; i < tbl->size; i++) { in rhashtable_free_and_destroy()
1157 for (pos = rht_ptr_exclusive(rht_bucket(tbl, i)), in rhashtable_free_and_destroy()
1168 next_tbl = rht_dereference(tbl->future_tbl, ht); in rhashtable_free_and_destroy()
1169 bucket_table_free(tbl); in rhashtable_free_and_destroy()
1171 tbl = next_tbl; in rhashtable_free_and_destroy()
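rhashtable_init_noprof() and rhashtable_free_and_destroy() above bracket the table's lifetime: the first bucket_table is allocated at init time (retrying with __GFP_NOFAIL if needed), and teardown walks every bucket of every table left on the future_tbl chain, optionally running a destructor on each entry before bucket_table_free(). The matching public calls, sketched with the same hypothetical my_params:

#include <linux/slab.h>

static void example_free_obj(void *ptr, void *arg)
{
        kfree(ptr);     /* ptr is the containing object, not the rhash_head */
}

static int example_lifetime(void)
{
        struct rhashtable ht;
        int err;

        err = rhashtable_init(&ht, &my_params);
        if (err)
                return err;

        /* ... insert/lookup/remove ... */

        /* Frees every remaining entry via the callback, then every
         * bucket_table on the chain. */
        rhashtable_free_and_destroy(&ht, example_free_obj, NULL);

        return 0;
}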
1185 const struct bucket_table *tbl, unsigned int hash) in __rht_bucket_nested() argument
1188 unsigned int index = hash & ((1 << tbl->nest) - 1); in __rht_bucket_nested()
1189 unsigned int size = tbl->size >> tbl->nest; in __rht_bucket_nested()
1193 ntbl = nested_table_top(tbl); in __rht_bucket_nested()
1194 ntbl = rht_dereference_bucket_rcu(ntbl[index].table, tbl, hash); in __rht_bucket_nested()
1195 subhash >>= tbl->nest; in __rht_bucket_nested()
1200 tbl, hash); in __rht_bucket_nested()
1214 const struct bucket_table *tbl, unsigned int hash) in rht_bucket_nested() argument
1220 return __rht_bucket_nested(tbl, hash) ?: &rhnull; in rht_bucket_nested()
1225 struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash) in rht_bucket_nested_insert() argument
1228 unsigned int index = hash & ((1 << tbl->nest) - 1); in rht_bucket_nested_insert()
1229 unsigned int size = tbl->size >> tbl->nest; in rht_bucket_nested_insert()
1232 ntbl = nested_table_top(tbl); in rht_bucket_nested_insert()
1233 hash >>= tbl->nest; in rht_bucket_nested_insert()
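__rht_bucket_nested() and rht_bucket_nested_insert() above show the index arithmetic of the nested layout used when a flat bucket array cannot be allocated: the low nest bits of the (already masked) bucket hash pick a slot in a page-sized directory, and the remaining bits walk the lower levels in page-sized chunks. A worked sketch of the common two-level case, assuming 4 KiB pages and 8-byte pointers so that shift = PAGE_SHIFT - ilog2(sizeof(void *)) = 9 and each level holds 512 slots; for nbuckets = 65536 that gives nest = (ilog2(65536) - 1) % 9 + 1 = 7, i.e. a 128-slot directory over 512-slot leaf tables. The union below mirrors the private definition in lib/rhashtable.c, and plain rcu_dereference_raw() stands in for the lockdep-checked helpers used in the real code:

/* Mirrors the private union at the top of lib/rhashtable.c. */
union nested_table {
        union nested_table __rcu *table;
        struct rhash_lock_head __rcu *bucket;
};

static struct rhash_lock_head __rcu **example_nested_slot(
        const struct bucket_table *tbl, unsigned int hash)
{
        /* hash is a bucket index, already masked to tbl->size - 1 */
        unsigned int index = hash & ((1 << tbl->nest) - 1);    /* directory slot */
        unsigned int subhash = hash >> tbl->nest;              /* leaf slot */
        union nested_table *ntbl;

        ntbl = (void *)rcu_dereference_raw(tbl->buckets[0]);   /* top-level page */
        ntbl = rcu_dereference_raw(ntbl[index].table);         /* leaf page */
        if (!ntbl)
                return NULL;

        return &ntbl[subhash].bucket;
}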