Lines matching refs:htab — cross-reference listing for the BPF hash table implementation (the hits below appear to come from the Linux kernel's kernel/bpf/hashtab.c; each entry gives the source line number, the matching line, and the enclosing function).
116 struct bpf_htab *htab; member
130 static inline bool htab_is_prealloc(const struct bpf_htab *htab) in htab_is_prealloc() argument
132 return !(htab->map.map_flags & BPF_F_NO_PREALLOC); in htab_is_prealloc()
135 static inline bool htab_use_raw_lock(const struct bpf_htab *htab) in htab_use_raw_lock() argument
137 return (!IS_ENABLED(CONFIG_PREEMPT_RT) || htab_is_prealloc(htab)); in htab_use_raw_lock()
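htab_is_prealloc() inverts BPF_F_NO_PREALLOC because preallocation is the default, and htab_use_raw_lock() permits the raw (non-sleeping) bucket spinlock everywhere except on PREEMPT_RT kernels with on-demand allocation, where memory allocation may happen under the lock and can sleep. A minimal userspace sketch of the two predicates (struct and helper names invented; the flag value mirrors the UAPI bit but treat it as illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    #define BPF_F_NO_PREALLOC (1u << 0)     /* illustrative copy of the UAPI bit */

    struct fake_htab { unsigned int map_flags; };

    static bool is_prealloc(const struct fake_htab *h)
    {
        return !(h->map_flags & BPF_F_NO_PREALLOC); /* prealloc is the default */
    }

    static bool use_raw_lock(const struct fake_htab *h, bool preempt_rt)
    {
        /* raw spinlock unless RT + on-demand allocation under the lock */
        return !preempt_rt || is_prealloc(h);
    }

    int main(void)
    {
        struct fake_htab h = { .map_flags = BPF_F_NO_PREALLOC };

        printf("prealloc=%d raw_lock_on_rt=%d\n",
               is_prealloc(&h), use_raw_lock(&h, true));
        return 0;
    }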
140 static void htab_init_buckets(struct bpf_htab *htab) in htab_init_buckets() argument
144 for (i = 0; i < htab->n_buckets; i++) { in htab_init_buckets()
145 INIT_HLIST_NULLS_HEAD(&htab->buckets[i].head, i); in htab_init_buckets()
146 if (htab_use_raw_lock(htab)) { in htab_init_buckets()
147 raw_spin_lock_init(&htab->buckets[i].raw_lock); in htab_init_buckets()
148 lockdep_set_class(&htab->buckets[i].raw_lock, in htab_init_buckets()
149 &htab->lockdep_key); in htab_init_buckets()
151 spin_lock_init(&htab->buckets[i].lock); in htab_init_buckets()
152 lockdep_set_class(&htab->buckets[i].lock, in htab_init_buckets()
153 &htab->lockdep_key); in htab_init_buckets()
159 static inline int htab_lock_bucket(const struct bpf_htab *htab, in htab_lock_bucket() argument
168 if (unlikely(__this_cpu_inc_return(*(htab->map_locked[hash])) != 1)) { in htab_lock_bucket()
169 __this_cpu_dec(*(htab->map_locked[hash])); in htab_lock_bucket()
174 if (htab_use_raw_lock(htab)) in htab_lock_bucket()
183 static inline void htab_unlock_bucket(const struct bpf_htab *htab, in htab_unlock_bucket() argument
188 if (htab_use_raw_lock(htab)) in htab_unlock_bucket()
192 __this_cpu_dec(*(htab->map_locked[hash])); in htab_unlock_bucket()
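htab_lock_bucket() guards against self-deadlock: before taking the bucket lock it increments a per-CPU counter selected by the hash, and if the increment does not return 1 the same CPU is already inside that lock class (for example a tracing BPF program re-entered the map), so the operation bails out with -EBUSY instead of spinning on itself. A single-threaded userspace sketch of that counter discipline, with a plain array standing in for the per-CPU counters (names and the lock-class count follow the kernel's shape but are used illustratively):

    #include <errno.h>
    #include <stdio.h>

    #define MAP_LOCK_COUNT 8
    #define MAP_LOCK_MASK  (MAP_LOCK_COUNT - 1)

    static int map_locked[MAP_LOCK_COUNT];  /* stand-in for per-CPU counters */

    static int lock_bucket(unsigned int hash)
    {
        hash &= MAP_LOCK_MASK;
        if (++map_locked[hash] != 1) {      /* already held on this CPU */
            --map_locked[hash];
            return -EBUSY;
        }
        /* ... take the bucket (raw) spinlock here ... */
        return 0;
    }

    static void unlock_bucket(unsigned int hash)
    {
        hash &= MAP_LOCK_MASK;
        /* ... release the bucket spinlock here ... */
        --map_locked[hash];
    }

    int main(void)
    {
        printf("first attempt:  %d\n", lock_bucket(5)); /* 0 */
        printf("nested attempt: %d\n", lock_bucket(5)); /* -EBUSY */
        unlock_bucket(5);
        return 0;
    }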
198 static bool htab_is_lru(const struct bpf_htab *htab) in htab_is_lru() argument
200 return htab->map.map_type == BPF_MAP_TYPE_LRU_HASH || in htab_is_lru()
201 htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH; in htab_is_lru()
204 static bool htab_is_percpu(const struct bpf_htab *htab) in htab_is_percpu() argument
206 return htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH || in htab_is_percpu()
207 htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH; in htab_is_percpu()
226 static struct htab_elem *get_htab_elem(struct bpf_htab *htab, int i) in get_htab_elem() argument
228 return (struct htab_elem *) (htab->elems + i * (u64)htab->elem_size); in get_htab_elem()
231 static bool htab_has_extra_elems(struct bpf_htab *htab) in htab_has_extra_elems() argument
233 return !htab_is_percpu(htab) && !htab_is_lru(htab); in htab_has_extra_elems()
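get_htab_elem() indexes a single flat allocation of fixed-size slots: slot i lives at htab->elems + i * elem_size, and the multiplication is widened to u64 so max_entries * elem_size cannot wrap 32-bit arithmetic. htab_has_extra_elems() is true only for plain (non-LRU, non-per-CPU) hash maps, which keep one spare element per CPU for overwrite-in-place updates. A compilable model of the slot arithmetic (struct layout invented):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct elem { uint32_t hash; char key_and_value[]; }; /* variable tail */

    static struct elem *get_elem(char *elems, uint32_t elem_size, int i)
    {
        /* widen before multiplying, as the kernel does with the (u64) cast */
        return (struct elem *)(elems + (uint64_t)i * elem_size);
    }

    int main(void)
    {
        uint32_t elem_size = sizeof(struct elem) + 16; /* header + padded key/value */
        char *elems = calloc(4, elem_size);

        get_elem(elems, elem_size, 2)->hash = 0xabcd;
        printf("slot 2 hash=%x\n", get_elem(elems, elem_size, 2)->hash);
        free(elems);
        return 0;
    }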
236 static void htab_free_prealloced_timers(struct bpf_htab *htab) in htab_free_prealloced_timers() argument
238 u32 num_entries = htab->map.max_entries; in htab_free_prealloced_timers()
241 if (likely(!map_value_has_timer(&htab->map))) in htab_free_prealloced_timers()
243 if (htab_has_extra_elems(htab)) in htab_free_prealloced_timers()
249 elem = get_htab_elem(htab, i); in htab_free_prealloced_timers()
251 round_up(htab->map.key_size, 8) + in htab_free_prealloced_timers()
252 htab->map.timer_off); in htab_free_prealloced_timers()
257 static void htab_free_elems(struct bpf_htab *htab) in htab_free_elems() argument
261 if (!htab_is_percpu(htab)) in htab_free_elems()
264 for (i = 0; i < htab->map.max_entries; i++) { in htab_free_elems()
267 pptr = htab_elem_get_ptr(get_htab_elem(htab, i), in htab_free_elems()
268 htab->map.key_size); in htab_free_elems()
273 bpf_map_area_free(htab->elems); in htab_free_elems()
287 static struct htab_elem *prealloc_lru_pop(struct bpf_htab *htab, void *key, in prealloc_lru_pop() argument
290 struct bpf_lru_node *node = bpf_lru_pop_free(&htab->lru, hash); in prealloc_lru_pop()
294 u32 key_size = htab->map.key_size; in prealloc_lru_pop()
298 check_and_init_map_value(&htab->map, in prealloc_lru_pop()
306 static int prealloc_init(struct bpf_htab *htab) in prealloc_init() argument
308 u32 num_entries = htab->map.max_entries; in prealloc_init()
311 if (htab_has_extra_elems(htab)) in prealloc_init()
314 htab->elems = bpf_map_area_alloc((u64)htab->elem_size * num_entries, in prealloc_init()
315 htab->map.numa_node); in prealloc_init()
316 if (!htab->elems) in prealloc_init()
319 if (!htab_is_percpu(htab)) in prealloc_init()
323 u32 size = round_up(htab->map.value_size, 8); in prealloc_init()
326 pptr = bpf_map_alloc_percpu(&htab->map, size, 8, in prealloc_init()
330 htab_elem_set_ptr(get_htab_elem(htab, i), htab->map.key_size, in prealloc_init()
336 if (htab_is_lru(htab)) in prealloc_init()
337 err = bpf_lru_init(&htab->lru, in prealloc_init()
338 htab->map.map_flags & BPF_F_NO_COMMON_LRU, in prealloc_init()
342 htab); in prealloc_init()
344 err = pcpu_freelist_init(&htab->freelist); in prealloc_init()
349 if (htab_is_lru(htab)) in prealloc_init()
350 bpf_lru_populate(&htab->lru, htab->elems, in prealloc_init()
352 htab->elem_size, num_entries); in prealloc_init()
354 pcpu_freelist_populate(&htab->freelist, in prealloc_init()
355 htab->elems + offsetof(struct htab_elem, fnode), in prealloc_init()
356 htab->elem_size, num_entries); in prealloc_init()
361 htab_free_elems(htab); in prealloc_init()
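prealloc_init() carves the whole element array out of one bpf_map_area_alloc() and then donates every slot either to the LRU machinery or to a freelist. Note that the populate call is handed htab->elems + offsetof(struct htab_elem, fnode): the freelist links through a node embedded inside each element, striding by elem_size. A userspace sketch of that populate step (freelist and element shapes invented; the kernel's pcpu_freelist is per-CPU and lock-protected):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct fnode { struct fnode *next; };
    struct elem  { uint64_t hdr; struct fnode fnode; char payload[8]; };

    /* thread each slot's embedded node onto a singly linked freelist */
    static struct fnode *populate(char *first_node, uint32_t elem_size, int n)
    {
        struct fnode *head = NULL;

        for (int i = 0; i < n; i++) {
            struct fnode *node =
                (struct fnode *)(first_node + (uint64_t)i * elem_size);
            node->next = head;
            head = node;
        }
        return head;
    }

    int main(void)
    {
        char *elems = calloc(3, sizeof(struct elem));
        struct fnode *head = populate(elems + offsetof(struct elem, fnode),
                                      sizeof(struct elem), 3);
        int free_slots = 0;

        for (struct fnode *f = head; f; f = f->next)
            free_slots++;
        printf("free slots: %d\n", free_slots);
        free(elems);
        return 0;
    }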
365 static void prealloc_destroy(struct bpf_htab *htab) in prealloc_destroy() argument
367 htab_free_elems(htab); in prealloc_destroy()
369 if (htab_is_lru(htab)) in prealloc_destroy()
370 bpf_lru_destroy(&htab->lru); in prealloc_destroy()
372 pcpu_freelist_destroy(&htab->freelist); in prealloc_destroy()
375 static int alloc_extra_elems(struct bpf_htab *htab) in alloc_extra_elems() argument
381 pptr = bpf_map_alloc_percpu(&htab->map, sizeof(struct htab_elem *), 8, in alloc_extra_elems()
387 l = pcpu_freelist_pop(&htab->freelist); in alloc_extra_elems()
394 htab->extra_elems = pptr; in alloc_extra_elems()
415 BUILD_BUG_ON(offsetof(struct htab_elem, htab) != in htab_map_alloc_check()
475 struct bpf_htab *htab; in htab_map_alloc() local
478 htab = kzalloc(sizeof(*htab), GFP_USER | __GFP_ACCOUNT); in htab_map_alloc()
479 if (!htab) in htab_map_alloc()
482 lockdep_register_key(&htab->lockdep_key); in htab_map_alloc()
484 bpf_map_init_from_attr(&htab->map, attr); in htab_map_alloc()
491 htab->map.max_entries = roundup(attr->max_entries, in htab_map_alloc()
493 if (htab->map.max_entries < attr->max_entries) in htab_map_alloc()
494 htab->map.max_entries = rounddown(attr->max_entries, in htab_map_alloc()
499 htab->n_buckets = roundup_pow_of_two(htab->map.max_entries); in htab_map_alloc()
501 htab->elem_size = sizeof(struct htab_elem) + in htab_map_alloc()
502 round_up(htab->map.key_size, 8); in htab_map_alloc()
504 htab->elem_size += sizeof(void *); in htab_map_alloc()
506 htab->elem_size += round_up(htab->map.value_size, 8); in htab_map_alloc()
510 if (htab->n_buckets == 0 || in htab_map_alloc()
511 htab->n_buckets > U32_MAX / sizeof(struct bucket)) in htab_map_alloc()
515 htab->buckets = bpf_map_area_alloc(htab->n_buckets * in htab_map_alloc()
517 htab->map.numa_node); in htab_map_alloc()
518 if (!htab->buckets) in htab_map_alloc()
522 htab->map_locked[i] = bpf_map_alloc_percpu(&htab->map, in htab_map_alloc()
526 if (!htab->map_locked[i]) in htab_map_alloc()
530 if (htab->map.map_flags & BPF_F_ZERO_SEED) in htab_map_alloc()
531 htab->hashrnd = 0; in htab_map_alloc()
533 htab->hashrnd = get_random_int(); in htab_map_alloc()
535 htab_init_buckets(htab); in htab_map_alloc()
538 err = prealloc_init(htab); in htab_map_alloc()
546 err = alloc_extra_elems(htab); in htab_map_alloc()
552 return &htab->map; in htab_map_alloc()
555 prealloc_destroy(htab); in htab_map_alloc()
558 free_percpu(htab->map_locked[i]); in htab_map_alloc()
559 bpf_map_area_free(htab->buckets); in htab_map_alloc()
561 lockdep_unregister_key(&htab->lockdep_key); in htab_map_alloc()
562 kfree(htab); in htab_map_alloc()
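htab_map_alloc() does all the sizing up front: per-CPU LRU maps round max_entries up to a multiple of the possible-CPU count (falling back to rounding down on overflow), n_buckets becomes the next power of two at or above max_entries, and elem_size is the element header plus the key rounded to 8 bytes plus either a per-CPU pointer or the 8-byte-rounded value. The n_buckets > U32_MAX / sizeof(struct bucket) guard rejects tables whose bucket-array byte size would overflow. A compilable model of the arithmetic (the header and bucket sizes are invented placeholders):

    #include <stdint.h>
    #include <stdio.h>

    struct bucket { void *head; unsigned long lock; }; /* stand-in layout */

    static uint32_t roundup_pow_of_two(uint32_t n)     /* models the kernel helper */
    {
        uint32_t p = 1;

        while (p < n)
            p <<= 1;
        return p;
    }

    int main(void)
    {
        uint32_t max_entries = 1000, key_size = 12, value_size = 20;
        int percpu = 0;
        uint32_t n_buckets = roundup_pow_of_two(max_entries);
        uint32_t elem_size;

        if (n_buckets == 0 || n_buckets > UINT32_MAX / sizeof(struct bucket)) {
            fprintf(stderr, "bucket array would overflow\n");
            return 1;
        }
        elem_size = 48 /* stand-in for sizeof(struct htab_elem) */
                  + ((key_size + 7) & ~7u);
        elem_size += percpu ? sizeof(void *) : ((value_size + 7) & ~7u);
        printf("n_buckets=%u elem_size=%u\n", n_buckets, elem_size);
        return 0;
    }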
571 static inline struct bucket *__select_bucket(struct bpf_htab *htab, u32 hash) in __select_bucket() argument
573 return &htab->buckets[hash & (htab->n_buckets - 1)]; in __select_bucket()
576 static inline struct hlist_nulls_head *select_bucket(struct bpf_htab *htab, u32 hash) in select_bucket() argument
578 return &__select_bucket(htab, hash)->head; in select_bucket()
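__select_bucket() can mask instead of taking a modulo because n_buckets is always a power of two; hash & (n_buckets - 1) and hash % n_buckets agree exactly in that case, and the mask avoids a division on the lookup fast path. Quick demonstration:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t n_buckets = 1024;          /* power of two, as in the htab */
        uint32_t hash = 0xdeadbeef;

        printf("mask=%u mod=%u\n",
               hash & (n_buckets - 1), hash % n_buckets);
        return 0;
    }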
624 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in __htab_map_lookup_elem() local
634 hash = htab_map_hash(key, key_size, htab->hashrnd); in __htab_map_lookup_elem()
636 head = select_bucket(htab, hash); in __htab_map_lookup_elem()
638 l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets); in __htab_map_lookup_elem()
728 static void check_and_free_timer(struct bpf_htab *htab, struct htab_elem *elem) in check_and_free_timer() argument
730 if (unlikely(map_value_has_timer(&htab->map))) in check_and_free_timer()
732 round_up(htab->map.key_size, 8) + in check_and_free_timer()
733 htab->map.timer_off); in check_and_free_timer()
741 struct bpf_htab *htab = (struct bpf_htab *)arg; in htab_lru_map_delete_node() local
750 b = __select_bucket(htab, tgt_l->hash); in htab_lru_map_delete_node()
753 ret = htab_lock_bucket(htab, b, tgt_l->hash, &flags); in htab_lru_map_delete_node()
760 check_and_free_timer(htab, l); in htab_lru_map_delete_node()
764 htab_unlock_bucket(htab, b, tgt_l->hash, flags); in htab_lru_map_delete_node()
772 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in htab_map_get_next_key() local
785 hash = htab_map_hash(key, key_size, htab->hashrnd); in htab_map_get_next_key()
787 head = select_bucket(htab, hash); in htab_map_get_next_key()
790 l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets); in htab_map_get_next_key()
806 i = hash & (htab->n_buckets - 1); in htab_map_get_next_key()
811 for (; i < htab->n_buckets; i++) { in htab_map_get_next_key()
812 head = select_bucket(htab, i); in htab_map_get_next_key()
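htab_map_get_next_key() implements iteration without a cursor object: it re-hashes the caller's key, returns the next element in the same chain if there is one, otherwise advances to the first element of a later bucket; if the key is not found at all it restarts from bucket 0, and running off the last bucket yields -ENOENT. A toy userspace model of that control flow over -1 terminated chains (table shape invented; no locking or nulls rechecking):

    #include <errno.h>
    #include <stdio.h>

    #define N_BUCKETS 4

    /* a key lives in bucket (key & (N_BUCKETS - 1)) */
    static int buckets[N_BUCKETS][3] = {
        { 4, 8, -1 }, { -1 }, { 6, -1 }, { -1 },
    };

    static int get_next_key(int key, int *next_key)
    {
        unsigned int i = 0;
        unsigned int h = (unsigned int)key & (N_BUCKETS - 1);

        for (int j = 0; buckets[h][j] != -1; j++) {
            if (buckets[h][j] != key)
                continue;
            if (buckets[h][j + 1] != -1) {      /* successor in same chain */
                *next_key = buckets[h][j + 1];
                return 0;
            }
            i = h + 1;                          /* chain done: scan onward */
            break;
        }
        /* key missing: i is still 0, mirroring the kernel's find_first_elem */
        for (; i < N_BUCKETS; i++) {
            if (buckets[i][0] != -1) {
                *next_key = buckets[i][0];
                return 0;
            }
        }
        return -ENOENT;                         /* walked past the last bucket */
    }

    int main(void)
    {
        int key = 4, next;

        while (!get_next_key(key, &next)) {     /* prints 8, then 6 */
            printf("next: %d\n", next);
            key = next;
        }
        return 0;
    }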
828 static void htab_elem_free(struct bpf_htab *htab, struct htab_elem *l) in htab_elem_free() argument
830 if (htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH) in htab_elem_free()
831 free_percpu(htab_elem_get_ptr(l, htab->map.key_size)); in htab_elem_free()
832 check_and_free_timer(htab, l); in htab_elem_free()
839 struct bpf_htab *htab = l->htab; in htab_elem_free_rcu() local
841 htab_elem_free(htab, l); in htab_elem_free_rcu()
844 static void htab_put_fd_value(struct bpf_htab *htab, struct htab_elem *l) in htab_put_fd_value() argument
846 struct bpf_map *map = &htab->map; in htab_put_fd_value()
855 static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l) in free_htab_elem() argument
857 htab_put_fd_value(htab, l); in free_htab_elem()
859 if (htab_is_prealloc(htab)) { in free_htab_elem()
860 check_and_free_timer(htab, l); in free_htab_elem()
861 __pcpu_freelist_push(&htab->freelist, &l->fnode); in free_htab_elem()
863 atomic_dec(&htab->count); in free_htab_elem()
864 l->htab = htab; in free_htab_elem()
869 static void pcpu_copy_value(struct bpf_htab *htab, void __percpu *pptr, in pcpu_copy_value() argument
874 memcpy(this_cpu_ptr(pptr), value, htab->map.value_size); in pcpu_copy_value()
876 u32 size = round_up(htab->map.value_size, 8); in pcpu_copy_value()
887 static void pcpu_init_value(struct bpf_htab *htab, void __percpu *pptr, in pcpu_init_value() argument
896 if (htab_is_prealloc(htab) && !onallcpus) { in pcpu_init_value()
897 u32 size = round_up(htab->map.value_size, 8); in pcpu_init_value()
909 pcpu_copy_value(htab, pptr, value, onallcpus); in pcpu_init_value()
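For per-CPU maps the stored value is really one round_up(value_size, 8) slot per possible CPU. pcpu_copy_value() writes only the current CPU's slot, or all slots from a concatenated user buffer when onallcpus is set (syscall-side updates). pcpu_init_value() covers recycling: a preallocated element may still carry a previous owner's data, so when only the current CPU gets an explicit value the remaining slots are zero-filled. A userspace sketch with a flat buffer standing in for the per-CPU area (all names invented):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define NCPUS 4

    static void init_value(char *pptr, const char *value, uint32_t value_size,
                           int prealloc, int onallcpus, int this_cpu)
    {
        uint32_t size = (value_size + 7) & ~7u; /* round_up(value_size, 8) */

        if (prealloc && !onallcpus) {
            /* recycled slot: set this CPU, zero the others' stale data */
            for (int cpu = 0; cpu < NCPUS; cpu++) {
                if (cpu == this_cpu)
                    memcpy(pptr + (size_t)cpu * size, value, value_size);
                else
                    memset(pptr + (size_t)cpu * size, 0, size);
            }
        } else if (!onallcpus) {
            memcpy(pptr + (size_t)this_cpu * size, value, value_size);
        } else {
            for (int cpu = 0; cpu < NCPUS; cpu++) /* value: one slot per CPU */
                memcpy(pptr + (size_t)cpu * size,
                       value + (size_t)cpu * size, value_size);
        }
    }

    int main(void)
    {
        char *area = malloc(NCPUS * 8);
        char v[8] = "hello";

        memset(area, 0xff, NCPUS * 8);          /* simulate stale contents */
        init_value(area, v, sizeof(v), 1, 0, 1);
        printf("cpu0 first byte: %02x, cpu1 value: %s\n",
               (unsigned char)area[0], area + 8);
        free(area);
        return 0;
    }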
913 static bool fd_htab_map_needs_adjust(const struct bpf_htab *htab) in fd_htab_map_needs_adjust() argument
915 return htab->map.map_type == BPF_MAP_TYPE_HASH_OF_MAPS && in fd_htab_map_needs_adjust()
919 static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key, in alloc_htab_elem() argument
924 u32 size = htab->map.value_size; in alloc_htab_elem()
925 bool prealloc = htab_is_prealloc(htab); in alloc_htab_elem()
934 pl_new = this_cpu_ptr(htab->extra_elems); in alloc_htab_elem()
936 htab_put_fd_value(htab, old_elem); in alloc_htab_elem()
941 l = __pcpu_freelist_pop(&htab->freelist); in alloc_htab_elem()
947 if (atomic_inc_return(&htab->count) > htab->map.max_entries) in alloc_htab_elem()
957 l_new = bpf_map_kmalloc_node(&htab->map, htab->elem_size, in alloc_htab_elem()
959 htab->map.numa_node); in alloc_htab_elem()
964 check_and_init_map_value(&htab->map, in alloc_htab_elem()
975 pptr = bpf_map_alloc_percpu(&htab->map, size, 8, in alloc_htab_elem()
984 pcpu_init_value(htab, pptr, value, onallcpus); in alloc_htab_elem()
988 } else if (fd_htab_map_needs_adjust(htab)) { in alloc_htab_elem()
992 copy_map_value(&htab->map, in alloc_htab_elem()
1000 atomic_dec(&htab->count); in alloc_htab_elem()
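In the non-preallocated path, alloc_htab_elem() admits a new element with atomic_inc_return(&htab->count) and fails with -E2BIG once the result exceeds max_entries, decrementing again on the failure path. The one exception: when the insert replaces an existing key, the map may briefly go one element over, since the old element is freed immediately afterwards. A minimal C11 model of that admission check (names invented):

    #include <errno.h>
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_int count;
    static int max_entries = 2;

    /* 0 if a new element may be allocated, -E2BIG if the map is full */
    static int admit(bool replacing_old)
    {
        if (atomic_fetch_add(&count, 1) + 1 > max_entries && !replacing_old) {
            atomic_fetch_sub(&count, 1);    /* back the increment out */
            return -E2BIG;
        }
        return 0;           /* overshoot for a replacement is transient */
    }

    int main(void)
    {
        printf("%d\n", admit(false));       /* 0 */
        printf("%d\n", admit(false));       /* 0: map now full */
        printf("%d\n", admit(false));       /* -E2BIG */
        printf("%d\n", admit(true));        /* 0: overwrite may go one over */
        return 0;
    }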
1004 static int check_flags(struct bpf_htab *htab, struct htab_elem *l_old, in check_flags() argument
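check_flags() enforces the update-flag contract shared by all the update paths below: BPF_NOEXIST is insert-only (-EEXIST if the key is present), BPF_EXIST is update-only (-ENOENT if it is absent), and BPF_ANY accepts both; the kernel masks off BPF_F_LOCK before comparing. A compilable distillation (the enum values match the UAPI; the helper shape is simplified):

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    enum { BPF_ANY, BPF_NOEXIST, BPF_EXIST };   /* UAPI values 0, 1, 2 */

    static int check_flags(bool elem_exists, int map_flags)
    {
        if (elem_exists && map_flags == BPF_NOEXIST)
            return -EEXIST;     /* insert-only, but the key is already there */
        if (!elem_exists && map_flags == BPF_EXIST)
            return -ENOENT;     /* update-only, but the key is missing */
        return 0;               /* BPF_ANY always passes */
    }

    int main(void)
    {
        printf("%d %d %d\n",
               check_flags(true,  BPF_NOEXIST),
               check_flags(false, BPF_EXIST),
               check_flags(false, BPF_ANY));
        return 0;
    }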
1022 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in htab_map_update_elem() local
1039 hash = htab_map_hash(key, key_size, htab->hashrnd); in htab_map_update_elem()
1041 b = __select_bucket(htab, hash); in htab_map_update_elem()
1049 htab->n_buckets); in htab_map_update_elem()
1050 ret = check_flags(htab, l_old, map_flags); in htab_map_update_elem()
1066 ret = htab_lock_bucket(htab, b, hash, &flags); in htab_map_update_elem()
1072 ret = check_flags(htab, l_old, map_flags); in htab_map_update_elem()
1090 l_new = alloc_htab_elem(htab, key, value, key_size, hash, false, false, in htab_map_update_elem()
1104 if (!htab_is_prealloc(htab)) in htab_map_update_elem()
1105 free_htab_elem(htab, l_old); in htab_map_update_elem()
1107 check_and_free_timer(htab, l_old); in htab_map_update_elem()
1111 htab_unlock_bucket(htab, b, hash, flags); in htab_map_update_elem()
1115 static void htab_lru_push_free(struct bpf_htab *htab, struct htab_elem *elem) in htab_lru_push_free() argument
1117 check_and_free_timer(htab, elem); in htab_lru_push_free()
1118 bpf_lru_push_free(&htab->lru, &elem->lru_node); in htab_lru_push_free()
1124 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in htab_lru_map_update_elem() local
1141 hash = htab_map_hash(key, key_size, htab->hashrnd); in htab_lru_map_update_elem()
1143 b = __select_bucket(htab, hash); in htab_lru_map_update_elem()
1151 l_new = prealloc_lru_pop(htab, key, hash); in htab_lru_map_update_elem()
1154 copy_map_value(&htab->map, in htab_lru_map_update_elem()
1157 ret = htab_lock_bucket(htab, b, hash, &flags); in htab_lru_map_update_elem()
1163 ret = check_flags(htab, l_old, map_flags); in htab_lru_map_update_elem()
1178 htab_unlock_bucket(htab, b, hash, flags); in htab_lru_map_update_elem()
1181 htab_lru_push_free(htab, l_new); in htab_lru_map_update_elem()
1183 htab_lru_push_free(htab, l_old); in htab_lru_map_update_elem()
1192 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in __htab_percpu_map_update_elem() local
1209 hash = htab_map_hash(key, key_size, htab->hashrnd); in __htab_percpu_map_update_elem()
1211 b = __select_bucket(htab, hash); in __htab_percpu_map_update_elem()
1214 ret = htab_lock_bucket(htab, b, hash, &flags); in __htab_percpu_map_update_elem()
1220 ret = check_flags(htab, l_old, map_flags); in __htab_percpu_map_update_elem()
1226 pcpu_copy_value(htab, htab_elem_get_ptr(l_old, key_size), in __htab_percpu_map_update_elem()
1229 l_new = alloc_htab_elem(htab, key, value, key_size, in __htab_percpu_map_update_elem()
1239 htab_unlock_bucket(htab, b, hash, flags); in __htab_percpu_map_update_elem()
1247 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in __htab_lru_percpu_map_update_elem() local
1264 hash = htab_map_hash(key, key_size, htab->hashrnd); in __htab_lru_percpu_map_update_elem()
1266 b = __select_bucket(htab, hash); in __htab_lru_percpu_map_update_elem()
1275 l_new = prealloc_lru_pop(htab, key, hash); in __htab_lru_percpu_map_update_elem()
1280 ret = htab_lock_bucket(htab, b, hash, &flags); in __htab_lru_percpu_map_update_elem()
1286 ret = check_flags(htab, l_old, map_flags); in __htab_lru_percpu_map_update_elem()
1294 pcpu_copy_value(htab, htab_elem_get_ptr(l_old, key_size), in __htab_lru_percpu_map_update_elem()
1297 pcpu_init_value(htab, htab_elem_get_ptr(l_new, key_size), in __htab_lru_percpu_map_update_elem()
1304 htab_unlock_bucket(htab, b, hash, flags); in __htab_lru_percpu_map_update_elem()
1306 bpf_lru_push_free(&htab->lru, &l_new->lru_node); in __htab_lru_percpu_map_update_elem()
1326 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in htab_map_delete_elem() local
1339 hash = htab_map_hash(key, key_size, htab->hashrnd); in htab_map_delete_elem()
1340 b = __select_bucket(htab, hash); in htab_map_delete_elem()
1343 ret = htab_lock_bucket(htab, b, hash, &flags); in htab_map_delete_elem()
1351 free_htab_elem(htab, l); in htab_map_delete_elem()
1356 htab_unlock_bucket(htab, b, hash, flags); in htab_map_delete_elem()
1362 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in htab_lru_map_delete_elem() local
1375 hash = htab_map_hash(key, key_size, htab->hashrnd); in htab_lru_map_delete_elem()
1376 b = __select_bucket(htab, hash); in htab_lru_map_delete_elem()
1379 ret = htab_lock_bucket(htab, b, hash, &flags); in htab_lru_map_delete_elem()
1390 htab_unlock_bucket(htab, b, hash, flags); in htab_lru_map_delete_elem()
1392 htab_lru_push_free(htab, l); in htab_lru_map_delete_elem()
1396 static void delete_all_elements(struct bpf_htab *htab) in delete_all_elements() argument
1400 for (i = 0; i < htab->n_buckets; i++) { in delete_all_elements()
1401 struct hlist_nulls_head *head = select_bucket(htab, i); in delete_all_elements()
1407 htab_elem_free(htab, l); in delete_all_elements()
1412 static void htab_free_malloced_timers(struct bpf_htab *htab) in htab_free_malloced_timers() argument
1417 for (i = 0; i < htab->n_buckets; i++) { in htab_free_malloced_timers()
1418 struct hlist_nulls_head *head = select_bucket(htab, i); in htab_free_malloced_timers()
1423 check_and_free_timer(htab, l); in htab_free_malloced_timers()
1431 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in htab_map_free_timers() local
1433 if (likely(!map_value_has_timer(&htab->map))) in htab_map_free_timers()
1435 if (!htab_is_prealloc(htab)) in htab_map_free_timers()
1436 htab_free_malloced_timers(htab); in htab_map_free_timers()
1438 htab_free_prealloced_timers(htab); in htab_map_free_timers()
1444 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in htab_map_free() local
1456 if (!htab_is_prealloc(htab)) in htab_map_free()
1457 delete_all_elements(htab); in htab_map_free()
1459 prealloc_destroy(htab); in htab_map_free()
1461 free_percpu(htab->extra_elems); in htab_map_free()
1462 bpf_map_area_free(htab->buckets); in htab_map_free()
1464 free_percpu(htab->map_locked[i]); in htab_map_free()
1465 lockdep_unregister_key(&htab->lockdep_key); in htab_map_free()
1466 kfree(htab); in htab_map_free()
1494 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in __htab_map_lookup_and_delete_elem() local
1504 hash = htab_map_hash(key, key_size, htab->hashrnd); in __htab_map_lookup_and_delete_elem()
1505 b = __select_bucket(htab, hash); in __htab_map_lookup_and_delete_elem()
1508 ret = htab_lock_bucket(htab, b, hash, &bflags); in __htab_map_lookup_and_delete_elem()
1543 free_htab_elem(htab, l); in __htab_map_lookup_and_delete_elem()
1546 htab_unlock_bucket(htab, b, hash, bflags); in __htab_map_lookup_and_delete_elem()
1549 htab_lru_push_free(htab, l); in __htab_map_lookup_and_delete_elem()
1591 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in __htab_map_lookup_and_delete_batch() local
1628 if (batch >= htab->n_buckets) in __htab_map_lookup_and_delete_batch()
1631 key_size = htab->map.key_size; in __htab_map_lookup_and_delete_batch()
1632 roundup_key_size = round_up(htab->map.key_size, 8); in __htab_map_lookup_and_delete_batch()
1633 value_size = htab->map.value_size; in __htab_map_lookup_and_delete_batch()
1660 b = &htab->buckets[batch]; in __htab_map_lookup_and_delete_batch()
1664 ret = htab_lock_bucket(htab, b, batch, &flags); in __htab_map_lookup_and_delete_batch()
1684 htab_unlock_bucket(htab, b, batch, flags); in __htab_map_lookup_and_delete_batch()
1695 htab_unlock_bucket(htab, b, batch, flags); in __htab_map_lookup_and_delete_batch()
1741 free_htab_elem(htab, l); in __htab_map_lookup_and_delete_batch()
1748 htab_unlock_bucket(htab, b, batch, flags); in __htab_map_lookup_and_delete_batch()
1754 htab_lru_push_free(htab, l); in __htab_map_lookup_and_delete_batch()
1761 if (!bucket_cnt && (batch + 1 < htab->n_buckets)) { in __htab_map_lookup_and_delete_batch()
1778 if (batch >= htab->n_buckets) { in __htab_map_lookup_and_delete_batch()
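The batch op walks the table one bucket at a time: it locks bucket `batch`, copies that bucket's entries into the user buffer (deleting them too in the delete variant), unlocks, and reports the next unvisited bucket index back to userspace so the following syscall resumes there; batch >= n_buckets signals completion. A toy cursor model over -1 terminated chains (shape invented; the kernel additionally handles locking, per-CPU values, and retries with a larger internal buffer when a single bucket outgrows it, returning -ENOSPC if it cannot):

    #include <errno.h>
    #include <stdio.h>

    #define N_BUCKETS 4

    static int buckets[N_BUCKETS][4] = {    /* -1 terminated chains of keys */
        { 4, 8, -1 }, { -1 }, { 6, 2, -1 }, { -1 },
    };

    /* copy up to *countp keys starting at bucket in_batch; sets *out_batch
     * to the resume cursor, or returns -ENOENT when already past the end */
    static int lookup_batch(unsigned int in_batch, unsigned int *out_batch,
                            int *keys, unsigned int *countp)
    {
        unsigned int copied = 0, b = in_batch;

        if (b >= N_BUCKETS)
            return -ENOENT;
        while (b < N_BUCKETS && copied < *countp) {
            unsigned int len = 0;

            while (buckets[b][len] != -1)   /* whole buckets only */
                len++;
            if (copied + len > *countp)
                break;
            for (unsigned int j = 0; j < len; j++)
                keys[copied++] = buckets[b][j];
            b++;
        }
        *out_batch = b;
        *countp = copied;
        return 0;
    }

    int main(void)
    {
        int keys[2];
        unsigned int batch = 0, count;

        for (;;) {                          /* two keys per "syscall" */
            count = 2;
            if (lookup_batch(batch, &batch, keys, &count) || !count)
                break;
            for (unsigned int i = 0; i < count; i++)
                printf("key %d\n", keys[i]);
        }
        return 0;
    }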
1871 struct bpf_htab *htab; member
1881 const struct bpf_htab *htab = info->htab; in bpf_hash_map_seq_find_next() local
1890 if (bucket_id >= htab->n_buckets) in bpf_hash_map_seq_find_next()
1904 b = &htab->buckets[bucket_id++]; in bpf_hash_map_seq_find_next()
1909 for (i = bucket_id; i < htab->n_buckets; i++) { in bpf_hash_map_seq_find_next()
1910 b = &htab->buckets[i]; in bpf_hash_map_seq_find_next()
2027 seq_info->htab = container_of(map, struct bpf_htab, map); in bpf_iter_init_hash_map()
2055 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in bpf_for_each_hash_elem() local
2070 is_percpu = htab_is_percpu(htab); in bpf_for_each_hash_elem()
2078 for (i = 0; i < htab->n_buckets; i++) { in bpf_for_each_hash_elem()
2079 b = &htab->buckets[i]; in bpf_for_each_hash_elem()
2124 BATCH_OPS(htab),
2211 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in bpf_percpu_hash_update() local
2215 if (htab_is_lru(htab)) in bpf_percpu_hash_update()
2304 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in fd_htab_map_free() local
2310 for (i = 0; i < htab->n_buckets; i++) { in fd_htab_map_free()
2311 head = select_bucket(htab, i); in fd_htab_map_free()