Lines matching references to htab in kernel/bpf/hashtab.c (the BPF hash table map implementation). Each entry shows the source line number, the matching code, and the enclosing function; continuation lines that do not mention htab are elided by the listing.

131 static inline bool htab_is_prealloc(const struct bpf_htab *htab)  in htab_is_prealloc()  argument
133 return !(htab->map.map_flags & BPF_F_NO_PREALLOC); in htab_is_prealloc()
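
The two matching lines are the entire body of this predicate, so modulo braces the full helper is presumably:

static inline bool htab_is_prealloc(const struct bpf_htab *htab)
{
        /* preallocation is the default; BPF_F_NO_PREALLOC opts out */
        return !(htab->map.map_flags & BPF_F_NO_PREALLOC);
}
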
136 static void htab_init_buckets(struct bpf_htab *htab) in htab_init_buckets() argument
140 for (i = 0; i < htab->n_buckets; i++) { in htab_init_buckets()
141 INIT_HLIST_NULLS_HEAD(&htab->buckets[i].head, i); in htab_init_buckets()
142 raw_spin_lock_init(&htab->buckets[i].raw_lock); in htab_init_buckets()
143 lockdep_set_class(&htab->buckets[i].raw_lock, in htab_init_buckets()
144 &htab->lockdep_key); in htab_init_buckets()
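
The loop body is fully covered by the matching lines; only the declarations are assumed in this reconstruction. Every bucket's raw spinlock shares the single per-map lockdep class (htab->lockdep_key), and the bucket index doubles as the "nulls" value of the list head, so a lockless lookup can detect that an element it followed was moved to a different bucket:

static void htab_init_buckets(struct bpf_htab *htab)
{
        unsigned int i;                 /* assumed, not among the matches */

        for (i = 0; i < htab->n_buckets; i++) {
                INIT_HLIST_NULLS_HEAD(&htab->buckets[i].head, i);
                raw_spin_lock_init(&htab->buckets[i].raw_lock);
                lockdep_set_class(&htab->buckets[i].raw_lock,
                                  &htab->lockdep_key);
        }
}
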
149 static inline int htab_lock_bucket(const struct bpf_htab *htab, in htab_lock_bucket() argument
155 hash = hash & min_t(u32, HASHTAB_MAP_LOCK_MASK, htab->n_buckets - 1); in htab_lock_bucket()
158 if (unlikely(__this_cpu_inc_return(*(htab->map_locked[hash])) != 1)) { in htab_lock_bucket()
159 __this_cpu_dec(*(htab->map_locked[hash])); in htab_lock_bucket()
170 static inline void htab_unlock_bucket(const struct bpf_htab *htab, in htab_unlock_bucket() argument
174 hash = hash & min_t(u32, HASHTAB_MAP_LOCK_MASK, htab->n_buckets - 1); in htab_unlock_bucket()
176 __this_cpu_dec(*(htab->map_locked[hash])); in htab_unlock_bucket()
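
htab_lock_bucket() pairs the bucket's raw spinlock with a per-CPU reentrancy counter. The hash is first folded onto a small array of per-CPU counters (HASHTAB_MAP_LOCK_MASK, capped by n_buckets - 1); if the increment does not return 1, this CPU is already inside a locked section of the same map, typically because a tracing BPF program interrupted an ongoing map operation, and the helper bails out with -EBUSY instead of deadlocking. A sketch: the trailing parameters are inferred from the call sites, and the preempt/spinlock lines are assumed since they do not reference htab and so do not appear above:

static inline int htab_lock_bucket(const struct bpf_htab *htab,
                                   struct bucket *b, u32 hash,
                                   unsigned long *pflags)
{
        unsigned long flags;

        hash = hash & min_t(u32, HASHTAB_MAP_LOCK_MASK, htab->n_buckets - 1);

        preempt_disable();
        if (unlikely(__this_cpu_inc_return(*(htab->map_locked[hash])) != 1)) {
                __this_cpu_dec(*(htab->map_locked[hash]));
                preempt_enable();
                return -EBUSY;          /* already locked on this CPU */
        }

        raw_spin_lock_irqsave(&b->raw_lock, flags);
        *pflags = flags;
        return 0;
}

htab_unlock_bucket() undoes the same three steps in reverse order.
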
182 static bool htab_is_lru(const struct bpf_htab *htab) in htab_is_lru() argument
184 return htab->map.map_type == BPF_MAP_TYPE_LRU_HASH || in htab_is_lru()
185 htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH; in htab_is_lru()
188 static bool htab_is_percpu(const struct bpf_htab *htab) in htab_is_percpu() argument
190 return htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH || in htab_is_percpu()
191 htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH; in htab_is_percpu()
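
These two predicates, together with htab_is_prealloc(), cover the four map types this file implements: BPF_MAP_TYPE_HASH, BPF_MAP_TYPE_PERCPU_HASH, BPF_MAP_TYPE_LRU_HASH and BPF_MAP_TYPE_LRU_PERCPU_HASH. From user space all four are created through the same libbpf call; a minimal sketch (map names and sizes are arbitrary; note the kernel rejects BPF_F_NO_PREALLOC for the LRU variants, which must stay preallocated):

#include <linux/bpf.h>
#include <bpf/bpf.h>

int create_hash_maps(void)
{
        /* on-demand element allocation for the plain hash map */
        LIBBPF_OPTS(bpf_map_create_opts, opts,
                    .map_flags = BPF_F_NO_PREALLOC);

        int hash_fd = bpf_map_create(BPF_MAP_TYPE_HASH, "h",
                                     sizeof(__u32), sizeof(__u64),
                                     1024, &opts);
        /* LRU map: preallocated, evicts old entries under pressure */
        int lru_fd = bpf_map_create(BPF_MAP_TYPE_LRU_HASH, "lru",
                                    sizeof(__u32), sizeof(__u64),
                                    1024, NULL);

        return (hash_fd < 0 || lru_fd < 0) ? -1 : 0;
}
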
210 static struct htab_elem *get_htab_elem(struct bpf_htab *htab, int i) in get_htab_elem() argument
212 return (struct htab_elem *) (htab->elems + i * (u64)htab->elem_size); in get_htab_elem()
215 static bool htab_has_extra_elems(struct bpf_htab *htab) in htab_has_extra_elems() argument
217 return !htab_is_percpu(htab) && !htab_is_lru(htab); in htab_has_extra_elems()
220 static void htab_free_prealloced_timers(struct bpf_htab *htab) in htab_free_prealloced_timers() argument
222 u32 num_entries = htab->map.max_entries; in htab_free_prealloced_timers()
225 if (!btf_record_has_field(htab->map.record, BPF_TIMER)) in htab_free_prealloced_timers()
227 if (htab_has_extra_elems(htab)) in htab_free_prealloced_timers()
233 elem = get_htab_elem(htab, i); in htab_free_prealloced_timers()
234 bpf_obj_free_timer(htab->map.record, elem->key + round_up(htab->map.key_size, 8)); in htab_free_prealloced_timers()
239 static void htab_free_prealloced_fields(struct bpf_htab *htab) in htab_free_prealloced_fields() argument
241 u32 num_entries = htab->map.max_entries; in htab_free_prealloced_fields()
244 if (IS_ERR_OR_NULL(htab->map.record)) in htab_free_prealloced_fields()
246 if (htab_has_extra_elems(htab)) in htab_free_prealloced_fields()
251 elem = get_htab_elem(htab, i); in htab_free_prealloced_fields()
252 bpf_obj_free_fields(htab->map.record, elem->key + round_up(htab->map.key_size, 8)); in htab_free_prealloced_fields()
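
Both teardown helpers compute the value address as elem->key + round_up(key_size, 8), which together with the elem_size arithmetic in htab_map_alloc() further down pins the element layout; illustrative only:

/*
 * Implied element layout:
 *
 *   struct htab_elem {
 *           ...bookkeeping (hash, list node / freelist node)...
 *           char key[];                      <- elem->key
 *   };
 *   value     = elem->key + round_up(key_size, 8)
 *   elem_size = sizeof(struct htab_elem)
 *             + round_up(key_size, 8)
 *             + round_up(value_size, 8)      for regular maps, or
 *             + sizeof(void *)               for percpu maps, which store
 *                                            a pointer to percpu data instead
 */
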
257 static void htab_free_elems(struct bpf_htab *htab) in htab_free_elems() argument
261 if (!htab_is_percpu(htab)) in htab_free_elems()
264 for (i = 0; i < htab->map.max_entries; i++) { in htab_free_elems()
267 pptr = htab_elem_get_ptr(get_htab_elem(htab, i), in htab_free_elems()
268 htab->map.key_size); in htab_free_elems()
273 bpf_map_area_free(htab->elems); in htab_free_elems()
287 static struct htab_elem *prealloc_lru_pop(struct bpf_htab *htab, void *key, in prealloc_lru_pop() argument
290 struct bpf_lru_node *node = bpf_lru_pop_free(&htab->lru, hash); in prealloc_lru_pop()
295 memcpy(l->key, key, htab->map.key_size); in prealloc_lru_pop()
302 static int prealloc_init(struct bpf_htab *htab) in prealloc_init() argument
304 u32 num_entries = htab->map.max_entries; in prealloc_init()
307 if (htab_has_extra_elems(htab)) in prealloc_init()
310 htab->elems = bpf_map_area_alloc((u64)htab->elem_size * num_entries, in prealloc_init()
311 htab->map.numa_node); in prealloc_init()
312 if (!htab->elems) in prealloc_init()
315 if (!htab_is_percpu(htab)) in prealloc_init()
319 u32 size = round_up(htab->map.value_size, 8); in prealloc_init()
322 pptr = bpf_map_alloc_percpu(&htab->map, size, 8, in prealloc_init()
326 htab_elem_set_ptr(get_htab_elem(htab, i), htab->map.key_size, in prealloc_init()
332 if (htab_is_lru(htab)) in prealloc_init()
333 err = bpf_lru_init(&htab->lru, in prealloc_init()
334 htab->map.map_flags & BPF_F_NO_COMMON_LRU, in prealloc_init()
338 htab); in prealloc_init()
340 err = pcpu_freelist_init(&htab->freelist); in prealloc_init()
345 if (htab_is_lru(htab)) in prealloc_init()
346 bpf_lru_populate(&htab->lru, htab->elems, in prealloc_init()
348 htab->elem_size, num_entries); in prealloc_init()
350 pcpu_freelist_populate(&htab->freelist, in prealloc_init()
351 htab->elems + offsetof(struct htab_elem, fnode), in prealloc_init()
352 htab->elem_size, num_entries); in prealloc_init()
357 htab_free_elems(htab); in prealloc_init()
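
Pieced together, prealloc_init() runs in three steps: carve one flat area of elem_size * num_entries (with per-CPU spares added for plain hash maps), attach a percpu value area to every element of a percpu map, then hand all elements either to the LRU subsystem or to a per-CPU freelist. A condensed sketch; the spare count and the elided arguments are assumptions:

static int prealloc_init(struct bpf_htab *htab)         /* condensed sketch */
{
        u32 num_entries = htab->map.max_entries;
        int err = -ENOMEM;

        if (htab_has_extra_elems(htab))
                num_entries += num_possible_cpus();     /* assumed */

        htab->elems = bpf_map_area_alloc((u64)htab->elem_size * num_entries,
                                         htab->map.numa_node);
        if (!htab->elems)
                return -ENOMEM;

        /* percpu maps: bpf_map_alloc_percpu() a value area per element,
         * attached with htab_elem_set_ptr(); on failure goto free_elems */

        if (htab_is_lru(htab))
                err = bpf_lru_init(&htab->lru,
                                   htab->map.map_flags & BPF_F_NO_COMMON_LRU,
                                   /* offsets and callback elided */
                                   htab);
        else
                err = pcpu_freelist_init(&htab->freelist);
        if (err)
                goto free_elems;

        if (htab_is_lru(htab))
                bpf_lru_populate(&htab->lru, htab->elems,
                                 /* node offset elided */
                                 htab->elem_size, num_entries);
        else
                pcpu_freelist_populate(&htab->freelist,
                                       htab->elems + offsetof(struct htab_elem, fnode),
                                       htab->elem_size, num_entries);
        return 0;

free_elems:
        htab_free_elems(htab);
        return err;
}
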
361 static void prealloc_destroy(struct bpf_htab *htab) in prealloc_destroy() argument
363 htab_free_elems(htab); in prealloc_destroy()
365 if (htab_is_lru(htab)) in prealloc_destroy()
366 bpf_lru_destroy(&htab->lru); in prealloc_destroy()
368 pcpu_freelist_destroy(&htab->freelist); in prealloc_destroy()
371 static int alloc_extra_elems(struct bpf_htab *htab) in alloc_extra_elems() argument
377 pptr = bpf_map_alloc_percpu(&htab->map, sizeof(struct htab_elem *), 8, in alloc_extra_elems()
383 l = pcpu_freelist_pop(&htab->freelist); in alloc_extra_elems()
390 htab->extra_elems = pptr; in alloc_extra_elems()
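
The extra elements exist so that overwriting an existing key in a preallocated map never fails for lack of memory: each CPU parks one spare element popped from the freelist, and alloc_htab_elem() later swaps that spare with the element being displaced. Sketch; the GFP flags and the container_of step are assumed:

static int alloc_extra_elems(struct bpf_htab *htab)
{
        struct htab_elem *__percpu *pptr;
        int cpu;

        pptr = bpf_map_alloc_percpu(&htab->map, sizeof(struct htab_elem *), 8,
                                    GFP_USER | __GFP_NOWARN);  /* flags assumed */
        if (!pptr)
                return -ENOMEM;

        for_each_possible_cpu(cpu) {
                struct pcpu_freelist_node *l;

                /* cannot fail here: prealloc_init() populated the freelist
                 * with spare elements beyond max_entries (see above) */
                l = pcpu_freelist_pop(&htab->freelist);
                *per_cpu_ptr(pptr, cpu) = container_of(l, struct htab_elem, fnode);
        }
        htab->extra_elems = pptr;
        return 0;
}
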
469 struct bpf_htab *htab; in htab_map_alloc() local
472 htab = bpf_map_area_alloc(sizeof(*htab), NUMA_NO_NODE); in htab_map_alloc()
473 if (!htab) in htab_map_alloc()
476 lockdep_register_key(&htab->lockdep_key); in htab_map_alloc()
478 bpf_map_init_from_attr(&htab->map, attr); in htab_map_alloc()
485 htab->map.max_entries = roundup(attr->max_entries, in htab_map_alloc()
487 if (htab->map.max_entries < attr->max_entries) in htab_map_alloc()
488 htab->map.max_entries = rounddown(attr->max_entries, in htab_map_alloc()
493 htab->n_buckets = roundup_pow_of_two(htab->map.max_entries); in htab_map_alloc()
495 htab->elem_size = sizeof(struct htab_elem) + in htab_map_alloc()
496 round_up(htab->map.key_size, 8); in htab_map_alloc()
498 htab->elem_size += sizeof(void *); in htab_map_alloc()
500 htab->elem_size += round_up(htab->map.value_size, 8); in htab_map_alloc()
504 if (htab->n_buckets == 0 || in htab_map_alloc()
505 htab->n_buckets > U32_MAX / sizeof(struct bucket)) in htab_map_alloc()
509 htab->buckets = bpf_map_area_alloc(htab->n_buckets * in htab_map_alloc()
511 htab->map.numa_node); in htab_map_alloc()
512 if (!htab->buckets) in htab_map_alloc()
516 htab->map_locked[i] = bpf_map_alloc_percpu(&htab->map, in htab_map_alloc()
520 if (!htab->map_locked[i]) in htab_map_alloc()
524 if (htab->map.map_flags & BPF_F_ZERO_SEED) in htab_map_alloc()
525 htab->hashrnd = 0; in htab_map_alloc()
527 htab->hashrnd = get_random_u32(); in htab_map_alloc()
529 htab_init_buckets(htab); in htab_map_alloc()
546 htab->use_percpu_counter = true; in htab_map_alloc()
548 if (htab->use_percpu_counter) { in htab_map_alloc()
549 err = percpu_counter_init(&htab->pcount, 0, GFP_KERNEL); in htab_map_alloc()
555 err = prealloc_init(htab); in htab_map_alloc()
563 err = alloc_extra_elems(htab); in htab_map_alloc()
568 err = bpf_mem_alloc_init(&htab->ma, htab->elem_size, false); in htab_map_alloc()
572 err = bpf_mem_alloc_init(&htab->pcpu_ma, in htab_map_alloc()
573 round_up(htab->map.value_size, 8), true); in htab_map_alloc()
579 return &htab->map; in htab_map_alloc()
582 prealloc_destroy(htab); in htab_map_alloc()
584 if (htab->use_percpu_counter) in htab_map_alloc()
585 percpu_counter_destroy(&htab->pcount); in htab_map_alloc()
587 free_percpu(htab->map_locked[i]); in htab_map_alloc()
588 bpf_map_area_free(htab->buckets); in htab_map_alloc()
589 bpf_mem_alloc_destroy(&htab->pcpu_ma); in htab_map_alloc()
590 bpf_mem_alloc_destroy(&htab->ma); in htab_map_alloc()
592 lockdep_unregister_key(&htab->lockdep_key); in htab_map_alloc()
593 bpf_map_area_free(htab); in htab_map_alloc()
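
The tail of htab_map_alloc() above is its error unwinding, which releases everything in reverse order of construction; htab_map_free() further down this listing walks the same list for a live map. As implied by the fragments:

/*
 * construction                           teardown (reverse order)
 *   bpf_map_area_alloc(htab)               bpf_map_area_free(htab)
 *   lockdep_register_key()                 lockdep_unregister_key()
 *   buckets + map_locked[] percpu          bpf_map_area_free()/free_percpu()
 *   percpu_counter_init()  (large maps)    percpu_counter_destroy()
 *   prealloc_init()  -or-                  prealloc_destroy()  -or-
 *   bpf_mem_alloc_init(ma, pcpu_ma)        bpf_mem_alloc_destroy(ma, pcpu_ma)
 *   alloc_extra_elems()                    free_percpu(extra_elems)
 */
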
602 static inline struct bucket *__select_bucket(struct bpf_htab *htab, u32 hash) in __select_bucket() argument
604 return &htab->buckets[hash & (htab->n_buckets - 1)]; in __select_bucket()
607 static inline struct hlist_nulls_head *select_bucket(struct bpf_htab *htab, u32 hash) in select_bucket() argument
609 return &__select_bucket(htab, hash)->head; in select_bucket()
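
Since n_buckets was computed with roundup_pow_of_two() during allocation, hash & (n_buckets - 1) is an exact, branch-free replacement for hash % n_buckets:

/* n_buckets = 1024 (0x400)  ->  mask = 0x3ff
 * hash = 0xdeadbeef         ->  bucket = 0xdeadbeef & 0x3ff = 751
 */
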
655 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in __htab_map_lookup_elem() local
665 hash = htab_map_hash(key, key_size, htab->hashrnd); in __htab_map_lookup_elem()
667 head = select_bucket(htab, hash); in __htab_map_lookup_elem()
669 l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets); in __htab_map_lookup_elem()
759 static void check_and_free_fields(struct bpf_htab *htab, in check_and_free_fields() argument
762 void *map_value = elem->key + round_up(htab->map.key_size, 8); in check_and_free_fields()
764 bpf_obj_free_fields(htab->map.record, map_value); in check_and_free_fields()
772 struct bpf_htab *htab = arg; in htab_lru_map_delete_node() local
781 b = __select_bucket(htab, tgt_l->hash); in htab_lru_map_delete_node()
784 ret = htab_lock_bucket(htab, b, tgt_l->hash, &flags); in htab_lru_map_delete_node()
791 check_and_free_fields(htab, l); in htab_lru_map_delete_node()
795 htab_unlock_bucket(htab, b, tgt_l->hash, flags); in htab_lru_map_delete_node()
803 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in htab_map_get_next_key() local
816 hash = htab_map_hash(key, key_size, htab->hashrnd); in htab_map_get_next_key()
818 head = select_bucket(htab, hash); in htab_map_get_next_key()
821 l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets); in htab_map_get_next_key()
837 i = hash & (htab->n_buckets - 1); in htab_map_get_next_key()
842 for (; i < htab->n_buckets; i++) { in htab_map_get_next_key()
843 head = select_bucket(htab, i); in htab_map_get_next_key()
859 static void htab_elem_free(struct bpf_htab *htab, struct htab_elem *l) in htab_elem_free() argument
861 if (htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH) in htab_elem_free()
862 bpf_mem_cache_free(&htab->pcpu_ma, l->ptr_to_pptr); in htab_elem_free()
863 check_and_free_fields(htab, l); in htab_elem_free()
864 bpf_mem_cache_free(&htab->ma, l); in htab_elem_free()
867 static void htab_put_fd_value(struct bpf_htab *htab, struct htab_elem *l) in htab_put_fd_value() argument
869 struct bpf_map *map = &htab->map; in htab_put_fd_value()
878 static bool is_map_full(struct bpf_htab *htab) in is_map_full() argument
880 if (htab->use_percpu_counter) in is_map_full()
881 return __percpu_counter_compare(&htab->pcount, htab->map.max_entries, in is_map_full()
883 return atomic_read(&htab->count) >= htab->map.max_entries; in is_map_full()
886 static void inc_elem_count(struct bpf_htab *htab) in inc_elem_count() argument
888 if (htab->use_percpu_counter) in inc_elem_count()
889 percpu_counter_add_batch(&htab->pcount, 1, PERCPU_COUNTER_BATCH); in inc_elem_count()
891 atomic_inc(&htab->count); in inc_elem_count()
894 static void dec_elem_count(struct bpf_htab *htab) in dec_elem_count() argument
896 if (htab->use_percpu_counter) in dec_elem_count()
897 percpu_counter_add_batch(&htab->pcount, -1, PERCPU_COUNTER_BATCH); in dec_elem_count()
899 atomic_dec(&htab->count); in dec_elem_count()
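
Maps large enough to set use_percpu_counter track their element count with a percpu_counter so that updates on different CPUs do not contend on a single atomic; PERCPU_COUNTER_BATCH bounds the per-CPU drift, and the fullness check compares with that same slack. The comparison's trailing argument is cut off by the listing; a sketch assuming the conventional form:

static bool is_map_full(struct bpf_htab *htab)
{
        if (htab->use_percpu_counter)
                /* >= 0: count has reached max_entries, accurate to within
                 * PERCPU_COUNTER_BATCH per CPU (continuation assumed) */
                return __percpu_counter_compare(&htab->pcount,
                                                htab->map.max_entries,
                                                PERCPU_COUNTER_BATCH) >= 0;
        return atomic_read(&htab->count) >= htab->map.max_entries;
}
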
903 static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l) in free_htab_elem() argument
905 htab_put_fd_value(htab, l); in free_htab_elem()
907 if (htab_is_prealloc(htab)) { in free_htab_elem()
908 check_and_free_fields(htab, l); in free_htab_elem()
909 __pcpu_freelist_push(&htab->freelist, &l->fnode); in free_htab_elem()
911 dec_elem_count(htab); in free_htab_elem()
912 htab_elem_free(htab, l); in free_htab_elem()
916 static void pcpu_copy_value(struct bpf_htab *htab, void __percpu *pptr, in pcpu_copy_value() argument
921 memcpy(this_cpu_ptr(pptr), value, htab->map.value_size); in pcpu_copy_value()
923 u32 size = round_up(htab->map.value_size, 8); in pcpu_copy_value()
934 static void pcpu_init_value(struct bpf_htab *htab, void __percpu *pptr, in pcpu_init_value() argument
943 u32 size = round_up(htab->map.value_size, 8); in pcpu_init_value()
955 pcpu_copy_value(htab, pptr, value, onallcpus); in pcpu_init_value()
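
pcpu_copy_value() distinguishes an update from BPF program context, which touches only the current CPU's slot, from a syscall update with onallcpus set, where the user buffer carries one value per possible CPU at a round_up(value_size, 8) stride. Sketch; only the signature and the two size anchors appear above, the loop is inferred:

static void pcpu_copy_value(struct bpf_htab *htab, void __percpu *pptr,
                            void *value, bool onallcpus)
{
        if (!onallcpus) {
                /* current CPU only, true value size */
                memcpy(this_cpu_ptr(pptr), value, htab->map.value_size);
        } else {
                u32 size = round_up(htab->map.value_size, 8);
                int off = 0, cpu;

                for_each_possible_cpu(cpu) {
                        memcpy(per_cpu_ptr(pptr, cpu), value + off, size);
                        off += size;
                }
        }
}
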
959 static bool fd_htab_map_needs_adjust(const struct bpf_htab *htab) in fd_htab_map_needs_adjust() argument
961 return htab->map.map_type == BPF_MAP_TYPE_HASH_OF_MAPS && in fd_htab_map_needs_adjust()
965 static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key, in alloc_htab_elem() argument
970 u32 size = htab->map.value_size; in alloc_htab_elem()
971 bool prealloc = htab_is_prealloc(htab); in alloc_htab_elem()
980 pl_new = this_cpu_ptr(htab->extra_elems); in alloc_htab_elem()
982 htab_put_fd_value(htab, old_elem); in alloc_htab_elem()
987 l = __pcpu_freelist_pop(&htab->freelist); in alloc_htab_elem()
993 if (is_map_full(htab)) in alloc_htab_elem()
1001 inc_elem_count(htab); in alloc_htab_elem()
1002 l_new = bpf_mem_cache_alloc(&htab->ma); in alloc_htab_elem()
1015 pptr = bpf_mem_cache_alloc(&htab->pcpu_ma); in alloc_htab_elem()
1017 bpf_mem_cache_free(&htab->ma, l_new); in alloc_htab_elem()
1025 pcpu_init_value(htab, pptr, value, onallcpus); in alloc_htab_elem()
1029 } else if (fd_htab_map_needs_adjust(htab)) { in alloc_htab_elem()
1033 copy_map_value(&htab->map, in alloc_htab_elem()
1041 dec_elem_count(htab); in alloc_htab_elem()
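
Read together, the alloc_htab_elem() fragments show three allocation paths:

/*
 *   prealloc, overwriting old_elem -> take this CPU's extra_elems spare
 *                                     (the displaced element becomes the
 *                                      new spare; nothing can fail)
 *   prealloc, fresh key            -> __pcpu_freelist_pop(&htab->freelist)
 *   no prealloc                    -> is_map_full()? fail (presumably -E2BIG)
 *                                     else inc_elem_count() +
 *                                     bpf_mem_cache_alloc(&htab->ma), plus
 *                                     bpf_mem_cache_alloc(&htab->pcpu_ma)
 *                                     for percpu values; both are rolled
 *                                     back on failure (dec_elem_count())
 */
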
1045 static int check_flags(struct bpf_htab *htab, struct htab_elem *l_old, in check_flags() argument
1063 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in htab_map_update_elem() local
1080 hash = htab_map_hash(key, key_size, htab->hashrnd); in htab_map_update_elem()
1082 b = __select_bucket(htab, hash); in htab_map_update_elem()
1090 htab->n_buckets); in htab_map_update_elem()
1091 ret = check_flags(htab, l_old, map_flags); in htab_map_update_elem()
1107 ret = htab_lock_bucket(htab, b, hash, &flags); in htab_map_update_elem()
1113 ret = check_flags(htab, l_old, map_flags); in htab_map_update_elem()
1131 l_new = alloc_htab_elem(htab, key, value, key_size, hash, false, false, in htab_map_update_elem()
1145 if (!htab_is_prealloc(htab)) in htab_map_update_elem()
1146 free_htab_elem(htab, l_old); in htab_map_update_elem()
1148 check_and_free_fields(htab, l_old); in htab_map_update_elem()
1152 htab_unlock_bucket(htab, b, hash, flags); in htab_map_update_elem()
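
htab_map_update_elem() runs check_flags() twice: once on a lockless pre-pass (apparently the BPF_F_LOCK in-place update path) and again under htab_lock_bucket(). The new element is linked in before the old one is removed, so RCU readers walking the bucket see either the old or the new value, never neither; the list-member names below are assumed:

/* under htab_lock_bucket():
 *      hlist_nulls_add_head_rcu(&l_new->hash_node, head);
 *      if (l_old) {
 *              hlist_nulls_del_rcu(&l_old->hash_node);
 *              if (!htab_is_prealloc(htab))
 *                      free_htab_elem(htab, l_old);
 *              else
 *                      check_and_free_fields(htab, l_old);
 *      }
 */
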
1156 static void htab_lru_push_free(struct bpf_htab *htab, struct htab_elem *elem) in htab_lru_push_free() argument
1158 check_and_free_fields(htab, elem); in htab_lru_push_free()
1159 bpf_lru_push_free(&htab->lru, &elem->lru_node); in htab_lru_push_free()
1165 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in htab_lru_map_update_elem() local
1182 hash = htab_map_hash(key, key_size, htab->hashrnd); in htab_lru_map_update_elem()
1184 b = __select_bucket(htab, hash); in htab_lru_map_update_elem()
1192 l_new = prealloc_lru_pop(htab, key, hash); in htab_lru_map_update_elem()
1195 copy_map_value(&htab->map, in htab_lru_map_update_elem()
1198 ret = htab_lock_bucket(htab, b, hash, &flags); in htab_lru_map_update_elem()
1204 ret = check_flags(htab, l_old, map_flags); in htab_lru_map_update_elem()
1219 htab_unlock_bucket(htab, b, hash, flags); in htab_lru_map_update_elem()
1222 htab_lru_push_free(htab, l_new); in htab_lru_map_update_elem()
1224 htab_lru_push_free(htab, l_old); in htab_lru_map_update_elem()
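
The LRU flavor allocates through prealloc_lru_pop() and copies the value before taking the bucket lock: popping a free LRU node can itself trigger eviction (htab_lru_map_delete_node() above), which takes bucket locks of its own. When the update aborts or replaces an existing key, the unused new node and the displaced old element are both returned through htab_lru_push_free(), which releases any timer/kptr fields and hands the node back to the LRU free list.
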
1233 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in __htab_percpu_map_update_elem() local
1250 hash = htab_map_hash(key, key_size, htab->hashrnd); in __htab_percpu_map_update_elem()
1252 b = __select_bucket(htab, hash); in __htab_percpu_map_update_elem()
1255 ret = htab_lock_bucket(htab, b, hash, &flags); in __htab_percpu_map_update_elem()
1261 ret = check_flags(htab, l_old, map_flags); in __htab_percpu_map_update_elem()
1267 pcpu_copy_value(htab, htab_elem_get_ptr(l_old, key_size), in __htab_percpu_map_update_elem()
1270 l_new = alloc_htab_elem(htab, key, value, key_size, in __htab_percpu_map_update_elem()
1280 htab_unlock_bucket(htab, b, hash, flags); in __htab_percpu_map_update_elem()
1288 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in __htab_lru_percpu_map_update_elem() local
1305 hash = htab_map_hash(key, key_size, htab->hashrnd); in __htab_lru_percpu_map_update_elem()
1307 b = __select_bucket(htab, hash); in __htab_lru_percpu_map_update_elem()
1316 l_new = prealloc_lru_pop(htab, key, hash); in __htab_lru_percpu_map_update_elem()
1321 ret = htab_lock_bucket(htab, b, hash, &flags); in __htab_lru_percpu_map_update_elem()
1327 ret = check_flags(htab, l_old, map_flags); in __htab_lru_percpu_map_update_elem()
1335 pcpu_copy_value(htab, htab_elem_get_ptr(l_old, key_size), in __htab_lru_percpu_map_update_elem()
1338 pcpu_init_value(htab, htab_elem_get_ptr(l_new, key_size), in __htab_lru_percpu_map_update_elem()
1345 htab_unlock_bucket(htab, b, hash, flags); in __htab_lru_percpu_map_update_elem()
1347 bpf_lru_push_free(&htab->lru, &l_new->lru_node); in __htab_lru_percpu_map_update_elem()
1367 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in htab_map_delete_elem() local
1380 hash = htab_map_hash(key, key_size, htab->hashrnd); in htab_map_delete_elem()
1381 b = __select_bucket(htab, hash); in htab_map_delete_elem()
1384 ret = htab_lock_bucket(htab, b, hash, &flags); in htab_map_delete_elem()
1392 free_htab_elem(htab, l); in htab_map_delete_elem()
1397 htab_unlock_bucket(htab, b, hash, flags); in htab_map_delete_elem()
1403 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in htab_lru_map_delete_elem() local
1416 hash = htab_map_hash(key, key_size, htab->hashrnd); in htab_lru_map_delete_elem()
1417 b = __select_bucket(htab, hash); in htab_lru_map_delete_elem()
1420 ret = htab_lock_bucket(htab, b, hash, &flags); in htab_lru_map_delete_elem()
1431 htab_unlock_bucket(htab, b, hash, flags); in htab_lru_map_delete_elem()
1433 htab_lru_push_free(htab, l); in htab_lru_map_delete_elem()
1437 static void delete_all_elements(struct bpf_htab *htab) in delete_all_elements() argument
1445 for (i = 0; i < htab->n_buckets; i++) { in delete_all_elements()
1446 struct hlist_nulls_head *head = select_bucket(htab, i); in delete_all_elements()
1452 htab_elem_free(htab, l); in delete_all_elements()
1458 static void htab_free_malloced_timers(struct bpf_htab *htab) in htab_free_malloced_timers() argument
1463 for (i = 0; i < htab->n_buckets; i++) { in htab_free_malloced_timers()
1464 struct hlist_nulls_head *head = select_bucket(htab, i); in htab_free_malloced_timers()
1470 bpf_obj_free_timer(htab->map.record, l->key + round_up(htab->map.key_size, 8)); in htab_free_malloced_timers()
1479 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in htab_map_free_timers() local
1482 if (!btf_record_has_field(htab->map.record, BPF_TIMER)) in htab_map_free_timers()
1484 if (!htab_is_prealloc(htab)) in htab_map_free_timers()
1485 htab_free_malloced_timers(htab); in htab_map_free_timers()
1487 htab_free_prealloced_timers(htab); in htab_map_free_timers()
1493 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in htab_map_free() local
1505 if (!htab_is_prealloc(htab)) { in htab_map_free()
1506 delete_all_elements(htab); in htab_map_free()
1508 htab_free_prealloced_fields(htab); in htab_map_free()
1509 prealloc_destroy(htab); in htab_map_free()
1512 free_percpu(htab->extra_elems); in htab_map_free()
1513 bpf_map_area_free(htab->buckets); in htab_map_free()
1514 bpf_mem_alloc_destroy(&htab->pcpu_ma); in htab_map_free()
1515 bpf_mem_alloc_destroy(&htab->ma); in htab_map_free()
1516 if (htab->use_percpu_counter) in htab_map_free()
1517 percpu_counter_destroy(&htab->pcount); in htab_map_free()
1519 free_percpu(htab->map_locked[i]); in htab_map_free()
1520 lockdep_unregister_key(&htab->lockdep_key); in htab_map_free()
1521 bpf_map_area_free(htab); in htab_map_free()
1549 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in __htab_map_lookup_and_delete_elem() local
1559 hash = htab_map_hash(key, key_size, htab->hashrnd); in __htab_map_lookup_and_delete_elem()
1560 b = __select_bucket(htab, hash); in __htab_map_lookup_and_delete_elem()
1563 ret = htab_lock_bucket(htab, b, hash, &bflags); in __htab_map_lookup_and_delete_elem()
1599 free_htab_elem(htab, l); in __htab_map_lookup_and_delete_elem()
1602 htab_unlock_bucket(htab, b, hash, bflags); in __htab_map_lookup_and_delete_elem()
1605 htab_lru_push_free(htab, l); in __htab_map_lookup_and_delete_elem()
1647 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in __htab_map_lookup_and_delete_batch() local
1684 if (batch >= htab->n_buckets) in __htab_map_lookup_and_delete_batch()
1687 key_size = htab->map.key_size; in __htab_map_lookup_and_delete_batch()
1688 roundup_key_size = round_up(htab->map.key_size, 8); in __htab_map_lookup_and_delete_batch()
1689 value_size = htab->map.value_size; in __htab_map_lookup_and_delete_batch()
1716 b = &htab->buckets[batch]; in __htab_map_lookup_and_delete_batch()
1720 ret = htab_lock_bucket(htab, b, batch, &flags); in __htab_map_lookup_and_delete_batch()
1743 htab_unlock_bucket(htab, b, batch, flags); in __htab_map_lookup_and_delete_batch()
1754 htab_unlock_bucket(htab, b, batch, flags); in __htab_map_lookup_and_delete_batch()
1809 free_htab_elem(htab, l); in __htab_map_lookup_and_delete_batch()
1816 htab_unlock_bucket(htab, b, batch, flags); in __htab_map_lookup_and_delete_batch()
1822 htab_lru_push_free(htab, l); in __htab_map_lookup_and_delete_batch()
1829 if (!bucket_cnt && (batch + 1 < htab->n_buckets)) { in __htab_map_lookup_and_delete_batch()
1846 if (batch >= htab->n_buckets) { in __htab_map_lookup_and_delete_batch()
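
The batch fragments imply the walk: lock bucket number batch, count its elements, and if the caller-supplied staging buffer cannot hold them, unlock, grow the buffer, and retry the same bucket; runs of empty buckets are skipped without copying out (the !bucket_cnt && batch + 1 < htab->n_buckets test), and the loop terminates once batch >= htab->n_buckets. Elements removed by the delete variant are freed only after the bucket is unlocked, with LRU nodes going back through htab_lru_push_free().
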
1939 struct bpf_htab *htab; member
1949 const struct bpf_htab *htab = info->htab; in bpf_hash_map_seq_find_next() local
1958 if (bucket_id >= htab->n_buckets) in bpf_hash_map_seq_find_next()
1972 b = &htab->buckets[bucket_id++]; in bpf_hash_map_seq_find_next()
1977 for (i = bucket_id; i < htab->n_buckets; i++) { in bpf_hash_map_seq_find_next()
1978 b = &htab->buckets[i]; in bpf_hash_map_seq_find_next()
2096 seq_info->htab = container_of(map, struct bpf_htab, map); in bpf_iter_init_hash_map()
2125 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in bpf_for_each_hash_elem() local
2140 is_percpu = htab_is_percpu(htab); in bpf_for_each_hash_elem()
2148 for (i = 0; i < htab->n_buckets; i++) { in bpf_for_each_hash_elem()
2149 b = &htab->buckets[i]; in bpf_for_each_hash_elem()
2194 BATCH_OPS(htab),
2308 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in bpf_percpu_hash_update() local
2312 if (htab_is_lru(htab)) in bpf_percpu_hash_update()
2399 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in fd_htab_map_free() local
2405 for (i = 0; i < htab->n_buckets; i++) { in fd_htab_map_free()
2406 head = select_bucket(htab, i); in fd_htab_map_free()
2522 BATCH_OPS(htab),