Lines matching refs: htab
(cscope-style cross-reference into the kernel's BPF hash table implementation,
kernel/bpf/hashtab.c; each entry reads "<source line> <code> in <enclosing function>",
with the symbol's role, argument/local/member, noted where the tool knows it)
132 static inline bool htab_is_prealloc(const struct bpf_htab *htab) in htab_is_prealloc() argument
134 return !(htab->map.map_flags & BPF_F_NO_PREALLOC); in htab_is_prealloc()
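htab_is_prealloc() is simply the inverse of the BPF_F_NO_PREALLOC creation flag:
unless the user opts out, every hash map is fully preallocated. A minimal
userspace sketch of setting that flag, assuming libbpf; the map name and
key/value sizes are illustrative:

    #include <stdio.h>
    #include <bpf/bpf.h>

    int main(void)
    {
            /* BPF_F_NO_PREALLOC makes htab_is_prealloc() false: elements
             * are then allocated on demand instead of being carved out of
             * a preallocated pool at map creation time. */
            LIBBPF_OPTS(bpf_map_create_opts, opts,
                        .map_flags = BPF_F_NO_PREALLOC);
            int fd = bpf_map_create(BPF_MAP_TYPE_HASH, "demo_htab",
                                    sizeof(unsigned int),       /* key_size    */
                                    sizeof(unsigned long long), /* value_size  */
                                    1024, &opts);               /* max_entries */

            if (fd < 0)
                    perror("bpf_map_create");
            return fd < 0;
    }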
137 static void htab_init_buckets(struct bpf_htab *htab) in htab_init_buckets() argument
141 for (i = 0; i < htab->n_buckets; i++) { in htab_init_buckets()
142 INIT_HLIST_NULLS_HEAD(&htab->buckets[i].head, i); in htab_init_buckets()
143 raw_spin_lock_init(&htab->buckets[i].raw_lock); in htab_init_buckets()
144 lockdep_set_class(&htab->buckets[i].raw_lock, in htab_init_buckets()
145 &htab->lockdep_key); in htab_init_buckets()
150 static inline int htab_lock_bucket(const struct bpf_htab *htab, in htab_lock_bucket() argument
156 hash = hash & min_t(u32, HASHTAB_MAP_LOCK_MASK, htab->n_buckets - 1); in htab_lock_bucket()
160 if (unlikely(__this_cpu_inc_return(*(htab->map_locked[hash])) != 1)) { in htab_lock_bucket()
161 __this_cpu_dec(*(htab->map_locked[hash])); in htab_lock_bucket()
173 static inline void htab_unlock_bucket(const struct bpf_htab *htab, in htab_unlock_bucket() argument
177 hash = hash & min_t(u32, HASHTAB_MAP_LOCK_MASK, htab->n_buckets - 1); in htab_unlock_bucket()
179 __this_cpu_dec(*(htab->map_locked[hash])); in htab_unlock_bucket()
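htab_lock_bucket()/htab_unlock_bucket() pair a per-bucket raw spinlock with a
per-CPU counter (map_locked, one counter per lock slot, the slot chosen by
masking the hash with min(HASHTAB_MAP_LOCK_MASK, n_buckets - 1)). The counter
detects re-entry on the same CPU, e.g. a tracing program firing inside an
update that already holds the bucket lock, and bails out with -EBUSY instead of
deadlocking. A sketch of the pattern only (not the verbatim kernel body, which
varies across versions):

    preempt_disable();
    local_irq_save(flags);
    if (__this_cpu_inc_return(*htab->map_locked[slot]) != 1) {
            /* this CPU already holds (or is acquiring) this lock slot */
            __this_cpu_dec(*htab->map_locked[slot]);
            local_irq_restore(flags);
            preempt_enable();
            return -EBUSY;          /* caller must fail the op, not spin */
    }
    raw_spin_lock(&b->raw_lock);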
186 static bool htab_is_lru(const struct bpf_htab *htab) in htab_is_lru() argument
188 return htab->map.map_type == BPF_MAP_TYPE_LRU_HASH || in htab_is_lru()
189 htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH; in htab_is_lru()
192 static bool htab_is_percpu(const struct bpf_htab *htab) in htab_is_percpu() argument
194 return htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH || in htab_is_percpu()
195 htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH; in htab_is_percpu()
214 static struct htab_elem *get_htab_elem(struct bpf_htab *htab, int i) in get_htab_elem() argument
216 return (struct htab_elem *) (htab->elems + i * (u64)htab->elem_size); in get_htab_elem()
219 static bool htab_has_extra_elems(struct bpf_htab *htab) in htab_has_extra_elems() argument
221 return !htab_is_percpu(htab) && !htab_is_lru(htab); in htab_has_extra_elems()
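get_htab_elem() indexes a single flat allocation (htab->elems) holding all
preallocated elements back to back; the (u64) cast matters because
i * elem_size can exceed 32 bits for large maps. A standalone sketch of the
overflow the cast avoids, with hypothetical sizes:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t elem_size = 112;          /* hypothetical elem_size */
            uint32_t i = 40000000;             /* large element index    */

            /* 32-bit multiply wraps: 40000000 * 112 = 4480000000 > UINT32_MAX */
            uint64_t off_ok  = (uint64_t)i * elem_size;
            uint32_t off_bad = i * (uint32_t)elem_size;

            printf("64-bit offset: %llu, wrapped 32-bit offset: %u\n",
                   (unsigned long long)off_ok, (unsigned)off_bad);
            return 0;
    }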
224 static void htab_free_prealloced_timers_and_wq(struct bpf_htab *htab) in htab_free_prealloced_timers_and_wq() argument
226 u32 num_entries = htab->map.max_entries; in htab_free_prealloced_timers_and_wq()
229 if (htab_has_extra_elems(htab)) in htab_free_prealloced_timers_and_wq()
235 elem = get_htab_elem(htab, i); in htab_free_prealloced_timers_and_wq()
236 if (btf_record_has_field(htab->map.record, BPF_TIMER)) in htab_free_prealloced_timers_and_wq()
237 bpf_obj_free_timer(htab->map.record, in htab_free_prealloced_timers_and_wq()
238 elem->key + round_up(htab->map.key_size, 8)); in htab_free_prealloced_timers_and_wq()
239 if (btf_record_has_field(htab->map.record, BPF_WORKQUEUE)) in htab_free_prealloced_timers_and_wq()
240 bpf_obj_free_workqueue(htab->map.record, in htab_free_prealloced_timers_and_wq()
241 elem->key + round_up(htab->map.key_size, 8)); in htab_free_prealloced_timers_and_wq()
246 static void htab_free_prealloced_fields(struct bpf_htab *htab) in htab_free_prealloced_fields() argument
248 u32 num_entries = htab->map.max_entries; in htab_free_prealloced_fields()
251 if (IS_ERR_OR_NULL(htab->map.record)) in htab_free_prealloced_fields()
253 if (htab_has_extra_elems(htab)) in htab_free_prealloced_fields()
258 elem = get_htab_elem(htab, i); in htab_free_prealloced_fields()
259 if (htab_is_percpu(htab)) { in htab_free_prealloced_fields()
260 void __percpu *pptr = htab_elem_get_ptr(elem, htab->map.key_size); in htab_free_prealloced_fields()
264 bpf_obj_free_fields(htab->map.record, per_cpu_ptr(pptr, cpu)); in htab_free_prealloced_fields()
268 bpf_obj_free_fields(htab->map.record, elem->key + round_up(htab->map.key_size, 8)); in htab_free_prealloced_fields()
275 static void htab_free_elems(struct bpf_htab *htab) in htab_free_elems() argument
279 if (!htab_is_percpu(htab)) in htab_free_elems()
282 for (i = 0; i < htab->map.max_entries; i++) { in htab_free_elems()
285 pptr = htab_elem_get_ptr(get_htab_elem(htab, i), in htab_free_elems()
286 htab->map.key_size); in htab_free_elems()
291 bpf_map_area_free(htab->elems); in htab_free_elems()
305 static struct htab_elem *prealloc_lru_pop(struct bpf_htab *htab, void *key, in prealloc_lru_pop() argument
308 struct bpf_lru_node *node = bpf_lru_pop_free(&htab->lru, hash); in prealloc_lru_pop()
312 bpf_map_inc_elem_count(&htab->map); in prealloc_lru_pop()
314 memcpy(l->key, key, htab->map.key_size); in prealloc_lru_pop()
321 static int prealloc_init(struct bpf_htab *htab) in prealloc_init() argument
323 u32 num_entries = htab->map.max_entries; in prealloc_init()
326 if (htab_has_extra_elems(htab)) in prealloc_init()
329 htab->elems = bpf_map_area_alloc((u64)htab->elem_size * num_entries, in prealloc_init()
330 htab->map.numa_node); in prealloc_init()
331 if (!htab->elems) in prealloc_init()
334 if (!htab_is_percpu(htab)) in prealloc_init()
338 u32 size = round_up(htab->map.value_size, 8); in prealloc_init()
341 pptr = bpf_map_alloc_percpu(&htab->map, size, 8, in prealloc_init()
345 htab_elem_set_ptr(get_htab_elem(htab, i), htab->map.key_size, in prealloc_init()
351 if (htab_is_lru(htab)) in prealloc_init()
352 err = bpf_lru_init(&htab->lru, in prealloc_init()
353 htab->map.map_flags & BPF_F_NO_COMMON_LRU, in prealloc_init()
357 htab); in prealloc_init()
359 err = pcpu_freelist_init(&htab->freelist); in prealloc_init()
364 if (htab_is_lru(htab)) in prealloc_init()
365 bpf_lru_populate(&htab->lru, htab->elems, in prealloc_init()
367 htab->elem_size, num_entries); in prealloc_init()
369 pcpu_freelist_populate(&htab->freelist, in prealloc_init()
370 htab->elems + offsetof(struct htab_elem, fnode), in prealloc_init()
371 htab->elem_size, num_entries); in prealloc_init()
376 htab_free_elems(htab); in prealloc_init()
380 static void prealloc_destroy(struct bpf_htab *htab) in prealloc_destroy() argument
382 htab_free_elems(htab); in prealloc_destroy()
384 if (htab_is_lru(htab)) in prealloc_destroy()
385 bpf_lru_destroy(&htab->lru); in prealloc_destroy()
387 pcpu_freelist_destroy(&htab->freelist); in prealloc_destroy()
390 static int alloc_extra_elems(struct bpf_htab *htab) in alloc_extra_elems() argument
396 pptr = bpf_map_alloc_percpu(&htab->map, sizeof(struct htab_elem *), 8, in alloc_extra_elems()
402 l = pcpu_freelist_pop(&htab->freelist); in alloc_extra_elems()
409 htab->extra_elems = pptr; in alloc_extra_elems()
485 struct bpf_htab *htab; in htab_map_alloc() local
488 htab = bpf_map_area_alloc(sizeof(*htab), NUMA_NO_NODE); in htab_map_alloc()
489 if (!htab) in htab_map_alloc()
492 lockdep_register_key(&htab->lockdep_key); in htab_map_alloc()
494 bpf_map_init_from_attr(&htab->map, attr); in htab_map_alloc()
501 htab->map.max_entries = roundup(attr->max_entries, in htab_map_alloc()
503 if (htab->map.max_entries < attr->max_entries) in htab_map_alloc()
504 htab->map.max_entries = rounddown(attr->max_entries, in htab_map_alloc()
512 if (htab->map.max_entries > 1UL << 31) in htab_map_alloc()
515 htab->n_buckets = roundup_pow_of_two(htab->map.max_entries); in htab_map_alloc()
517 htab->elem_size = sizeof(struct htab_elem) + in htab_map_alloc()
518 round_up(htab->map.key_size, 8); in htab_map_alloc()
520 htab->elem_size += sizeof(void *); in htab_map_alloc()
522 htab->elem_size += round_up(htab->map.value_size, 8); in htab_map_alloc()
525 if (htab->n_buckets > U32_MAX / sizeof(struct bucket)) in htab_map_alloc()
528 err = bpf_map_init_elem_count(&htab->map); in htab_map_alloc()
533 htab->buckets = bpf_map_area_alloc(htab->n_buckets * in htab_map_alloc()
535 htab->map.numa_node); in htab_map_alloc()
536 if (!htab->buckets) in htab_map_alloc()
540 htab->map_locked[i] = bpf_map_alloc_percpu(&htab->map, in htab_map_alloc()
544 if (!htab->map_locked[i]) in htab_map_alloc()
548 if (htab->map.map_flags & BPF_F_ZERO_SEED) in htab_map_alloc()
549 htab->hashrnd = 0; in htab_map_alloc()
551 htab->hashrnd = get_random_u32(); in htab_map_alloc()
553 htab_init_buckets(htab); in htab_map_alloc()
570 htab->use_percpu_counter = true; in htab_map_alloc()
572 if (htab->use_percpu_counter) { in htab_map_alloc()
573 err = percpu_counter_init(&htab->pcount, 0, GFP_KERNEL); in htab_map_alloc()
579 err = prealloc_init(htab); in htab_map_alloc()
587 err = alloc_extra_elems(htab); in htab_map_alloc()
592 err = bpf_mem_alloc_init(&htab->ma, htab->elem_size, false); in htab_map_alloc()
596 err = bpf_mem_alloc_init(&htab->pcpu_ma, in htab_map_alloc()
597 round_up(htab->map.value_size, 8), true); in htab_map_alloc()
603 return &htab->map; in htab_map_alloc()
606 prealloc_destroy(htab); in htab_map_alloc()
608 if (htab->use_percpu_counter) in htab_map_alloc()
609 percpu_counter_destroy(&htab->pcount); in htab_map_alloc()
611 free_percpu(htab->map_locked[i]); in htab_map_alloc()
612 bpf_map_area_free(htab->buckets); in htab_map_alloc()
613 bpf_mem_alloc_destroy(&htab->pcpu_ma); in htab_map_alloc()
614 bpf_mem_alloc_destroy(&htab->ma); in htab_map_alloc()
616 bpf_map_free_elem_count(&htab->map); in htab_map_alloc()
618 lockdep_unregister_key(&htab->lockdep_key); in htab_map_alloc()
619 bpf_map_area_free(htab); in htab_map_alloc()
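htab_map_alloc() sizes one element as sizeof(struct htab_elem) plus the
8-byte-aligned key, plus either a per-CPU pointer (percpu maps) or the
8-byte-aligned value (scalar maps), and rounds the bucket count up to a power
of two. A worked sketch of that arithmetic; the 48-byte struct htab_elem is an
assumed figure for a 64-bit build:

    #include <stdio.h>

    static unsigned long round_up8(unsigned long x) { return (x + 7) & ~7UL; }

    int main(void)
    {
            unsigned long key_size = 13, value_size = 6;
            unsigned long htab_elem_sz = 48;  /* assumed sizeof(struct htab_elem) */

            unsigned long base   = htab_elem_sz + round_up8(key_size); /* 48 + 16 = 64 */
            unsigned long scalar = base + round_up8(value_size);       /* 64 + 8  = 72 */
            unsigned long percpu = base + sizeof(void *);              /* 64 + 8  = 72 */

            printf("scalar elem_size=%lu percpu elem_size=%lu\n", scalar, percpu);
            return 0;
    }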
630 static inline struct bucket *__select_bucket(struct bpf_htab *htab, u32 hash) in __select_bucket() argument
632 return &htab->buckets[hash & (htab->n_buckets - 1)]; in __select_bucket()
635 static inline struct hlist_nulls_head *select_bucket(struct bpf_htab *htab, u32 hash) in select_bucket() argument
637 return &__select_bucket(htab, hash)->head; in select_bucket()
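Because n_buckets is a power of two, __select_bucket() can use a mask instead
of a modulo. A quick self-check of that equivalence:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t n_buckets = 1024;    /* roundup_pow_of_two(max_entries) */

            for (uint32_t hash = 0; hash < 100000; hash++)
                    assert((hash & (n_buckets - 1)) == (hash % n_buckets));
            return 0;
    }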
683 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in __htab_map_lookup_elem() local
693 hash = htab_map_hash(key, key_size, htab->hashrnd); in __htab_map_lookup_elem()
695 head = select_bucket(htab, hash); in __htab_map_lookup_elem()
697 l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets); in __htab_map_lookup_elem()
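__htab_map_lookup_elem() is what a BPF program's bpf_map_lookup_elem() call
lands in; the lookup walks the nulls-list under RCU with no bucket lock taken.
A minimal BPF-side sketch, assuming libbpf headers; map, section, and program
names are illustrative:

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    struct {
            __uint(type, BPF_MAP_TYPE_HASH);
            __uint(max_entries, 1024);
            __type(key, __u32);
            __type(value, __u64);
    } demo_htab SEC(".maps");

    SEC("tracepoint/syscalls/sys_enter_write")
    int count_writes(void *ctx)
    {
            __u32 key = 0;
            __u64 *val = bpf_map_lookup_elem(&demo_htab, &key);

            if (val)                /* lookup runs under RCU in the kernel */
                    __sync_fetch_and_add(val, 1);
            return 0;
    }

    char LICENSE[] SEC("license") = "GPL";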
787 static void check_and_free_fields(struct bpf_htab *htab, in check_and_free_fields() argument
790 if (htab_is_percpu(htab)) { in check_and_free_fields()
791 void __percpu *pptr = htab_elem_get_ptr(elem, htab->map.key_size); in check_and_free_fields()
795 bpf_obj_free_fields(htab->map.record, per_cpu_ptr(pptr, cpu)); in check_and_free_fields()
797 void *map_value = elem->key + round_up(htab->map.key_size, 8); in check_and_free_fields()
799 bpf_obj_free_fields(htab->map.record, map_value); in check_and_free_fields()
808 struct bpf_htab *htab = arg; in htab_lru_map_delete_node() local
817 b = __select_bucket(htab, tgt_l->hash); in htab_lru_map_delete_node()
820 ret = htab_lock_bucket(htab, b, tgt_l->hash, &flags); in htab_lru_map_delete_node()
827 check_and_free_fields(htab, l); in htab_lru_map_delete_node()
828 bpf_map_dec_elem_count(&htab->map); in htab_lru_map_delete_node()
832 htab_unlock_bucket(htab, b, tgt_l->hash, flags); in htab_lru_map_delete_node()
840 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in htab_map_get_next_key() local
853 hash = htab_map_hash(key, key_size, htab->hashrnd); in htab_map_get_next_key()
855 head = select_bucket(htab, hash); in htab_map_get_next_key()
858 l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets); in htab_map_get_next_key()
874 i = hash & (htab->n_buckets - 1); in htab_map_get_next_key()
879 for (; i < htab->n_buckets; i++) { in htab_map_get_next_key()
880 head = select_bucket(htab, i); in htab_map_get_next_key()
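htab_map_get_next_key() backs userspace iteration: given the previous key it
returns the next element in hash order, while a NULL or no-longer-present key
restarts from the first bucket (which is why iteration under concurrent
updates can miss or repeat entries). A sketch of the usual userspace loop,
assuming libbpf and a __u32-keyed map:

    #include <bpf/bpf.h>

    static void dump_keys(int map_fd)
    {
            __u32 cur, next;
            int err = bpf_map_get_next_key(map_fd, NULL, &next);

            while (!err) {
                    /* consume `next` here (e.g. bpf_map_lookup_elem) */
                    cur = next;
                    err = bpf_map_get_next_key(map_fd, &cur, &next);
            }
    }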
896 static void htab_elem_free(struct bpf_htab *htab, struct htab_elem *l) in htab_elem_free() argument
898 check_and_free_fields(htab, l); in htab_elem_free()
899 if (htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH) in htab_elem_free()
900 bpf_mem_cache_free(&htab->pcpu_ma, l->ptr_to_pptr); in htab_elem_free()
901 bpf_mem_cache_free(&htab->ma, l); in htab_elem_free()
904 static void htab_put_fd_value(struct bpf_htab *htab, struct htab_elem *l) in htab_put_fd_value() argument
906 struct bpf_map *map = &htab->map; in htab_put_fd_value()
915 static bool is_map_full(struct bpf_htab *htab) in is_map_full() argument
917 if (htab->use_percpu_counter) in is_map_full()
918 return __percpu_counter_compare(&htab->pcount, htab->map.max_entries, in is_map_full()
920 return atomic_read(&htab->count) >= htab->map.max_entries; in is_map_full()
923 static void inc_elem_count(struct bpf_htab *htab) in inc_elem_count() argument
925 bpf_map_inc_elem_count(&htab->map); in inc_elem_count()
927 if (htab->use_percpu_counter) in inc_elem_count()
928 percpu_counter_add_batch(&htab->pcount, 1, PERCPU_COUNTER_BATCH); in inc_elem_count()
930 atomic_inc(&htab->count); in inc_elem_count()
933 static void dec_elem_count(struct bpf_htab *htab) in dec_elem_count() argument
935 bpf_map_dec_elem_count(&htab->map); in dec_elem_count()
937 if (htab->use_percpu_counter) in dec_elem_count()
938 percpu_counter_add_batch(&htab->pcount, -1, PERCPU_COUNTER_BATCH); in dec_elem_count()
940 atomic_dec(&htab->count); in dec_elem_count()
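is_map_full() has two modes: a plain atomic_t count, or a per-CPU batched
counter when the map is large enough that a small counting error near capacity
is an acceptable price for avoiding a globally contended atomic. The selection
heuristic in htab_map_alloc() looks like the sketch below; the constant mirrors
the local PERCPU_COUNTER_BATCH define in the source, so treat the exact value
as version-dependent:

    #include <stdbool.h>
    #include <stdio.h>

    #define PERCPU_COUNTER_BATCH 32   /* mirrors the local define in hashtab.c */

    static bool use_percpu_counter(unsigned int max_entries, unsigned int ncpus)
    {
            /* Worth it only when capacity dwarfs the error bound: near the
             * limit the batched count can be off by ~ncpus * BATCH entries. */
            return max_entries / 2 > ncpus * PERCPU_COUNTER_BATCH;
    }

    int main(void)
    {
            printf("1M entries, 64 CPUs -> %d\n", use_percpu_counter(1000000, 64)); /* 1 */
            printf("4K entries, 64 CPUs -> %d\n", use_percpu_counter(4096, 64));    /* 0 */
            return 0;
    }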
944 static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l) in free_htab_elem() argument
946 htab_put_fd_value(htab, l); in free_htab_elem()
948 if (htab_is_prealloc(htab)) { in free_htab_elem()
949 bpf_map_dec_elem_count(&htab->map); in free_htab_elem()
950 check_and_free_fields(htab, l); in free_htab_elem()
951 __pcpu_freelist_push(&htab->freelist, &l->fnode); in free_htab_elem()
953 dec_elem_count(htab); in free_htab_elem()
954 htab_elem_free(htab, l); in free_htab_elem()
958 static void pcpu_copy_value(struct bpf_htab *htab, void __percpu *pptr, in pcpu_copy_value() argument
963 copy_map_value(&htab->map, this_cpu_ptr(pptr), value); in pcpu_copy_value()
965 u32 size = round_up(htab->map.value_size, 8); in pcpu_copy_value()
969 copy_map_value_long(&htab->map, per_cpu_ptr(pptr, cpu), value + off); in pcpu_copy_value()
975 static void pcpu_init_value(struct bpf_htab *htab, void __percpu *pptr, in pcpu_init_value() argument
989 copy_map_value_long(&htab->map, per_cpu_ptr(pptr, cpu), value); in pcpu_init_value()
991 zero_map_value(&htab->map, per_cpu_ptr(pptr, cpu)); in pcpu_init_value()
994 pcpu_copy_value(htab, pptr, value, onallcpus); in pcpu_init_value()
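pcpu_copy_value()/pcpu_init_value() write one value image per CPU, each slot
padded to round_up(value_size, 8). The userspace view is symmetric: a lookup on
a per-CPU hash map fills one slot per possible CPU. A sketch using libbpf; the
fd and value type are illustrative:

    #include <stdlib.h>
    #include <bpf/bpf.h>
    #include <bpf/libbpf.h>

    static long long sum_percpu(int map_fd, __u32 key)
    {
            int ncpus = libbpf_num_possible_cpus();
            __u64 *vals;            /* __u64 slots are already 8-byte padded */
            long long sum = 0;

            if (ncpus < 0)
                    return -1;
            vals = calloc(ncpus, sizeof(*vals));
            if (vals && !bpf_map_lookup_elem(map_fd, &key, vals))
                    for (int cpu = 0; cpu < ncpus; cpu++)
                            sum += vals[cpu];
            free(vals);
            return sum;
    }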
998 static bool fd_htab_map_needs_adjust(const struct bpf_htab *htab) in fd_htab_map_needs_adjust() argument
1000 return htab->map.map_type == BPF_MAP_TYPE_HASH_OF_MAPS && in fd_htab_map_needs_adjust()
1004 static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key, in alloc_htab_elem() argument
1009 u32 size = htab->map.value_size; in alloc_htab_elem()
1010 bool prealloc = htab_is_prealloc(htab); in alloc_htab_elem()
1019 pl_new = this_cpu_ptr(htab->extra_elems); in alloc_htab_elem()
1021 htab_put_fd_value(htab, old_elem); in alloc_htab_elem()
1026 l = __pcpu_freelist_pop(&htab->freelist); in alloc_htab_elem()
1030 bpf_map_inc_elem_count(&htab->map); in alloc_htab_elem()
1033 if (is_map_full(htab)) in alloc_htab_elem()
1041 inc_elem_count(htab); in alloc_htab_elem()
1042 l_new = bpf_mem_cache_alloc(&htab->ma); in alloc_htab_elem()
1055 void *ptr = bpf_mem_cache_alloc(&htab->pcpu_ma); in alloc_htab_elem()
1058 bpf_mem_cache_free(&htab->ma, l_new); in alloc_htab_elem()
1066 pcpu_init_value(htab, pptr, value, onallcpus); in alloc_htab_elem()
1070 } else if (fd_htab_map_needs_adjust(htab)) { in alloc_htab_elem()
1074 copy_map_value(&htab->map, in alloc_htab_elem()
1082 dec_elem_count(htab); in alloc_htab_elem()
1086 static int check_flags(struct bpf_htab *htab, struct htab_elem *l_old, in check_flags() argument
1104 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in htab_map_update_elem() local
1121 hash = htab_map_hash(key, key_size, htab->hashrnd); in htab_map_update_elem()
1123 b = __select_bucket(htab, hash); in htab_map_update_elem()
1131 htab->n_buckets); in htab_map_update_elem()
1132 ret = check_flags(htab, l_old, map_flags); in htab_map_update_elem()
1148 ret = htab_lock_bucket(htab, b, hash, &flags); in htab_map_update_elem()
1154 ret = check_flags(htab, l_old, map_flags); in htab_map_update_elem()
1172 l_new = alloc_htab_elem(htab, key, value, key_size, hash, false, false, in htab_map_update_elem()
1186 if (!htab_is_prealloc(htab)) in htab_map_update_elem()
1187 free_htab_elem(htab, l_old); in htab_map_update_elem()
1189 check_and_free_fields(htab, l_old); in htab_map_update_elem()
1193 htab_unlock_bucket(htab, b, hash, flags); in htab_map_update_elem()
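check_flags() enforces the caller-supplied update semantics against whether
l_old exists. Those flags come straight from the update call, as in this
userspace sketch (fd, key, and value are illustrative):

    #include <bpf/bpf.h>

    static int demo_updates(int map_fd)
    {
            __u32 key = 1;
            __u64 val = 100;

            /* create-only: fails with -EEXIST if the key is already present */
            if (bpf_map_update_elem(map_fd, &key, &val, BPF_NOEXIST))
                    return -1;
            val = 200;
            /* update-only: fails with -ENOENT if the key is absent */
            if (bpf_map_update_elem(map_fd, &key, &val, BPF_EXIST))
                    return -1;
            /* BPF_ANY: insert or overwrite */
            return bpf_map_update_elem(map_fd, &key, &val, BPF_ANY);
    }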
1197 static void htab_lru_push_free(struct bpf_htab *htab, struct htab_elem *elem) in htab_lru_push_free() argument
1199 check_and_free_fields(htab, elem); in htab_lru_push_free()
1200 bpf_map_dec_elem_count(&htab->map); in htab_lru_push_free()
1201 bpf_lru_push_free(&htab->lru, &elem->lru_node); in htab_lru_push_free()
1207 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in htab_lru_map_update_elem() local
1224 hash = htab_map_hash(key, key_size, htab->hashrnd); in htab_lru_map_update_elem()
1226 b = __select_bucket(htab, hash); in htab_lru_map_update_elem()
1234 l_new = prealloc_lru_pop(htab, key, hash); in htab_lru_map_update_elem()
1237 copy_map_value(&htab->map, in htab_lru_map_update_elem()
1240 ret = htab_lock_bucket(htab, b, hash, &flags); in htab_lru_map_update_elem()
1246 ret = check_flags(htab, l_old, map_flags); in htab_lru_map_update_elem()
1261 htab_unlock_bucket(htab, b, hash, flags); in htab_lru_map_update_elem()
1265 htab_lru_push_free(htab, l_new); in htab_lru_map_update_elem()
1267 htab_lru_push_free(htab, l_old); in htab_lru_map_update_elem()
1276 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in __htab_percpu_map_update_elem() local
1293 hash = htab_map_hash(key, key_size, htab->hashrnd); in __htab_percpu_map_update_elem()
1295 b = __select_bucket(htab, hash); in __htab_percpu_map_update_elem()
1298 ret = htab_lock_bucket(htab, b, hash, &flags); in __htab_percpu_map_update_elem()
1304 ret = check_flags(htab, l_old, map_flags); in __htab_percpu_map_update_elem()
1310 pcpu_copy_value(htab, htab_elem_get_ptr(l_old, key_size), in __htab_percpu_map_update_elem()
1313 l_new = alloc_htab_elem(htab, key, value, key_size, in __htab_percpu_map_update_elem()
1323 htab_unlock_bucket(htab, b, hash, flags); in __htab_percpu_map_update_elem()
1331 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in __htab_lru_percpu_map_update_elem() local
1348 hash = htab_map_hash(key, key_size, htab->hashrnd); in __htab_lru_percpu_map_update_elem()
1350 b = __select_bucket(htab, hash); in __htab_lru_percpu_map_update_elem()
1359 l_new = prealloc_lru_pop(htab, key, hash); in __htab_lru_percpu_map_update_elem()
1364 ret = htab_lock_bucket(htab, b, hash, &flags); in __htab_lru_percpu_map_update_elem()
1370 ret = check_flags(htab, l_old, map_flags); in __htab_lru_percpu_map_update_elem()
1378 pcpu_copy_value(htab, htab_elem_get_ptr(l_old, key_size), in __htab_lru_percpu_map_update_elem()
1381 pcpu_init_value(htab, htab_elem_get_ptr(l_new, key_size), in __htab_lru_percpu_map_update_elem()
1388 htab_unlock_bucket(htab, b, hash, flags); in __htab_lru_percpu_map_update_elem()
1391 bpf_map_dec_elem_count(&htab->map); in __htab_lru_percpu_map_update_elem()
1392 bpf_lru_push_free(&htab->lru, &l_new->lru_node); in __htab_lru_percpu_map_update_elem()
1413 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in htab_map_delete_elem() local
1426 hash = htab_map_hash(key, key_size, htab->hashrnd); in htab_map_delete_elem()
1427 b = __select_bucket(htab, hash); in htab_map_delete_elem()
1430 ret = htab_lock_bucket(htab, b, hash, &flags); in htab_map_delete_elem()
1438 free_htab_elem(htab, l); in htab_map_delete_elem()
1443 htab_unlock_bucket(htab, b, hash, flags); in htab_map_delete_elem()
1449 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in htab_lru_map_delete_elem() local
1462 hash = htab_map_hash(key, key_size, htab->hashrnd); in htab_lru_map_delete_elem()
1463 b = __select_bucket(htab, hash); in htab_lru_map_delete_elem()
1466 ret = htab_lock_bucket(htab, b, hash, &flags); in htab_lru_map_delete_elem()
1477 htab_unlock_bucket(htab, b, hash, flags); in htab_lru_map_delete_elem()
1479 htab_lru_push_free(htab, l); in htab_lru_map_delete_elem()
1483 static void delete_all_elements(struct bpf_htab *htab) in delete_all_elements() argument
1491 for (i = 0; i < htab->n_buckets; i++) { in delete_all_elements()
1492 struct hlist_nulls_head *head = select_bucket(htab, i); in delete_all_elements()
1498 htab_elem_free(htab, l); in delete_all_elements()
1505 static void htab_free_malloced_timers_and_wq(struct bpf_htab *htab) in htab_free_malloced_timers_and_wq() argument
1510 for (i = 0; i < htab->n_buckets; i++) { in htab_free_malloced_timers_and_wq()
1511 struct hlist_nulls_head *head = select_bucket(htab, i); in htab_free_malloced_timers_and_wq()
1517 if (btf_record_has_field(htab->map.record, BPF_TIMER)) in htab_free_malloced_timers_and_wq()
1518 bpf_obj_free_timer(htab->map.record, in htab_free_malloced_timers_and_wq()
1519 l->key + round_up(htab->map.key_size, 8)); in htab_free_malloced_timers_and_wq()
1520 if (btf_record_has_field(htab->map.record, BPF_WORKQUEUE)) in htab_free_malloced_timers_and_wq()
1521 bpf_obj_free_workqueue(htab->map.record, in htab_free_malloced_timers_and_wq()
1522 l->key + round_up(htab->map.key_size, 8)); in htab_free_malloced_timers_and_wq()
1531 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in htab_map_free_timers_and_wq() local
1534 if (btf_record_has_field(htab->map.record, BPF_TIMER | BPF_WORKQUEUE)) { in htab_map_free_timers_and_wq()
1535 if (!htab_is_prealloc(htab)) in htab_map_free_timers_and_wq()
1536 htab_free_malloced_timers_and_wq(htab); in htab_map_free_timers_and_wq()
1538 htab_free_prealloced_timers_and_wq(htab); in htab_map_free_timers_and_wq()
1545 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in htab_map_free() local
1557 if (!htab_is_prealloc(htab)) { in htab_map_free()
1558 delete_all_elements(htab); in htab_map_free()
1560 htab_free_prealloced_fields(htab); in htab_map_free()
1561 prealloc_destroy(htab); in htab_map_free()
1565 free_percpu(htab->extra_elems); in htab_map_free()
1566 bpf_map_area_free(htab->buckets); in htab_map_free()
1567 bpf_mem_alloc_destroy(&htab->pcpu_ma); in htab_map_free()
1568 bpf_mem_alloc_destroy(&htab->ma); in htab_map_free()
1569 if (htab->use_percpu_counter) in htab_map_free()
1570 percpu_counter_destroy(&htab->pcount); in htab_map_free()
1572 free_percpu(htab->map_locked[i]); in htab_map_free()
1573 lockdep_unregister_key(&htab->lockdep_key); in htab_map_free()
1574 bpf_map_area_free(htab); in htab_map_free()
1602 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in __htab_map_lookup_and_delete_elem() local
1612 hash = htab_map_hash(key, key_size, htab->hashrnd); in __htab_map_lookup_and_delete_elem()
1613 b = __select_bucket(htab, hash); in __htab_map_lookup_and_delete_elem()
1616 ret = htab_lock_bucket(htab, b, hash, &bflags); in __htab_map_lookup_and_delete_elem()
1631 copy_map_value_long(&htab->map, value + off, per_cpu_ptr(pptr, cpu)); in __htab_map_lookup_and_delete_elem()
1632 check_and_init_map_value(&htab->map, value + off); in __htab_map_lookup_and_delete_elem()
1651 free_htab_elem(htab, l); in __htab_map_lookup_and_delete_elem()
1654 htab_unlock_bucket(htab, b, hash, bflags); in __htab_map_lookup_and_delete_elem()
1657 htab_lru_push_free(htab, l); in __htab_map_lookup_and_delete_elem()
1699 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in __htab_map_lookup_and_delete_batch() local
1736 if (batch >= htab->n_buckets) in __htab_map_lookup_and_delete_batch()
1739 key_size = htab->map.key_size; in __htab_map_lookup_and_delete_batch()
1740 roundup_key_size = round_up(htab->map.key_size, 8); in __htab_map_lookup_and_delete_batch()
1741 value_size = htab->map.value_size; in __htab_map_lookup_and_delete_batch()
1768 b = &htab->buckets[batch]; in __htab_map_lookup_and_delete_batch()
1772 ret = htab_lock_bucket(htab, b, batch, &flags); in __htab_map_lookup_and_delete_batch()
1795 htab_unlock_bucket(htab, b, batch, flags); in __htab_map_lookup_and_delete_batch()
1806 htab_unlock_bucket(htab, b, batch, flags); in __htab_map_lookup_and_delete_batch()
1827 copy_map_value_long(&htab->map, dst_val + off, per_cpu_ptr(pptr, cpu)); in __htab_map_lookup_and_delete_batch()
1828 check_and_init_map_value(&htab->map, dst_val + off); in __htab_map_lookup_and_delete_batch()
1861 free_htab_elem(htab, l); in __htab_map_lookup_and_delete_batch()
1868 htab_unlock_bucket(htab, b, batch, flags); in __htab_map_lookup_and_delete_batch()
1874 htab_lru_push_free(htab, l); in __htab_map_lookup_and_delete_batch()
1881 if (!bucket_cnt && (batch + 1 < htab->n_buckets)) { in __htab_map_lookup_and_delete_batch()
1898 if (batch >= htab->n_buckets) { in __htab_map_lookup_and_delete_batch()
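__htab_map_lookup_and_delete_batch() drains whole buckets per call, using an
opaque cursor (internally the next bucket index) handed back to userspace and
failing with -ENOENT once the cursor runs past the last bucket. A sketch of the
userspace loop, assuming libbpf >= 1.0 error conventions and illustrative
key/value types:

    #include <errno.h>
    #include <bpf/bpf.h>

    static void drain_map(int map_fd)
    {
            __u32 keys[64];
            __u64 vals[64];
            __u32 in_batch, out_batch, count;
            void *in = NULL;        /* NULL cursor: start from the first bucket */
            int err;

            LIBBPF_OPTS(bpf_map_batch_opts, opts);

            do {
                    count = 64;     /* in: capacity, out: entries returned */
                    err = bpf_map_lookup_and_delete_batch(map_fd, in, &out_batch,
                                                          keys, vals, &count, &opts);
                    if (err && err != -ENOENT)
                            break;  /* real failure */
                    /* consume `count` key/value pairs here */
                    in_batch = out_batch;
                    in = &in_batch;
            } while (!err);         /* -ENOENT: every bucket has been drained */
    }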
1991 struct bpf_htab *htab; member
2001 const struct bpf_htab *htab = info->htab; in bpf_hash_map_seq_find_next() local
2010 if (bucket_id >= htab->n_buckets) in bpf_hash_map_seq_find_next()
2024 b = &htab->buckets[bucket_id++]; in bpf_hash_map_seq_find_next()
2029 for (i = bucket_id; i < htab->n_buckets; i++) { in bpf_hash_map_seq_find_next()
2030 b = &htab->buckets[i]; in bpf_hash_map_seq_find_next()
2148 seq_info->htab = container_of(map, struct bpf_htab, map); in bpf_iter_init_hash_map()
2177 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in bpf_for_each_hash_elem() local
2192 is_percpu = htab_is_percpu(htab); in bpf_for_each_hash_elem()
2200 for (i = 0; i < htab->n_buckets; i++) { in bpf_for_each_hash_elem()
2201 b = &htab->buckets[i]; in bpf_for_each_hash_elem()
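bpf_for_each_hash_elem() implements the bpf_for_each_map_elem() helper: it
walks every bucket and invokes a BPF callback per element. A BPF-side sketch,
assuming libbpf headers; map, section, and program names are illustrative:

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    struct {
            __uint(type, BPF_MAP_TYPE_HASH);
            __uint(max_entries, 1024);
            __type(key, __u32);
            __type(value, __u64);
    } demo_htab SEC(".maps");

    static long sum_cb(struct bpf_map *map, __u32 *key, __u64 *val, __u64 *sum)
    {
            *sum += *val;
            return 0;               /* 0 = continue, 1 = stop iterating */
    }

    SEC("tracepoint/syscalls/sys_enter_getpid")
    int sum_values(void *ctx)
    {
            __u64 sum = 0;

            bpf_for_each_map_elem(&demo_htab, sum_cb, &sum, 0);
            return 0;
    }

    char LICENSE[] SEC("license") = "GPL";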
2232 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in htab_map_mem_usage() local
2233 u32 value_size = round_up(htab->map.value_size, 8); in htab_map_mem_usage()
2234 bool prealloc = htab_is_prealloc(htab); in htab_map_mem_usage()
2235 bool percpu = htab_is_percpu(htab); in htab_map_mem_usage()
2236 bool lru = htab_is_lru(htab); in htab_map_mem_usage()
2240 usage += sizeof(struct bucket) * htab->n_buckets; in htab_map_mem_usage()
2244 if (htab_has_extra_elems(htab)) in htab_map_mem_usage()
2247 usage += htab->elem_size * num_entries; in htab_map_mem_usage()
2256 num_entries = htab->use_percpu_counter ? in htab_map_mem_usage()
2257 percpu_counter_sum(&htab->pcount) : in htab_map_mem_usage()
2258 atomic_read(&htab->count); in htab_map_mem_usage()
2259 usage += (htab->elem_size + LLIST_NODE_SZ) * num_entries; in htab_map_mem_usage()
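htab_map_mem_usage() charges the struct bpf_htab itself, the bucket array, and
then either the preallocated element area (plus one extra element per possible
CPU for non-percpu, non-LRU maps) or, for non-preallocated maps, the live
element count times (elem_size + LLIST_NODE_SZ). A toy computation of the
preallocated, non-percpu arm; all struct sizes and the CPU count below are
assumed placeholders:

    #include <stdio.h>

    int main(void)
    {
            /* All sizes here are assumed placeholders for a 64-bit build. */
            unsigned long long htab_sz = 832, bucket_sz = 16, elem_size = 72;
            unsigned long long n_buckets = 1024, max_entries = 1000;
            unsigned long long extra = 8;  /* one extra elem per possible CPU (8 assumed) */

            unsigned long long usage = htab_sz
                    + bucket_sz * n_buckets                /* bucket array       */
                    + elem_size * (max_entries + extra);   /* preallocated elems */

            printf("approx usage: %llu bytes\n", usage);
            return 0;
    }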
2285 BATCH_OPS(htab),
2420 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in bpf_percpu_hash_update() local
2424 if (htab_is_lru(htab)) in bpf_percpu_hash_update()
2514 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); in fd_htab_map_free() local
2520 for (i = 0; i < htab->n_buckets; i++) { in fd_htab_map_free()
2521 head = select_bucket(htab, i); in fd_htab_map_free()
2644 BATCH_OPS(htab),