Lines matching refs:map (BPF array maps, kernel/bpf/arraymap.c)

Each match keeps its source line number from arraymap.c; a trailing argument, local, or member tag marks how the identifier is used on that line.
26 for (i = 0; i < array->map.max_entries; i++) { in bpf_array_free_percpu()
37 for (i = 0; i < array->map.max_entries; i++) { in bpf_array_alloc_percpu()
38 ptr = bpf_map_alloc_percpu(&array->map, array->elem_size, 8, in bpf_array_alloc_percpu()
146 array->map.bypass_spec_v1 = bypass_spec_v1; in array_map_alloc()
149 bpf_map_init_from_attr(&array->map, attr); in array_map_alloc()
157 return &array->map; in array_map_alloc()
166 static void *array_map_lookup_elem(struct bpf_map *map, void *key) in array_map_lookup_elem() argument
168 struct bpf_array *array = container_of(map, struct bpf_array, map); in array_map_lookup_elem()
171 if (unlikely(index >= array->map.max_entries)) in array_map_lookup_elem()
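The plain array lookup above is just a bounds check plus pointer arithmetic. A minimal sketch from memory of the upstream function (the index_mask AND, which clamps the index even on mispredicted speculative paths, is not visible in the matches and may differ by kernel version):

	static void *array_map_lookup_elem(struct bpf_map *map, void *key)
	{
		struct bpf_array *array = container_of(map, struct bpf_array, map);
		u32 index = *(u32 *)key;

		/* keys are dense u32 indices; anything past max_entries misses */
		if (unlikely(index >= array->map.max_entries))
			return NULL;

		return array->value + (u64)array->elem_size * (index & array->index_mask);
	}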
177 static int array_map_direct_value_addr(const struct bpf_map *map, u64 *imm, in array_map_direct_value_addr() argument
180 struct bpf_array *array = container_of(map, struct bpf_array, map); in array_map_direct_value_addr()
182 if (map->max_entries != 1) in array_map_direct_value_addr()
184 if (off >= map->value_size) in array_map_direct_value_addr()
191 static int array_map_direct_value_meta(const struct bpf_map *map, u64 imm, in array_map_direct_value_meta() argument
194 struct bpf_array *array = container_of(map, struct bpf_array, map); in array_map_direct_value_meta()
198 if (map->max_entries != 1) in array_map_direct_value_meta()
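Both direct-value helpers apply only to single-slot arrays (global data maps), where the verifier can fold the value's kernel address straight into an instruction immediate. A hedged sketch of the addr side, reconstructed around the matched checks:

	static int array_map_direct_value_addr(const struct bpf_map *map, u64 *imm,
					       u32 off)
	{
		struct bpf_array *array = container_of(map, struct bpf_array, map);

		/* direct value access needs exactly one preallocated slot */
		if (map->max_entries != 1)
			return -EOPNOTSUPP;
		if (off >= map->value_size)
			return -EINVAL;

		*imm = (unsigned long)array->value;
		return 0;
	}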
208 static int array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf) in array_map_gen_lookup() argument
210 struct bpf_array *array = container_of(map, struct bpf_array, map); in array_map_gen_lookup()
217 if (map->map_flags & BPF_F_INNER_MAP) in array_map_gen_lookup()
222 if (!map->bypass_spec_v1) { in array_map_gen_lookup()
223 *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 4); in array_map_gen_lookup()
226 *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 3); in array_map_gen_lookup()
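map_gen_lookup lets the verifier replace the bpf_map_lookup_elem() helper call with inline BPF instructions. The 4-vs-3 jump offsets in the matched lines exist because the Spectre v1 path emits one extra AND to mask the index. A sketch from memory of the upstream sequence (register choices and exact instruction order may differ by version):

	static int array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
	{
		struct bpf_array *array = container_of(map, struct bpf_array, map);
		struct bpf_insn *insn = insn_buf;
		u32 elem_size = array->elem_size;
		const int ret = BPF_REG_0;
		const int map_ptr = BPF_REG_1;
		const int index = BPF_REG_2;

		/* inner maps can be replaced at runtime, so nothing can be inlined */
		if (map->map_flags & BPF_F_INNER_MAP)
			return -EOPNOTSUPP;

		*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
		*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
		if (!map->bypass_spec_v1) {
			*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 4);
			*insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
		} else {
			*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 3);
		}

		if (is_power_of_2(elem_size))
			*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
		else
			*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
		*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
		*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
		*insn++ = BPF_MOV64_IMM(ret, 0);
		return insn - insn_buf;
	}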
241 static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key) in percpu_array_map_lookup_elem() argument
243 struct bpf_array *array = container_of(map, struct bpf_array, map); in percpu_array_map_lookup_elem()
246 if (unlikely(index >= array->map.max_entries)) in percpu_array_map_lookup_elem()
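The per-CPU variant resolves through pptrs[], one __percpu allocation per slot, and returns the calling CPU's copy. A short sketch under the same hedges as above:

	static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key)
	{
		struct bpf_array *array = container_of(map, struct bpf_array, map);
		u32 index = *(u32 *)key;

		if (unlikely(index >= array->map.max_entries))
			return NULL;

		return this_cpu_ptr(array->pptrs[index & array->index_mask]);
	}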
253 static int percpu_array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf) in percpu_array_map_gen_lookup() argument
255 struct bpf_array *array = container_of(map, struct bpf_array, map); in percpu_array_map_gen_lookup()
261 if (map->map_flags & BPF_F_INNER_MAP) in percpu_array_map_gen_lookup()
264 BUILD_BUG_ON(offsetof(struct bpf_array, map) != 0); in percpu_array_map_gen_lookup()
268 if (!map->bypass_spec_v1) { in percpu_array_map_gen_lookup()
269 *insn++ = BPF_JMP_IMM(BPF_JGE, BPF_REG_0, map->max_entries, 6); in percpu_array_map_gen_lookup()
272 *insn++ = BPF_JMP_IMM(BPF_JGE, BPF_REG_0, map->max_entries, 5); in percpu_array_map_gen_lookup()
284 static void *percpu_array_map_lookup_percpu_elem(struct bpf_map *map, void *key, u32 cpu) in percpu_array_map_lookup_percpu_elem() argument
286 struct bpf_array *array = container_of(map, struct bpf_array, map); in percpu_array_map_lookup_percpu_elem()
292 if (unlikely(index >= array->map.max_entries)) in percpu_array_map_lookup_percpu_elem()
298 int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value) in bpf_percpu_array_copy() argument
300 struct bpf_array *array = container_of(map, struct bpf_array, map); in bpf_percpu_array_copy()
306 if (unlikely(index >= array->map.max_entries)) in bpf_percpu_array_copy()
317 copy_map_value_long(map, value + off, per_cpu_ptr(pptr, cpu)); in bpf_percpu_array_copy()
318 check_and_init_map_value(map, value + off); in bpf_percpu_array_copy()
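bpf_percpu_array_copy() serves the syscall-side lookup: it flattens one logical element into a user buffer, one per-CPU copy after another, each slot rounded up to 8 bytes. A hedged sketch of the core loop around the matched lines:

	u32 size = array->elem_size;
	void __percpu *pptr;
	int cpu, off = 0;

	rcu_read_lock();
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		copy_map_value_long(map, value + off, per_cpu_ptr(pptr, cpu));
		/* scrub locks/timers/kptrs so user space never sees them raw */
		check_and_init_map_value(map, value + off);
		off += size;
	}
	rcu_read_unlock();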
326 static int array_map_get_next_key(struct bpf_map *map, void *key, void *next_key) in array_map_get_next_key() argument
328 struct bpf_array *array = container_of(map, struct bpf_array, map); in array_map_get_next_key()
332 if (index >= array->map.max_entries) { in array_map_get_next_key()
337 if (index == array->map.max_entries - 1) in array_map_get_next_key()
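get_next_key defines the iteration order for arrays: an invalid or missing key restarts at index 0, and the last index reports -ENOENT. This one is short enough to quote nearly whole (from memory of upstream):

	static int array_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
	{
		struct bpf_array *array = container_of(map, struct bpf_array, map);
		u32 index = key ? *(u32 *)key : U32_MAX;
		u32 *next = (u32 *)next_key;

		if (index >= array->map.max_entries) {
			*next = 0;
			return 0;
		}

		if (index == array->map.max_entries - 1)
			return -ENOENT;

		*next = index + 1;
		return 0;
	}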
345 static long array_map_update_elem(struct bpf_map *map, void *key, void *value, in array_map_update_elem() argument
348 struct bpf_array *array = container_of(map, struct bpf_array, map); in array_map_update_elem()
356 if (unlikely(index >= array->map.max_entries)) in array_map_update_elem()
365 !btf_record_has_field(map->record, BPF_SPIN_LOCK))) in array_map_update_elem()
368 if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) { in array_map_update_elem()
370 copy_map_value(map, val, value); in array_map_update_elem()
371 bpf_obj_free_fields(array->map.record, val); in array_map_update_elem()
376 copy_map_value_locked(map, val, value, false); in array_map_update_elem()
378 copy_map_value(map, val, value); in array_map_update_elem()
379 bpf_obj_free_fields(array->map.record, val); in array_map_update_elem()
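Updates can never create or remove slots, which explains the flag handling in the matches: BPF_NOEXIST always fails and out-of-range indices return -E2BIG. A sketch of the body, hedged as before:

	static long array_map_update_elem(struct bpf_map *map, void *key, void *value,
					  u64 map_flags)
	{
		struct bpf_array *array = container_of(map, struct bpf_array, map);
		u32 index = *(u32 *)key;
		char *val;

		if (unlikely((map_flags & ~BPF_F_LOCK) > BPF_EXIST))
			return -EINVAL;
		if (unlikely(index >= array->map.max_entries))
			return -E2BIG;	/* all elements are preallocated */
		if (unlikely(map_flags & BPF_NOEXIST))
			return -EEXIST;	/* every slot already exists */
		if (unlikely((map_flags & BPF_F_LOCK) &&
			     !btf_record_has_field(map->record, BPF_SPIN_LOCK)))
			return -EINVAL;

		if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
			val = this_cpu_ptr(array->pptrs[index & array->index_mask]);
			copy_map_value(map, val, value);
			bpf_obj_free_fields(array->map.record, val);
		} else {
			val = array->value +
				(u64)array->elem_size * (index & array->index_mask);
			if (map_flags & BPF_F_LOCK)
				copy_map_value_locked(map, val, value, false);
			else
				copy_map_value(map, val, value);
			bpf_obj_free_fields(array->map.record, val);
		}
		return 0;
	}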
384 int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value, in bpf_percpu_array_update() argument
387 struct bpf_array *array = container_of(map, struct bpf_array, map); in bpf_percpu_array_update()
397 if (unlikely(index >= array->map.max_entries)) in bpf_percpu_array_update()
415 copy_map_value_long(map, per_cpu_ptr(pptr, cpu), value + off); in bpf_percpu_array_update()
416 bpf_obj_free_fields(array->map.record, per_cpu_ptr(pptr, cpu)); in bpf_percpu_array_update()
424 static long array_map_delete_elem(struct bpf_map *map, void *key) in array_map_delete_elem() argument
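Since every slot is preallocated and permanently present, deletion has no meaning for arrays; upstream, the whole function is a rejection:

	static long array_map_delete_elem(struct bpf_map *map, void *key)
	{
		return -EINVAL;
	}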
434 static void array_map_free_timers_wq(struct bpf_map *map) in array_map_free_timers_wq() argument
436 struct bpf_array *array = container_of(map, struct bpf_array, map); in array_map_free_timers_wq()
442 if (btf_record_has_field(map->record, BPF_TIMER | BPF_WORKQUEUE)) { in array_map_free_timers_wq()
443 for (i = 0; i < array->map.max_entries; i++) { in array_map_free_timers_wq()
444 if (btf_record_has_field(map->record, BPF_TIMER)) in array_map_free_timers_wq()
445 bpf_obj_free_timer(map->record, array_map_elem_ptr(array, i)); in array_map_free_timers_wq()
446 if (btf_record_has_field(map->record, BPF_WORKQUEUE)) in array_map_free_timers_wq()
447 bpf_obj_free_workqueue(map->record, array_map_elem_ptr(array, i)); in array_map_free_timers_wq()
453 static void array_map_free(struct bpf_map *map) in array_map_free() argument
455 struct bpf_array *array = container_of(map, struct bpf_array, map); in array_map_free()
458 if (!IS_ERR_OR_NULL(map->record)) { in array_map_free()
459 if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) { in array_map_free()
460 for (i = 0; i < array->map.max_entries; i++) { in array_map_free()
465 bpf_obj_free_fields(map->record, per_cpu_ptr(pptr, cpu)); in array_map_free()
470 for (i = 0; i < array->map.max_entries; i++) in array_map_free()
471 bpf_obj_free_fields(map->record, array_map_elem_ptr(array, i)); in array_map_free()
475 if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) in array_map_free()
478 if (array->map.map_flags & BPF_F_MMAPABLE) in array_map_free()
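Teardown runs in two phases: first release any special fields (kptrs, timers, workqueues) embedded in values, then free the backing storage, which differs for per-CPU and mmapable layouts. A hedged sketch of the shape:

	static void array_map_free(struct bpf_map *map)
	{
		struct bpf_array *array = container_of(map, struct bpf_array, map);
		int i;

		if (!IS_ERR_OR_NULL(map->record)) {
			if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
				for (i = 0; i < array->map.max_entries; i++) {
					void __percpu *pptr = array->pptrs[i & array->index_mask];
					int cpu;

					for_each_possible_cpu(cpu)
						bpf_obj_free_fields(map->record, per_cpu_ptr(pptr, cpu));
				}
			} else {
				for (i = 0; i < array->map.max_entries; i++)
					bpf_obj_free_fields(map->record, array_map_elem_ptr(array, i));
			}
		}

		if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
			bpf_array_free_percpu(array);

		if (array->map.map_flags & BPF_F_MMAPABLE)
			bpf_map_area_free(array_map_vmalloc_addr(array));
		else
			bpf_map_area_free(array);
	}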
484 static void array_map_seq_show_elem(struct bpf_map *map, void *key, in array_map_seq_show_elem() argument
491 value = array_map_lookup_elem(map, key); in array_map_seq_show_elem()
497 if (map->btf_key_type_id) in array_map_seq_show_elem()
499 btf_type_seq_show(map->btf, map->btf_value_type_id, value, m); in array_map_seq_show_elem()
505 static void percpu_array_map_seq_show_elem(struct bpf_map *map, void *key, in percpu_array_map_seq_show_elem() argument
508 struct bpf_array *array = container_of(map, struct bpf_array, map); in percpu_array_map_seq_show_elem()
519 btf_type_seq_show(map->btf, map->btf_value_type_id, in percpu_array_map_seq_show_elem()
528 static int array_map_check_btf(const struct bpf_map *map, in array_map_check_btf() argument
535 if (map->map_type != BPF_MAP_TYPE_ARRAY || in array_map_check_btf()
536 map->max_entries != 1) in array_map_check_btf()
555 static int array_map_mmap(struct bpf_map *map, struct vm_area_struct *vma) in array_map_mmap() argument
557 struct bpf_array *array = container_of(map, struct bpf_array, map); in array_map_mmap()
560 if (!(map->map_flags & BPF_F_MMAPABLE)) in array_map_mmap()
564 PAGE_ALIGN((u64)array->map.max_entries * array->elem_size)) in array_map_mmap()
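BPF_F_MMAPABLE arrays keep their data in a vmalloc'd region so user space can map it; the check in the matches rejects any mapping that would extend past the page-aligned data area. A sketch, hedged (array_map_vmalloc_addr() is an internal helper in this file):

	static int array_map_mmap(struct bpf_map *map, struct vm_area_struct *vma)
	{
		struct bpf_array *array = container_of(map, struct bpf_array, map);
		pgoff_t pgoff = PAGE_ALIGN(sizeof(*array)) >> PAGE_SHIFT;

		if (!(map->map_flags & BPF_F_MMAPABLE))
			return -EINVAL;

		if (vma->vm_pgoff * PAGE_SIZE + (vma->vm_end - vma->vm_start) >
		    PAGE_ALIGN((u64)array->map.max_entries * array->elem_size))
			return -EINVAL;

		/* skip the struct bpf_array header that precedes the data */
		return remap_vmalloc_range(vma, array_map_vmalloc_addr(array),
					   vma->vm_pgoff + pgoff);
	}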
581 struct bpf_map *map; member
589 struct bpf_map *map = info->map; in bpf_array_map_seq_start() local
593 if (info->index >= map->max_entries) in bpf_array_map_seq_start()
598 array = container_of(map, struct bpf_array, map); in bpf_array_map_seq_start()
608 struct bpf_map *map = info->map; in bpf_array_map_seq_next() local
614 if (info->index >= map->max_entries) in bpf_array_map_seq_next()
617 array = container_of(map, struct bpf_array, map); in bpf_array_map_seq_next()
628 struct bpf_map *map = info->map; in __bpf_array_map_seq_show() local
629 struct bpf_array *array = container_of(map, struct bpf_array, map); in __bpf_array_map_seq_show()
642 ctx.map = info->map; in __bpf_array_map_seq_show()
652 copy_map_value_long(map, info->percpu_value_buf + off, in __bpf_array_map_seq_show()
654 check_and_init_map_value(map, info->percpu_value_buf + off); in __bpf_array_map_seq_show()
679 struct bpf_map *map = aux->map; in bpf_iter_init_array_map() local
680 struct bpf_array *array = container_of(map, struct bpf_array, map); in bpf_iter_init_array_map()
684 if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) { in bpf_iter_init_array_map()
697 bpf_map_inc_with_uref(map); in bpf_iter_init_array_map()
698 seq_info->map = map; in bpf_iter_init_array_map()
706 bpf_map_put_with_uref(seq_info->map); in bpf_iter_fini_array_map()
724 static long bpf_for_each_array_elem(struct bpf_map *map, bpf_callback_t callback_fn, in bpf_for_each_array_elem() argument
738 is_percpu = map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY; in bpf_for_each_array_elem()
739 array = container_of(map, struct bpf_array, map); in bpf_for_each_array_elem()
740 for (i = 0; i < map->max_entries; i++) { in bpf_for_each_array_elem()
747 ret = callback_fn((u64)(long)map, (u64)(long)&key, in bpf_for_each_array_elem()
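This backs the bpf_for_each_map_elem() helper: it walks every index and hands (map, &key, value, ctx) to the verified callback, stopping early on a nonzero return. The core loop, sketched from the matched call convention:

	for (i = 0; i < map->max_entries; i++) {
		if (is_percpu)
			val = this_cpu_ptr(array->pptrs[i]);
		else
			val = array_map_elem_ptr(array, i);
		num_elems++;
		key = i;
		/* callback returns 0 to continue, 1 to stop iterating */
		ret = callback_fn((u64)(long)map, (u64)(long)&key,
				  (u64)(long)val, (u64)(long)callback_ctx, 0);
		if (ret)
			break;
	}
	return num_elems;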
757 static u64 array_map_mem_usage(const struct bpf_map *map) in array_map_mem_usage() argument
759 struct bpf_array *array = container_of(map, struct bpf_array, map); in array_map_mem_usage()
760 bool percpu = map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY; in array_map_mem_usage()
762 u64 entries = map->max_entries; in array_map_mem_usage()
769 if (map->map_flags & BPF_F_MMAPABLE) { in array_map_mem_usage()
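mem_usage accounts differently per layout: per-CPU maps charge the pointer array plus elem_size per possible CPU, while mmapable maps round both header and data up to page granularity. A hedged sketch:

	static u64 array_map_mem_usage(const struct bpf_map *map)
	{
		struct bpf_array *array = container_of(map, struct bpf_array, map);
		bool percpu = map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
		u32 elem_size = array->elem_size;
		u64 entries = map->max_entries;
		u64 usage = sizeof(*array);

		if (percpu) {
			usage += entries * sizeof(void *);
			usage += entries * elem_size * num_possible_cpus();
		} else if (map->map_flags & BPF_F_MMAPABLE) {
			usage = PAGE_ALIGN(usage);
			usage += PAGE_ALIGN(entries * elem_size);
		} else {
			usage += entries * elem_size;
		}
		return usage;
	}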
838 static void fd_array_map_free(struct bpf_map *map) in fd_array_map_free() argument
840 struct bpf_array *array = container_of(map, struct bpf_array, map); in fd_array_map_free()
844 for (i = 0; i < array->map.max_entries; i++) in fd_array_map_free()
850 static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key) in fd_array_map_lookup_elem() argument
856 int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value) in bpf_fd_array_map_lookup_elem() argument
861 if (!map->ops->map_fd_sys_lookup_elem) in bpf_fd_array_map_lookup_elem()
865 elem = array_map_lookup_elem(map, key); in bpf_fd_array_map_lookup_elem()
867 *value = map->ops->map_fd_sys_lookup_elem(ptr); in bpf_fd_array_map_lookup_elem()
876 int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file, in bpf_fd_array_map_update_elem() argument
879 struct bpf_array *array = container_of(map, struct bpf_array, map); in bpf_fd_array_map_update_elem()
886 if (index >= array->map.max_entries) in bpf_fd_array_map_update_elem()
890 new_ptr = map->ops->map_fd_get_ptr(map, map_file, ufd); in bpf_fd_array_map_update_elem()
894 if (map->ops->map_poke_run) { in bpf_fd_array_map_update_elem()
897 map->ops->map_poke_run(map, index, old_ptr, new_ptr); in bpf_fd_array_map_update_elem()
904 map->ops->map_fd_put_ptr(map, old_ptr, true); in bpf_fd_array_map_update_elem()
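fd-array updates translate a file descriptor into a kernel object pointer via map_fd_get_ptr, then publish it with an atomic xchg; prog arrays additionally hold poke_mutex so the JIT can repatch tail-call sites consistently. A sketch around the matched lines:

	ufd = *(u32 *)value;
	new_ptr = map->ops->map_fd_get_ptr(map, map_file, ufd);
	if (IS_ERR(new_ptr))
		return PTR_ERR(new_ptr);

	if (map->ops->map_poke_run) {
		mutex_lock(&array->aux->poke_mutex);
		old_ptr = xchg(array->ptrs + index, new_ptr);
		map->ops->map_poke_run(map, index, old_ptr, new_ptr);
		mutex_unlock(&array->aux->poke_mutex);
	} else {
		old_ptr = xchg(array->ptrs + index, new_ptr);
	}

	if (old_ptr)
		map->ops->map_fd_put_ptr(map, old_ptr, true);
	return 0;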
908 static long __fd_array_map_delete_elem(struct bpf_map *map, void *key, bool need_defer) in __fd_array_map_delete_elem() argument
910 struct bpf_array *array = container_of(map, struct bpf_array, map); in __fd_array_map_delete_elem()
914 if (index >= array->map.max_entries) in __fd_array_map_delete_elem()
917 if (map->ops->map_poke_run) { in __fd_array_map_delete_elem()
920 map->ops->map_poke_run(map, index, old_ptr, NULL); in __fd_array_map_delete_elem()
927 map->ops->map_fd_put_ptr(map, old_ptr, need_defer); in __fd_array_map_delete_elem()
934 static long fd_array_map_delete_elem(struct bpf_map *map, void *key) in fd_array_map_delete_elem() argument
936 return __fd_array_map_delete_elem(map, key, true); in fd_array_map_delete_elem()
939 static void *prog_fd_array_get_ptr(struct bpf_map *map, in prog_fd_array_get_ptr() argument
949 !bpf_prog_map_compatible(map, prog)) { in prog_fd_array_get_ptr()
972 static void prog_fd_array_put_ptr(struct bpf_map *map, void *ptr, bool need_defer) in prog_fd_array_put_ptr() argument
989 static void bpf_fd_array_map_clear(struct bpf_map *map, bool need_defer) in bpf_fd_array_map_clear() argument
991 struct bpf_array *array = container_of(map, struct bpf_array, map); in bpf_fd_array_map_clear()
994 for (i = 0; i < array->map.max_entries; i++) in bpf_fd_array_map_clear()
995 __fd_array_map_delete_elem(map, &i, need_defer); in bpf_fd_array_map_clear()
998 static void prog_array_map_seq_show_elem(struct bpf_map *map, void *key, in prog_array_map_seq_show_elem() argument
1006 elem = array_map_lookup_elem(map, key); in prog_array_map_seq_show_elem()
1012 btf_type_seq_show(map->btf, map->btf_value_type_id, in prog_array_map_seq_show_elem()
1026 static int prog_array_map_poke_track(struct bpf_map *map, in prog_array_map_poke_track() argument
1033 aux = container_of(map, struct bpf_array, map)->aux; in prog_array_map_poke_track()
1059 static void prog_array_map_poke_untrack(struct bpf_map *map, in prog_array_map_poke_untrack() argument
1065 aux = container_of(map, struct bpf_array, map)->aux; in prog_array_map_poke_untrack()
1083 static void prog_array_map_poke_run(struct bpf_map *map, u32 key, in prog_array_map_poke_run() argument
1090 aux = container_of(map, struct bpf_array, map)->aux; in prog_array_map_poke_run()
1123 if (poke->tail_call.map != map || in prog_array_map_poke_run()
1134 struct bpf_map *map = container_of(work, struct bpf_array_aux, in prog_array_map_clear_deferred() local
1135 work)->map; in prog_array_map_clear_deferred()
1136 bpf_fd_array_map_clear(map, true); in prog_array_map_clear_deferred()
1137 bpf_map_put(map); in prog_array_map_clear_deferred()
1140 static void prog_array_map_clear(struct bpf_map *map) in prog_array_map_clear() argument
1142 struct bpf_array_aux *aux = container_of(map, struct bpf_array, in prog_array_map_clear()
1143 map)->aux; in prog_array_map_clear()
1144 bpf_map_inc(map); in prog_array_map_clear()
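Clearing a prog array is deferred to a workqueue because it can be triggered from contexts that must not sleep; the reference taken in prog_array_map_clear() keeps the map alive until the worker runs. The pair, reconstructed around the matches (the schedule_work(&aux->work) call is inferred from the deferred-work pattern and hedged accordingly):

	static void prog_array_map_clear_deferred(struct work_struct *work)
	{
		struct bpf_map *map = container_of(work, struct bpf_array_aux,
						   work)->map;
		bpf_fd_array_map_clear(map, true);
		bpf_map_put(map);
	}

	static void prog_array_map_clear(struct bpf_map *map)
	{
		struct bpf_array_aux *aux = container_of(map, struct bpf_array,
							 map)->aux;
		bpf_map_inc(map);
		schedule_work(&aux->work);
	}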
1151 struct bpf_map *map; in prog_array_map_alloc() local
1161 map = array_map_alloc(attr); in prog_array_map_alloc()
1162 if (IS_ERR(map)) { in prog_array_map_alloc()
1164 return map; in prog_array_map_alloc()
1167 container_of(map, struct bpf_array, map)->aux = aux; in prog_array_map_alloc()
1168 aux->map = map; in prog_array_map_alloc()
1170 return map; in prog_array_map_alloc()
1173 static void prog_array_map_free(struct bpf_map *map) in prog_array_map_free() argument
1178 aux = container_of(map, struct bpf_array, map)->aux; in prog_array_map_free()
1184 fd_array_map_free(map); in prog_array_map_free()
1240 static void *perf_event_fd_array_get_ptr(struct bpf_map *map, in perf_event_fd_array_get_ptr() argument
1266 static void perf_event_fd_array_put_ptr(struct bpf_map *map, void *ptr, bool need_defer) in perf_event_fd_array_put_ptr() argument
1272 static void perf_event_fd_array_release(struct bpf_map *map, in perf_event_fd_array_release() argument
1275 struct bpf_array *array = container_of(map, struct bpf_array, map); in perf_event_fd_array_release()
1279 if (map->map_flags & BPF_F_PRESERVE_ELEMS) in perf_event_fd_array_release()
1283 for (i = 0; i < array->map.max_entries; i++) { in perf_event_fd_array_release()
1286 __fd_array_map_delete_elem(map, &i, true); in perf_event_fd_array_release()
1291 static void perf_event_fd_array_map_free(struct bpf_map *map) in perf_event_fd_array_map_free() argument
1293 if (map->map_flags & BPF_F_PRESERVE_ELEMS) in perf_event_fd_array_map_free()
1294 bpf_fd_array_map_clear(map, false); in perf_event_fd_array_map_free()
1295 fd_array_map_free(map); in perf_event_fd_array_map_free()
1315 static void *cgroup_fd_array_get_ptr(struct bpf_map *map, in cgroup_fd_array_get_ptr() argument
1322 static void cgroup_fd_array_put_ptr(struct bpf_map *map, void *ptr, bool need_defer) in cgroup_fd_array_put_ptr() argument
1328 static void cgroup_fd_array_free(struct bpf_map *map) in cgroup_fd_array_free() argument
1330 bpf_fd_array_map_clear(map, false); in cgroup_fd_array_free()
1331 fd_array_map_free(map); in cgroup_fd_array_free()
1352 struct bpf_map *map, *inner_map_meta; in array_of_map_alloc() local
1358 map = array_map_alloc(attr); in array_of_map_alloc()
1359 if (IS_ERR(map)) { in array_of_map_alloc()
1361 return map; in array_of_map_alloc()
1364 map->inner_map_meta = inner_map_meta; in array_of_map_alloc()
1366 return map; in array_of_map_alloc()
1369 static void array_of_map_free(struct bpf_map *map) in array_of_map_free() argument
1374 bpf_map_meta_free(map->inner_map_meta); in array_of_map_free()
1375 bpf_fd_array_map_clear(map, false); in array_of_map_free()
1376 fd_array_map_free(map); in array_of_map_free()
1379 static void *array_of_map_lookup_elem(struct bpf_map *map, void *key) in array_of_map_lookup_elem() argument
1381 struct bpf_map **inner_map = array_map_lookup_elem(map, key); in array_of_map_lookup_elem()
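An array of maps stores struct bpf_map pointers as values, so a lookup dereferences the slot with READ_ONCE to pair with the update path's publishing store. From memory of upstream (details may differ by version):

	static void *array_of_map_lookup_elem(struct bpf_map *map, void *key)
	{
		struct bpf_map **inner_map = array_map_lookup_elem(map, key);

		if (!inner_map)
			return NULL;

		return READ_ONCE(*inner_map);
	}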
1389 static int array_of_map_gen_lookup(struct bpf_map *map, in array_of_map_gen_lookup() argument
1392 struct bpf_array *array = container_of(map, struct bpf_array, map); in array_of_map_gen_lookup()
1401 if (!map->bypass_spec_v1) { in array_of_map_gen_lookup()
1402 *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 6); in array_of_map_gen_lookup()
1405 *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5); in array_of_map_gen_lookup()