Lines matching refs: map (cross-reference listing from the BPF syscall layer, kernel/bpf/syscall.c). Each entry shows the source line number, the matching code, and whether "map" is a macro/function argument, a local, or a reference inside the named function.

46 #define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \  argument
47 (map)->map_type == BPF_MAP_TYPE_CGROUP_ARRAY || \
48 (map)->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
49 #define IS_FD_PROG_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PROG_ARRAY) argument
50 #define IS_FD_HASH(map) ((map)->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) argument
51 #define IS_FD_MAP(map) (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map) || \ argument
52 IS_FD_HASH(map))
119 static void bpf_map_write_active_inc(struct bpf_map *map) in bpf_map_write_active_inc() argument
121 atomic64_inc(&map->writecnt); in bpf_map_write_active_inc()
124 static void bpf_map_write_active_dec(struct bpf_map *map) in bpf_map_write_active_dec() argument
126 atomic64_dec(&map->writecnt); in bpf_map_write_active_dec()
129 bool bpf_map_write_active(const struct bpf_map *map) in bpf_map_write_active() argument
131 return atomic64_read(&map->writecnt) != 0; in bpf_map_write_active()
134 static u32 bpf_map_value_size(const struct bpf_map *map) in bpf_map_value_size() argument
136 if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH || in bpf_map_value_size()
137 map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH || in bpf_map_value_size()
138 map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY || in bpf_map_value_size()
139 map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) in bpf_map_value_size()
140 return round_up(map->value_size, 8) * num_possible_cpus(); in bpf_map_value_size()
141 else if (IS_FD_MAP(map)) in bpf_map_value_size()
144 return map->value_size; in bpf_map_value_size()
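
For the per-CPU map types above, the syscall layer sizes the user-visible value as round_up(value_size, 8) * num_possible_cpus(), so userspace must allocate that much per lookup/update. A minimal sketch of the matching buffer allocation, assuming libbpf's libbpf_num_possible_cpus() helper is available:

    #include <stdlib.h>
    #include <bpf/libbpf.h>        /* libbpf_num_possible_cpus() */

    /* Mirror the kernel's per-CPU value sizing for BPF_MAP_TYPE_PERCPU_* maps. */
    static void *alloc_percpu_value_buf(unsigned int value_size)
    {
            size_t per_cpu = (value_size + 7) & ~7UL;   /* round_up(value_size, 8) */
            int ncpus = libbpf_num_possible_cpus();     /* possible CPUs, not online */

            if (ncpus < 0)
                    return NULL;
            return calloc(ncpus, per_cpu);
    }
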
147 static void maybe_wait_bpf_programs(struct bpf_map *map) in maybe_wait_bpf_programs() argument
157 if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS || in maybe_wait_bpf_programs()
158 map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS) in maybe_wait_bpf_programs()
245 static int bpf_map_update_value(struct bpf_map *map, struct file *map_file, in bpf_map_update_value() argument
251 if (bpf_map_is_offloaded(map)) { in bpf_map_update_value()
252 return bpf_map_offload_update_elem(map, key, value, flags); in bpf_map_update_value()
253 } else if (map->map_type == BPF_MAP_TYPE_CPUMAP || in bpf_map_update_value()
254 map->map_type == BPF_MAP_TYPE_ARENA || in bpf_map_update_value()
255 map->map_type == BPF_MAP_TYPE_STRUCT_OPS) { in bpf_map_update_value()
256 return map->ops->map_update_elem(map, key, value, flags); in bpf_map_update_value()
257 } else if (map->map_type == BPF_MAP_TYPE_SOCKHASH || in bpf_map_update_value()
258 map->map_type == BPF_MAP_TYPE_SOCKMAP) { in bpf_map_update_value()
259 return sock_map_update_elem_sys(map, key, value, flags); in bpf_map_update_value()
260 } else if (IS_FD_PROG_ARRAY(map)) { in bpf_map_update_value()
261 return bpf_fd_array_map_update_elem(map, map_file, key, value, in bpf_map_update_value()
266 if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH || in bpf_map_update_value()
267 map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) { in bpf_map_update_value()
268 err = bpf_percpu_hash_update(map, key, value, flags); in bpf_map_update_value()
269 } else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) { in bpf_map_update_value()
270 err = bpf_percpu_array_update(map, key, value, flags); in bpf_map_update_value()
271 } else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) { in bpf_map_update_value()
272 err = bpf_percpu_cgroup_storage_update(map, key, value, in bpf_map_update_value()
274 } else if (IS_FD_ARRAY(map)) { in bpf_map_update_value()
275 err = bpf_fd_array_map_update_elem(map, map_file, key, value, in bpf_map_update_value()
277 } else if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) { in bpf_map_update_value()
278 err = bpf_fd_htab_map_update_elem(map, map_file, key, value, in bpf_map_update_value()
280 } else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) { in bpf_map_update_value()
282 err = bpf_fd_reuseport_array_update_elem(map, key, value, in bpf_map_update_value()
284 } else if (map->map_type == BPF_MAP_TYPE_QUEUE || in bpf_map_update_value()
285 map->map_type == BPF_MAP_TYPE_STACK || in bpf_map_update_value()
286 map->map_type == BPF_MAP_TYPE_BLOOM_FILTER) { in bpf_map_update_value()
287 err = map->ops->map_push_elem(map, value, flags); in bpf_map_update_value()
289 err = bpf_obj_pin_uptrs(map->record, value); in bpf_map_update_value()
292 err = map->ops->map_update_elem(map, key, value, flags); in bpf_map_update_value()
295 bpf_obj_unpin_uptrs(map->record, value); in bpf_map_update_value()
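
Every branch above is reached from the single BPF_MAP_UPDATE_ELEM command; the per-type dispatch is invisible to userspace. A hedged caller-side sketch using libbpf (the map fd and the __u32 key / __u64 value layout are assumptions):

    #include <errno.h>
    #include <stdio.h>
    #include <bpf/bpf.h>    /* bpf_map_update_elem() wraps BPF_MAP_UPDATE_ELEM */

    static int set_counter(int map_fd, __u32 key, __u64 val)
    {
            /* BPF_ANY: create the element, or update it if it already exists. */
            if (bpf_map_update_elem(map_fd, &key, &val, BPF_ANY)) {
                    fprintf(stderr, "update failed: %d\n", -errno);
                    return -1;
            }
            return 0;
    }
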
303 static int bpf_map_copy_value(struct bpf_map *map, void *key, void *value, in bpf_map_copy_value() argument
309 if (bpf_map_is_offloaded(map)) in bpf_map_copy_value()
310 return bpf_map_offload_lookup_elem(map, key, value); in bpf_map_copy_value()
313 if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH || in bpf_map_copy_value()
314 map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) { in bpf_map_copy_value()
315 err = bpf_percpu_hash_copy(map, key, value); in bpf_map_copy_value()
316 } else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) { in bpf_map_copy_value()
317 err = bpf_percpu_array_copy(map, key, value); in bpf_map_copy_value()
318 } else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) { in bpf_map_copy_value()
319 err = bpf_percpu_cgroup_storage_copy(map, key, value); in bpf_map_copy_value()
320 } else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) { in bpf_map_copy_value()
321 err = bpf_stackmap_copy(map, key, value); in bpf_map_copy_value()
322 } else if (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map)) { in bpf_map_copy_value()
323 err = bpf_fd_array_map_lookup_elem(map, key, value); in bpf_map_copy_value()
324 } else if (IS_FD_HASH(map)) { in bpf_map_copy_value()
325 err = bpf_fd_htab_map_lookup_elem(map, key, value); in bpf_map_copy_value()
326 } else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) { in bpf_map_copy_value()
327 err = bpf_fd_reuseport_array_lookup_elem(map, key, value); in bpf_map_copy_value()
328 } else if (map->map_type == BPF_MAP_TYPE_QUEUE || in bpf_map_copy_value()
329 map->map_type == BPF_MAP_TYPE_STACK || in bpf_map_copy_value()
330 map->map_type == BPF_MAP_TYPE_BLOOM_FILTER) { in bpf_map_copy_value()
331 err = map->ops->map_peek_elem(map, value); in bpf_map_copy_value()
332 } else if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) { in bpf_map_copy_value()
334 err = bpf_struct_ops_map_sys_lookup_elem(map, key, value); in bpf_map_copy_value()
337 if (map->ops->map_lookup_elem_sys_only) in bpf_map_copy_value()
338 ptr = map->ops->map_lookup_elem_sys_only(map, key); in bpf_map_copy_value()
340 ptr = map->ops->map_lookup_elem(map, key); in bpf_map_copy_value()
349 copy_map_value_locked(map, value, ptr, true); in bpf_map_copy_value()
351 copy_map_value(map, value, ptr); in bpf_map_copy_value()
353 check_and_init_map_value(map, value); in bpf_map_copy_value()
431 void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr) in bpf_map_init_from_attr() argument
433 map->map_type = attr->map_type; in bpf_map_init_from_attr()
434 map->key_size = attr->key_size; in bpf_map_init_from_attr()
435 map->value_size = attr->value_size; in bpf_map_init_from_attr()
436 map->max_entries = attr->max_entries; in bpf_map_init_from_attr()
437 map->map_flags = bpf_map_flags_retain_permanent(attr->map_flags); in bpf_map_init_from_attr()
438 map->numa_node = bpf_map_attr_numa_node(attr); in bpf_map_init_from_attr()
439 map->map_extra = attr->map_extra; in bpf_map_init_from_attr()
442 static int bpf_map_alloc_id(struct bpf_map *map) in bpf_map_alloc_id() argument
448 id = idr_alloc_cyclic(&map_idr, map, 1, INT_MAX, GFP_ATOMIC); in bpf_map_alloc_id()
450 map->id = id; in bpf_map_alloc_id()
460 void bpf_map_free_id(struct bpf_map *map) in bpf_map_free_id() argument
469 if (!map->id) in bpf_map_free_id()
474 idr_remove(&map_idr, map->id); in bpf_map_free_id()
475 map->id = 0; in bpf_map_free_id()
481 static void bpf_map_save_memcg(struct bpf_map *map) in bpf_map_save_memcg() argument
489 map->objcg = get_obj_cgroup_from_current(); in bpf_map_save_memcg()
492 static void bpf_map_release_memcg(struct bpf_map *map) in bpf_map_release_memcg() argument
494 if (map->objcg) in bpf_map_release_memcg()
495 obj_cgroup_put(map->objcg); in bpf_map_release_memcg()
498 static struct mem_cgroup *bpf_map_get_memcg(const struct bpf_map *map) in bpf_map_get_memcg() argument
500 if (map->objcg) in bpf_map_get_memcg()
501 return get_mem_cgroup_from_objcg(map->objcg); in bpf_map_get_memcg()
506 void *bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags, in bpf_map_kmalloc_node() argument
512 memcg = bpf_map_get_memcg(map); in bpf_map_kmalloc_node()
521 void *bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags) in bpf_map_kzalloc() argument
526 memcg = bpf_map_get_memcg(map); in bpf_map_kzalloc()
535 void *bpf_map_kvcalloc(struct bpf_map *map, size_t n, size_t size, in bpf_map_kvcalloc() argument
541 memcg = bpf_map_get_memcg(map); in bpf_map_kvcalloc()
550 void __percpu *bpf_map_alloc_percpu(const struct bpf_map *map, size_t size, in bpf_map_alloc_percpu() argument
556 memcg = bpf_map_get_memcg(map); in bpf_map_alloc_percpu()
566 static void bpf_map_save_memcg(struct bpf_map *map) in bpf_map_save_memcg() argument
570 static void bpf_map_release_memcg(struct bpf_map *map) in bpf_map_release_memcg() argument
592 int bpf_map_alloc_pages(const struct bpf_map *map, int nid, in bpf_map_alloc_pages() argument
601 memcg = bpf_map_get_memcg(map); in bpf_map_alloc_pages()
685 void bpf_map_free_record(struct bpf_map *map) in bpf_map_free_record() argument
687 btf_record_free(map->record); in bpf_map_free_record()
688 map->record = NULL; in bpf_map_free_record()
854 static void bpf_map_free(struct bpf_map *map) in bpf_map_free() argument
856 struct btf_record *rec = map->record; in bpf_map_free()
857 struct btf *btf = map->btf; in bpf_map_free()
864 map->ops->map_free(map); in bpf_map_free()
886 struct bpf_map *map = container_of(work, struct bpf_map, work); in bpf_map_free_deferred() local
888 security_bpf_map_free(map); in bpf_map_free_deferred()
889 bpf_map_release_memcg(map); in bpf_map_free_deferred()
890 bpf_map_owner_free(map); in bpf_map_free_deferred()
891 bpf_map_free(map); in bpf_map_free_deferred()
894 static void bpf_map_put_uref(struct bpf_map *map) in bpf_map_put_uref() argument
896 if (atomic64_dec_and_test(&map->usercnt)) { in bpf_map_put_uref()
897 if (map->ops->map_release_uref) in bpf_map_put_uref()
898 map->ops->map_release_uref(map); in bpf_map_put_uref()
902 static void bpf_map_free_in_work(struct bpf_map *map) in bpf_map_free_in_work() argument
904 INIT_WORK(&map->work, bpf_map_free_deferred); in bpf_map_free_in_work()
908 queue_work(system_unbound_wq, &map->work); in bpf_map_free_in_work()
927 void bpf_map_put(struct bpf_map *map) in bpf_map_put() argument
929 if (atomic64_dec_and_test(&map->refcnt)) { in bpf_map_put()
931 bpf_map_free_id(map); in bpf_map_put()
933 WARN_ON_ONCE(atomic64_read(&map->sleepable_refcnt)); in bpf_map_put()
934 if (READ_ONCE(map->free_after_mult_rcu_gp)) in bpf_map_put()
935 call_rcu_tasks_trace(&map->rcu, bpf_map_free_mult_rcu_gp); in bpf_map_put()
936 else if (READ_ONCE(map->free_after_rcu_gp)) in bpf_map_put()
937 call_rcu(&map->rcu, bpf_map_free_rcu_gp); in bpf_map_put()
939 bpf_map_free_in_work(map); in bpf_map_put()
944 void bpf_map_put_with_uref(struct bpf_map *map) in bpf_map_put_with_uref() argument
946 bpf_map_put_uref(map); in bpf_map_put_with_uref()
947 bpf_map_put(map); in bpf_map_put_with_uref()
952 struct bpf_map *map = filp->private_data; in bpf_map_release() local
954 if (map->ops->map_release) in bpf_map_release()
955 map->ops->map_release(map, filp); in bpf_map_release()
957 bpf_map_put_with_uref(map); in bpf_map_release()
961 static fmode_t map_get_sys_perms(struct bpf_map *map, struct fd f) in map_get_sys_perms() argument
968 if (READ_ONCE(map->frozen)) in map_get_sys_perms()
975 static u64 bpf_map_memory_usage(const struct bpf_map *map) in bpf_map_memory_usage() argument
977 return map->ops->map_mem_usage(map); in bpf_map_memory_usage()
982 struct bpf_map *map = filp->private_data; in bpf_map_show_fdinfo() local
985 spin_lock(&map->owner_lock); in bpf_map_show_fdinfo()
986 if (map->owner) { in bpf_map_show_fdinfo()
987 type = map->owner->type; in bpf_map_show_fdinfo()
988 jited = map->owner->jited; in bpf_map_show_fdinfo()
990 spin_unlock(&map->owner_lock); in bpf_map_show_fdinfo()
1002 map->map_type, in bpf_map_show_fdinfo()
1003 map->key_size, in bpf_map_show_fdinfo()
1004 map->value_size, in bpf_map_show_fdinfo()
1005 map->max_entries, in bpf_map_show_fdinfo()
1006 map->map_flags, in bpf_map_show_fdinfo()
1007 (unsigned long long)map->map_extra, in bpf_map_show_fdinfo()
1008 bpf_map_memory_usage(map), in bpf_map_show_fdinfo()
1009 map->id, in bpf_map_show_fdinfo()
1010 READ_ONCE(map->frozen)); in bpf_map_show_fdinfo()
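
The values printed above (map type, key/value sizes, max_entries, flags, map_extra, memory usage, id, frozen) surface through procfs, so they can be read without issuing any bpf() call. A small sketch that dumps them for a map fd:

    #include <stdio.h>

    /* Print the fdinfo text for a BPF map file descriptor. */
    static void dump_map_fdinfo(int map_fd)
    {
            char path[64], line[256];
            FILE *f;

            snprintf(path, sizeof(path), "/proc/self/fdinfo/%d", map_fd);
            f = fopen(path, "r");
            if (!f)
                    return;
            while (fgets(line, sizeof(line), f))
                    fputs(line, stdout);
            fclose(f);
    }
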
1039 struct bpf_map *map = vma->vm_file->private_data; in bpf_map_mmap_open() local
1042 bpf_map_write_active_inc(map); in bpf_map_mmap_open()
1048 struct bpf_map *map = vma->vm_file->private_data; in bpf_map_mmap_close() local
1051 bpf_map_write_active_dec(map); in bpf_map_mmap_close()
1061 struct bpf_map *map = filp->private_data; in bpf_map_mmap() local
1064 if (!map->ops->map_mmap || !IS_ERR_OR_NULL(map->record)) in bpf_map_mmap()
1070 mutex_lock(&map->freeze_mutex); in bpf_map_mmap()
1073 if (map->frozen) { in bpf_map_mmap()
1082 if (map->map_flags & BPF_F_RDONLY_PROG) { in bpf_map_mmap()
1086 bpf_map_write_active_inc(map); in bpf_map_mmap()
1089 mutex_unlock(&map->freeze_mutex); in bpf_map_mmap()
1095 vma->vm_private_data = map; in bpf_map_mmap()
1107 err = map->ops->map_mmap(map, vma); in bpf_map_mmap()
1110 bpf_map_write_active_dec(map); in bpf_map_mmap()
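
bpf_map_mmap() only succeeds for map types that implement map_mmap and whose values carry no special BTF-managed fields (the !IS_ERR_OR_NULL(map->record) check); for arrays the capability is opted into at create time with BPF_F_MMAPABLE, and a writable mapping keeps writecnt elevated so the map cannot be frozen. A hedged sketch (map name and geometry are illustrative):

    #include <sys/mman.h>
    #include <bpf/bpf.h>

    /* Create an mmapable array map and map its value area read-write. */
    static void *mmap_array_values(__u32 value_size, __u32 max_entries)
    {
            LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_MMAPABLE);
            int fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "mmap_demo",
                                    sizeof(__u32), value_size, max_entries, &opts);
            void *mem;

            if (fd < 0)
                    return NULL;
            mem = mmap(NULL, (size_t)value_size * max_entries,
                       PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
            return mem == MAP_FAILED ? NULL : mem;
    }
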
1118 struct bpf_map *map = filp->private_data; in bpf_map_poll() local
1120 if (map->ops->map_poll) in bpf_map_poll()
1121 return map->ops->map_poll(map, filp, pts); in bpf_map_poll()
1130 struct bpf_map *map = filp->private_data; in bpf_get_unmapped_area() local
1132 if (map->ops->map_get_unmapped_area) in bpf_get_unmapped_area()
1133 return map->ops->map_get_unmapped_area(filp, addr, len, pgoff, flags); in bpf_get_unmapped_area()
1153 int bpf_map_new_fd(struct bpf_map *map, int flags) in bpf_map_new_fd() argument
1157 ret = security_bpf_map(map, OPEN_FMODE(flags)); in bpf_map_new_fd()
1161 return anon_inode_getfd("bpf-map", &bpf_map_fops, map, in bpf_map_new_fd()
1208 int map_check_no_btf(const struct bpf_map *map, in map_check_no_btf() argument
1216 static int map_check_btf(struct bpf_map *map, struct bpf_token *token, in map_check_btf() argument
1226 if (!key_type || key_size != map->key_size) in map_check_btf()
1230 if (!map->ops->map_check_btf) in map_check_btf()
1235 if (!value_type || value_size != map->value_size) in map_check_btf()
1238 map->record = btf_parse_fields(btf, value_type, in map_check_btf()
1241 map->value_size); in map_check_btf()
1242 if (!IS_ERR_OR_NULL(map->record)) { in map_check_btf()
1249 if (map->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG)) { in map_check_btf()
1253 for (i = 0; i < sizeof(map->record->field_mask) * 8; i++) { in map_check_btf()
1254 switch (map->record->field_mask & (1 << i)) { in map_check_btf()
1259 if (map->map_type != BPF_MAP_TYPE_HASH && in map_check_btf()
1260 map->map_type != BPF_MAP_TYPE_ARRAY && in map_check_btf()
1261 map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE && in map_check_btf()
1262 map->map_type != BPF_MAP_TYPE_SK_STORAGE && in map_check_btf()
1263 map->map_type != BPF_MAP_TYPE_INODE_STORAGE && in map_check_btf()
1264 map->map_type != BPF_MAP_TYPE_TASK_STORAGE && in map_check_btf()
1265 map->map_type != BPF_MAP_TYPE_CGRP_STORAGE) { in map_check_btf()
1272 if (map->map_type != BPF_MAP_TYPE_HASH && in map_check_btf()
1273 map->map_type != BPF_MAP_TYPE_LRU_HASH && in map_check_btf()
1274 map->map_type != BPF_MAP_TYPE_ARRAY) { in map_check_btf()
1283 if (map->map_type != BPF_MAP_TYPE_HASH && in map_check_btf()
1284 map->map_type != BPF_MAP_TYPE_PERCPU_HASH && in map_check_btf()
1285 map->map_type != BPF_MAP_TYPE_LRU_HASH && in map_check_btf()
1286 map->map_type != BPF_MAP_TYPE_LRU_PERCPU_HASH && in map_check_btf()
1287 map->map_type != BPF_MAP_TYPE_ARRAY && in map_check_btf()
1288 map->map_type != BPF_MAP_TYPE_PERCPU_ARRAY && in map_check_btf()
1289 map->map_type != BPF_MAP_TYPE_SK_STORAGE && in map_check_btf()
1290 map->map_type != BPF_MAP_TYPE_INODE_STORAGE && in map_check_btf()
1291 map->map_type != BPF_MAP_TYPE_TASK_STORAGE && in map_check_btf()
1292 map->map_type != BPF_MAP_TYPE_CGRP_STORAGE) { in map_check_btf()
1298 if (map->map_type != BPF_MAP_TYPE_TASK_STORAGE) { in map_check_btf()
1305 if (map->map_type != BPF_MAP_TYPE_HASH && in map_check_btf()
1306 map->map_type != BPF_MAP_TYPE_LRU_HASH && in map_check_btf()
1307 map->map_type != BPF_MAP_TYPE_ARRAY) { in map_check_btf()
1320 ret = btf_check_and_fixup_fields(btf, map->record); in map_check_btf()
1324 if (map->ops->map_check_btf) { in map_check_btf()
1325 ret = map->ops->map_check_btf(map, btf, key_type, value_type); in map_check_btf()
1332 bpf_map_free_record(map); in map_check_btf()
1349 struct bpf_map *map; in map_create() local
1480 map = ops->map_alloc(attr); in map_create()
1481 if (IS_ERR(map)) { in map_create()
1482 err = PTR_ERR(map); in map_create()
1485 map->ops = ops; in map_create()
1486 map->map_type = map_type; in map_create()
1488 err = bpf_obj_name_cpy(map->name, attr->map_name, in map_create()
1494 map->cookie = gen_cookie_next(&bpf_map_cookie); in map_create()
1497 atomic64_set(&map->refcnt, 1); in map_create()
1498 atomic64_set(&map->usercnt, 1); in map_create()
1499 mutex_init(&map->freeze_mutex); in map_create()
1500 spin_lock_init(&map->owner_lock); in map_create()
1522 map->btf = btf; in map_create()
1525 err = map_check_btf(map, token, btf, attr->btf_key_type_id, in map_create()
1531 map->btf_key_type_id = attr->btf_key_type_id; in map_create()
1532 map->btf_value_type_id = attr->btf_value_type_id; in map_create()
1533 map->btf_vmlinux_value_type_id = in map_create()
1537 err = security_bpf_map_create(map, attr, token, kernel); in map_create()
1541 err = bpf_map_alloc_id(map); in map_create()
1545 bpf_map_save_memcg(map); in map_create()
1548 err = bpf_map_new_fd(map, f_flags); in map_create()
1556 bpf_map_put_with_uref(map); in map_create()
1563 security_bpf_map_free(map); in map_create()
1565 bpf_map_free(map); in map_create()
1571 void bpf_map_inc(struct bpf_map *map) in bpf_map_inc() argument
1573 atomic64_inc(&map->refcnt); in bpf_map_inc()
1577 void bpf_map_inc_with_uref(struct bpf_map *map) in bpf_map_inc_with_uref() argument
1579 atomic64_inc(&map->refcnt); in bpf_map_inc_with_uref()
1580 atomic64_inc(&map->usercnt); in bpf_map_inc_with_uref()
1587 struct bpf_map *map = __bpf_map_get(f); in bpf_map_get() local
1589 if (!IS_ERR(map)) in bpf_map_get()
1590 bpf_map_inc(map); in bpf_map_get()
1592 return map; in bpf_map_get()
1599 struct bpf_map *map = __bpf_map_get(f); in bpf_map_get_with_uref() local
1601 if (!IS_ERR(map)) in bpf_map_get_with_uref()
1602 bpf_map_inc_with_uref(map); in bpf_map_get_with_uref()
1604 return map; in bpf_map_get_with_uref()
1610 struct bpf_map *__bpf_map_inc_not_zero(struct bpf_map *map, bool uref) in __bpf_map_inc_not_zero() argument
1614 refold = atomic64_fetch_add_unless(&map->refcnt, 1, 0); in __bpf_map_inc_not_zero()
1618 atomic64_inc(&map->usercnt); in __bpf_map_inc_not_zero()
1620 return map; in __bpf_map_inc_not_zero()
1623 struct bpf_map *bpf_map_inc_not_zero(struct bpf_map *map) in bpf_map_inc_not_zero() argument
1626 return __bpf_map_inc_not_zero(map, false); in bpf_map_inc_not_zero()
1630 int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value) in bpf_stackmap_copy() argument
1664 struct bpf_map *map; in map_lookup_elem() local
1676 map = __bpf_map_get(f); in map_lookup_elem()
1677 if (IS_ERR(map)) in map_lookup_elem()
1678 return PTR_ERR(map); in map_lookup_elem()
1679 if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ)) in map_lookup_elem()
1683 !btf_record_has_field(map->record, BPF_SPIN_LOCK)) in map_lookup_elem()
1686 key = __bpf_copy_key(ukey, map->key_size); in map_lookup_elem()
1690 value_size = bpf_map_value_size(map); in map_lookup_elem()
1697 if (map->map_type == BPF_MAP_TYPE_BLOOM_FILTER) { in map_lookup_elem()
1701 err = bpf_map_copy_value(map, key, value, attr->flags); in map_lookup_elem()
1705 err = bpf_map_copy_value(map, key, value, attr->flags); in map_lookup_elem()
1729 struct bpf_map *map; in map_update_elem() local
1738 map = __bpf_map_get(f); in map_update_elem()
1739 if (IS_ERR(map)) in map_update_elem()
1740 return PTR_ERR(map); in map_update_elem()
1741 bpf_map_write_active_inc(map); in map_update_elem()
1742 if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) { in map_update_elem()
1748 !btf_record_has_field(map->record, BPF_SPIN_LOCK)) { in map_update_elem()
1753 key = ___bpf_copy_key(ukey, map->key_size); in map_update_elem()
1759 value_size = bpf_map_value_size(map); in map_update_elem()
1766 err = bpf_map_update_value(map, fd_file(f), key, value, attr->flags); in map_update_elem()
1768 maybe_wait_bpf_programs(map); in map_update_elem()
1774 bpf_map_write_active_dec(map); in map_update_elem()
1783 struct bpf_map *map; in map_delete_elem() local
1791 map = __bpf_map_get(f); in map_delete_elem()
1792 if (IS_ERR(map)) in map_delete_elem()
1793 return PTR_ERR(map); in map_delete_elem()
1794 bpf_map_write_active_inc(map); in map_delete_elem()
1795 if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) { in map_delete_elem()
1800 key = ___bpf_copy_key(ukey, map->key_size); in map_delete_elem()
1806 if (bpf_map_is_offloaded(map)) { in map_delete_elem()
1807 err = bpf_map_offload_delete_elem(map, key); in map_delete_elem()
1809 } else if (IS_FD_PROG_ARRAY(map) || in map_delete_elem()
1810 map->map_type == BPF_MAP_TYPE_STRUCT_OPS) { in map_delete_elem()
1812 err = map->ops->map_delete_elem(map, key); in map_delete_elem()
1818 err = map->ops->map_delete_elem(map, key); in map_delete_elem()
1822 maybe_wait_bpf_programs(map); in map_delete_elem()
1826 bpf_map_write_active_dec(map); in map_delete_elem()
1837 struct bpf_map *map; in map_get_next_key() local
1845 map = __bpf_map_get(f); in map_get_next_key()
1846 if (IS_ERR(map)) in map_get_next_key()
1847 return PTR_ERR(map); in map_get_next_key()
1848 if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ)) in map_get_next_key()
1852 key = __bpf_copy_key(ukey, map->key_size); in map_get_next_key()
1860 next_key = kvmalloc(map->key_size, GFP_USER); in map_get_next_key()
1864 if (bpf_map_is_offloaded(map)) { in map_get_next_key()
1865 err = bpf_map_offload_get_next_key(map, key, next_key); in map_get_next_key()
1870 err = map->ops->map_get_next_key(map, key, next_key); in map_get_next_key()
1877 if (copy_to_user(unext_key, next_key, map->key_size) != 0) in map_get_next_key()
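
BPF_MAP_GET_NEXT_KEY is the primitive behind whole-map iteration from userspace: a NULL key asks for the first key, and -ENOENT marks the end. A minimal iteration sketch, assuming __u32 keys:

    #include <bpf/bpf.h>

    /* Count the elements of a map with __u32 keys by walking its keys. */
    static int count_keys(int map_fd)
    {
            __u32 cur, next;
            void *prev = NULL;
            int n = 0;

            while (!bpf_map_get_next_key(map_fd, prev, &next)) {
                    cur = next;
                    prev = &cur;
                    n++;
            }
            return n;       /* loop ends when the kernel reports -ENOENT */
    }
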
1889 int generic_map_delete_batch(struct bpf_map *map, in generic_map_delete_batch() argument
1902 !btf_record_has_field(map->record, BPF_SPIN_LOCK)) { in generic_map_delete_batch()
1913 key = kvmalloc(map->key_size, GFP_USER | __GFP_NOWARN); in generic_map_delete_batch()
1919 if (copy_from_user(key, keys + cp * map->key_size, in generic_map_delete_batch()
1920 map->key_size)) in generic_map_delete_batch()
1923 if (bpf_map_is_offloaded(map)) { in generic_map_delete_batch()
1924 err = bpf_map_offload_delete_elem(map, key); in generic_map_delete_batch()
1930 err = map->ops->map_delete_elem(map, key); in generic_map_delete_batch()
1945 int generic_map_update_batch(struct bpf_map *map, struct file *map_file, in generic_map_update_batch() argument
1959 !btf_record_has_field(map->record, BPF_SPIN_LOCK)) { in generic_map_update_batch()
1963 value_size = bpf_map_value_size(map); in generic_map_update_batch()
1972 key = kvmalloc(map->key_size, GFP_USER | __GFP_NOWARN); in generic_map_update_batch()
1984 if (copy_from_user(key, keys + cp * map->key_size, in generic_map_update_batch()
1985 map->key_size) || in generic_map_update_batch()
1989 err = bpf_map_update_value(map, map_file, key, value, in generic_map_update_batch()
2006 int generic_map_lookup_batch(struct bpf_map *map, in generic_map_lookup_batch() argument
2022 !btf_record_has_field(map->record, BPF_SPIN_LOCK)) in generic_map_lookup_batch()
2025 value_size = bpf_map_value_size(map); in generic_map_lookup_batch()
2034 buf_prevkey = kvmalloc(map->key_size, GFP_USER | __GFP_NOWARN); in generic_map_lookup_batch()
2038 buf = kvmalloc(map->key_size + value_size, GFP_USER | __GFP_NOWARN); in generic_map_lookup_batch()
2046 if (ubatch && copy_from_user(buf_prevkey, ubatch, map->key_size)) in generic_map_lookup_batch()
2049 value = key + map->key_size; in generic_map_lookup_batch()
2055 err = map->ops->map_get_next_key(map, prev_key, key); in generic_map_lookup_batch()
2059 err = bpf_map_copy_value(map, key, value, in generic_map_lookup_batch()
2068 if (copy_to_user(keys + cp * map->key_size, key, in generic_map_lookup_batch()
2069 map->key_size)) { in generic_map_lookup_batch()
2091 (cp && copy_to_user(uobatch, prev_key, map->key_size)))) in generic_map_lookup_batch()
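
The generic batch helpers copy keys and values in chunks and hand back an opaque cursor for resuming, which keeps the number of bpf() calls low on large maps. A sketch of the userspace side, assuming a hash-style map with __u32 keys and __u64 values (the cursor size and NULL opts are assumptions):

    #include <errno.h>
    #include <bpf/bpf.h>

    /* Read up to 'max' elements in one BPF_MAP_LOOKUP_BATCH call. */
    static int lookup_batch_once(int map_fd, __u32 *keys, __u64 *vals, __u32 max)
    {
            __u32 count = max;
            __u64 cursor = 0;       /* opaque resume token, at least key-sized here */
            int err;

            err = bpf_map_lookup_batch(map_fd, NULL, &cursor,
                                       keys, vals, &count, NULL);
            if (err && errno != ENOENT)     /* ENOENT: map exhausted, partial result */
                    return -errno;
            return count;                   /* elements actually copied out */
    }
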
2106 struct bpf_map *map; in map_lookup_and_delete_elem() local
2118 map = __bpf_map_get(f); in map_lookup_and_delete_elem()
2119 if (IS_ERR(map)) in map_lookup_and_delete_elem()
2120 return PTR_ERR(map); in map_lookup_and_delete_elem()
2121 bpf_map_write_active_inc(map); in map_lookup_and_delete_elem()
2122 if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ) || in map_lookup_and_delete_elem()
2123 !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) { in map_lookup_and_delete_elem()
2129 (map->map_type == BPF_MAP_TYPE_QUEUE || in map_lookup_and_delete_elem()
2130 map->map_type == BPF_MAP_TYPE_STACK)) { in map_lookup_and_delete_elem()
2136 !btf_record_has_field(map->record, BPF_SPIN_LOCK)) { in map_lookup_and_delete_elem()
2141 key = __bpf_copy_key(ukey, map->key_size); in map_lookup_and_delete_elem()
2147 value_size = bpf_map_value_size(map); in map_lookup_and_delete_elem()
2155 if (map->map_type == BPF_MAP_TYPE_QUEUE || in map_lookup_and_delete_elem()
2156 map->map_type == BPF_MAP_TYPE_STACK) { in map_lookup_and_delete_elem()
2157 err = map->ops->map_pop_elem(map, value); in map_lookup_and_delete_elem()
2158 } else if (map->map_type == BPF_MAP_TYPE_HASH || in map_lookup_and_delete_elem()
2159 map->map_type == BPF_MAP_TYPE_PERCPU_HASH || in map_lookup_and_delete_elem()
2160 map->map_type == BPF_MAP_TYPE_LRU_HASH || in map_lookup_and_delete_elem()
2161 map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) { in map_lookup_and_delete_elem()
2162 if (!bpf_map_is_offloaded(map)) { in map_lookup_and_delete_elem()
2165 err = map->ops->map_lookup_and_delete_elem(map, key, value, attr->flags); in map_lookup_and_delete_elem()
2186 bpf_map_write_active_dec(map); in map_lookup_and_delete_elem()
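
For queue and stack maps this command is the pop operation (map_pop_elem above); for the hash variants it is an atomic read-and-remove. A sketch of popping from a BPF_MAP_TYPE_QUEUE, assuming __u64 values; queues have no key, so NULL is passed:

    #include <bpf/bpf.h>

    /* Pop one value from a queue map via BPF_MAP_LOOKUP_AND_DELETE_ELEM. */
    static int queue_pop(int map_fd, __u64 *val)
    {
            return bpf_map_lookup_and_delete_elem(map_fd, NULL, val);
    }
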
2195 struct bpf_map *map; in map_freeze() local
2201 map = __bpf_map_get(f); in map_freeze()
2202 if (IS_ERR(map)) in map_freeze()
2203 return PTR_ERR(map); in map_freeze()
2205 if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS || !IS_ERR_OR_NULL(map->record)) in map_freeze()
2208 if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) in map_freeze()
2211 mutex_lock(&map->freeze_mutex); in map_freeze()
2212 if (bpf_map_write_active(map)) { in map_freeze()
2216 if (READ_ONCE(map->frozen)) { in map_freeze()
2221 WRITE_ONCE(map->frozen, true); in map_freeze()
2223 mutex_unlock(&map->freeze_mutex); in map_freeze()
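
map_freeze() refuses maps with outstanding writable mmaps (the bpf_map_write_active() check above) and is one-way: once frozen is set, syscall-side writes are rejected, while programs keep whatever access BPF_F_RDONLY_PROG left them. A sketch:

    #include <bpf/bpf.h>

    /* After this succeeds, userspace update/delete on the map fail with -EPERM;
     * program-side access is unchanged. */
    static int make_ro_for_userspace(int map_fd)
    {
            return bpf_map_freeze(map_fd);  /* wraps BPF_MAP_FREEZE */
    }
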
4626 struct bpf_map *map; in bpf_map_get_curr_or_next() local
4630 map = idr_get_next(&map_idr, id); in bpf_map_get_curr_or_next()
4631 if (map) { in bpf_map_get_curr_or_next()
4632 map = __bpf_map_inc_not_zero(map, false); in bpf_map_get_curr_or_next()
4633 if (IS_ERR(map)) { in bpf_map_get_curr_or_next()
4640 return map; in bpf_map_get_curr_or_next()
4708 struct bpf_map *map; in bpf_map_get_fd_by_id() local
4725 map = idr_find(&map_idr, id); in bpf_map_get_fd_by_id()
4726 if (map) in bpf_map_get_fd_by_id()
4727 map = __bpf_map_inc_not_zero(map, true); in bpf_map_get_fd_by_id()
4729 map = ERR_PTR(-ENOENT); in bpf_map_get_fd_by_id()
4732 if (IS_ERR(map)) in bpf_map_get_fd_by_id()
4733 return PTR_ERR(map); in bpf_map_get_fd_by_id()
4735 fd = bpf_map_new_fd(map, f_flags); in bpf_map_get_fd_by_id()
4737 bpf_map_put_with_uref(map); in bpf_map_get_fd_by_id()
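
Map IDs let a process locate maps it did not create; combined with BPF_MAP_GET_NEXT_ID this gives a system-wide walk (getting an fd from an id is a privileged operation). A sketch:

    #include <stdio.h>
    #include <unistd.h>
    #include <bpf/bpf.h>

    /* Visit every BPF map currently loaded on the system. */
    static void walk_all_maps(void)
    {
            __u32 id = 0;

            while (!bpf_map_get_next_id(id, &id)) {
                    int fd = bpf_map_get_fd_by_id(id);

                    if (fd < 0)
                            continue;   /* map went away, or insufficient privilege */
                    printf("map id %u -> fd %d\n", id, fd);
                    close(fd);
            }
    }
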
4746 const struct bpf_map *map; in bpf_map_from_imm() local
4751 map = prog->aux->used_maps[i]; in bpf_map_from_imm()
4752 if (map == (void *)addr) { in bpf_map_from_imm()
4756 if (!map->ops->map_direct_value_meta) in bpf_map_from_imm()
4758 if (!map->ops->map_direct_value_meta(map, addr, off)) { in bpf_map_from_imm()
4763 map = NULL; in bpf_map_from_imm()
4767 return map; in bpf_map_from_imm()
4773 const struct bpf_map *map; in bpf_insn_prepare_dump() local
4816 map = bpf_map_from_imm(prog, imm, &off, &type); in bpf_insn_prepare_dump()
4817 if (map) { in bpf_insn_prepare_dump()
4819 insns[i].imm = map->id; in bpf_insn_prepare_dump()
5149 struct bpf_map *map, in bpf_map_get_info_by_fd() argument
5164 info.type = map->map_type; in bpf_map_get_info_by_fd()
5165 info.id = map->id; in bpf_map_get_info_by_fd()
5166 info.key_size = map->key_size; in bpf_map_get_info_by_fd()
5167 info.value_size = map->value_size; in bpf_map_get_info_by_fd()
5168 info.max_entries = map->max_entries; in bpf_map_get_info_by_fd()
5169 info.map_flags = map->map_flags; in bpf_map_get_info_by_fd()
5170 info.map_extra = map->map_extra; in bpf_map_get_info_by_fd()
5171 memcpy(info.name, map->name, sizeof(map->name)); in bpf_map_get_info_by_fd()
5173 if (map->btf) { in bpf_map_get_info_by_fd()
5174 info.btf_id = btf_obj_id(map->btf); in bpf_map_get_info_by_fd()
5175 info.btf_key_type_id = map->btf_key_type_id; in bpf_map_get_info_by_fd()
5176 info.btf_value_type_id = map->btf_value_type_id; in bpf_map_get_info_by_fd()
5178 info.btf_vmlinux_value_type_id = map->btf_vmlinux_value_type_id; in bpf_map_get_info_by_fd()
5179 if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) in bpf_map_get_info_by_fd()
5180 bpf_map_struct_ops_info_fill(&info, map); in bpf_map_get_info_by_fd()
5182 if (bpf_map_is_offloaded(map)) { in bpf_map_get_info_by_fd()
5183 err = bpf_map_offload_info_fill(&info, map); in bpf_map_get_info_by_fd()
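
The same attributes written to fdinfo are available in binary form through BPF_OBJ_GET_INFO_BY_FD, which fills struct bpf_map_info. A sketch:

    #include <stdio.h>
    #include <string.h>
    #include <bpf/bpf.h>

    /* Fetch struct bpf_map_info for a map fd and print the basics. */
    static int print_map_info(int map_fd)
    {
            struct bpf_map_info info;
            __u32 len = sizeof(info);

            memset(&info, 0, sizeof(info));
            if (bpf_obj_get_info_by_fd(map_fd, &info, &len))
                    return -1;
            printf("%s: type %u, key %u, value %u, max_entries %u\n",
                   info.name, info.type, info.key_size,
                   info.value_size, info.max_entries);
            return 0;
    }
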
5485 struct bpf_map *map; in bpf_map_do_batch() local
5493 map = __bpf_map_get(f); in bpf_map_do_batch()
5494 if (IS_ERR(map)) in bpf_map_do_batch()
5495 return PTR_ERR(map); in bpf_map_do_batch()
5497 bpf_map_write_active_inc(map); in bpf_map_do_batch()
5498 if (has_read && !(map_get_sys_perms(map, f) & FMODE_CAN_READ)) { in bpf_map_do_batch()
5502 if (has_write && !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) { in bpf_map_do_batch()
5508 BPF_DO_BATCH(map->ops->map_lookup_batch, map, attr, uattr); in bpf_map_do_batch()
5510 BPF_DO_BATCH(map->ops->map_lookup_and_delete_batch, map, attr, uattr); in bpf_map_do_batch()
5512 BPF_DO_BATCH(map->ops->map_update_batch, map, fd_file(f), attr, uattr); in bpf_map_do_batch()
5514 BPF_DO_BATCH(map->ops->map_delete_batch, map, attr, uattr); in bpf_map_do_batch()
5517 maybe_wait_bpf_programs(map); in bpf_map_do_batch()
5518 bpf_map_write_active_dec(map); in bpf_map_do_batch()
5892 struct bpf_map *map; in bpf_prog_bind_map() local
5906 map = bpf_map_get(attr->prog_bind_map.map_fd); in bpf_prog_bind_map()
5907 if (IS_ERR(map)) { in bpf_prog_bind_map()
5908 ret = PTR_ERR(map); in bpf_prog_bind_map()
5917 if (used_maps_old[i] == map) { in bpf_prog_bind_map()
5918 bpf_map_put(map); in bpf_prog_bind_map()
5934 atomic64_inc(&map->sleepable_refcnt); in bpf_prog_bind_map()
5937 used_maps_new[prog->aux->used_map_cnt] = map; in bpf_prog_bind_map()
5948 bpf_map_put(map); in bpf_prog_bind_map()
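
BPF_PROG_BIND_MAP adds a map to a program's used_maps array so the map lives at least as long as the program, even when the program's instructions never reference it (typical for metadata maps). A sketch:

    #include <bpf/bpf.h>

    /* Tie a map's lifetime to an already-loaded program. */
    static int bind_metadata_map(int prog_fd, int map_fd)
    {
            return bpf_prog_bind_map(prog_fd, map_fd, NULL);   /* BPF_PROG_BIND_MAP */
    }
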