Lines matching refs: map in the BPF cpumap implementation (kernel/bpf/cpumap.c)
Each entry below reads: source line number, the matching code fragment, then "in <enclosing function>()".

78 	struct bpf_map map;  member
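
The map member at source line 78 is the struct bpf_map embedded in struct bpf_cpu_map, and the cmap->cpu_map accesses below imply an entry-pointer array next to it. A minimal sketch of that container, reconstructed from this listing (the authoritative definition is in kernel/bpf/cpumap.c; the to_cpu_map() helper is hypothetical):

#include <linux/bpf.h>    /* kernel-internal header; sketch only */

struct bpf_cpu_map {
    struct bpf_map map;                        /* generic part (source line 78) */
    struct bpf_cpu_map_entry __rcu **cpu_map;  /* entry array (source line 106) */
};

/* Every map op below receives a bare struct bpf_map * and recovers the
 * container this way (source lines 559, 573, 610, 646, 667): */
static struct bpf_cpu_map *to_cpu_map(struct bpf_map *map)
{
    return container_of(map, struct bpf_cpu_map, map);
}
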
103 bpf_map_init_from_attr(&cmap->map, attr); in cpu_map_alloc()
106 cmap->cpu_map = bpf_map_area_alloc(cmap->map.max_entries * in cpu_map_alloc()
108 cmap->map.numa_node); in cpu_map_alloc()
114 return &cmap->map; in cpu_map_alloc()
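
cpu_map_alloc() runs when user space creates a BPF_MAP_TYPE_CPUMAP map: it copies the attributes into cmap->map and sizes the cpu_map array from max_entries on the requested NUMA node. A minimal creation call through libbpf might look like this (the map name and the one-slot-per-possible-CPU sizing are illustrative):

#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include <linux/bpf.h>

/* Key is a u32 CPU index; the value type is struct bpf_cpumap_val. */
int create_cpumap(void)
{
    int nr_cpus = libbpf_num_possible_cpus();

    if (nr_cpus < 0)
        return nr_cpus;

    return bpf_map_create(BPF_MAP_TYPE_CPUMAP, "cpu_map",
                          sizeof(__u32), sizeof(struct bpf_cpumap_val),
                          nr_cpus, NULL);
}
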
409 struct bpf_map *map, int fd) in __cpu_map_load_bpf_program() argument
418 !bpf_prog_map_compatible(map, prog)) { in __cpu_map_load_bpf_program()
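
bpf_prog_map_compatible() rejects programs that do not fit this map type; for a cpumap the program must also be loaded with expected_attach_type = BPF_XDP_CPUMAP. With libbpf that intent is usually declared through the section name, as in this sketch (the section-name convention is an assumption about the loader, not something shown in this listing):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* Loaded with expected_attach_type = BPF_XDP_CPUMAP; runs on the
 * target CPU's kthread after redirection. */
SEC("xdp/cpumap")
int xdp_on_remote_cpu(struct xdp_md *ctx)
{
    return XDP_PASS;
}

char _license[] SEC("license") = "GPL";
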
430 __cpu_map_entry_alloc(struct bpf_map *map, struct bpf_cpumap_val *value, in __cpu_map_entry_alloc() argument
441 rcpu = bpf_map_kmalloc_node(map, sizeof(*rcpu), gfp | __GFP_ZERO, numa); in __cpu_map_entry_alloc()
446 rcpu->bulkq = bpf_map_alloc_percpu(map, sizeof(*rcpu->bulkq), in __cpu_map_entry_alloc()
457 rcpu->queue = bpf_map_kmalloc_node(map, sizeof(*rcpu->queue), gfp, in __cpu_map_entry_alloc()
467 rcpu->map_id = map->id; in __cpu_map_entry_alloc()
471 if (fd > 0 && __cpu_map_load_bpf_program(rcpu, map, fd)) in __cpu_map_entry_alloc()
478 map->id); in __cpu_map_entry_alloc()
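
__cpu_map_entry_alloc() consumes a struct bpf_cpumap_val: qsize sizes the per-CPU queue and bulk structures allocated above, and the fd tested at source line 471 selects an optional program to attach. For reference, the UAPI definition from include/uapi/linux/bpf.h:

struct bpf_cpumap_val {
    __u32 qsize;    /* queue size to remote target CPU */
    union {
        int   fd;   /* prog fd on map write */
        __u32 id;   /* prog id on map read */
    } bpf_prog;
};
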
557 static long cpu_map_delete_elem(struct bpf_map *map, void *key) in cpu_map_delete_elem() argument
559 struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map); in cpu_map_delete_elem()
562 if (key_cpu >= map->max_entries) in cpu_map_delete_elem()
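
From user space the matching teardown is an ordinary element delete; the kernel side only bounds-checks the key (source line 562) before detaching the entry. An illustrative helper:

#include <bpf/bpf.h>

int cpumap_clear_cpu(int map_fd, __u32 cpu)
{
    return bpf_map_delete_elem(map_fd, &cpu);
}
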
570 static long cpu_map_update_elem(struct bpf_map *map, void *key, void *value, in cpu_map_update_elem() argument
573 struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map); in cpu_map_update_elem()
579 memcpy(&cpumap_value, value, map->value_size); in cpu_map_update_elem()
583 if (unlikely(key_cpu >= cmap->map.max_entries)) in cpu_map_update_elem()
598 rcpu = __cpu_map_entry_alloc(map, &cpumap_value, key_cpu); in cpu_map_update_elem()
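
cpu_map_update_elem() copies value_size bytes into a local struct bpf_cpumap_val, bounds-checks the CPU key (source line 583), and then builds the entry via __cpu_map_entry_alloc(). The corresponding user-space write is a plain element update; this helper is illustrative (the qsize of 192 is an arbitrary example):

#include <bpf/bpf.h>
#include <linux/bpf.h>

/* Pass prog_fd = 0 for no program, mirroring the fd > 0 test at
 * source line 471. */
int cpumap_set_cpu(int map_fd, __u32 cpu, int prog_fd)
{
    struct bpf_cpumap_val val = {
        .qsize = 192,
        .bpf_prog.fd = prog_fd,
    };

    return bpf_map_update_elem(map_fd, &cpu, &val, BPF_ANY);
}
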
608 static void cpu_map_free(struct bpf_map *map) in cpu_map_free() argument
610 struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map); in cpu_map_free()
626 for (i = 0; i < cmap->map.max_entries; i++) { in cpu_map_free()
644 static void *__cpu_map_lookup_elem(struct bpf_map *map, u32 key) in __cpu_map_lookup_elem() argument
646 struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map); in __cpu_map_lookup_elem()
649 if (key >= map->max_entries) in __cpu_map_lookup_elem()
657 static void *cpu_map_lookup_elem(struct bpf_map *map, void *key) in cpu_map_lookup_elem() argument
660 __cpu_map_lookup_elem(map, *(u32 *)key); in cpu_map_lookup_elem()
665 static int cpu_map_get_next_key(struct bpf_map *map, void *key, void *next_key) in cpu_map_get_next_key() argument
667 struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map); in cpu_map_get_next_key()
671 if (index >= cmap->map.max_entries) { in cpu_map_get_next_key()
676 if (index == cmap->map.max_entries - 1) in cpu_map_get_next_key()
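
cpu_map_get_next_key() implements the usual iteration contract: a key at or past max_entries restarts the walk at slot 0 (source line 671), and the last slot ends it (source line 676). Together with the lookup path above, that supports the standard user-space walk; a sketch (note that a read-back value reports bpf_prog.id, not the fd that was written):

#include <stdio.h>
#include <bpf/bpf.h>
#include <linux/bpf.h>

void cpumap_dump(int map_fd)
{
    struct bpf_cpumap_val val;
    __u32 key = 0, next;
    void *prev = NULL;    /* NULL asks for the first key */

    while (bpf_map_get_next_key(map_fd, prev, &next) == 0) {
        /* Empty slots return -ENOENT from lookup and are skipped. */
        if (bpf_map_lookup_elem(map_fd, &next, &val) == 0)
            printf("cpu %u: qsize %u, prog id %u\n",
                   next, val.qsize, val.bpf_prog.id);
        key = next;
        prev = &key;
    }
}
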
682 static long cpu_map_redirect(struct bpf_map *map, u64 index, u64 flags) in cpu_map_redirect() argument
684 return __bpf_xdp_redirect_map(map, index, flags, 0, in cpu_map_redirect()
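
cpu_map_redirect() is the backend for the bpf_redirect_map() helper when the target map is a cpumap, so the datapath side is a single helper call from XDP. A minimal redirecting program (the fixed target CPU and map sizing are illustrative; the low bits of the flags argument select the fallback action used when the slot is empty):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
    __uint(type, BPF_MAP_TYPE_CPUMAP);
    __type(key, __u32);
    __type(value, struct bpf_cpumap_val);
    __uint(max_entries, 64);
} cpu_map SEC(".maps");

SEC("xdp")
int xdp_redirect_to_cpu(struct xdp_md *ctx)
{
    __u32 cpu = 2;    /* steer everything to CPU 2's kthread */

    return bpf_redirect_map(&cpu_map, cpu, XDP_PASS);
}

char _license[] SEC("license") = "GPL";
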
688 static u64 cpu_map_mem_usage(const struct bpf_map *map) in cpu_map_mem_usage() argument
693 usage += (u64)map->max_entries * sizeof(struct bpf_cpu_map_entry *); in cpu_map_mem_usage()
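
The visible term in cpu_map_mem_usage() is easy to sanity-check: on a 64-bit kernel a cpumap with max_entries = 64 charges 64 * 8 = 512 bytes for the entry-pointer array, on top of whatever base the accumulator starts from (the += at source line 693 implies a fixed base, presumably sizeof(struct bpf_cpu_map)).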