Lines matching refs: smap (the struct bpf_stack_map pointer used throughout the kernel's BPF stack map implementation, kernel/bpf/stackmap.c)

45 static int prealloc_elems_and_freelist(struct bpf_stack_map *smap)  in prealloc_elems_and_freelist()  argument
48 (u64)smap->map.value_size; in prealloc_elems_and_freelist()
51 smap->elems = bpf_map_area_alloc(elem_size * smap->map.max_entries, in prealloc_elems_and_freelist()
52 smap->map.numa_node); in prealloc_elems_and_freelist()
53 if (!smap->elems) in prealloc_elems_and_freelist()
56 err = pcpu_freelist_init(&smap->freelist); in prealloc_elems_and_freelist()
60 pcpu_freelist_populate(&smap->freelist, smap->elems, elem_size, in prealloc_elems_and_freelist()
61 smap->map.max_entries); in prealloc_elems_and_freelist()
65 bpf_map_area_free(smap->elems); in prealloc_elems_and_freelist()
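
The listing above only shows the lines that touch smap, so here is a minimal sketch of how they plausibly fit together in prealloc_elems_and_freelist(). The elem_size computation, the error label and the -ENOMEM return are assumptions filled in around the listed lines.

static int prealloc_elems_and_freelist(struct bpf_stack_map *smap)
{
        /* each element is a bucket header plus value_size bytes of stack
         * data (assumed from the "(u64)smap->map.value_size" reference) */
        u64 elem_size = sizeof(struct stack_map_bucket) +
                        (u64)smap->map.value_size;
        int err;

        /* one flat allocation for every preallocated element, placed on
         * the map's NUMA node */
        smap->elems = bpf_map_area_alloc(elem_size * smap->map.max_entries,
                                         smap->map.numa_node);
        if (!smap->elems)
                return -ENOMEM;

        err = pcpu_freelist_init(&smap->freelist);
        if (err)
                goto free_elems;

        /* carve the flat area into max_entries elements and hand them to
         * the per-CPU freelist */
        pcpu_freelist_populate(&smap->freelist, smap->elems, elem_size,
                               smap->map.max_entries);
        return 0;

free_elems:
        bpf_map_area_free(smap->elems);
        return err;
}

Preallocating everything here is what lets the update path later pop buckets from the freelist instead of allocating in tracing/NMI context.
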
73 struct bpf_stack_map *smap; in stack_map_alloc() local
102 cost = n_buckets * sizeof(struct stack_map_bucket *) + sizeof(*smap); in stack_map_alloc()
103 smap = bpf_map_area_alloc(cost, bpf_map_attr_numa_node(attr)); in stack_map_alloc()
104 if (!smap) in stack_map_alloc()
107 bpf_map_init_from_attr(&smap->map, attr); in stack_map_alloc()
108 smap->n_buckets = n_buckets; in stack_map_alloc()
114 err = prealloc_elems_and_freelist(smap); in stack_map_alloc()
118 return &smap->map; in stack_map_alloc()
123 bpf_map_area_free(smap); in stack_map_alloc()
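
Sketch of the allocation path around the listed stack_map_alloc() lines. The attribute validation, the callchain-buffer setup between lines 114 and 118, and the exact error codes are not in the listing and are assumptions; the power-of-two rounding of n_buckets is implied by the "hash & (smap->n_buckets - 1)" indexing used later.

static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
{
        struct bpf_stack_map *smap;
        u64 cost, n_buckets;
        int err;

        /* attr sanity checks elided; n_buckets is assumed to be
         * max_entries rounded up to a power of two */
        n_buckets = roundup_pow_of_two(attr->max_entries);

        /* map header plus one bucket pointer per slot, in one area */
        cost = n_buckets * sizeof(struct stack_map_bucket *) + sizeof(*smap);
        smap = bpf_map_area_alloc(cost, bpf_map_attr_numa_node(attr));
        if (!smap)
                return ERR_PTR(-ENOMEM);

        bpf_map_init_from_attr(&smap->map, attr);
        smap->n_buckets = n_buckets;

        err = prealloc_elems_and_freelist(smap);
        if (err)
                goto free_smap;

        /* callchain buffer setup elided */
        return &smap->map;

free_smap:
        bpf_map_area_free(smap);
        return ERR_PTR(err);
}
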
216 struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map); in __bpf_get_stackid() local
232 id = hash & (smap->n_buckets - 1); in __bpf_get_stackid()
233 bucket = READ_ONCE(smap->buckets[id]); in __bpf_get_stackid()
243 pcpu_freelist_pop(&smap->freelist); in __bpf_get_stackid()
253 pcpu_freelist_push(&smap->freelist, &new_bucket->fnode); in __bpf_get_stackid()
257 pcpu_freelist_push(&smap->freelist, &new_bucket->fnode); in __bpf_get_stackid()
268 pcpu_freelist_pop(&smap->freelist); in __bpf_get_stackid()
277 old_bucket = xchg(&smap->buckets[id], new_bucket); in __bpf_get_stackid()
279 pcpu_freelist_push(&smap->freelist, &old_bucket->fnode); in __bpf_get_stackid()
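
The references in __bpf_get_stackid() trace the core pattern: mask the stack hash into a bucket slot, try to reuse the existing bucket, otherwise pop a preallocated one, publish it with xchg() and recycle whatever was there. The helper name below, the hash-only comparison and the copy into the bucket are illustrative assumptions; the real function also handles BPF_F_FAST_STACK_CMP / BPF_F_REUSE_STACKID and a build-id variant, which is where the second pop (line 268) and the two pushes of new_bucket (lines 253 and 257) come from.

/* hypothetical condensed helper illustrating the referenced lines */
static long stackid_from_trace(struct bpf_stack_map *smap, u32 hash,
                               u64 *ips, u32 trace_len)
{
        struct stack_map_bucket *bucket, *new_bucket, *old_bucket;
        u32 id = hash & (smap->n_buckets - 1);  /* n_buckets is a power of two */

        bucket = READ_ONCE(smap->buckets[id]);
        if (bucket && bucket->hash == hash)
                return id;                      /* reuse the existing stack id */

        /* no allocation here: grab one of the preallocated elements */
        new_bucket = (struct stack_map_bucket *)
                pcpu_freelist_pop(&smap->freelist);
        if (!new_bucket)
                return -ENOMEM;

        memcpy(new_bucket->data, ips, trace_len);
        new_bucket->hash = hash;
        new_bucket->nr = trace_len / sizeof(u64);

        /* publish atomically; the displaced bucket goes back on the freelist */
        old_bucket = xchg(&smap->buckets[id], new_bucket);
        if (old_bucket)
                pcpu_freelist_push(&smap->freelist, &old_bucket->fnode);
        return id;
}
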
571 struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map); in bpf_stackmap_copy() local
575 if (unlikely(id >= smap->n_buckets)) in bpf_stackmap_copy()
578 bucket = xchg(&smap->buckets[id], NULL); in bpf_stackmap_copy()
586 old_bucket = xchg(&smap->buckets[id], bucket); in bpf_stackmap_copy()
588 pcpu_freelist_push(&smap->freelist, &old_bucket->fnode); in bpf_stackmap_copy()
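
bpf_stackmap_copy() is the syscall-side lookup. A sketch of how the listed lines combine; the memcpy/memset middle section, the per-entry data size and the -ENOENT returns are assumptions. The temporary xchg(..., NULL) takes the bucket out of the table so a concurrent update cannot recycle it while it is being copied out.

int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
{
        struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);
        struct stack_map_bucket *bucket, *old_bucket;
        u32 id = *(u32 *)key, trace_len;

        if (unlikely(id >= smap->n_buckets))
                return -ENOENT;

        /* take the bucket out of the table while copying it */
        bucket = xchg(&smap->buckets[id], NULL);
        if (!bucket)
                return -ENOENT;

        trace_len = bucket->nr * sizeof(u64);   /* assumed per-entry size */
        memcpy(value, bucket->data, trace_len);
        memset(value + trace_len, 0, map->value_size - trace_len);

        /* put it back; if an update raced in, recycle that bucket instead */
        old_bucket = xchg(&smap->buckets[id], bucket);
        if (old_bucket)
                pcpu_freelist_push(&smap->freelist, &old_bucket->fnode);
        return 0;
}
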
595 struct bpf_stack_map *smap = container_of(map, in stack_map_get_next_key() local
605 if (id >= smap->n_buckets || !smap->buckets[id]) in stack_map_get_next_key()
611 while (id < smap->n_buckets && !smap->buckets[id]) in stack_map_get_next_key()
614 if (id >= smap->n_buckets) in stack_map_get_next_key()
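
stack_map_get_next_key() iterates the bucket-pointer array directly: starting after the given key, it skips NULL slots until it finds an occupied one. A sketch, with the key handling around the listed checks filled in as an assumption.

static int stack_map_get_next_key(struct bpf_map *map, void *key,
                                  void *next_key)
{
        struct bpf_stack_map *smap = container_of(map,
                                                  struct bpf_stack_map, map);
        u32 id;

        if (!key) {
                id = 0;                         /* start of the iteration */
        } else {
                id = *(u32 *)key;
                if (id >= smap->n_buckets || !smap->buckets[id])
                        id = 0;                 /* stale key: restart */
                else
                        id++;
        }

        /* skip empty slots */
        while (id < smap->n_buckets && !smap->buckets[id])
                id++;

        if (id >= smap->n_buckets)
                return -ENOENT;

        *(u32 *)next_key = id;
        return 0;
}
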
630 struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map); in stack_map_delete_elem() local
634 if (unlikely(id >= smap->n_buckets)) in stack_map_delete_elem()
637 old_bucket = xchg(&smap->buckets[id], NULL); in stack_map_delete_elem()
639 pcpu_freelist_push(&smap->freelist, &old_bucket->fnode); in stack_map_delete_elem()
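
Deletion is the same xchg() pattern without the restore: the slot is cleared and the bucket returned to the per-CPU freelist so a later __bpf_get_stackid() can reuse it. The return codes are assumptions.

static int stack_map_delete_elem(struct bpf_map *map, void *key)
{
        struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);
        struct stack_map_bucket *old_bucket;
        u32 id = *(u32 *)key;

        if (unlikely(id >= smap->n_buckets))
                return -E2BIG;

        old_bucket = xchg(&smap->buckets[id], NULL);
        if (!old_bucket)
                return -ENOENT;

        pcpu_freelist_push(&smap->freelist, &old_bucket->fnode);
        return 0;
}
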
649 struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map); in stack_map_free() local
651 bpf_map_area_free(smap->elems); in stack_map_free()
652 pcpu_freelist_destroy(&smap->freelist); in stack_map_free()
653 bpf_map_area_free(smap); in stack_map_free()
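
Teardown mirrors stack_map_alloc() and prealloc_elems_and_freelist(): free the flat element area, destroy the freelist, then free the map header itself. Anything the real function does between the listed lines (such as releasing callchain buffers) is omitted from this sketch.

static void stack_map_free(struct bpf_map *map)
{
        struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);

        bpf_map_area_free(smap->elems);         /* all preallocated buckets */
        pcpu_freelist_destroy(&smap->freelist);
        bpf_map_area_free(smap);                /* header + bucket pointer array */
}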