Lines matching refs: u64 (kernel/bpf/stackmap.c, the BPF stack trace map implementation).
Each entry gives the source line number, the matching code, and its enclosing function.

23 	u64 data[];
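
Line 23 is the flexible array member that ends each hash bucket. For context, a sketch of the bucket it belongs to, recalled from the same file (field order should be checked against the source):

    struct stack_map_bucket {
            struct pcpu_freelist_node fnode;  /* links free buckets (see prealloc below) */
            u32 hash;                         /* hash of the stored trace */
            u32 nr;                           /* number of valid u64 entries in data[] */
            u64 data[];                       /* instruction pointers or build-id records */
    };
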
42 sizeof(struct bpf_stack_build_id) : sizeof(u64); in stack_map_data_size()
47 u64 elem_size = sizeof(struct stack_map_bucket) + in prealloc_elems_and_freelist()
48 (u64)smap->map.value_size; in prealloc_elems_and_freelist()
74 u64 cost, n_buckets; in stack_map_alloc()
85 BUILD_BUG_ON(sizeof(struct bpf_stack_build_id) % sizeof(u64)); in stack_map_alloc()
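
Lines 42-85 are the sizing and allocation path: stack_map_data_size() picks the per-frame element size (a struct bpf_stack_build_id when the map is created with BPF_F_STACK_BUILD_ID, otherwise a raw u64 instruction pointer), and the BUILD_BUG_ON at line 85 guarantees the build-id record stays u64-aligned. A minimal sketch of a BPF-side map declaration that exercises this path, using libbpf's map-definition macros (the map name and sizes are illustrative):

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    /* PERF_MAX_STACK_DEPTH in the kernel is 127; value_size must be a
     * multiple of the element size computed by stack_map_data_size().
     */
    struct {
            __uint(type, BPF_MAP_TYPE_STACK_TRACE);
            __uint(max_entries, 1024);        /* rounded up to a power of two for n_buckets */
            __uint(key_size, sizeof(__u32));  /* bucket id returned by bpf_get_stackid() */
            __uint(value_size, 127 * sizeof(__u64));
    } stack_traces SEC(".maps");

    char LICENSE[] SEC("license") = "GPL";
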
167 u64 ip = READ_ONCE(id_offs[i].ip); in stack_map_get_build_id_offset()
211 u64 *to = entry->ip; in get_callchain_entry_for_task()
216 to[i] = (u64)(from[i]); in get_callchain_entry_for_task()
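
Lines 211 and 216 are the interesting pair in get_callchain_entry_for_task(): the stack is saved as unsigned long entries, while perf_callchain_entry stores u64. On 64-bit kernels the two types coincide; on 32-bit kernels the saved frames are widened in place, roughly as below (reconstructed around the listed lines; the backwards loop is recalled from the surrounding source):

    /* Widen 32-bit unsigned long frames to u64 in the same buffer.
     * Copying from the end means no slot is overwritten before it
     * has been read.
     */
    unsigned long *from = (unsigned long *)entry->ip;
    u64 *to = entry->ip;
    int i;

    for (i = entry->nr - 1; i >= 0; i--)
            to[i] = (u64)(from[i]);
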
228 struct perf_callchain_entry *trace, u64 flags) in __bpf_get_stackid()
235 u64 *ips; in __bpf_get_stackid()
243 trace_len = trace_nr * sizeof(u64); in __bpf_get_stackid()
301 u64, flags) in BPF_CALL_3() argument
349 struct bpf_map *, map, u64, flags) in BPF_CALL_3() argument
384 u64 skip = flags & BPF_F_SKIP_FIELD_MASK; in BPF_CALL_3()
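
Lines 228-384 belong to the bpf_get_stackid() path: __bpf_get_stackid() hashes trace_nr * sizeof(u64) bytes of instruction pointers into a bucket, and the low bits of the u64 flags argument carry the frame-skip count (BPF_F_SKIP_FIELD_MASK, line 384). A sketch of a caller, with a hypothetical attach point:

    #include <linux/bpf.h>
    #include <linux/ptrace.h>
    #include <bpf/bpf_helpers.h>

    struct {
            __uint(type, BPF_MAP_TYPE_STACK_TRACE);
            __uint(max_entries, 1024);
            __uint(key_size, sizeof(__u32));
            __uint(value_size, 127 * sizeof(__u64));
    } stack_traces SEC(".maps");

    SEC("kprobe/do_nanosleep")              /* hypothetical attach point */
    int collect_stack(struct pt_regs *ctx)
    {
            /* Low 8 bits of flags skip that many leading frames;
             * BPF_F_USER_STACK would select the user stack instead.
             */
            long id = bpf_get_stackid(ctx, &stack_traces, 0);

            if (id < 0)
                    return 0;
            /* id is the bucket index; user space reads the u64 ips back
             * with bpf_map_lookup_elem() on that key.
             */
            return 0;
    }

    char LICENSE[] SEC("license") = "GPL";
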
407 void *buf, u32 size, u64 flags, bool may_fault) in __bpf_get_stack()
417 u64 *ips; in __bpf_get_stack()
425 elem_size = user_build_id ? sizeof(struct bpf_stack_build_id) : sizeof(u64); in __bpf_get_stack()
497 u64, flags) in BPF_CALL_4() argument
513 u64, flags) in BPF_CALL_4() argument
529 u64 flags, bool may_fault) in __bpf_get_task_stack()
546 u32, size, u64, flags) in BPF_CALL_4() argument
563 u32, size, u64, flags) in BPF_CALL_4() argument
580 void *, buf, u32, size, u64, flags) in BPF_CALL_4() argument
615 u64 skip = flags & BPF_F_SKIP_FIELD_MASK; in BPF_CALL_4()
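
Lines 407-615 are the bpf_get_stack() family: instead of hashing into a map, __bpf_get_stack() copies the frames into a caller-supplied buffer, with elem_size switching to sizeof(struct bpf_stack_build_id) when build IDs are requested (line 425); the BPF_CALL_4 wrappers at 497-580 cover the pt_regs, task, and sleepable (may_fault) variants. A sketch of the buffer variant (attach point again hypothetical):

    #include <linux/bpf.h>
    #include <linux/ptrace.h>
    #include <bpf/bpf_helpers.h>

    #define MAX_DEPTH 32

    SEC("kprobe/do_nanosleep")              /* hypothetical attach point */
    int dump_stack(struct pt_regs *ctx)
    {
            __u64 ips[MAX_DEPTH] = {};
            long n;

            /* size must be a multiple of elem_size (sizeof(u64) here,
             * since BPF_F_USER_BUILD_ID is not set); the return value is
             * the number of bytes written, so n / sizeof(__u64) frames
             * are valid.
             */
            n = bpf_get_stack(ctx, ips, sizeof(ips), 0);
            if (n < 0)
                    return 0;

            return 0;
    }

    char LICENSE[] SEC("license") = "GPL";
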
702 u64 map_flags) in stack_map_update_elem()
737 static u64 stack_map_mem_usage(const struct bpf_map *map) in stack_map_mem_usage()
740 u64 value_size = map->value_size; in stack_map_mem_usage()
741 u64 n_buckets = smap->n_buckets; in stack_map_mem_usage()
742 	u64 enties = map->max_entries; in stack_map_mem_usage() (sic: "enties" is the upstream spelling)
743 u64 usage = sizeof(*smap); in stack_map_mem_usage()
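
The locals listed at lines 740-743 feed a straightforward sum. Reconstructed as a sketch (the bucket-pointer-table term is recalled from the surrounding source, and "enties" mirrors the upstream local's spelling):

    u64 usage = sizeof(*smap);                                /* the bpf_stack_map struct */

    usage += n_buckets * sizeof(struct stack_map_bucket *);   /* table of bucket pointers */
    usage += enties * (sizeof(struct stack_map_bucket) + value_size); /* preallocated buckets */
    return usage;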