/kernel/bpf/
arraymap.c
    87  u32 elem_size, index_mask, max_entries;  in array_map_alloc()  (local)
    94  max_entries = attr->max_entries;  in array_map_alloc()
    100  mask64 = fls_long(max_entries - 1);  in array_map_alloc()
    109  max_entries = index_mask + 1;  in array_map_alloc()
    111  if (max_entries < attr->max_entries)  in array_map_alloc()
    182  if (map->max_entries != 1)  in array_map_direct_value_addr()
    198  if (map->max_entries != 1)  in array_map_direct_value_meta()
    536  map->max_entries != 1)  in array_map_check_btf()
    577  meta0->max_entries == meta1->max_entries;  in array_map_meta_equal()
    593  if (info->index >= map->max_entries)  in bpf_array_map_seq_start()
    [all …]
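The array_map_alloc() hits above show how max_entries is turned into index_mask: the requested size is rounded up to the next power of two so that a (possibly speculated) out-of-range index can be clamped with a single AND. A minimal user-space sketch of that rounding, assuming the same semantics as the kernel's fls_long()-based code, not the kernel code itself:

#include <stdint.h>

/* Round max_entries up to a power of two and derive the mask that
 * array_map_alloc() stores in index_mask. Illustration only. */
static uint32_t array_index_mask(uint32_t max_entries)
{
	uint64_t size = 1;

	while (size < max_entries)
		size <<= 1;

	return (uint32_t)(size - 1);	/* e.g. max_entries = 100 -> 0x7f */
}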
|
reuseport_array.c
    55  if (unlikely(index >= array->map.max_entries))  in reuseport_array_lookup_elem()
    69  if (index >= map->max_entries)  in reuseport_array_delete_elem()
    126  for (i = 0; i < map->max_entries; i++) {  in reuseport_array_free()
    155  array = bpf_map_area_alloc(struct_size(array, ptrs, attr->max_entries), numa_node);  in reuseport_array_alloc()
    246  if (index >= map->max_entries)  in bpf_fd_reuseport_array_update_elem()
    323  if (index >= array->map.max_entries) {  in reuseport_array_get_next_key()
    328  if (index == array->map.max_entries - 1)  in reuseport_array_get_next_key()
    339  return struct_size(array, ptrs, map->max_entries);  in reuseport_array_mem_usage()
|
devmap.c
    127  if (attr->max_entries > 1UL << 31)  in dev_map_alloc_check()
    235  for (i = 0; i < dtab->map.max_entries; i++) {  in dev_map_free()
    260  if (index >= dtab->map.max_entries) {  in dev_map_get_next_key()
    265  if (index == dtab->map.max_entries - 1)  in dev_map_get_next_key()
    444  if (key >= map->max_entries)  in __dev_map_lookup_elem()
    623  for (i = 0; i < map->max_entries; i++) {  in dev_map_enqueue_multi()
    741  for (i = 0; i < map->max_entries; i++) {  in dev_map_redirect_multi()
    826  if (k >= map->max_entries)  in dev_map_delete_elem()
    917  if (unlikely(i >= dtab->map.max_entries))  in __dev_map_update_elem()
    987  if (dtab->items >= dtab->map.max_entries) {  in __dev_map_hash_update_elem()
    [all …]
|
cpumap.c
    89  if (attr->max_entries == 0 || attr->key_size != 4 ||  in cpu_map_alloc()
    96  if (attr->max_entries > NR_CPUS)  in cpu_map_alloc()
    106  cmap->cpu_map = bpf_map_area_alloc(cmap->map.max_entries *  in cpu_map_alloc()
    562  if (key_cpu >= map->max_entries)  in cpu_map_delete_elem()
    583  if (unlikely(key_cpu >= cmap->map.max_entries))  in cpu_map_update_elem()
    626  for (i = 0; i < cmap->map.max_entries; i++) {  in cpu_map_free()
    649  if (key >= map->max_entries)  in __cpu_map_lookup_elem()
    671  if (index >= cmap->map.max_entries) {  in cpu_map_get_next_key()
    676  if (index == cmap->map.max_entries - 1)  in cpu_map_get_next_key()
    693  usage += (u64)map->max_entries * sizeof(struct bpf_cpu_map_entry *);  in cpu_map_mem_usage()
|
queue_stack_maps.c
    50  if (attr->max_entries == 0 || attr->key_size != 0 ||  in queue_stack_map_alloc_check()
    71  size = (u64) attr->max_entries + 1;  in queue_stack_map_alloc()
    253  usage += ((u64)map->max_entries + 1) * map->value_size;  in queue_stack_map_mem_usage()
|
hashtab.c
    220  u32 num_entries = htab->map.max_entries;  in htab_free_prealloced_timers_and_wq()
    242  u32 num_entries = htab->map.max_entries;  in htab_free_prealloced_fields()
    277  for (i = 0; i < htab->map.max_entries; i++) {  in htab_free_elems()
    318  u32 num_entries = htab->map.max_entries;  in prealloc_init()
    448  if (attr->max_entries == 0 || attr->key_size == 0 ||  in htab_map_alloc_check()
    492  htab->map.max_entries = roundup(attr->max_entries,  in htab_map_alloc()
    494  if (htab->map.max_entries < attr->max_entries)  in htab_map_alloc()
    495  htab->map.max_entries = rounddown(attr->max_entries,  in htab_map_alloc()
    503  if (htab->map.max_entries > 1UL << 31)  in htab_map_alloc()
    901  return atomic_read(&htab->count) >= htab->map.max_entries;  in is_map_full()
    [all …]
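In hashtab.c, attr->max_entries is validated in htab_map_alloc_check() (it must be non-zero), possibly rounded in htab_map_alloc(), and then used by is_map_full() to cap the element count. From user space the value arrives through the map-creation attributes; a minimal sketch using libbpf's bpf_map_create(), with an illustrative map name and key/value sizes (once the limit is reached, inserting new keys fails with -E2BIG):

#include <bpf/bpf.h>

/* Create a hash map; the 1024 below becomes attr->max_entries and is the
 * limit that is_map_full() enforces. Name and sizes are illustrative. */
int create_example_htab(void)
{
	return bpf_map_create(BPF_MAP_TYPE_HASH, "example_htab",
			      sizeof(__u32), sizeof(__u64),
			      1024 /* max_entries */, NULL);
}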
|
stackmap.c
    51  smap->elems = bpf_map_area_alloc(elem_size * smap->map.max_entries,  in prealloc_elems_and_freelist()
    61  smap->map.max_entries);  in prealloc_elems_and_freelist()
    81  if (attr->max_entries == 0 || attr->key_size != 4 ||  in stack_map_alloc()
    97  if (attr->max_entries > 1UL << 31)  in stack_map_alloc()
    100  n_buckets = roundup_pow_of_two(attr->max_entries);  in stack_map_alloc()
    742  u64 enties = map->max_entries;  in stack_map_mem_usage()
|
map_in_map.c
    40  inner_map_meta->max_entries = inner_map->max_entries;  in bpf_map_meta_alloc()
|
bloom_filter.c
    102  attr->max_entries == 0 ||  in bloom_map_alloc()
    127  if (check_mul_overflow(attr->max_entries, nr_hash_funcs, &nr_bits) ||  in bloom_map_alloc()
|
ringbuf.c
    198  !is_power_of_2(attr->max_entries) ||  in ringbuf_map_alloc()
    199  !PAGE_ALIGNED(attr->max_entries))  in ringbuf_map_alloc()
    208  rb_map->rb = bpf_ringbuf_alloc(attr->max_entries, rb_map->map.numa_node);  in ringbuf_map_alloc()
    346  nr_data_pages = map->max_entries >> PAGE_SHIFT;  in ringbuf_map_mem_usage()
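For BPF_MAP_TYPE_RINGBUF, max_entries is the buffer size in bytes, and ringbuf_map_alloc() insists it is a power of two and page-aligned. A user-space sketch under those constraints (map name and size are illustrative; key and value sizes must be zero for this map type):

#include <bpf/bpf.h>

/* 256 KiB: a power of two that is also a multiple of the typical 4 KiB
 * page size, so it passes the is_power_of_2()/PAGE_ALIGNED() checks above. */
int create_example_ringbuf(void)
{
	return bpf_map_create(BPF_MAP_TYPE_RINGBUF, "example_rb",
			      0, 0, 256 * 1024 /* max_entries, bytes */, NULL);
}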
|
arena.c
    106  if (attr->key_size || attr->value_size || attr->max_entries == 0 ||  in arena_map_alloc()
    117  vm_range = (u64)attr->max_entries * PAGE_SIZE;  in arena_map_alloc()
    141  err = range_tree_set(&arena->rt, 0, attr->max_entries);  in arena_map_alloc()
|
lpm_trie.c
    315  if (trie->n_entries == trie->map.max_entries)  in trie_check_add_elem()
    578  if (attr->max_entries == 0 ||  in trie_alloc()
|
local_storage.c
    312  if (attr->max_entries)  in cgroup_storage_map_alloc()
|
bpf_local_storage.c
    708  attr->max_entries ||  in bpf_local_storage_map_alloc_check()
|
syscall.c
    436  map->max_entries = attr->max_entries;  in bpf_map_init_from_attr()
    1005  map->max_entries,  in bpf_map_show_fdinfo()
    5168  info.max_entries = map->max_entries;  in bpf_map_get_info_by_fd()
|
bpf_struct_ops.c
    1007  if (attr->key_size != sizeof(unsigned int) || attr->max_entries != 1 ||  in bpf_struct_ops_map_alloc_check()
|
core.c
    2049  if (unlikely(index >= array->map.max_entries))  in ___bpf_prog_run()
|
helpers.c
    2572  if (unlikely(idx >= array->map.max_entries))  in BPF_CALL_2()
|
verifier.c
    11160  max = map->max_entries;  in record_func_key()
    11714  meta.const_map_key < meta.map_ptr->max_entries)  in check_helper_call()
    16947  WARN_ON_ONCE(map->max_entries != 1);  in check_ld_imm()
    22436  map_ptr->max_entries, 2);  in do_misc_fixups()
|
/kernel/
stacktrace.c
    273  .max_entries = size,  in stack_trace_save()
    297  .max_entries = size,  in stack_trace_save_tsk()
    321  .max_entries = size,  in stack_trace_save_regs()
    347  .max_entries = size,  in stack_trace_save_tsk_reliable()
    367  .max_entries = size,  in stack_trace_save_user()
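Here max_entries is not a BPF map field: these initializers pass the caller's size argument through as the bound on how many return addresses may be stored. A hypothetical in-kernel caller (not from the tree) would look like:

#include <linux/kernel.h>
#include <linux/stacktrace.h>

/* The second argument of stack_trace_save() is what shows up above as
 * .max_entries; at most that many entries are written to the buffer. */
static void log_current_stack(void)
{
	unsigned long entries[16];
	unsigned int nr;

	nr = stack_trace_save(entries, ARRAY_SIZE(entries), 0 /* skipnr */);
	stack_trace_print(entries, nr, 0 /* spaces */);
}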
|
/kernel/unwind/
user.c
    109  int unwind_user(struct unwind_stacktrace *trace, unsigned int max_entries)  in unwind_user()  (argument)
    115  if (!max_entries)  in unwind_user()
    123  if (trace->nr >= max_entries)  in unwind_user()
|
/kernel/bpf/preload/iterators/
iterators.bpf.c
    18  __u32 max_entries;  (member)
    92  map->id, map->name, map->max_entries,  in dump_bpf_map()
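iterators.bpf.c mirrors struct bpf_map (including the max_entries member) so the preloaded iterator can print it for each map. On the BPF program side, the value is normally declared with libbpf's BTF-defined map syntax; a sketch with illustrative names:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* __uint(max_entries, ...) becomes attr->max_entries at creation time and
 * is what the map iterator later reports back. Names are illustrative. */
struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 16);
	__type(key, __u32);
	__type(value, __u64);
} example_counters SEC(".maps");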
|
/kernel/locking/
lockdep.c
    567  int max_entries;  in save_trace()  (local)
    573  max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries -  in save_trace()
    576  if (max_entries <= 0) {  in save_trace()
    587  trace->nr_entries = stack_trace_save(trace->entries, max_entries, 3);  in save_trace()
|
/kernel/trace/
bpf_trace.c
    550  if (unlikely(index >= array->map.max_entries))  in get_map_perf_counter()
    628  if (unlikely(index >= array->map.max_entries))  in __bpf_perf_event_output()
|
trace_events_hist.c
    5437  unsigned int max_entries)  in hist_trigger_stacktrace_print()  (argument)
    5442  for (i = 0; i < max_entries; i++) {  in hist_trigger_stacktrace_print()
|