Lines Matching refs:BPF_REG_SIZE
580 return (-off - 1) / BPF_REG_SIZE; in __get_spi()
593 int allocated_slots = state->allocated_stack / BPF_REG_SIZE; in is_spi_bounds_valid()
616 if (off % BPF_REG_SIZE) { in stack_slot_obj_get_spi()
734 for (i = 0; i < BPF_REG_SIZE; i++) { in mark_stack_slots_dynptr()
772 for (i = 0; i < BPF_REG_SIZE; i++) { in invalidate_dynptr()
832 for (i = 1; i < state->allocated_stack / BPF_REG_SIZE; i++) { in unmark_stack_slots_dynptr()
890 for (i = 0; i < BPF_REG_SIZE; i++) { in destroy_if_dynptr_stack_slot()
966 for (i = 0; i < BPF_REG_SIZE; i++) { in is_dynptr_reg_valid_init()
1038 for (j = 0; j < BPF_REG_SIZE; j++) in mark_stack_slots_iter()
1069 for (j = 0; j < BPF_REG_SIZE; j++) in unmark_stack_slots_iter()
1097 for (j = 0; j < BPF_REG_SIZE; j++) in is_iter_reg_valid_uninit()
1129 for (j = 0; j < BPF_REG_SIZE; j++) in is_iter_reg_valid_init()
1144 enum bpf_stack_slot_type type = stack->slot_type[BPF_REG_SIZE - 1]; in is_stack_slot_special()
1166 return stack->slot_type[BPF_REG_SIZE - 1] == STACK_SPILL; in is_spilled_reg()
1171 return stack->slot_type[BPF_REG_SIZE - 1] == STACK_SPILL && in is_spilled_scalar_reg()
1274 size_t n = src->allocated_stack / BPF_REG_SIZE; in copy_stack_state()
1301 size_t old_n = state->allocated_stack / BPF_REG_SIZE, n; in grow_stack_state()
1304 size = round_up(size, BPF_REG_SIZE); in grow_stack_state()
1305 n = size / BPF_REG_SIZE; in grow_stack_state()
4146 for (j = 0; j < func->allocated_stack / BPF_REG_SIZE; j++) { in mark_all_scalars_precise()
4176 for (j = 0; j < func->allocated_stack / BPF_REG_SIZE; j++) { in mark_all_scalars_imprecise()
4404 if (i >= func->allocated_stack / BPF_REG_SIZE) { in __mark_chain_precision()
4406 i, func->allocated_stack / BPF_REG_SIZE); in __mark_chain_precision()
4564 if (size == BPF_REG_SIZE) in save_register_state()
4567 for (i = BPF_REG_SIZE; i > BPF_REG_SIZE - size; i--) in save_register_state()
4620 int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err; in check_stack_write_fixed_off()
4630 size != BPF_REG_SIZE) { in check_stack_write_fixed_off()
4660 if (reg && !(off % BPF_REG_SIZE) && reg->type == SCALAR_VALUE && env->bpf_capable) { in check_stack_write_fixed_off()
4671 } else if (!reg && !(off % BPF_REG_SIZE) && is_bpf_st_mem(insn) && in check_stack_write_fixed_off()
4681 if (size != BPF_REG_SIZE) { in check_stack_write_fixed_off()
4698 for (i = 0; i < BPF_REG_SIZE; i++) in check_stack_write_fixed_off()
4709 if (size == BPF_REG_SIZE) in check_stack_write_fixed_off()
4729 state->stack[spi].slot_type[(slot - i) % BPF_REG_SIZE] = type; in check_stack_write_fixed_off()
4800 spi = slot / BPF_REG_SIZE; in check_stack_write_var_off()
4801 stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE]; in check_stack_write_var_off()
4888 spi = slot / BPF_REG_SIZE; in mark_reg_stack_read()
4891 if (stype[slot % BPF_REG_SIZE] != STACK_ZERO) in mark_reg_stack_read()
4923 int i, slot = -off - 1, spi = slot / BPF_REG_SIZE; in check_stack_read_fixed_off()
4937 for (i = BPF_REG_SIZE - 1; i > 0 && stype[i - 1] == STACK_SPILL; i--) in check_stack_read_fixed_off()
4940 if (size != BPF_REG_SIZE || spill_size != BPF_REG_SIZE) { in check_stack_read_fixed_off()
4970 type = stype[(slot - i) % BPF_REG_SIZE]; in check_stack_read_fixed_off()
5024 type = stype[(slot - i) % BPF_REG_SIZE]; in check_stack_read_fixed_off()
7141 if (!err && size < BPF_REG_SIZE && value_regno >= 0 && t == BPF_READ && in check_mem_access()
7360 if (state->stack[spi].slot_type[stack_off % BPF_REG_SIZE] == STACK_DYNPTR) { in check_stack_range_initialized()
7374 spi = slot / BPF_REG_SIZE; in check_stack_range_initialized()
7380 stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE]; in check_stack_range_initialized()
7397 for (j = 0; j < BPF_REG_SIZE; j++) in check_stack_range_initialized()
7997 nr_slots = t->size / BPF_REG_SIZE; in process_iter_arg()
8007 for (i = 0; i < nr_slots * 8; i += BPF_REG_SIZE) { in process_iter_arg()
8120 for (i = 0; i < fold->allocated_stack / BPF_REG_SIZE; i++) { in widen_imprecise_scalars()
15293 for (j = 0; j < func->allocated_stack / BPF_REG_SIZE; j++) { in collect_linked_regs()
16308 for (i = 1, off = lowest_off; i <= ARRAY_SIZE(caller_saved); ++i, off += BPF_REG_SIZE) { in mark_fastcall_pattern_for_call()
17126 for (i = 0; i < st->allocated_stack / BPF_REG_SIZE; i++) { in clean_func_state()
17132 for (j = 0; j < BPF_REG_SIZE; j++) in clean_func_state()
17395 spi = i / BPF_REG_SIZE; in stacksafe()
17399 old->stack[spi].slot_type[i % BPF_REG_SIZE] != in stacksafe()
17400 cur->stack[spi].slot_type[i % BPF_REG_SIZE])) in stacksafe()
17405 i += BPF_REG_SIZE - 1; in stacksafe()
17410 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_INVALID) in stacksafe()
17414 old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_MISC) in stacksafe()
17433 i += BPF_REG_SIZE - 1; in stacksafe()
17441 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_MISC && in stacksafe()
17442 cur->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_ZERO) in stacksafe()
17444 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] != in stacksafe()
17445 cur->stack[spi].slot_type[i % BPF_REG_SIZE]) in stacksafe()
17452 if (i % BPF_REG_SIZE != BPF_REG_SIZE - 1) in stacksafe()
17455 switch (old->stack[spi].slot_type[BPF_REG_SIZE - 1]) { in stacksafe()
17696 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE && in propagate_liveness()
17697 i < parent->allocated_stack / BPF_REG_SIZE; i++) { in propagate_liveness()
17739 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) { in propagate_precision()
17750 fr, (-i - 1) * BPF_REG_SIZE); in propagate_precision()
17752 verbose(env, ",fp%d", (-i - 1) * BPF_REG_SIZE); in propagate_precision()
17857 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) { in iter_active_depths_differ()
18209 for (i = 0; i < frame->allocated_stack / BPF_REG_SIZE; i++) { in is_state_visited()
21309 s32 r6_offset = stack_base + 0 * BPF_REG_SIZE; in inline_bpf_loop()
21310 s32 r7_offset = stack_base + 1 * BPF_REG_SIZE; in inline_bpf_loop()
21311 s32 r8_offset = stack_base + 2 * BPF_REG_SIZE; in inline_bpf_loop()
21412 stack_depth_extra = BPF_REG_SIZE * 3 + stack_depth_roundup; in optimize_bpf_loop()
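
Nearly every hit above reduces to the same slot arithmetic: a negative frame offset off becomes a byte index slot = -off - 1, the 8-byte slot index is spi = slot / BPF_REG_SIZE, the per-byte state is kept in slot_type[slot % BPF_REG_SIZE], and loops over a whole slot run for BPF_REG_SIZE iterations. Below is a minimal standalone sketch of that arithmetic, not kernel code: only BPF_REG_SIZE (8, the width of one eBPF register) and the off/slot/spi formulas come from the lines above; the get_spi() helper and the printed table are purely illustrative.

/* Standalone sketch of the verifier's stack-slot arithmetic (illustrative only). */
#include <stdio.h>

#define BPF_REG_SIZE 8	/* size of one eBPF register, and of one stack slot, in bytes */

/* Same formula as __get_spi() above: fp-1..fp-8 -> spi 0, fp-9..fp-16 -> spi 1, ... */
static int get_spi(int off)
{
	return (-off - 1) / BPF_REG_SIZE;
}

int main(void)
{
	int off;

	for (off = -1; off >= -24; off--) {
		int slot = -off - 1;			/* byte index below the frame pointer */
		int spi  = slot / BPF_REG_SIZE;		/* 8-byte stack slot index            */
		int byte = slot % BPF_REG_SIZE;		/* index into that slot's slot_type[] */

		printf("off=%-4d slot=%-3d spi=%d slot_type[%d] (get_spi()=%d)\n",
		       off, slot, spi, byte, get_spi(off));
	}
	return 0;
}

Since all eight bytes of a slot share one spi, several helpers in the listing classify a whole slot by a single byte, slot_type[BPF_REG_SIZE - 1] (is_stack_slot_special(), is_spilled_reg(), stacksafe()), and code that spills whole registers to the stack, such as inline_bpf_loop() saving r6/r7/r8, sizes the reserved area in multiples of BPF_REG_SIZE.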