Lines Matching refs: BPF_REG_SIZE
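
All of these hits are in BPF verifier functions (judging by the function names, apparently kernel/bpf/verifier.c). BPF_REG_SIZE is the width of one eBPF register, defined as 8 bytes in include/linux/bpf_verifier.h, and the verifier tracks the program stack in slots of that size. The pattern that repeats most often below is converting a negative frame-pointer offset into a stack-slot index ("spi"), as in __get_spi() and check_stack_write_fixed_off(). A minimal standalone sketch of that math, not kernel code, with BPF_REG_SIZE hard-coded to 8:

#include <stdio.h>

#define BPF_REG_SIZE 8	/* one eBPF register / one verifier stack slot, in bytes */

/* Illustrative only: mirrors the (-off - 1) / BPF_REG_SIZE expression
 * seen in __get_spi() and the check_stack_* helpers in the listing below.
 */
static int get_spi(int off)
{
	/* fp-1..fp-8 -> slot 0, fp-9..fp-16 -> slot 1, ... */
	return (-off - 1) / BPF_REG_SIZE;
}

int main(void)
{
	int off;

	for (off = -1; off >= -24; off -= 8)
		printf("off=%d -> spi=%d\n", off, get_spi(off));
	return 0;
}

Offsets -1..-8 all map to slot 0, -9..-16 to slot 1, and so on; grow_stack_state() is then called with round_up(slot + 1, BPF_REG_SIZE) so the allocated stack always covers whole slots.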

653 	return (-off - 1) / BPF_REG_SIZE;  in __get_spi()
666 int allocated_slots = state->allocated_stack / BPF_REG_SIZE; in is_spi_bounds_valid()
688 if (off % BPF_REG_SIZE) { in dynptr_get_spi()
819 for (i = 0; i < BPF_REG_SIZE; i++) { in mark_stack_slots_dynptr()
856 for (i = 0; i < BPF_REG_SIZE; i++) { in unmark_stack_slots_dynptr()
926 for (i = 0; i < BPF_REG_SIZE; i++) { in destroy_if_dynptr_stack_slot()
996 for (i = 0; i < BPF_REG_SIZE; i++) { in is_dynptr_reg_valid_init()
1032 return stack->slot_type[BPF_REG_SIZE - 1] == STACK_SPILL; in is_spilled_reg()
1138 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) { in print_verifier_state()
1139 char types_buf[BPF_REG_SIZE + 1]; in print_verifier_state()
1143 for (j = 0; j < BPF_REG_SIZE; j++) { in print_verifier_state()
1149 types_buf[BPF_REG_SIZE] = 0; in print_verifier_state()
1154 verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE); in print_verifier_state()
1273 size_t n = src->allocated_stack / BPF_REG_SIZE; in copy_stack_state()
1297 size_t old_n = state->allocated_stack / BPF_REG_SIZE, n = size / BPF_REG_SIZE; in grow_stack_state()
2915 spi = (-insn->off - 1) / BPF_REG_SIZE; in backtrack_insn()
2932 spi = (-insn->off - 1) / BPF_REG_SIZE; in backtrack_insn()
3060 for (j = 0; j < func->allocated_stack / BPF_REG_SIZE; j++) { in mark_all_scalars_precise()
3086 for (j = 0; j < func->allocated_stack / BPF_REG_SIZE; j++) { in mark_all_scalars_imprecise()
3323 if (i >= func->allocated_stack / BPF_REG_SIZE) { in __mark_chain_precision()
3465 if (size == BPF_REG_SIZE) in save_register_state()
3468 for (i = BPF_REG_SIZE; i > BPF_REG_SIZE - size; i--) in save_register_state()
3491 int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err; in check_stack_write_fixed_off()
3496 err = grow_stack_state(state, round_up(slot + 1, BPF_REG_SIZE)); in check_stack_write_fixed_off()
3504 size != BPF_REG_SIZE) { in check_stack_write_fixed_off()
3533 if (reg && !(off % BPF_REG_SIZE) && register_is_bounded(reg) && in check_stack_write_fixed_off()
3547 } else if (!reg && !(off % BPF_REG_SIZE) && is_bpf_st_mem(insn) && in check_stack_write_fixed_off()
3556 if (size != BPF_REG_SIZE) { in check_stack_write_fixed_off()
3573 for (i = 0; i < BPF_REG_SIZE; i++) in check_stack_write_fixed_off()
3584 if (size == BPF_REG_SIZE) in check_stack_write_fixed_off()
3599 state->stack[spi].slot_type[(slot - i) % BPF_REG_SIZE] = in check_stack_write_fixed_off()
3651 err = grow_stack_state(state, round_up(-min_off, BPF_REG_SIZE)); in check_stack_write_var_off()
3670 spi = slot / BPF_REG_SIZE; in check_stack_write_var_off()
3671 stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE]; in check_stack_write_var_off()
3745 spi = slot / BPF_REG_SIZE; in mark_reg_stack_read()
3747 if (stype[slot % BPF_REG_SIZE] != STACK_ZERO) in mark_reg_stack_read()
3790 int i, slot = -off - 1, spi = slot / BPF_REG_SIZE; in check_stack_read_fixed_off()
3800 for (i = BPF_REG_SIZE - 1; i > 0 && stype[i - 1] == STACK_SPILL; i--) in check_stack_read_fixed_off()
3803 if (size != BPF_REG_SIZE || spill_size != BPF_REG_SIZE) { in check_stack_read_fixed_off()
3814 if (!(off % BPF_REG_SIZE) && size == spill_size) { in check_stack_read_fixed_off()
3824 type = stype[(slot - i) % BPF_REG_SIZE]; in check_stack_read_fixed_off()
3861 type = stype[(slot - i) % BPF_REG_SIZE]; in check_stack_read_fixed_off()
5530 if (!err && size < BPF_REG_SIZE && value_regno >= 0 && t == BPF_READ && in check_mem_access()
5737 if (state->stack[spi].slot_type[stack_off % BPF_REG_SIZE] == STACK_DYNPTR) { in check_stack_range_initialized()
5751 spi = slot / BPF_REG_SIZE; in check_stack_range_initialized()
5754 stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE]; in check_stack_range_initialized()
5770 for (j = 0; j < BPF_REG_SIZE; j++) in check_stack_range_initialized()
13742 for (i = 0; i < st->allocated_stack / BPF_REG_SIZE; i++) { in clean_func_state()
13748 for (j = 0; j < BPF_REG_SIZE; j++) in clean_func_state()
13928 spi = i / BPF_REG_SIZE; in stacksafe()
13931 i += BPF_REG_SIZE - 1; in stacksafe()
13936 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_INVALID) in stacksafe()
13949 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_MISC && in stacksafe()
13950 cur->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_ZERO) in stacksafe()
13952 if (old->stack[spi].slot_type[i % BPF_REG_SIZE] != in stacksafe()
13953 cur->stack[spi].slot_type[i % BPF_REG_SIZE]) in stacksafe()
13960 if (i % BPF_REG_SIZE != BPF_REG_SIZE - 1) in stacksafe()
13963 switch (old->stack[spi].slot_type[BPF_REG_SIZE - 1]) { in stacksafe()
14176 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE && in propagate_liveness()
14177 i < parent->allocated_stack / BPF_REG_SIZE; i++) { in propagate_liveness()
14213 for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) { in propagate_precision()
14222 (-i - 1) * BPF_REG_SIZE, fr); in propagate_precision()
14460 for (i = 0; i < frame->allocated_stack / BPF_REG_SIZE; i++) { in is_state_visited()
16864 s32 r6_offset = stack_base + 0 * BPF_REG_SIZE; in inline_bpf_loop()
16865 s32 r7_offset = stack_base + 1 * BPF_REG_SIZE; in inline_bpf_loop()
16866 s32 r8_offset = stack_base + 2 * BPF_REG_SIZE; in inline_bpf_loop()
16966 stack_depth_extra = BPF_REG_SIZE * 3 + stack_depth_roundup; in optimize_bpf_loop()
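
For the spill/fill checks above (save_register_state(), is_spilled_reg(), check_stack_read_fixed_off()), the per-byte slot_type markings are the key detail: a spill only marks the bytes actually written, and a slot counts as spilled when its highest byte is STACK_SPILL. A self-contained sketch of that marking scheme follows; the enum values and struct layout are simplified stand-ins, not the kernel's definitions:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define BPF_REG_SIZE 8

/* simplified stand-ins for the verifier's per-byte stack markings */
enum slot_mark { STACK_INVALID, STACK_MISC, STACK_ZERO, STACK_SPILL };

struct stack_slot {
	unsigned char slot_type[BPF_REG_SIZE];
};

/* mirrors the loop in save_register_state(): only the 'size' high bytes
 * of the slot become STACK_SPILL, lower bytes keep their old marking */
static void mark_spill(struct stack_slot *s, int size)
{
	int i;

	for (i = BPF_REG_SIZE; i > BPF_REG_SIZE - size; i--)
		s->slot_type[i - 1] = STACK_SPILL;
}

/* mirrors is_spilled_reg(): only the last byte needs to be checked */
static bool is_spilled(const struct stack_slot *s)
{
	return s->slot_type[BPF_REG_SIZE - 1] == STACK_SPILL;
}

int main(void)
{
	struct stack_slot s;

	memset(s.slot_type, STACK_MISC, sizeof(s.slot_type));
	mark_spill(&s, 4);	/* e.g. a 32-bit store to the stack */
	printf("spilled: %d, slot_type[0]: %d\n", is_spilled(&s), s.slot_type[0]);
	return 0;
}

This is also why check_stack_read_fixed_off() walks stype[] downward from BPF_REG_SIZE - 1 to compute spill_size before deciding whether a read is a clean fill of the spilled register.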