Lines Matching refs:cpu_role

213 	return !!(mmu->cpu_role.base_or_ext.reg##_##name);	\
226 return mmu->cpu_role.base.level > 0; in is_cr0_pg()
231 return !mmu->cpu_role.base.has_4_byte_gpte; in is_cr4_pae()
2305 vcpu->arch.mmu->cpu_role.base.level < PT64_ROOT_4LEVEL && in shadow_walk_init_using_root()
3744 if (mmu->cpu_role.base.level == PT32E_ROOT_LEVEL) { in mmu_alloc_shadow_roots()
3768 if (mmu->cpu_role.base.level >= PT64_ROOT_4LEVEL) { in mmu_alloc_shadow_roots()
3807 if (mmu->cpu_role.base.level == PT32E_ROOT_LEVEL) { in mmu_alloc_shadow_roots()
3821 quadrant = (mmu->cpu_role.base.level == PT32_ROOT_LEVEL) ? i : 0; in mmu_alloc_shadow_roots()
3857 mmu->cpu_role.base.level >= PT64_ROOT_4LEVEL || in mmu_alloc_special_roots()
3962 if (vcpu->arch.mmu->cpu_role.base.level >= PT64_ROOT_4LEVEL) { in kvm_mmu_sync_roots()
4751 context->cpu_role.base.level, is_efer_nx(context), in reset_guest_rsvds_bits_mask()
5130 union kvm_cpu_role cpu_role) in kvm_calc_tdp_mmu_root_page_role() argument
5137 role.smm = cpu_role.base.smm; in kvm_calc_tdp_mmu_root_page_role()
5138 role.guest_mode = cpu_role.base.guest_mode; in kvm_calc_tdp_mmu_root_page_role()
5148 union kvm_cpu_role cpu_role) in init_kvm_tdp_mmu() argument
5151 union kvm_mmu_page_role root_role = kvm_calc_tdp_mmu_root_page_role(vcpu, cpu_role); in init_kvm_tdp_mmu()
5153 if (cpu_role.as_u64 == context->cpu_role.as_u64 && in init_kvm_tdp_mmu()
5157 context->cpu_role.as_u64 = cpu_role.as_u64; in init_kvm_tdp_mmu()
5178 union kvm_cpu_role cpu_role, in shadow_mmu_init_context() argument
5181 if (cpu_role.as_u64 == context->cpu_role.as_u64 && in shadow_mmu_init_context()
5185 context->cpu_role.as_u64 = cpu_role.as_u64; in shadow_mmu_init_context()
5200 union kvm_cpu_role cpu_role) in kvm_init_shadow_mmu() argument
5205 root_role = cpu_role.base; in kvm_init_shadow_mmu()
5221 shadow_mmu_init_context(vcpu, context, cpu_role, root_role); in kvm_init_shadow_mmu()
5233 union kvm_cpu_role cpu_role = kvm_calc_cpu_role(vcpu, &regs); in kvm_init_shadow_npt_mmu() local
5237 WARN_ON_ONCE(cpu_role.base.direct); in kvm_init_shadow_npt_mmu()
5239 root_role = cpu_role.base; in kvm_init_shadow_npt_mmu()
5242 cpu_role.base.level == PT64_ROOT_4LEVEL) in kvm_init_shadow_npt_mmu()
5245 shadow_mmu_init_context(vcpu, context, cpu_role, root_role); in kvm_init_shadow_npt_mmu()
5285 if (new_mode.as_u64 != context->cpu_role.as_u64) { in kvm_init_shadow_ept_mmu()
5287 context->cpu_role.as_u64 = new_mode.as_u64; in kvm_init_shadow_ept_mmu()
5306 union kvm_cpu_role cpu_role) in init_kvm_softmmu() argument
5310 kvm_init_shadow_mmu(vcpu, cpu_role); in init_kvm_softmmu()
5322 if (new_mode.as_u64 == g_context->cpu_role.as_u64) in init_kvm_nested_mmu()
5325 g_context->cpu_role.as_u64 = new_mode.as_u64; in init_kvm_nested_mmu()
5359 union kvm_cpu_role cpu_role = kvm_calc_cpu_role(vcpu, &regs); in kvm_init_mmu() local
5362 init_kvm_nested_mmu(vcpu, cpu_role); in kvm_init_mmu()
5364 init_kvm_tdp_mmu(vcpu, cpu_role); in kvm_init_mmu()
5366 init_kvm_softmmu(vcpu, cpu_role); in kvm_init_mmu()
5387 vcpu->arch.root_mmu.cpu_role.ext.valid = 0; in kvm_mmu_after_set_cpuid()
5388 vcpu->arch.guest_mmu.cpu_role.ext.valid = 0; in kvm_mmu_after_set_cpuid()
5389 vcpu->arch.nested_mmu.cpu_role.ext.valid = 0; in kvm_mmu_after_set_cpuid()