| /arch/x86/boot/startup/ |
| A D | sev-shared.c |
    334  if (cpuid_function_is_indexed(leaf->fn) && leaf->subfn)  in __sev_cpuid_hv_msr()
    337  ret = __sev_cpuid_hv(leaf->fn, GHCB_CPUID_REQ_EAX, &leaf->eax);  in __sev_cpuid_hv_msr()
    338  ret = ret ? : __sev_cpuid_hv(leaf->fn, GHCB_CPUID_REQ_EBX, &leaf->ebx);  in __sev_cpuid_hv_msr()
    339  ret = ret ? : __sev_cpuid_hv(leaf->fn, GHCB_CPUID_REQ_ECX, &leaf->ecx);  in __sev_cpuid_hv_msr()
    463  if (cpuid_function_is_indexed(leaf->fn) && e->ecx_in != leaf->subfn)  in snp_cpuid_get_validated_func()
    506  leaf->edx = (leaf_hv.edx & BIT(9)) | (leaf->edx & ~BIT(9));  in snp_cpuid_postprocess()
    530  if (leaf->subfn != 0 && leaf->subfn != 1)  in snp_cpuid_postprocess()
    615  leaf->eax = leaf->ebx = leaf->ecx = leaf->edx = 0;  in snp_cpuid()
    619  (leaf->fn >= 0x40000000 && leaf->fn <= cpuid_hyp_range_max) ||  in snp_cpuid()
    620  (leaf->fn >= 0x80000000 && leaf->fn <= cpuid_ext_range_max)))  in snp_cpuid()
    [all …]
|
| /arch/loongarch/mm/ |
| A D | cache.c |
    44   static void flush_cache_leaf(unsigned int leaf)  in flush_cache_leaf() argument
    55   flush_cache_line(leaf, addr);  in flush_cache_leaf()
    68   int leaf;  in __flush_cache_all() local
    72   leaf = cache_present - 1;  in __flush_cache_all()
    73   if (cache_inclusive(cdesc + leaf)) {  in __flush_cache_all()
    74   flush_cache_leaf(leaf);  in __flush_cache_all()
    78   for (leaf = 0; leaf < cache_present; leaf++)  in __flush_cache_all()
    79   flush_cache_leaf(leaf);  in __flush_cache_all()
    112  cdesc++; leaf++; \
    117  unsigned int leaf = 0, level = 1;  in cpu_cache_init() local
    [all …]
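A minimal standalone sketch of the __flush_cache_all() pattern visible above: when the last-level cache is inclusive of the lower levels, flushing that single leaf is enough, otherwise every leaf is flushed in turn. The cache descriptor and the flush primitive below are stand-ins for illustration, not the LoongArch definitions.

    #include <stdbool.h>
    #include <stdio.h>

    struct cache_desc {
        bool inclusive;    /* does this level contain all lower levels? */
    };

    static void flush_cache_leaf(int leaf)
    {
        printf("flushing cache leaf %d\n", leaf);
    }

    static void flush_cache_all(const struct cache_desc *cdesc, int cache_present)
    {
        int leaf = cache_present - 1;    /* index of the last-level cache */

        if (cdesc[leaf].inclusive) {
            flush_cache_leaf(leaf);      /* one flush covers everything */
            return;
        }
        for (leaf = 0; leaf < cache_present; leaf++)
            flush_cache_leaf(leaf);
    }

    int main(void)
    {
        struct cache_desc caches[] = { { false }, { false }, { true } };

        flush_cache_all(caches, 3);      /* inclusive LLC: only leaf 2 is flushed */
        return 0;
    }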
|
| /arch/x86/include/asm/cpuid/ |
| A D | api.h |
    129  regs[CPUID_EAX] = leaf;  in __cpuid_read()
    134  #define cpuid_subleaf(leaf, subleaf, regs) { \  argument
    136  __cpuid_read(leaf, subleaf, (u32 *)(regs)); \
    139  #define cpuid_leaf(leaf, regs) { \  argument
    141  __cpuid_read(leaf, 0, (u32 *)(regs)); \
    144  static inline void __cpuid_read_reg(u32 leaf, u32 subleaf,  in __cpuid_read_reg() argument
    149  __cpuid_read(leaf, subleaf, regs);  in __cpuid_read_reg()
    153  #define cpuid_subleaf_reg(leaf, subleaf, regidx, reg) { \  argument
    155  __cpuid_read_reg(leaf, subleaf, regidx, (u32 *)(reg)); \
    158  #define cpuid_leaf_reg(leaf, regidx, reg) { \  argument
    [all …]
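The cpuid_leaf()/cpuid_subleaf() helpers above fill a caller-supplied block of four u32 registers for a given leaf/sub-leaf. A rough userspace analogue, assuming GCC's <cpuid.h> on an x86 host, is shown below; leaf 0x7 / sub-leaf 0 is only an illustrative choice and this is a sketch, not the kernel API itself.

    #include <cpuid.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t eax, ebx, ecx, edx;

        /* Same leaf/sub-leaf idea as cpuid_subleaf(leaf, subleaf, regs). */
        if (!__get_cpuid_count(0x7, 0, &eax, &ebx, &ecx, &edx)) {
            fprintf(stderr, "CPUID leaf 0x7 not available\n");
            return 1;
        }
        printf("leaf 0x7.0: eax=%#x ebx=%#x ecx=%#x edx=%#x\n", eax, ebx, ecx, edx);
        return 0;
    }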
|
| /arch/x86/kernel/cpu/ |
| A D | topology_ext.c |
    45   static inline bool topo_subleaf(struct topo_scan *tscan, u32 leaf, u32 subleaf,  in topo_subleaf() argument
    66   switch (leaf) {  in topo_subleaf()
    73   cpuid_subleaf(leaf, subleaf, &sl);  in topo_subleaf()
    80   leaf, subleaf, sl.type);  in topo_subleaf()
    97   leaf, subleaf, tscan->c->topo.initial_apicid, sl.x2apic_id);  in topo_subleaf()
    104  static bool parse_topology_leaf(struct topo_scan *tscan, u32 leaf)  in parse_topology_leaf() argument
    110  for (subleaf = 0, last_dom = 0; topo_subleaf(tscan, leaf, subleaf, &last_dom); subleaf++);  in parse_topology_leaf()
    125  leaf, tscan->dom_ncpus[TOPO_SMT_DOMAIN]);  in parse_topology_leaf()
|
| A D | topology_amd.c |
    77   } leaf;  in parse_8000_001e() local
    82   cpuid_leaf(0x8000001e, &leaf);  in parse_8000_001e()
    84   tscan->c->topo.initial_apicid = leaf.ext_apic_id;  in parse_8000_001e()
    95   unsigned int nthreads = leaf.core_nthreads + 1;  in parse_8000_001e()
    100  store_node(tscan, leaf.nnodes_per_socket + 1, leaf.node_id);  in parse_8000_001e()
    104  tscan->c->topo.cu_id = leaf.core_id;  in parse_8000_001e()
    106  cacheinfo_amd_init_llc_id(tscan->c, leaf.node_id);  in parse_8000_001e()
|
| /arch/mips/kernel/ |
| A D | cacheinfo.c |
    8    #define populate_cache(cache, leaf, c_level, c_type) \  argument
    10   leaf->type = c_type; \
    11   leaf->level = c_level; \
    12   leaf->coherency_line_size = c->cache.linesz; \
    13   leaf->number_of_sets = c->cache.sets; \
    14   leaf->ways_of_associativity = c->cache.ways; \
    15   leaf->size = c->cache.linesz * c->cache.sets * \
    17   leaf++; \
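The populate_cache() macro above fills one cacheinfo leaf per cache and derives the total size as line size × sets × ways. A simplified standalone model of that arithmetic, with field names loosely mirroring struct cacheinfo and made-up geometry values, looks like this:

    #include <stdio.h>

    struct cache_leaf {
        unsigned int level;
        unsigned int coherency_line_size;
        unsigned int number_of_sets;
        unsigned int ways_of_associativity;
        unsigned int size;
    };

    static void populate_leaf(struct cache_leaf *leaf, unsigned int level,
                              unsigned int linesz, unsigned int sets, unsigned int ways)
    {
        leaf->level = level;
        leaf->coherency_line_size = linesz;
        leaf->number_of_sets = sets;
        leaf->ways_of_associativity = ways;
        leaf->size = linesz * sets * ways;    /* total bytes in this cache */
    }

    int main(void)
    {
        struct cache_leaf l1d;

        populate_leaf(&l1d, 1, 32, 256, 4);   /* 32 * 256 * 4 = 32 KiB */
        printf("L%u: %u bytes\n", l1d.level, l1d.size);
        return 0;
    }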
|
| A D | process.c |
    553  int leaf;  in unwind_stack_by_address() local
    611  leaf = get_frame_info(&info);  in unwind_stack_by_address()
    612  if (leaf < 0)  in unwind_stack_by_address()
    618  if (leaf)  in unwind_stack_by_address()
|
| /arch/x86/kvm/vmx/ |
| A D | sgx.c |
    359  static inline bool encls_leaf_enabled_in_guest(struct kvm_vcpu *vcpu, u32 leaf)  in encls_leaf_enabled_in_guest() argument
    365  if (leaf >= ECREATE && leaf <= ETRACK)  in encls_leaf_enabled_in_guest()
    368  if (leaf >= EAUG && leaf <= EMODT)  in encls_leaf_enabled_in_guest()
    383  u32 leaf = (u32)kvm_rax_read(vcpu);  in handle_encls() local
    388  } else if (!encls_leaf_enabled_in_guest(vcpu, leaf) ||  in handle_encls()
    392  if (leaf == ECREATE)  in handle_encls()
    394  if (leaf == EINIT)  in handle_encls()
    396  WARN_ONCE(1, "unexpected exit on ENCLS[%u]", leaf);  in handle_encls()
|
| A D | tdx.c |
    1489  vcpu->run->tdx.get_tdvmcall_info.leaf = tdx->vp_enter_args.r12;  in tdx_get_td_vm_call_info()
    2673  if (leaf & TDX_MD_UNREADABLE_LEAF_MASK ||  in tdx_read_cpuid()
    2688  field_id |= ((leaf & 0x80000000) ? 1 : 0) << 16;  in tdx_read_cpuid()
    2689  field_id |= (leaf & 0x7f) << 9;  in tdx_read_cpuid()
    2714  out->function = leaf;  in tdx_read_cpuid()
    2722  if (leaf == 0x80000008) {  in tdx_read_cpuid()
    3042  int r = 0, i = 0, leaf;  in tdx_vcpu_get_cpuid() local
    3064  for (leaf = 1; leaf <= level; leaf++)  in tdx_vcpu_get_cpuid()
    3065  tdx_vcpu_get_cpuid_leaf(vcpu, leaf, &i, &td_cpuid->entries[i]);  in tdx_vcpu_get_cpuid()
    3074  for (leaf = 0x80000001; leaf <= level; leaf++)  in tdx_vcpu_get_cpuid()
    [all …]
|
| /arch/x86/coco/sev/ |
| A D | vc-shared.c |
    415  struct cpuid_leaf leaf;  in vc_handle_cpuid_snp() local
    418  leaf.fn = regs->ax;  in vc_handle_cpuid_snp()
    419  leaf.subfn = regs->cx;  in vc_handle_cpuid_snp()
    420  ret = snp_cpuid(ghcb, ctxt, &leaf);  in vc_handle_cpuid_snp()
    422  regs->ax = leaf.eax;  in vc_handle_cpuid_snp()
    423  regs->bx = leaf.ebx;  in vc_handle_cpuid_snp()
    424  regs->cx = leaf.ecx;  in vc_handle_cpuid_snp()
    425  regs->dx = leaf.edx;  in vc_handle_cpuid_snp()
|
| /arch/loongarch/include/asm/ |
| A D | cacheflush.h |
    57   static inline void flush_cache_line(int leaf, unsigned long addr)  in flush_cache_line() argument
    59   switch (leaf) {  in flush_cache_line()
|
| /arch/x86/kvm/ |
| A D | cpuid.h |
    86   unsigned int leaf)  in cpuid_entry_override() argument
    88   u32 *reg = cpuid_entry_get_reg(entry, leaf * 32);  in cpuid_entry_override()
    90   BUILD_BUG_ON(leaf >= ARRAY_SIZE(kvm_cpu_caps));  in cpuid_entry_override()
    91   *reg = kvm_cpu_caps[leaf];  in cpuid_entry_override()
|
| A D | cpuid.c |
    675  #define kvm_cpu_cap_init(leaf, feature_initializers...) \  argument
    677  const struct cpuid_reg cpuid = x86_feature_cpuid(leaf * 32); \
    678  const u32 __maybe_unused kvm_cpu_cap_init_in_progress = leaf; \
    687  kvm_cpu_caps[leaf] = kvm_cpu_cap_features; \
    689  if (leaf < NCAPINTS) \
    690  kvm_cpu_caps[leaf] &= kernel_cpu_caps[leaf]; \
    692  kvm_cpu_caps[leaf] |= kvm_cpu_cap_passthrough; \
    693  kvm_cpu_caps[leaf] &= (raw_cpuid_get(cpuid) | \
    695  kvm_cpu_caps[leaf] |= kvm_cpu_cap_emulated; \
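Both cpuid_entry_override() and kvm_cpu_cap_init() above index a capability array by a 32-bit "leaf" word, with leaf * 32 converting the word index into a feature-bit number. A small standalone sketch of that word/bit arithmetic follows; the word count and feature numbers are invented for illustration, not KVM's tables.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define NCAPWORDS 8

    static uint32_t cpu_caps[NCAPWORDS];    /* one 32-bit word ("leaf") per group */

    static void set_feature(unsigned int nr)
    {
        cpu_caps[nr / 32] |= 1u << (nr % 32);
    }

    static bool has_feature(unsigned int nr)
    {
        return cpu_caps[nr / 32] & (1u << (nr % 32));
    }

    int main(void)
    {
        unsigned int leaf = 3, bit = 5;
        unsigned int nr = leaf * 32 + bit;   /* same mapping as "leaf * 32" above */

        set_feature(nr);
        printf("word %u bit %u set: %d\n", leaf, bit, has_feature(nr));
        return 0;
    }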
|
| /arch/x86/kvm/mmu/ |
| A D | tdp_mmu.c |
    1918  int leaf = -1;  in __kvm_tdp_mmu_get_walk() local
    1921  leaf = iter.level;  in __kvm_tdp_mmu_get_walk()
    1922  sptes[leaf] = iter.old_spte;  in __kvm_tdp_mmu_get_walk()
    1925  return leaf;  in __kvm_tdp_mmu_get_walk()
    1944  int leaf;  in kvm_tdp_mmu_gpa_is_mapped() local
    1948  leaf = __kvm_tdp_mmu_get_walk(vcpu, gpa, sptes, root_to_sp(root));  in kvm_tdp_mmu_gpa_is_mapped()
    1950  if (leaf < 0)  in kvm_tdp_mmu_gpa_is_mapped()
    1953  spte = sptes[leaf];  in kvm_tdp_mmu_gpa_is_mapped()
    1954  return is_shadow_present_pte(spte) && is_last_spte(spte, leaf);  in kvm_tdp_mmu_gpa_is_mapped()
|
| A D | mmu.c |
    4311  int leaf = -1;  in get_walk() local
    4318  leaf = iterator.level;  in get_walk()
    4321  sptes[leaf] = spte;  in get_walk()
    4324  return leaf;  in get_walk()
    4330  int leaf;  in get_sptes_lockless() local
    4340  return leaf;  in get_sptes_lockless()
    4348  int root, leaf, level;  in get_mmio_spte() local
    4352  if (unlikely(leaf < 0)) {  in get_mmio_spte()
    4357  *sptep = sptes[leaf];  in get_mmio_spte()
    4366  leaf++;  in get_mmio_spte()
    [all …]
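Both walkers above (__kvm_tdp_mmu_get_walk() and get_walk()) record the entry seen at each paging level and return the lowest level reached, or -1 if nothing was walked. A toy standalone model of that "return the leaf level" pattern, using an invented entry format rather than KVM's real SPTE encoding:

    #include <stdint.h>
    #include <stdio.h>

    #define ROOT_LEVEL  4
    #define PRESENT_BIT (1ull << 0)
    #define LAST_BIT    (1ull << 7)    /* toy "this entry is a leaf" flag */

    static uint64_t toy_lookup(uint64_t gpa, int level)
    {
        /* Pretend every level is present and level 1 entries are leaves. */
        return PRESENT_BIT | (level == 1 ? LAST_BIT : 0) | (gpa & ~0xfffull);
    }

    static int get_walk(uint64_t gpa, uint64_t *entries)
    {
        int leaf = -1;

        for (int level = ROOT_LEVEL; level >= 1; level--) {
            uint64_t e = toy_lookup(gpa, level);

            if (!(e & PRESENT_BIT))
                break;
            leaf = level;            /* lowest level reached so far */
            entries[leaf] = e;
            if (e & LAST_BIT)
                break;
        }
        return leaf;
    }

    int main(void)
    {
        uint64_t entries[ROOT_LEVEL + 1] = { 0 };
        int leaf = get_walk(0x1234000, entries);

        if (leaf < 0) {
            printf("not mapped\n");
            return 0;
        }
        printf("leaf level %d, entry %#llx\n", leaf, (unsigned long long)entries[leaf]);
        return 0;
    }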
|
| /arch/arm64/ |
| A D | Makefile | 86 KBUILD_CFLAGS += -msign-return-address=non-leaf
|
| A D | Kconfig |
    1280  then only for entries in the walk cache, since the leaf translation
    2023  def_bool $(cc-option,-mbranch-protection=pac-ret+leaf)
    2111  def_bool $(cc-option,-mbranch-protection=pac-ret+leaf+bti)
|
| /arch/x86/xen/ |
| A D | p2m.c |
    154  static void p2m_mid_mfn_init(unsigned long *mid, unsigned long *leaf)  in p2m_mid_mfn_init() argument
    159  mid[i] = virt_to_mfn(leaf);  in p2m_mid_mfn_init()
|
| /arch/powerpc/kvm/ |
| A D | book3s_64_mmu_radix.c |
    1380  goto leaf;  in debugfs_radix_read()
    1392  goto leaf;  in debugfs_radix_read()
    1402  leaf:  in debugfs_radix_read()
|
| /arch/x86/events/intel/ |
| A D | pt.c |
    48   [PT_CAP_ ## _n] = { .name = __stringify(_n), .leaf = _l, \
    53   u32 leaf;  member
    80   u32 c = caps[cd->leaf * PT_CPUID_REGS_NUM + cd->reg];  in intel_pt_validate_cap()
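intel_pt_validate_cap() above looks a capability up in a flat cache of CPUID register values, indexed as leaf * PT_CPUID_REGS_NUM + reg. The standalone sketch below shows that indexing plus a mask/shift extraction; the descriptor contents, capability name, and cached values are invented for illustration, not the kernel's pt_caps table.

    #include <stdint.h>
    #include <stdio.h>

    #define PT_CPUID_LEAVES   2
    #define PT_CPUID_REGS_NUM 4    /* EAX..EDX per cached sub-leaf */

    struct pt_cap_desc {
        const char *name;
        uint32_t   leaf;    /* cached sub-leaf index */
        uint8_t    reg;     /* 0=EAX .. 3=EDX */
        uint32_t   mask;
    };

    static uint32_t pt_validate_cap(const uint32_t *caps, const struct pt_cap_desc *cd)
    {
        uint32_t c = caps[cd->leaf * PT_CPUID_REGS_NUM + cd->reg];

        return (c & cd->mask) >> __builtin_ctz(cd->mask);
    }

    int main(void)
    {
        /* Pretend two CPUID sub-leaves were cached here; sub-leaf 0, EBX = 0xf. */
        uint32_t caps[PT_CPUID_LEAVES * PT_CPUID_REGS_NUM] = {
            [0 * PT_CPUID_REGS_NUM + 1] = 0x0000000f,
        };
        struct pt_cap_desc cap = { "example_cap", 0, 1, 1u << 0 };

        printf("%s = %u\n", cap.name, pt_validate_cap(caps, &cap));
        return 0;
    }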
|
| /arch/x86/include/asm/ |
| A D | sev.h | 555 int snp_cpuid(struct ghcb *ghcb, struct es_em_ctxt *ctxt, struct cpuid_leaf *leaf);
|
| /arch/ |
| A D | Kconfig | 1716 accessed bit in non-leaf PMD entries when using them as part of linear
|