| /arch/x86/kernel/ |
| fred.c |
      47  wrmsrq(MSR_IA32_FRED_CONFIG,  in cpu_init_fred_exceptions()
      53  wrmsrq(MSR_IA32_FRED_STKLVLS, 0);  in cpu_init_fred_exceptions()
      59  wrmsrq(MSR_IA32_FRED_RSP0, __this_cpu_read(fred_rsp0));  in cpu_init_fred_exceptions()
      61  wrmsrq(MSR_IA32_FRED_RSP1, 0);  in cpu_init_fred_exceptions()
      62  wrmsrq(MSR_IA32_FRED_RSP2, 0);  in cpu_init_fred_exceptions()
      63  wrmsrq(MSR_IA32_FRED_RSP3, 0);  in cpu_init_fred_exceptions()
      83  wrmsrq(MSR_IA32_FRED_STKLVLS,  in cpu_init_fred_rsps()
      90  wrmsrq(MSR_IA32_FRED_RSP1, __this_cpu_ist_top_va(DB));  in cpu_init_fred_rsps()
      91  wrmsrq(MSR_IA32_FRED_RSP2, __this_cpu_ist_top_va(NMI));  in cpu_init_fred_rsps()
      92  wrmsrq(MSR_IA32_FRED_RSP3, __this_cpu_ist_top_va(DF));  in cpu_init_fred_rsps()
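The fred.c call sites program the FRED stack-level MSRs during per-CPU bring-up. Below is a minimal sketch of that pattern; the helper name fred_program_rsps() and its parameters are illustrative, and the real cpu_init_fred_rsps() additionally encodes which stack level each vector uses into MSR_IA32_FRED_STKLVLS.

```c
#include <linux/types.h>
#include <asm/msr.h>

/*
 * Sketch only: point the FRED stack-level MSRs at dedicated per-CPU
 * exception stacks, mirroring the cpu_init_fred_rsps() call sites above.
 */
static void fred_program_rsps(unsigned long db_stack, unsigned long nmi_stack,
                              unsigned long df_stack)
{
        wrmsrq(MSR_IA32_FRED_RSP1, db_stack);   /* stack level 1: #DB */
        wrmsrq(MSR_IA32_FRED_RSP2, nmi_stack);  /* stack level 2: NMI */
        wrmsrq(MSR_IA32_FRED_RSP3, df_stack);   /* stack level 3: #DF */
}
```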
|
| kvm.c |
     305  wrmsrq(MSR_KVM_ASYNC_PF_ACK, 1);  in DEFINE_IDTENTRY_SYSVEC()
     367  wrmsrq(MSR_KVM_ASYNC_PF_EN, pa);  in kvm_guest_cpu_init()
     380  wrmsrq(MSR_KVM_PV_EOI_EN, pa);  in kvm_guest_cpu_init()
     392  wrmsrq(MSR_KVM_ASYNC_PF_EN, 0);  in kvm_pv_disable_apf()
     403  wrmsrq(MSR_KVM_STEAL_TIME, 0);  in kvm_disable_steal_time()
     455  wrmsrq(MSR_KVM_PV_EOI_EN, 0);  in kvm_guest_cpu_offline()
     457  wrmsrq(MSR_KVM_MIGRATION_CONTROL, 0);  in kvm_guest_cpu_offline()
     744  wrmsrq(MSR_KVM_POLL_CONTROL, 0);  in kvm_resume()
     979  wrmsrq(MSR_KVM_MIGRATION_CONTROL,  in kvm_init_platform()
    1128  wrmsrq(MSR_KVM_POLL_CONTROL, 0);  in kvm_disable_host_haltpoll()
    [all …]
|
| shstk.c |
     176  wrmsrq(MSR_IA32_PL3_SSP, addr + size);  in shstk_setup()
     177  wrmsrq(MSR_IA32_U_CET, CET_SHSTK_EN);  in shstk_setup()
     375  wrmsrq(MSR_IA32_PL3_SSP, ssp);  in setup_signal_shadow_stack()
     399  wrmsrq(MSR_IA32_PL3_SSP, ssp);  in restore_signal_shadow_stack()
     476  wrmsrq(MSR_IA32_U_CET, msrval);  in wrss_control()
     495  wrmsrq(MSR_IA32_U_CET, 0);  in shstk_disable()
     496  wrmsrq(MSR_IA32_PL3_SSP, 0);  in shstk_disable()
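A minimal sketch of the enable sequence visible in the shstk_setup() call sites above: the new shadow-stack pointer is written first, then the feature bit. The helper name is hypothetical, and the shadow-stack allocation plus fpregs/xstate handling the kernel does around these writes is omitted.

```c
#include <linux/types.h>
#include <asm/msr.h>

/* Sketch: enable a user shadow stack for the current task. */
static void shstk_enable_current(unsigned long ssp_top)
{
        wrmsrq(MSR_IA32_PL3_SSP, ssp_top);      /* next user shadow-stack pointer */
        wrmsrq(MSR_IA32_U_CET, CET_SHSTK_EN);   /* turn on user shadow stacks */
}
```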
|
| tsc_sync.c |
      74  wrmsrq(MSR_IA32_TSC_ADJUST, adj->adjusted);  in tsc_verify_tsc_adjust()
     146  wrmsrq(MSR_IA32_TSC_ADJUST, 0);  in tsc_sanitize_first_cpu()
     233  wrmsrq(MSR_IA32_TSC_ADJUST, ref->adjusted);  in tsc_store_and_check_tsc_adjust()
     522  wrmsrq(MSR_IA32_TSC_ADJUST, cur->adjusted);  in check_tsc_sync_target()
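The tsc_sync.c writes all follow one idea: if IA32_TSC_ADJUST has drifted from the value the kernel expects (for example after firmware/SMM activity), write the expected value back so TSCs stay synchronized. A hedged sketch with a hypothetical helper name:

```c
#include <linux/types.h>
#include <asm/msr.h>

/* Sketch of the TSC_ADJUST verify/restore pattern used in tsc_sync.c. */
static void tsc_adjust_restore(s64 expected)
{
        s64 cur;

        rdmsrq(MSR_IA32_TSC_ADJUST, cur);
        if (cur != expected)
                wrmsrq(MSR_IA32_TSC_ADJUST, expected);
}
```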
|
| process.c |
     345  wrmsrq(MSR_MISC_FEATURES_ENABLES, msrval);  in set_cpuid_faulting()
     573  wrmsrq(MSR_AMD64_LS_CFG, msr);  in amd_set_core_ssb_state()
     590  wrmsrq(MSR_AMD64_LS_CFG, msr);  in amd_set_core_ssb_state()
     600  wrmsrq(MSR_AMD64_LS_CFG, msr);  in amd_set_core_ssb_state()
     609  wrmsrq(MSR_AMD64_LS_CFG, msr);  in amd_set_core_ssb_state()
     619  wrmsrq(MSR_AMD64_VIRT_SPEC_CTRL, ssbd_tif_to_spec_ctrl(tifn));  in amd_set_ssb_virt_state()
     726  wrmsrq(MSR_IA32_DEBUGCTLMSR, debugctl);  in __switch_to_xtra()
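The set_cpuid_faulting() site at line 345 is a read-modify-write of a single bit in MSR_MISC_FEATURES_ENABLES. A sketch of that toggle follows; the real kernel keeps a per-CPU shadow copy of the MSR instead of reading it back each time, and the helper name here is hypothetical.

```c
#include <linux/types.h>
#include <asm/msr.h>

/* Sketch: toggle CPUID faulting for the current CPU. */
static void cpuid_fault_set(bool on)
{
        u64 msrval;

        rdmsrq(MSR_MISC_FEATURES_ENABLES, msrval);
        if (on)
                msrval |= MSR_MISC_FEATURES_ENABLES_CPUID_FAULT;
        else
                msrval &= ~MSR_MISC_FEATURES_ENABLES_CPUID_FAULT;
        wrmsrq(MSR_MISC_FEATURES_ENABLES, msrval);
}
```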
|
| /arch/x86/hyperv/ |
| hv_init.c |
     133  wrmsrq(HV_X64_MSR_VP_ASSIST_PAGE, msr.as_uint64);  in hv_cpu_init()
     259  wrmsrq(HV_X64_MSR_VP_ASSIST_PAGE, msr.as_uint64);  in hv_cpu_die()
     338  wrmsrq(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);  in hv_suspend()
     470  wrmsrq(HV_X64_MSR_GUEST_OS_ID, guest_id);  in hyperv_init()
     579  wrmsrq(HV_X64_MSR_GUEST_OS_ID, 0);  in hyperv_init()
     600  wrmsrq(HV_X64_MSR_GUEST_OS_ID, 0);  in hyperv_cleanup()
     640  wrmsrq(HV_X64_MSR_CRASH_P0, err);  in hyperv_report_panic()
     641  wrmsrq(HV_X64_MSR_CRASH_P1, guest_id);  in hyperv_report_panic()
     642  wrmsrq(HV_X64_MSR_CRASH_P2, regs->ip);  in hyperv_report_panic()
     643  wrmsrq(HV_X64_MSR_CRASH_P3, regs->ax);  in hyperv_report_panic()
    [all …]
|
| hv_apic.c |
      53  wrmsrq(HV_X64_MSR_ICR, reg_val);  in hv_apic_icr_write()
      79  wrmsrq(HV_X64_MSR_EOI, val);  in hv_apic_write()
      82  wrmsrq(HV_X64_MSR_TPR, val);  in hv_apic_write()
      96  wrmsrq(HV_X64_MSR_EOI, APIC_EOI_ACK);  in hv_apic_eoi_write()
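These Hyper-V call sites replace MMIO local-APIC accesses with writes to synthetic MSR aliases; notably, the ICR becomes one 64-bit write instead of the split ICR2/ICR sequence. A sketch under that assumption, with a hypothetical helper name:

```c
#include <linux/types.h>
#include <asm/apic.h>
#include <asm/msr.h>
#include <asm/mshyperv.h>

/* Sketch: send an IPI command through Hyper-V's synthetic ICR MSR. */
static void hv_icr_write_sketch(u32 low, u32 id)
{
        /* Destination goes in the high half, as in the xAPIC ICR2 register. */
        u64 reg_val = ((u64)SET_XAPIC_DEST_FIELD(id) << 32) | low;

        wrmsrq(HV_X64_MSR_ICR, reg_val);        /* one write, no ICR2/ICR split */
}
```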
|
| /arch/x86/kernel/cpu/ |
| tsx.c |
      41  wrmsrq(MSR_IA32_TSX_CTRL, tsx);  in tsx_disable()
      60  wrmsrq(MSR_IA32_TSX_CTRL, tsx);  in tsx_enable()
     121  wrmsrq(MSR_TSX_FORCE_ABORT, msr);  in tsx_clear_cpuid()
     125  wrmsrq(MSR_IA32_TSX_CTRL, msr);  in tsx_clear_cpuid()
     154  wrmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_opt_ctrl);  in tsx_dev_mode_disable()
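The tsx_disable() site at line 41 is a read-modify-write that both forces RTM transactions to abort and hides TSX from CPUID. A close sketch (helper name hypothetical):

```c
#include <linux/types.h>
#include <asm/msr.h>

/* Sketch of the tsx_disable() pattern referenced above. */
static void tsx_ctrl_disable(void)
{
        u64 tsx;

        rdmsrq(MSR_IA32_TSX_CTRL, tsx);
        tsx |= TSX_CTRL_RTM_DISABLE;    /* force all RTM transactions to abort */
        tsx |= TSX_CTRL_CPUID_CLEAR;    /* hide RTM/HLE from CPUID enumeration */
        wrmsrq(MSR_IA32_TSX_CTRL, tsx);
}
```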
|
| common.c |
     583  wrmsrq(MSR_IA32_S_CET, msr);  in ibt_restore()
     609  wrmsrq(MSR_IA32_S_CET, 0);  in setup_cet()
     615  wrmsrq(MSR_IA32_S_CET, 0);  in setup_cet()
     626  wrmsrq(MSR_IA32_S_CET, 0);  in cet_disable()
     627  wrmsrq(MSR_IA32_U_CET, 0);  in cet_disable()
    1847  wrmsrq(MSR_FS_BASE, 1);  in detect_null_seg_behavior()
    1850  wrmsrq(MSR_FS_BASE, old_base);  in detect_null_seg_behavior()
    2196  wrmsrq(MSR_CSTAR, val);  in wrmsrq_cstar()
    2226  wrmsrq(MSR_SYSCALL_MASK,  in idt_syscall_init()
    2291  wrmsrq(MSR_TSC_AUX, cpudata);  in setup_getcpu()
    [all …]
|
| bus_lock.c |
     149  wrmsrq(MSR_TEST_CTRL, msr_test_ctrl_cache);  in __split_lock_setup()
     166  wrmsrq(MSR_TEST_CTRL, test_ctrl_val);  in sld_update_msr()
     315  wrmsrq(MSR_IA32_DEBUGCTLMSR, val);  in bus_lock_init()
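The sld_update_msr() site at line 166 toggles split-lock #AC detection by applying one bit on top of the MSR_TEST_CTRL value cached at boot. A sketch, with the cached value passed in explicitly rather than read from the driver's static variable:

```c
#include <linux/types.h>
#include <asm/msr.h>

/* Sketch: enable/disable split-lock detection relative to the cached value. */
static void sld_set(bool on, u64 msr_test_ctrl_cache)
{
        u64 val = msr_test_ctrl_cache;

        if (on)
                val |= MSR_TEST_CTRL_SPLIT_LOCK_DETECT;
        wrmsrq(MSR_TEST_CTRL, val);
}
```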
|
| umwait.c |
      36  wrmsrq(MSR_IA32_UMWAIT_CONTROL, READ_ONCE(umwait_control_cached));  in umwait_update_control_msr()
      74  wrmsrq(MSR_IA32_UMWAIT_CONTROL, orig_umwait_control_cached);  in umwait_cpu_offline()
|
| /arch/x86/events/intel/ |
| uncore_nhmex.c |
     209  wrmsrq(NHMEX_U_MSR_PMON_GLOBAL_CTL, 0);  in nhmex_uncore_msr_exit_box()
     223  wrmsrq(msr, config);  in nhmex_uncore_msr_disable_box()
     238  wrmsrq(msr, config);  in nhmex_uncore_msr_enable_box()
     244  wrmsrq(event->hw.config_base, 0);  in nhmex_uncore_msr_disable_event()
     386  wrmsrq(reg1->reg, reg1->config);  in nhmex_bbox_msr_enable_event()
     387  wrmsrq(reg1->reg + 1, reg2->config);  in nhmex_bbox_msr_enable_event()
     471  wrmsrq(reg1->reg, 0);  in nhmex_sbox_msr_enable_event()
     472  wrmsrq(reg1->reg + 1, reg1->config);  in nhmex_sbox_msr_enable_event()
     473  wrmsrq(reg1->reg + 2, reg2->config);  in nhmex_sbox_msr_enable_event()
     854  wrmsrq(reg2->reg, 0);  in nhmex_mbox_msr_enable_event()
    [all …]
|
| uncore_snb.c |
     266  wrmsrq(hwc->config_base, SNB_UNC_CTL_EN);  in snb_uncore_msr_enable_event()
     271  wrmsrq(event->hw.config_base, 0);  in snb_uncore_msr_disable_event()
     277  wrmsrq(SNB_UNC_PERF_GLOBAL_CTL,  in snb_uncore_msr_init_box()
     284  wrmsrq(SNB_UNC_PERF_GLOBAL_CTL,  in snb_uncore_msr_enable_box()
     291  wrmsrq(SNB_UNC_PERF_GLOBAL_CTL, 0);  in snb_uncore_msr_exit_box()
     376  wrmsrq(SKL_UNC_PERF_GLOBAL_CTL,  in skl_uncore_msr_init_box()
     387  wrmsrq(SKL_UNC_PERF_GLOBAL_CTL,  in skl_uncore_msr_enable_box()
     394  wrmsrq(SKL_UNC_PERF_GLOBAL_CTL, 0);  in skl_uncore_msr_exit_box()
     556  wrmsrq(ADL_UNC_PERF_GLOBAL_CTL, 0);  in adl_uncore_msr_disable_box()
     562  wrmsrq(ADL_UNC_PERF_GLOBAL_CTL, 0);  in adl_uncore_msr_exit_box()
    [all …]
|
| knc.c |
     165  wrmsrq(MSR_KNC_IA32_PERF_GLOBAL_CTRL, val);  in knc_pmu_disable_all()
     174  wrmsrq(MSR_KNC_IA32_PERF_GLOBAL_CTRL, val);  in knc_pmu_enable_all()
     211  wrmsrq(MSR_KNC_IA32_PERF_GLOBAL_OVF_CONTROL, ack);  in knc_pmu_ack_status()
|
| lbr.c |
     140  wrmsrq(MSR_LBR_SELECT, lbr_select);  in __intel_pmu_lbr_enable()
     158  wrmsrq(MSR_IA32_DEBUGCTLMSR, debugctl);  in __intel_pmu_lbr_enable()
     169  wrmsrq(x86_pmu.lbr_from + i, 0);  in intel_pmu_lbr_reset_32()
     177  wrmsrq(x86_pmu.lbr_from + i, 0);  in intel_pmu_lbr_reset_64()
     178  wrmsrq(x86_pmu.lbr_to + i, 0);  in intel_pmu_lbr_reset_64()
     180  wrmsrq(x86_pmu.lbr_info + i, 0);  in intel_pmu_lbr_reset_64()
     202  wrmsrq(MSR_LBR_SELECT, 0);  in intel_pmu_lbr_reset()
     285  wrmsrq(x86_pmu.lbr_from + idx, val);  in wrlbr_from()
     290  wrmsrq(x86_pmu.lbr_to + idx, val);  in wrlbr_to()
     295  wrmsrq(x86_pmu.lbr_info + idx, val);  in wrlbr_info()
    [all …]
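The intel_pmu_lbr_reset_64() sites show the LBR register layout: the FROM and TO (and, when present, INFO) entries sit at consecutive MSR numbers, so a reset is just a zeroing loop. A sketch with the base MSR numbers passed in rather than taken from the x86_pmu descriptor:

```c
#include <linux/types.h>
#include <asm/msr.h>

/* Sketch: clear nr LBR entries given their FROM/TO base MSR numbers. */
static void lbr_clear(u32 lbr_from, u32 lbr_to, int nr)
{
        int i;

        for (i = 0; i < nr; i++) {
                wrmsrq(lbr_from + i, 0);
                wrmsrq(lbr_to + i, 0);
        }
}
```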
|
| p6.c |
     148  wrmsrq(MSR_P6_EVNTSEL0, val);  in p6_pmu_disable_all()
     158  wrmsrq(MSR_P6_EVNTSEL0, val);  in p6_pmu_enable_all()
|
| /arch/x86/xen/ |
| suspend.c |
      43  wrmsrq(MSR_IA32_SPEC_CTRL, this_cpu_read(spec_ctrl));  in xen_vcpu_notify_restore()
      61  wrmsrq(MSR_IA32_SPEC_CTRL, 0);  in xen_vcpu_notify_suspend()
|
| /arch/x86/power/ |
| cpu.c |
      60  wrmsrq(msr->info.msr_no, msr->info.reg.q);  in msr_restore_context()
     202  wrmsrq(MSR_IA32_MISC_ENABLE, ctxt->misc_enable);  in __restore_processor_state()
     212  wrmsrq(MSR_EFER, ctxt->efer);  in __restore_processor_state()
     235  wrmsrq(MSR_GS_BASE, ctxt->kernelmode_gs_base);  in __restore_processor_state()
     271  wrmsrq(MSR_FS_BASE, ctxt->fs_base);  in __restore_processor_state()
     272  wrmsrq(MSR_KERNEL_GS_BASE, ctxt->usermode_gs_base);  in __restore_processor_state()
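The msr_restore_context() site at line 60 replays an array of saved MSR values after resume, since MSR contents do not survive suspend. A sketch using a simplified stand-in for the kernel's saved_msr bookkeeping:

```c
#include <linux/types.h>
#include <asm/msr.h>

/* Simplified stand-in for the kernel's saved_msr structure. */
struct saved_msr_sketch {
        bool valid;
        u32 msr_no;
        u64 value;
};

/* Sketch: replay saved MSRs on resume, as in msr_restore_context(). */
static void restore_saved_msrs(struct saved_msr_sketch *msrs, int count)
{
        int i;

        for (i = 0; i < count; i++)
                if (msrs[i].valid)
                        wrmsrq(msrs[i].msr_no, msrs[i].value);
}
```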
|
| /arch/x86/kernel/cpu/mce/ |
| inject.c |
     479  wrmsrq(MSR_IA32_MCG_STATUS, m.mcgstatus);  in prepare_msrs()
     483  wrmsrq(MSR_AMD64_SMCA_MCx_DESTAT(b), m.status);  in prepare_msrs()
     484  wrmsrq(MSR_AMD64_SMCA_MCx_DEADDR(b), m.addr);  in prepare_msrs()
     486  wrmsrq(MSR_AMD64_SMCA_MCx_STATUS(b), m.status);  in prepare_msrs()
     487  wrmsrq(MSR_AMD64_SMCA_MCx_ADDR(b), m.addr);  in prepare_msrs()
     490  wrmsrq(MSR_AMD64_SMCA_MCx_SYND(b), m.synd);  in prepare_msrs()
     493  wrmsrq(MSR_AMD64_SMCA_MCx_MISC(b), m.misc);  in prepare_msrs()
     495  wrmsrq(MSR_IA32_MCx_STATUS(b), m.status);  in prepare_msrs()
     496  wrmsrq(MSR_IA32_MCx_ADDR(b), m.addr);  in prepare_msrs()
     499  wrmsrq(MSR_IA32_MCx_MISC(b), m.misc);  in prepare_msrs()
|
| intel.c |
     146  wrmsrq(MSR_IA32_MCx_CTL2(bank), val | thresh);  in cmci_set_threshold()
     235  wrmsrq(MSR_IA32_MCx_CTL2(bank), val);  in cmci_claim_bank()
     329  wrmsrq(MSR_IA32_MCx_CTL2(bank), val);  in __cmci_disable_bank()
     436  wrmsrq(MSR_IA32_MCG_EXT_CTL, val | MCG_EXT_CTL_LMCE_EN);  in intel_init_lmce()
     448  wrmsrq(MSR_IA32_MCG_EXT_CTL, val);  in intel_clear_lmce()
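The cmci_set_threshold() site at line 146 shows that the per-bank IA32_MCi_CTL2 register carries both the CMCI enable bit and the error-count threshold. A sketch of that reprogramming step (the locking the kernel takes around it is omitted, and the helper name is hypothetical):

```c
#include <linux/types.h>
#include <asm/mce.h>
#include <asm/msr.h>

/* Sketch: reprogram a bank's CMCI threshold field, keeping the other bits. */
static void cmci_program_threshold(int bank, int thresh)
{
        u64 val;

        rdmsrq(MSR_IA32_MCx_CTL2(bank), val);
        val &= ~MCI_CTL2_CMCI_THRESHOLD_MASK;
        wrmsrq(MSR_IA32_MCx_CTL2(bank), val | thresh);
}
```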
|
| /arch/x86/events/amd/ |
| lbr.c |
      65  wrmsrq(MSR_AMD_SAMP_BR_FROM + idx * 2, val);  in amd_pmu_lbr_set_from()
      70  wrmsrq(MSR_AMD_SAMP_BR_FROM + idx * 2 + 1, val);  in amd_pmu_lbr_set_to()
     337  wrmsrq(MSR_AMD64_LBR_SELECT, 0);  in amd_pmu_lbr_reset()
     400  wrmsrq(MSR_AMD64_LBR_SELECT, lbr_select);  in amd_pmu_lbr_enable_all()
     405  wrmsrq(MSR_IA32_DEBUGCTLMSR, dbg_ctl | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI);  in amd_pmu_lbr_enable_all()
     409  wrmsrq(MSR_AMD_DBG_EXTN_CFG, dbg_extn_cfg | DBG_EXTN_CFG_LBRV2EN);  in amd_pmu_lbr_enable_all()
|
| /arch/x86/kernel/cpu/resctrl/ |
| rdtgroup.c |
     115  wrmsrq(MSR_IA32_EVT_CFG_BASE + index, config_info->mon_config);  in resctrl_arch_mon_event_config_write()
     122  wrmsrq(MSR_IA32_L3_QOS_CFG, *enable ? L3_QOS_CDP_ENABLE : 0ULL);  in l3_qos_cfg_update()
     129  wrmsrq(MSR_IA32_L2_QOS_CFG, *enable ? L2_QOS_CDP_ENABLE : 0ULL);  in l2_qos_cfg_update()
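The l3_qos_cfg_update() site at line 122 shows that CDP (code/data prioritization) is a single enable bit, written on every CPU of the affected resctrl domain. A sketch of the per-CPU callback body; L3_QOS_CDP_ENABLE is a resctrl-internal constant and the helper name is hypothetical.

```c
#include <linux/types.h>
#include <asm/msr.h>

/* Sketch: per-CPU CDP toggle, as in l3_qos_cfg_update(). */
static void l3_cdp_set(bool enable)
{
        wrmsrq(MSR_IA32_L3_QOS_CFG, enable ? L3_QOS_CDP_ENABLE : 0ULL);
}
```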
|
| pseudo_lock.c |
     218  wrmsrq(MSR_MISC_FEATURE_CONTROL, saved_msr);  in resctrl_arch_pseudo_lock_fn()
     254  wrmsrq(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits);  in resctrl_arch_measure_cycles_lat_fn()
     350  wrmsrq(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits);  in measure_residency_fn()
|
| /arch/x86/events/zhaoxin/ |
| core.c |
     258  wrmsrq(MSR_CORE_PERF_GLOBAL_CTRL, 0);  in zhaoxin_pmu_disable_all()
     263  wrmsrq(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);  in zhaoxin_pmu_enable_all()
     277  wrmsrq(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);  in zhaoxin_pmu_ack_status()
     299  wrmsrq(hwc->config_base, ctrl_val);  in zhaoxin_pmu_disable_fixed()
     336  wrmsrq(hwc->config_base, ctrl_val);  in zhaoxin_pmu_enable_fixed()
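As with the Intel core PMU, zhaoxin_pmu_disable_all()/enable_all() gate every counter through MSR_CORE_PERF_GLOBAL_CTRL, so a global disable is a single zero write. A minimal sketch with a hypothetical helper name:

```c
#include <linux/types.h>
#include <asm/msr.h>

/* Sketch: gate all architectural perf counters with one MSR write. */
static void pmu_global_ctrl_write(u64 enable_mask)
{
        wrmsrq(MSR_CORE_PERF_GLOBAL_CTRL, enable_mask); /* 0 disables all counters */
}
```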
|
| /arch/x86/include/asm/ |
| fsgsbase.h |
      73  wrmsrq(MSR_FS_BASE, fsbase);  in x86_fsbase_write_cpu()
|