/*
 * Copyright 2014, General Dynamics C4 Systems
 *
 * SPDX-License-Identifier: GPL-2.0-only
 */

#include <types.h>
#include <machine/registerset.h>
#include <model/statedata.h>
#include <object/structures.h>
#include <arch/machine.h>
#include <arch/machine/hardware.h>
#include <arch/machine/registerset.h>
#include <linker.h>

/* initialises the MSRs required to set up sysenter and sysexit */
BOOT_CODE void init_sysenter_msrs(void)
{
    x86_wrmsr(IA32_SYSENTER_CS_MSR, (uint64_t)(word_t)SEL_CS_0);
    x86_wrmsr(IA32_SYSENTER_EIP_MSR, (uint64_t)(word_t)&handle_syscall);
    if (config_set(CONFIG_ARCH_IA32) && !config_set(CONFIG_HARDWARE_DEBUG_API)) {
        /* manually add 4 bytes to x86KStss so that it is valid for both
         * 32-bit and 64-bit, although only ia32 actually requires a valid
         * sysenter esp */
        x86_wrmsr(IA32_SYSENTER_ESP_MSR, (uint64_t)(word_t)((char *)&x86KSGlobalState[CURRENT_CPU_INDEX()].x86KStss.tss.words[0]
                                                            + 4));
    }
}

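/* Returns the PC a faulted or interrupted thread will restart from, i.e. the
 * thread's saved FaultIP register */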
word_t PURE getRestartPC(tcb_t *thread)
{
    return getRegister(thread, FaultIP);
}

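/* Sets the PC the thread will continue from, by writing the thread's saved
 * NextIP register */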
void setNextPC(tcb_t *thread, word_t v)
{
    setRegister(thread, NextIP, v);
}

/* Returns the log2 of the CPU's cache line size, or 0 on error */
BOOT_CODE uint32_t CONST getCacheLineSizeBits(void)
{
    uint32_t line_size;
    uint32_t n;

    line_size = getCacheLineSize();
    if (line_size == 0) {
        printf("Cacheline size must be >0\n");
        return 0;
    }

    /* determine size_bits */
    n = 0;
    while (!(line_size & 1)) {
        line_size >>= 1;
        n++;
    }

    if (line_size != 1) {
        printf("Cacheline size must be a power of two\n");
        return 0;
    }

    return n;
}
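
/* A worked example (illustrative only): a CPU reporting a 64-byte cache line
 * shifts line_size right six times before the low bit is set, so the function
 * returns 6; boot code can then store this value in x86KScacheLineSizeBits
 * for use by flushCacheRange() below. */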

/* Flushes a specific memory range from the CPU cache */

void flushCacheRange(void *vaddr, uint32_t size_bits)
{
    word_t v;

    assert(size_bits < seL4_WordBits);
    assert(IS_ALIGNED((word_t)vaddr, size_bits));

    x86_mfence();

    for (v = ROUND_DOWN((word_t)vaddr, x86KScacheLineSizeBits);
         v < (word_t)vaddr + BIT(size_bits);
         v += BIT(x86KScacheLineSizeBits)) {
        flushCacheLine((void *)v);
    }
    x86_mfence();
}
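
/* A usage sketch (hypothetical object): flushing a 64-byte-aligned object of
 * 2^6 bytes on a CPU with x86KScacheLineSizeBits == 6 executes exactly one
 * flushCacheLine() call, bracketed by the two fences:
 *
 *     flushCacheRange(obj, 6);
 */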

/* Disables as many prefetchers as possible */
BOOT_CODE bool_t disablePrefetchers(void)
{
    x86_cpu_identity_t *model_info;
    uint32_t low, high;
    word_t i;

    uint32_t valid_models[] = { BROADWELL_1_MODEL_ID, BROADWELL_2_MODEL_ID,
                                BROADWELL_3_MODEL_ID, BROADWELL_4_MODEL_ID,
                                BROADWELL_5_MODEL_ID,
                                HASWELL_1_MODEL_ID, HASWELL_2_MODEL_ID,
                                HASWELL_3_MODEL_ID, HASWELL_4_MODEL_ID,
                                IVY_BRIDGE_1_MODEL_ID, IVY_BRIDGE_2_MODEL_ID,
                                IVY_BRIDGE_3_MODEL_ID,
                                SANDY_BRIDGE_1_MODEL_ID, SANDY_BRIDGE_2_MODEL_ID,
                                WESTMERE_1_MODEL_ID, WESTMERE_2_MODEL_ID,
                                WESTMERE_3_MODEL_ID,
                                NEHALEM_1_MODEL_ID, NEHALEM_2_MODEL_ID, NEHALEM_3_MODEL_ID,
                                SKYLAKE_1_MODEL_ID, SKYLAKE_2_MODEL_ID
                              };

    model_info = x86_cpuid_get_model_info();

    for (i = 0; i < ARRAY_SIZE(valid_models); i++) {
        /* The model ID is only useful when hashed together with the family ID.
         * They are both meant to be combined to form a unique identifier.
         *
         * As far as I can tell though, we might be able to actually just
         * disable prefetching on anything that matches family_ID==0x6, and
         * there is no need to also look at the model_ID.
         */
        if (model_info->family == IA32_PREFETCHER_COMPATIBLE_FAMILIES_ID
            && model_info->model == valid_models[i]) {
            low = x86_rdmsr_low(IA32_PREFETCHER_MSR);
            high = x86_rdmsr_high(IA32_PREFETCHER_MSR);

            low |= IA32_PREFETCHER_MSR_L2;
            low |= IA32_PREFETCHER_MSR_L2_ADJACENT;
            low |= IA32_PREFETCHER_MSR_DCU;
            low |= IA32_PREFETCHER_MSR_DCU_IP;

            x86_wrmsr(IA32_PREFETCHER_MSR, ((uint64_t)high) << 32 | low);

            return true;
        }
    }

    printf("Disabling prefetchers not implemented for CPU fam %x model %x\n",
           model_info->family, model_info->model);
    return false;
}

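/* Sets CR4.PCE so that user-level code may read the performance-monitoring
 * counters with rdpmc */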
BOOT_CODE void enablePMCUser(void)
{
    write_cr4(read_cr4() | CR4_PCE);
}

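/* Queries CPUID leaf 7 and the IA32_ARCH_CAPABILITIES MSR for IBRS/IBPB
 * support and applies the Spectre mitigations selected in the kernel
 * configuration, failing if the CPU lacks a requested feature */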
BOOT_CODE bool_t init_ibrs(void)
{
    cpuid_007h_edx_t edx;
    edx.words[0] = x86_cpuid_edx(0x7, 0);
    bool_t support_ibrs = cpuid_007h_edx_get_ibrs_ibpb(edx);
    if ((config_set(CONFIG_KERNEL_X86_IBRS_BASIC) || config_set(CONFIG_KERNEL_X86_IBRS_STIBP)) && !support_ibrs) {
        printf("IBRS not supported by CPU\n");
        return false;
    } else if (support_ibrs) {
        /* 'disable' IBRS. For IBRS_BASIC this does nothing, and for STIBP this will cause
         * us to enable STIBP, and we can then forget about it */
        x86_disable_ibrs();
    }
    /* IBRS is also the feature flag for IBPB */
    if (config_set(CONFIG_KERNEL_X86_IBPB_ON_CONTEXT_SWITCH) && !support_ibrs) {
        printf("IBPB not supported by CPU\n");
        return false;
    }
    /* check for enhanced IBRS */
    bool_t enhanced_ibrs = false;
    if (cpuid_007h_edx_get_ia32_arch_cap_msr(edx)) {
        ia32_arch_capabilities_msr_t cap_msr;
        cap_msr.words[0] = x86_rdmsr(IA32_ARCH_CAPABILITIES_MSR);
        if (ia32_arch_capabilities_msr_get_ibrs_all(cap_msr)) {
            enhanced_ibrs = true;
        }
    }
    if (config_set(CONFIG_KERNEL_X86_IBRS_BASIC) && enhanced_ibrs) {
        printf("Kernel configured for basic IBRS, but CPU supports enhanced IBRS. "
               "Enable enhanced IBRS for improved performance\n");
    }
    if (config_set(CONFIG_KERNEL_X86_IBRS_ALL)) {
        if (!enhanced_ibrs) {
            printf("Enhanced IBRS not supported by CPU\n");
            return false;
        }
        /* enable IBRS and then we can forget about it */
        x86_enable_ibrs();
    }
    return true;
}