/*
 * Copyright (C) 2018-2022 Intel Corporation.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <types.h>
#include <asm/lib/bits.h>
#include <asm/msr.h>
#include <asm/cpu.h>
#include <asm/per_cpu.h>
#include <asm/cpu_caps.h>
#include <asm/lapic.h>
#include <asm/apicreg.h>
#include <asm/irq.h>
#include <delay.h>

/* intr_lapic_icr_delivery_mode */
#define INTR_LAPIC_ICR_FIXED           0x0U
#define INTR_LAPIC_ICR_LP              0x1U
#define INTR_LAPIC_ICR_SMI             0x2U
#define INTR_LAPIC_ICR_NMI             0x4U
#define INTR_LAPIC_ICR_INIT            0x5U
#define INTR_LAPIC_ICR_STARTUP         0x6U

/* intr_lapic_icr_dest_mode */
#define INTR_LAPIC_ICR_PHYSICAL        0x0U
#define INTR_LAPIC_ICR_LOGICAL         0x1U

/* intr_lapic_icr_level */
#define INTR_LAPIC_ICR_DEASSERT        0x0U
#define INTR_LAPIC_ICR_ASSERT          0x1U

/* intr_lapic_icr_trigger */
#define INTR_LAPIC_ICR_EDGE            0x0U
#define INTR_LAPIC_ICR_LEVEL           0x1U

/* intr_lapic_icr_shorthand */
#define INTR_LAPIC_ICR_USE_DEST_ARRAY  0x0U
#define INTR_LAPIC_ICR_SELF            0x1U
#define INTR_LAPIC_ICR_ALL_INC_SELF    0x2U
#define INTR_LAPIC_ICR_ALL_EX_SELF     0x3U

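/*
 * For reference, the x2APIC ICR (MSR_IA32_EXT_APIC_ICR) encodes the fields
 * above as follows (Intel SDM Vol.3, x2APIC ICR layout):
 *   bits [7:0]   vector
 *   bits [10:8]  delivery mode
 *   bit  [11]    destination mode (0 = physical, 1 = logical)
 *   bits [19:18] destination shorthand
 *   bits [63:32] destination (x2APIC ID)
 * This is why the IPI senders below shift the mode fields by 8 and 11 and
 * place the target LAPIC ID in the upper 32 bits of the ICR value.
 */
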
union lapic_base_msr {
	uint64_t value;
	struct {
		uint32_t rsvd_1:8;
		uint32_t bsp:1;
		uint32_t rsvd_2:1;
		uint32_t x2APIC_enable:1;
		uint32_t xAPIC_enable:1;
		uint32_t lapic_paddr:24;
		uint32_t rsvd_3:28;
	} fields;
};

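/* LAPIC context captured by suspend_lapic() and replayed by resume_lapic(). */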
static struct lapic_regs saved_lapic_regs;
static union lapic_base_msr saved_lapic_base_msr;

static void clear_lapic_isr(void)
{
	uint32_t i;
	uint32_t isr_reg;

	/* This is an Intel-recommended procedure and ensures that the processor
	 * does not get hung up due to already set "in-service" interrupts left
	 * over from the boot loader environment. This actually occurs in real
	 * life, therefore we will ensure all the in-service bits are clear.
	 */
	for (isr_reg = MSR_IA32_EXT_APIC_ISR7; isr_reg >= MSR_IA32_EXT_APIC_ISR0; isr_reg--) {
		/* Each ISR MSR covers 32 vectors and each EOI write retires the
		 * highest-priority in-service interrupt, so at most 32 writes
		 * are needed per register.
		 */
		for (i = 0U; i < 32U; i++) {
			if (msr_read(isr_reg) != 0U) {
				msr_write(MSR_IA32_EXT_APIC_EOI, 0U);
			} else {
				break;
			}
		}
	}
}

void early_init_lapic(void)
{
	union lapic_base_msr base;

	/* Get the local APIC base address */
	base.value = msr_read(MSR_IA32_APIC_BASE);

	/* Enable the LAPIC in x2APIC mode.
	 * The following sequence of MSR writes works irrespective of the
	 * LAPIC state left behind by the BIOS.
	 */
	/* Step 1: Enable the LAPIC in xAPIC mode */
	base.fields.xAPIC_enable = 1U;
	msr_write(MSR_IA32_APIC_BASE, base.value);
	/* Step 2: Enable the LAPIC in x2APIC mode */
	base.fields.x2APIC_enable = 1U;
	msr_write(MSR_IA32_APIC_BASE, base.value);

	/* Mask all LVT entries by clearing the APIC software-enable bit in the SVR. */
	msr_write(MSR_IA32_EXT_APIC_SIVR, 0UL);

	/* Software-enable the local APIC */
	/* TODO: add spurious-interrupt handler */
	msr_write(MSR_IA32_EXT_APIC_SIVR, APIC_SVR_ENABLE | APIC_SVR_VECTOR);

	/* Ensure there are no ISR bits set. */
	clear_lapic_isr();
}

void init_lapic(uint16_t pcpu_id)
{
	/* This cannot be done in early_init_lapic() because the logical ID
	 * has not been updated yet at that point.
	 */
	per_cpu(lapic_ldr, pcpu_id) = (uint32_t) msr_read(MSR_IA32_EXT_APIC_LDR);
}

static void save_lapic(struct lapic_regs *regs)
{
	regs->tpr.v = (uint32_t) msr_read(MSR_IA32_EXT_APIC_TPR);
	regs->ppr.v = (uint32_t) msr_read(MSR_IA32_EXT_APIC_PPR);
	regs->tmr[0].v = (uint32_t) msr_read(MSR_IA32_EXT_APIC_TMR0);
	regs->tmr[1].v = (uint32_t) msr_read(MSR_IA32_EXT_APIC_TMR1);
	regs->tmr[2].v = (uint32_t) msr_read(MSR_IA32_EXT_APIC_TMR2);
	regs->tmr[3].v = (uint32_t) msr_read(MSR_IA32_EXT_APIC_TMR3);
	regs->tmr[4].v = (uint32_t) msr_read(MSR_IA32_EXT_APIC_TMR4);
	regs->tmr[5].v = (uint32_t) msr_read(MSR_IA32_EXT_APIC_TMR5);
	regs->tmr[6].v = (uint32_t) msr_read(MSR_IA32_EXT_APIC_TMR6);
	regs->tmr[7].v = (uint32_t) msr_read(MSR_IA32_EXT_APIC_TMR7);
	regs->svr.v = (uint32_t) msr_read(MSR_IA32_EXT_APIC_SIVR);
	regs->lvt[APIC_LVT_TIMER].v =
		(uint32_t) msr_read(MSR_IA32_EXT_APIC_LVT_TIMER);
	regs->lvt[APIC_LVT_LINT0].v =
		(uint32_t) msr_read(MSR_IA32_EXT_APIC_LVT_LINT0);
	regs->lvt[APIC_LVT_LINT1].v =
		(uint32_t) msr_read(MSR_IA32_EXT_APIC_LVT_LINT1);
	regs->lvt[APIC_LVT_ERROR].v =
		(uint32_t) msr_read(MSR_IA32_EXT_APIC_LVT_ERROR);
	regs->icr_timer.v = (uint32_t) msr_read(MSR_IA32_EXT_APIC_INIT_COUNT);
	regs->ccr_timer.v = (uint32_t) msr_read(MSR_IA32_EXT_APIC_CUR_COUNT);
	regs->dcr_timer.v =
		(uint32_t) msr_read(MSR_IA32_EXT_APIC_DIV_CONF);
}

static void restore_lapic(const struct lapic_regs *regs)
{
	msr_write(MSR_IA32_EXT_APIC_TPR, (uint64_t) regs->tpr.v);
	msr_write(MSR_IA32_EXT_APIC_SIVR, (uint64_t) regs->svr.v);
	msr_write(MSR_IA32_EXT_APIC_LVT_TIMER,
			(uint64_t) regs->lvt[APIC_LVT_TIMER].v);

	msr_write(MSR_IA32_EXT_APIC_LVT_LINT0,
			(uint64_t) regs->lvt[APIC_LVT_LINT0].v);
	msr_write(MSR_IA32_EXT_APIC_LVT_LINT1,
			(uint64_t) regs->lvt[APIC_LVT_LINT1].v);

	msr_write(MSR_IA32_EXT_APIC_LVT_ERROR,
			(uint64_t) regs->lvt[APIC_LVT_ERROR].v);
	msr_write(MSR_IA32_EXT_APIC_INIT_COUNT, (uint64_t) regs->icr_timer.v);
	msr_write(MSR_IA32_EXT_APIC_DIV_CONF, (uint64_t) regs->dcr_timer.v);
}

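/* Save the local APIC context and software-disable it; resume_lapic() restores the state. */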
void suspend_lapic(void)
{
	uint64_t val;

	saved_lapic_base_msr.value = msr_read(MSR_IA32_APIC_BASE);
	save_lapic(&saved_lapic_regs);

	/* Software-disable the APIC by clearing the SVR enable bit */
	val = msr_read(MSR_IA32_EXT_APIC_SIVR);
	val = (~(uint64_t)APIC_SVR_ENABLE) & val;
	msr_write(MSR_IA32_EXT_APIC_SIVR, val);
}

void resume_lapic(void)
{
	msr_write(MSR_IA32_APIC_BASE, saved_lapic_base_msr.value);

	/* The APIC software-enable flag is restored along with the SVR */
	restore_lapic(&saved_lapic_regs);
}

void send_lapic_eoi(void)
{
	msr_write(MSR_IA32_EXT_APIC_EOI, 0U);
}

uint32_t get_cur_lapic_id(void)
{
	uint32_t lapic_id;

	lapic_id = (uint32_t) msr_read(MSR_IA32_EXT_XAPICID);

	return lapic_id;
}

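/*
 * Bring up the AP identified by dest_pcpu_id using the INIT-SIPI-SIPI sequence.
 * cpu_startup_start_address is the physical address of the secondary startup
 * code; only its page number fits into the 8-bit Startup IPI vector, so it
 * must be 4 KiB aligned and located below 1 MiB.
 */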
void
send_startup_ipi(uint16_t dest_pcpu_id, uint64_t cpu_startup_start_address)
{
	union apic_icr icr;
	struct cpuinfo_x86 *cpu_info = get_pcpu_info();

	icr.value = 0U;
	icr.value_32.hi_32 = per_cpu(lapic_id, dest_pcpu_id);

	/* Assert INIT IPI */
	icr.bits.destination_mode = INTR_LAPIC_ICR_PHYSICAL;
	icr.bits.shorthand = INTR_LAPIC_ICR_USE_DEST_ARRAY;
	icr.bits.delivery_mode = INTR_LAPIC_ICR_INIT;
	msr_write(MSR_IA32_EXT_APIC_ICR, icr.value);

	/* Wait 10 ms for the INIT sequence to complete on older processors.
	 * The BWG states that a delay between the INIT IPI and the first
	 * Startup IPI cannot be avoided, so for modern processors
	 * (display family 6) a 10 us delay is used.
	 */
	if (cpu_info->displayfamily != 6U) {
		/* delay 10 ms */
		udelay(10000U);
	} else {
		udelay(10U); /* 10 us is enough for modern processors */
	}

	/* Send the Startup IPI carrying the page number of the secondary reset code */
	icr.value_32.lo_32 = 0U;
	icr.bits.shorthand = INTR_LAPIC_ICR_USE_DEST_ARRAY;
	icr.bits.delivery_mode = INTR_LAPIC_ICR_STARTUP;
	icr.bits.vector = (uint8_t)(cpu_startup_start_address >> 12U);
	msr_write(MSR_IA32_EXT_APIC_ICR, icr.value);

	if (cpu_info->displayfamily == 6U) {
		udelay(10U); /* 10 us is enough for modern processors */
	} else {
		udelay(200U); /* 200 us for older processors */
	}

	/* Send a second Startup IPI, as required by the Intel architecture specification */
	msr_write(MSR_IA32_EXT_APIC_ICR, icr.value);
}

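/* Send a fixed-mode IPI with the given vector to every pcpu whose bit is set in dest_mask. */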
void send_dest_ipi_mask(uint32_t dest_mask, uint32_t vector)
{
	uint16_t pcpu_id;
	uint32_t mask = dest_mask;

	pcpu_id = ffs64(mask);
	while (pcpu_id < MAX_PCPU_NUM) {
		bitmap32_clear_nolock(pcpu_id, &mask);
		send_single_ipi(pcpu_id, vector);
		pcpu_id = ffs64(mask);
	}
}

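/*
 * Send a fixed-mode IPI with the given vector to one active pcpu. The x2APIC
 * SELF IPI MSR is used when the target is the calling pcpu; otherwise the IPI
 * goes out through the ICR in physical destination mode.
 */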
void send_single_ipi(uint16_t pcpu_id, uint32_t vector)
{
	union apic_icr icr;

	if (is_pcpu_active(pcpu_id)) {
		if (get_pcpu_id() == pcpu_id) {
			msr_write(MSR_IA32_EXT_APIC_SELF_IPI, vector);
		} else {
			/* Set the destination field to the target processor. */
			icr.value_32.hi_32 = per_cpu(lapic_id, pcpu_id);

			/* Fixed delivery mode, physical destination mode, and the vector. */
			icr.value_32.lo_32 = vector | (INTR_LAPIC_ICR_PHYSICAL << 11U);

			msr_write(MSR_IA32_EXT_APIC_ICR, icr.value);
		}
	} else {
		pr_err("pcpu_id %d is not active!", pcpu_id);
	}
}

/**
 * @pre pcpu_id < MAX_PCPU_NUM
 */
void send_single_init(uint16_t pcpu_id)
{
	union apic_icr icr;

	/*
	 * Intel SDM Vol3 23.8:
	 *   The INIT signal is blocked whenever a logical processor is in VMX root operation.
	 *   It is not blocked in VMX non-root operation. Instead, INITs cause VM exits.
	 */

	icr.value_32.hi_32 = per_cpu(lapic_id, pcpu_id);
	icr.value_32.lo_32 = (INTR_LAPIC_ICR_PHYSICAL << 11U) | (INTR_LAPIC_ICR_INIT << 8U);

	msr_write(MSR_IA32_EXT_APIC_ICR, icr.value);
}

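/*
 * Interrupt the target pcpu using its configured kick mode: either an INIT
 * signal (which causes a VM exit when the pcpu is in VMX non-root operation,
 * see send_single_init() above) or a regular notification IPI.
 */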
void kick_pcpu(uint16_t pcpu_id)
{
	if (per_cpu(mode_to_kick_pcpu, pcpu_id) == DEL_MODE_INIT) {
		send_single_init(pcpu_id);
	} else {
		send_single_ipi(pcpu_id, NOTIFY_VCPU_VECTOR);
	}
}