/*
 * Intel SMP support routines.
 *
 * (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
 * (c) 1998-99, 2000 Ingo Molnar <mingo@redhat.com>
 *
 * This code is released under the GNU General Public License version 2 or
 * later.
 */

#include <xen/irq.h>
#include <xen/sched.h>
#include <xen/delay.h>
#include <xen/perfc.h>
#include <xen/spinlock.h>
#include <asm/current.h>
#include <asm/smp.h>
#include <asm/mc146818rtc.h>
#include <asm/flushtlb.h>
#include <asm/hardirq.h>
#include <asm/hpet.h>
#include <asm/hvm/support.h>
#include <mach_apic.h>

/*
 * send_IPI_mask(cpumask, vector): sends @vector IPI to CPUs in @cpumask,
 * excluding the local CPU. @cpumask may be empty.
 */

void send_IPI_mask(const cpumask_t *mask, int vector)
{
    genapic->send_IPI_mask(mask, vector);
}

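/* send_IPI_self(vector): sends @vector IPI to the local CPU only. */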
void send_IPI_self(int vector)
{
    genapic->send_IPI_self(vector);
}

/*
 * Some notes on x86 processor bugs affecting SMP operation:
 *
 * Pentium, Pentium Pro, II, III (and all CPUs) have bugs.
 * The Linux implications for SMP are handled as follows:
 *
 * Pentium III / [Xeon]
 *   None of the E1AP-E3AP errata are visible to the user.
 *
 *   E1AP.  see PII A1AP
 *   E2AP.  see PII A2AP
 *   E3AP.  see PII A3AP
 *
 * Pentium II / [Xeon]
 *   None of the A1AP-A3AP errata are visible to the user.
 *
 *   A1AP.  see PPro 1AP
 *   A2AP.  see PPro 2AP
 *   A3AP.  see PPro 7AP
 *
 * Pentium Pro
 *   None of 1AP-9AP errata are visible to the normal user,
 *   except occasional delivery of 'spurious interrupt' as trap #15.
 *   This is very rare and a non-problem.
 *
 *   1AP.  Linux maps APIC as non-cacheable
 *   2AP.  worked around in hardware
 *   3AP.  fixed in C0 and above steppings microcode update.
 *         Linux does not use excessive STARTUP_IPIs.
 *   4AP.  worked around in hardware
 *   5AP.  symmetric IO mode (normal Linux operation) not affected.
 *         'noapic' mode has vector 0xf filled out properly.
 *   6AP.  'noapic' mode might be affected - fixed in later steppings
 *   7AP.  We do not assume writes to the LVT deasserting IRQs
 *   8AP.  We do not enable low power mode (deep sleep) during MP bootup
 *   9AP.  We do not use mixed mode
 */

/*
 * The following functions deal with sending IPIs between CPUs.
 */

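/* Build the low word of the ICR: fixed delivery mode, shortcut and vector. */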
static inline int __prepare_ICR (unsigned int shortcut, int vector)
{
    return APIC_DM_FIXED | shortcut | vector;
}

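/* Build the high word of the ICR: the destination field. */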
static inline int __prepare_ICR2 (unsigned int mask)
{
    return SET_xAPIC_DEST_FIELD(mask);
}

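/*
 * Wait for a previously issued ICR-based IPI to be accepted. In x2APIC mode
 * ICR writes carry no delivery-status (busy) flag, so there is nothing to
 * poll for.
 */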
void apic_wait_icr_idle(void)
{
    if ( x2apic_enabled )
        return;

    while ( apic_read( APIC_ICR ) & APIC_ICR_BUSY )
        cpu_relax();
}

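/* Issue an IPI through the legacy xAPIC ICR using a destination shortcut. */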
static void __default_send_IPI_shortcut(unsigned int shortcut, int vector,
                                        unsigned int dest)
{
    unsigned int cfg;

    /*
     * Wait for idle.
     */
    apic_wait_icr_idle();

    /*
     * prepare target chip field
     */
    cfg = __prepare_ICR(shortcut, vector) | dest;
    /*
     * Send the IPI. The write to APIC_ICR fires this off.
     */
    apic_write(APIC_ICR, cfg);
}

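/* Send @vector to the local CPU via the SELF shortcut in the legacy ICR. */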
void send_IPI_self_legacy(uint8_t vector)
{
    __default_send_IPI_shortcut(APIC_DEST_SELF, vector, APIC_DEST_PHYSICAL);
}

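/*
 * Send @vector to every online CPU in @cpumask other than the local one,
 * as a single IPI using flat logical destination mode.
 */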
void send_IPI_mask_flat(const cpumask_t *cpumask, int vector)
{
    unsigned long mask = cpumask_bits(cpumask)[0];
    unsigned long cfg;
    unsigned long flags;

    mask &= cpumask_bits(&cpu_online_map)[0];
    mask &= ~(1UL << smp_processor_id());
    if ( mask == 0 )
        return;

    local_irq_save(flags);

    /*
     * Wait for idle.
     */
    apic_wait_icr_idle();

    /*
     * prepare target chip field
     */
    cfg = __prepare_ICR2(mask);
    apic_write(APIC_ICR2, cfg);

    /*
     * program the ICR
     */
    cfg = __prepare_ICR(0, vector) | APIC_DEST_LOGICAL;

    /*
     * Send the IPI. The write to APIC_ICR fires this off.
     */
    apic_write(APIC_ICR, cfg);

    local_irq_restore(flags);
}

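/*
 * Send @vector to every online CPU in @mask other than the local one, as
 * one unicast IPI per target CPU in physical destination mode.
 */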
void send_IPI_mask_phys(const cpumask_t *mask, int vector)
{
    unsigned long cfg, flags;
    unsigned int query_cpu;

    local_irq_save(flags);

    for_each_cpu ( query_cpu, mask )
    {
        if ( !cpu_online(query_cpu) || (query_cpu == smp_processor_id()) )
            continue;

        /*
         * Wait for idle.
         */
        apic_wait_icr_idle();

        /*
         * prepare target chip field
         */
        cfg = __prepare_ICR2(cpu_physical_id(query_cpu));
        apic_write(APIC_ICR2, cfg);

        /*
         * program the ICR
         */
        cfg = __prepare_ICR(0, vector) | APIC_DEST_PHYSICAL;

        /*
         * Send the IPI. The write to APIC_ICR fires this off.
         */
        apic_write(APIC_ICR, cfg);
    }

    local_irq_restore(flags);
}

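/*
 * State shared between flush_area_mask() and the invalidate_interrupt()
 * handler; writers serialise on flush_lock.
 */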
static DEFINE_SPINLOCK(flush_lock);
static cpumask_t flush_cpumask;
static const void *flush_va;
static unsigned int flush_flags;

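/*
 * Handler for INVALIDATE_TLB_VECTOR: perform the requested flush locally,
 * then signal completion by clearing our bit in flush_cpumask.
 */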
void invalidate_interrupt(struct cpu_user_regs *regs)
{
    unsigned int flags = flush_flags;
    ack_APIC_irq();
    perfc_incr(ipis);
    if ( __sync_local_execstate() )
        flags &= ~(FLUSH_TLB | FLUSH_TLB_GLOBAL);
    flush_area_local(flush_va, flags);
    cpumask_clear_cpu(smp_processor_id(), &flush_cpumask);
}

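/*
 * Flush the area at @va (per @flags) on the CPUs in @mask: the local CPU
 * directly, remote online CPUs via IPI, waiting until the remote flushes
 * have completed.
 */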
void flush_area_mask(const cpumask_t *mask, const void *va, unsigned int flags)
{
    unsigned int cpu = smp_processor_id();

    ASSERT(local_irq_is_enabled());

    if ( cpumask_test_cpu(cpu, mask) )
        flags = flush_area_local(va, flags);

    if ( (flags & ~FLUSH_ORDER_MASK) &&
         !cpumask_subset(mask, cpumask_of(cpu)) )
    {
        spin_lock(&flush_lock);
        cpumask_and(&flush_cpumask, mask, &cpu_online_map);
        cpumask_clear_cpu(cpu, &flush_cpumask);
        flush_va = va;
        flush_flags = flags;
        send_IPI_mask(&flush_cpumask, INVALIDATE_TLB_VECTOR);
        while ( !cpumask_empty(&flush_cpumask) )
            cpu_relax();
        spin_unlock(&flush_lock);
    }
}

/* Call with no locks held and interrupts enabled (e.g., softirq context). */
void new_tlbflush_clock_period(void)
{
    cpumask_t allbutself;

    /* Flush everyone else. We definitely flushed just before entry. */
    cpumask_andnot(&allbutself, &cpu_online_map,
                   cpumask_of(smp_processor_id()));
    flush_mask(&allbutself, FLUSH_TLB);

    /* No need for atomicity: we are the only possible updater. */
    ASSERT(tlbflush_clock == 0);
    tlbflush_clock++;
}

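/*
 * Send EVENT_CHECK_VECTOR to the CPUs in @mask; the handler does no work
 * beyond acknowledging and counting the IPI.
 */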
void smp_send_event_check_mask(const cpumask_t *mask)
{
    send_IPI_mask(mask, EVENT_CHECK_VECTOR);
}

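/*
 * Raise CALL_FUNCTION_VECTOR on the CPUs in @mask. Since send_IPI_mask()
 * never targets the local CPU, run the handler directly (with interrupts
 * disabled) if the local CPU is part of @mask.
 */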
void smp_send_call_function_mask(const cpumask_t *mask)
{
    send_IPI_mask(mask, CALL_FUNCTION_VECTOR);

    if ( cpumask_test_cpu(smp_processor_id(), mask) )
    {
        local_irq_disable();
        smp_call_function_interrupt();
        local_irq_enable();
    }
}

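/*
 * Take the calling CPU down: disable its local APIC, tear down HVM state,
 * reset the FPU and remove it from cpu_online_map. Interrupts must already
 * be disabled.
 */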
void __stop_this_cpu(void)
{
    ASSERT(!local_irq_is_enabled());

    disable_local_APIC();

    hvm_cpu_down();

    /*
     * Clear FPU, zapping any pending exceptions. Needed for warm reset with
     * some BIOSes.
     */
    clts();
    asm volatile ( "fninit" );

    cpumask_clear_cpu(smp_processor_id(), &cpu_online_map);
}

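/* IPI callback: take this CPU offline and halt forever. */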
static void stop_this_cpu(void *dummy)
{
    __stop_this_cpu();
    for ( ; ; )
        halt();
}

/*
 * Stop all CPUs and turn off local APICs and the IO-APIC, so other OSs see a
 * clean IRQ state.
 */
void smp_send_stop(void)
{
    int timeout = 10;

    local_irq_disable();
    fixup_irqs(cpumask_of(smp_processor_id()), 0);
    local_irq_enable();

    smp_call_function(stop_this_cpu, NULL, 0);

    /* Wait 10ms for all other CPUs to go offline. */
    while ( (num_online_cpus() > 1) && (timeout-- > 0) )
        mdelay(1);

    local_irq_disable();
    disable_IO_APIC();
    hpet_disable();
    __stop_this_cpu();
    local_irq_enable();
}

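/*
 * NMI every online CPU except ourselves; send_IPI_mask() already excludes
 * the local CPU, so cpu_online_map can be passed as-is.
 */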
void smp_send_nmi_allbutself(void)
{
    send_IPI_mask(&cpu_online_map, APIC_DM_NMI);
}

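/* Handler for EVENT_CHECK_VECTOR: just acknowledge and count the IPI. */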
void event_check_interrupt(struct cpu_user_regs *regs)
{
    ack_APIC_irq();
    perfc_incr(ipis);
    this_cpu(irq_count)++;
}

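/* Handler for CALL_FUNCTION_VECTOR: acknowledge the IPI and run the pending
 * cross-CPU function call. */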
void call_function_interrupt(struct cpu_user_regs *regs)
{
    ack_APIC_irq();
    perfc_incr(ipis);
    smp_call_function_interrupt();
}