/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 */
#ifndef _ASM_POWERPC_HW_IRQ_H
#define _ASM_POWERPC_HW_IRQ_H

#ifdef __KERNEL__

#include <linux/errno.h>
#include <linux/compiler.h>
#include <asm/ptrace.h>
#include <asm/processor.h>

#ifdef CONFIG_PPC64

/*
 * PACA flags in paca->irq_happened.
 *
 * These bits are set when interrupts occur while soft-disabled
 * and allow a proper replay.
 *
 * PACA_IRQ_HARD_DIS is set whenever we hard disable. It is almost
 * always in sync with the MSR[EE] state, except:
 * - A window in interrupt entry, where hardware disables MSR[EE] and that
 *   must be "reconciled" with the soft mask state.
 * - NMI interrupts that hit in awkward places, until they fix the state.
 * - When local irqs are being enabled and state is being fixed up.
 * - When returning from an interrupt there are some windows where this
 *   can become out of sync, but gets fixed before the RFI or before
 *   executing the next user instruction (see arch/powerpc/kernel/interrupt.c).
 */
#define PACA_IRQ_HARD_DIS	0x01
#define PACA_IRQ_DBELL		0x02
#define PACA_IRQ_EE		0x04
#define PACA_IRQ_DEC		0x08 /* Or FIT */
#define PACA_IRQ_HMI		0x10
#define PACA_IRQ_PMI		0x20
#define PACA_IRQ_REPLAYING	0x40

/*
 * Some soft-masked interrupts must be hard masked until they are replayed
 * (e.g., because the soft-masked handler does not clear the exception).
 * Interrupt replay itself must remain hard masked too.
 */
#ifdef CONFIG_PPC_BOOK3S
#define PACA_IRQ_MUST_HARD_MASK	(PACA_IRQ_EE|PACA_IRQ_PMI|PACA_IRQ_REPLAYING)
#else
#define PACA_IRQ_MUST_HARD_MASK	(PACA_IRQ_EE|PACA_IRQ_REPLAYING)
#endif

#endif /* CONFIG_PPC64 */

/*
 * flags for paca->irq_soft_mask
 */
#define IRQS_ENABLED		0
#define IRQS_DISABLED		1 /* local_irq_disable() interrupts */
#define IRQS_PMI_DISABLED	2
#define IRQS_ALL_DISABLED	(IRQS_DISABLED | IRQS_PMI_DISABLED)
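/*
 * IRQS_ALL_DISABLED is the OR of both disable bits, so e.g.
 * hard_irq_disable() below masks ordinary interrupts and PMIs with a
 * single store to paca->irq_soft_mask.
 */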

#ifndef __ASSEMBLY__

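/*
 * Low-level helpers that flip MSR[EE]/MSR[RI] (via wrtee on BookE/40x,
 * the special EIE/EID/NRI SPRs on 8xx, or mtmsr(d) elsewhere) without
 * touching the soft-mask state.
 */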
static inline void __hard_irq_enable(void)
{
	if (IS_ENABLED(CONFIG_BOOKE_OR_40x))
		wrtee(MSR_EE);
	else if (IS_ENABLED(CONFIG_PPC_8xx))
		wrtspr(SPRN_EIE);
	else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64))
		__mtmsrd(MSR_EE | MSR_RI, 1);
	else
		mtmsr(mfmsr() | MSR_EE);
}

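/*
 * Hard-disable interrupts: clear MSR[EE], keeping the processor in a
 * recoverable (MSR[RI]) state where the platform has that bit.
 */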
static inline void __hard_irq_disable(void)
{
	if (IS_ENABLED(CONFIG_BOOKE_OR_40x))
		wrtee(0);
	else if (IS_ENABLED(CONFIG_PPC_8xx))
		wrtspr(SPRN_EID);
	else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64))
		__mtmsrd(MSR_RI, 1);
	else
		mtmsr(mfmsr() & ~MSR_EE);
}

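/*
 * Clear MSR[EE] and MSR[RI]: any exception taken in this window would
 * be unrecoverable.
 */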
static inline void __hard_EE_RI_disable(void)
{
	if (IS_ENABLED(CONFIG_BOOKE_OR_40x))
		wrtee(0);
	else if (IS_ENABLED(CONFIG_PPC_8xx))
		wrtspr(SPRN_NRI);
	else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64))
		__mtmsrd(0, 1);
	else
		mtmsr(mfmsr() & ~(MSR_EE | MSR_RI));
}

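/*
 * Re-enable MSR[RI]; nothing to do on BookE/40x, which do not use
 * MSR[RI].
 */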
static inline void __hard_RI_enable(void)
{
	if (IS_ENABLED(CONFIG_BOOKE_OR_40x))
		return;

	if (IS_ENABLED(CONFIG_PPC_8xx))
		wrtspr(SPRN_EID);
	else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64))
		__mtmsrd(MSR_RI, 1);
	else
		mtmsr(mfmsr() | MSR_RI);
}

#ifdef CONFIG_PPC64
#include <asm/paca.h>

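/*
 * Read paca->irq_soft_mask directly via r13, the register that holds
 * the PACA pointer on 64-bit.
 */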
static inline notrace unsigned long irq_soft_mask_return(void)
{
	unsigned long flags;

	asm volatile(
		"lbz %0,%1(13)"
		: "=r" (flags)
		: "i" (offsetof(struct paca_struct, irq_soft_mask)));

	return flags;
}

/*
 * The "memory" clobber acts both as a compiler barrier for the
 * critical section and as a clobber because we changed
 * paca->irq_soft_mask.
 */
static inline notrace void irq_soft_mask_set(unsigned long mask)
{
	/*
	 * The irq mask must always include the STD bit if any are set:
	 * interrupts don't get replayed until the standard interrupt
	 * (local_irq_disable()) is unmasked.
	 *
	 * Other masks must only provide additional masking beyond
	 * the standard, and they are also not replayed until the
	 * standard interrupt becomes unmasked.
	 *
	 * This could be changed, but it will require partial
	 * unmasks to be replayed, among other things. For now, take
	 * the simple approach.
	 */
	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
		WARN_ON(mask && !(mask & IRQS_DISABLED));

	asm volatile(
		"stb %0,%1(13)"
		:
		: "r" (mask),
		  "i" (offsetof(struct paca_struct, irq_soft_mask))
		: "memory");
}

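/*
 * The *_return() variants below install a new mask value and return
 * the previous one, for save/restore-style callers.
 */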
static inline notrace unsigned long irq_soft_mask_set_return(unsigned long mask)
{
	unsigned long flags = irq_soft_mask_return();

	irq_soft_mask_set(mask);

	return flags;
}

static inline notrace unsigned long irq_soft_mask_or_return(unsigned long mask)
{
	unsigned long flags = irq_soft_mask_return();

	irq_soft_mask_set(flags | mask);

	return flags;
}

static inline notrace unsigned long irq_soft_mask_andc_return(unsigned long mask)
{
	unsigned long flags = irq_soft_mask_return();

	irq_soft_mask_set(flags & ~mask);

	return flags;
}

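/*
 * These provide the arch hooks behind the generic local_irq_*() API on
 * 64-bit. An illustrative caller goes through the generic wrappers:
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);
 *	... critical section: interrupts are soft-masked, MSR[EE] may stay on ...
 *	local_irq_restore(flags);
 */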
static inline unsigned long arch_local_save_flags(void)
{
	return irq_soft_mask_return();
}

static inline void arch_local_irq_disable(void)
{
	irq_soft_mask_set(IRQS_DISABLED);
}

extern void arch_local_irq_restore(unsigned long);

static inline void arch_local_irq_enable(void)
{
	arch_local_irq_restore(IRQS_ENABLED);
}

static inline unsigned long arch_local_irq_save(void)
{
	return irq_soft_mask_or_return(IRQS_DISABLED);
}

static inline bool arch_irqs_disabled_flags(unsigned long flags)
{
	return flags & IRQS_DISABLED;
}

static inline bool arch_irqs_disabled(void)
{
	return arch_irqs_disabled_flags(arch_local_save_flags());
}

static inline void set_pmi_irq_pending(void)
{
	/*
	 * Invoked from PMU callback functions to set the PMI bit in the paca.
	 * This has to be called with irqs disabled (via hard_irq_disable()).
	 */
	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
		WARN_ON_ONCE(mfmsr() & MSR_EE);

	get_paca()->irq_happened |= PACA_IRQ_PMI;
}

static inline void clear_pmi_irq_pending(void)
{
	/*
	 * Invoked from PMU callback functions to clear the pending PMI bit
	 * in the paca.
	 */
	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
		WARN_ON_ONCE(mfmsr() & MSR_EE);

	get_paca()->irq_happened &= ~PACA_IRQ_PMI;
}

static inline bool pmi_irq_pending(void)
{
	/*
	 * Invoked from PMU callback functions to check if there is a pending
	 * PMI bit in the paca.
	 */
	if (get_paca()->irq_happened & PACA_IRQ_PMI)
		return true;

	return false;
}

#ifdef CONFIG_PPC_BOOK3S
/*
 * To support disabling and enabling of irqs together with PMIs, a set
 * of new powerpc_local_irq_pmu_save() and powerpc_local_irq_pmu_restore()
 * macros is provided. They are implemented using the generic Linux
 * local_irq_* code from include/linux/irqflags.h.
 */
#define raw_local_irq_pmu_save(flags)					\
	do {								\
		typecheck(unsigned long, flags);			\
		flags = irq_soft_mask_or_return(IRQS_DISABLED |		\
				IRQS_PMI_DISABLED);			\
	} while (0)

#define raw_local_irq_pmu_restore(flags)				\
	do {								\
		typecheck(unsigned long, flags);			\
		arch_local_irq_restore(flags);				\
	} while (0)

#ifdef CONFIG_TRACE_IRQFLAGS
#define powerpc_local_irq_pmu_save(flags)				\
	do {								\
		raw_local_irq_pmu_save(flags);				\
		if (!raw_irqs_disabled_flags(flags))			\
			trace_hardirqs_off();				\
	} while (0)
#define powerpc_local_irq_pmu_restore(flags)				\
	do {								\
		if (!raw_irqs_disabled_flags(flags))			\
			trace_hardirqs_on();				\
		raw_local_irq_pmu_restore(flags);			\
	} while (0)
#else
#define powerpc_local_irq_pmu_save(flags)				\
	do {								\
		raw_local_irq_pmu_save(flags);				\
	} while (0)
#define powerpc_local_irq_pmu_restore(flags)				\
	do {								\
		raw_local_irq_pmu_restore(flags);			\
	} while (0)
#endif /* CONFIG_TRACE_IRQFLAGS */

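/*
 * Illustrative sketch: PMU code that must exclude PMIs as well as
 * ordinary interrupts around a critical section pairs these as
 *
 *	unsigned long flags;
 *
 *	powerpc_local_irq_pmu_save(flags);
 *	... update PMU state ...
 *	powerpc_local_irq_pmu_restore(flags);
 */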
#endif /* CONFIG_PPC_BOOK3S */

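/*
 * hard_irq_disable() disables interrupts at the MSR level as well as
 * setting the full soft mask and PACA_IRQ_HARD_DIS. When transitioning
 * from soft-enabled it also stashes the current stack pointer in
 * paca->saved_r1.
 */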
#define hard_irq_disable()	do {					\
	unsigned long flags;						\
	__hard_irq_disable();						\
	flags = irq_soft_mask_set_return(IRQS_ALL_DISABLED);		\
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;			\
	if (!arch_irqs_disabled_flags(flags)) {				\
		asm volatile("std%X0 %1,%0" : "=m" (local_paca->saved_r1) \
			     : "r" (current_stack_pointer));		\
		trace_hardirqs_off();					\
	}								\
} while (0)

static inline bool __lazy_irq_pending(u8 irq_happened)
{
	return !!(irq_happened & ~PACA_IRQ_HARD_DIS);
}

/*
 * Check if a lazy IRQ is pending. Should be called with IRQs hard disabled.
 */
static inline bool lazy_irq_pending(void)
{
	return __lazy_irq_pending(get_paca()->irq_happened);
}

/*
 * Check if a lazy IRQ is pending, with no debugging checks.
 * Should be called with IRQs hard disabled.
 * For use in RI disabled code or other constrained situations.
 */
static inline bool lazy_irq_pending_nocheck(void)
{
	return __lazy_irq_pending(local_paca->irq_happened);
}

bool power_pmu_wants_prompt_pmi(void);

/*
 * This is called by asynchronous interrupts to check whether to
 * conditionally re-enable hard interrupts after having cleared
 * the source of the interrupt. They are kept disabled if there
 * is a different soft-masked interrupt pending that requires hard
 * masking.
 */
static inline bool should_hard_irq_enable(struct pt_regs *regs)
{
	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
		WARN_ON(irq_soft_mask_return() != IRQS_ALL_DISABLED);
		WARN_ON(!(get_paca()->irq_happened & PACA_IRQ_HARD_DIS));
		WARN_ON(mfmsr() & MSR_EE);
	}

	if (!IS_ENABLED(CONFIG_PERF_EVENTS))
		return false;
	/*
	 * If the PMU is not running, there is not much reason to enable
	 * MSR[EE] in irq handlers because any interrupts would just be
	 * soft-masked.
	 *
	 * TODO: Add test for 64e
	 */
	if (IS_ENABLED(CONFIG_PPC_BOOK3S_64)) {
		if (!power_pmu_wants_prompt_pmi())
			return false;
		/*
		 * If PMIs are disabled then IRQs should be disabled as well,
		 * so we shouldn't see this condition. Check for it anyway,
		 * just in case, because we are about to enable PMIs.
		 */
		if (WARN_ON_ONCE(regs->softe & IRQS_PMI_DISABLED))
			return false;
	}

	if (get_paca()->irq_happened & PACA_IRQ_MUST_HARD_MASK)
		return false;

	return true;
}

/*
 * Do the hard enabling, only call this if should_hard_irq_enable is true.
 * This allows PMI interrupts to profile irq handlers.
 */
static inline void do_hard_irq_enable(void)
{
	/*
	 * Asynch interrupts come in with IRQS_ALL_DISABLED,
	 * PACA_IRQ_HARD_DIS, and MSR[EE]=0.
	 */
	if (IS_ENABLED(CONFIG_PPC_BOOK3S_64))
		irq_soft_mask_andc_return(IRQS_PMI_DISABLED);
	get_paca()->irq_happened &= ~PACA_IRQ_HARD_DIS;
	__hard_irq_enable();
}

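/*
 * Illustrative sketch of the intended pairing (assumed usage) in an
 * asynchronous interrupt handler, once clearing of the interrupt
 * source has been arranged:
 *
 *	if (should_hard_irq_enable(regs))
 *		do_hard_irq_enable();
 */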
static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
{
	return (regs->softe & IRQS_DISABLED);
}

extern bool prep_irq_for_idle(void);
extern bool prep_irq_for_idle_irqsoff(void);
extern void irq_set_pending_from_srr1(unsigned long srr1);

#define fini_irq_for_idle_irqsoff() trace_hardirqs_off();

extern void force_external_irq_replay(void);

static inline void irq_soft_mask_regs_set_state(struct pt_regs *regs, unsigned long val)
{
	regs->softe = val;
}
#else /* CONFIG_PPC64 */

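/*
 * 32-bit has no lazy soft masking: the local_irq_*() primitives map
 * directly onto MSR[EE] (or wrtee on BookE).
 */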
static inline notrace unsigned long irq_soft_mask_return(void)
{
	return 0;
}

static inline unsigned long arch_local_save_flags(void)
{
	return mfmsr();
}

static inline void arch_local_irq_restore(unsigned long flags)
{
	if (IS_ENABLED(CONFIG_BOOKE))
		wrtee(flags);
	else
		mtmsr(flags);
}

static inline unsigned long arch_local_irq_save(void)
{
	unsigned long flags = arch_local_save_flags();

	if (IS_ENABLED(CONFIG_BOOKE))
		wrtee(0);
	else if (IS_ENABLED(CONFIG_PPC_8xx))
		wrtspr(SPRN_EID);
	else
		mtmsr(flags & ~MSR_EE);

	return flags;
}

static inline void arch_local_irq_disable(void)
{
	__hard_irq_disable();
}

static inline void arch_local_irq_enable(void)
{
	__hard_irq_enable();
}

static inline bool arch_irqs_disabled_flags(unsigned long flags)
{
	return (flags & MSR_EE) == 0;
}

static inline bool arch_irqs_disabled(void)
{
	return arch_irqs_disabled_flags(arch_local_save_flags());
}

#define hard_irq_disable()	arch_local_irq_disable()

static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
{
	return !(regs->msr & MSR_EE);
}

static __always_inline bool should_hard_irq_enable(struct pt_regs *regs)
{
	return false;
}

static inline void do_hard_irq_enable(void)
{
	BUILD_BUG();
}

static inline void clear_pmi_irq_pending(void) { }
static inline void set_pmi_irq_pending(void) { }
static inline bool pmi_irq_pending(void) { return false; }

static inline void irq_soft_mask_regs_set_state(struct pt_regs *regs, unsigned long val)
{
}
#endif /* CONFIG_PPC64 */

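/*
 * Write the MSR with an isync, without re-enabling MSR[EE] behind the
 * soft-mask state's back (see the comment in the body).
 */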
static inline unsigned long mtmsr_isync_irqsafe(unsigned long msr)
{
#ifdef CONFIG_PPC64
	if (arch_irqs_disabled()) {
		/*
		 * With soft-masking, MSR[EE] can change from 1 to 0
		 * asynchronously when irqs are disabled, and we don't want to
		 * set MSR[EE] back to 1 here if that has happened. A race-free
		 * way to do this is to ensure EE is already 0. Another way it
		 * could be done is with a RESTART_TABLE handler, but that's
		 * probably overkill here.
		 */
		msr &= ~MSR_EE;
		mtmsr_isync(msr);
		irq_soft_mask_set(IRQS_ALL_DISABLED);
		local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
	} else
#endif
		mtmsr_isync(msr);

	return msr;
}

#define ARCH_IRQ_INIT_FLAGS	IRQ_NOREQUEST

#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_HW_IRQ_H */