/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_IDLE_H
#define _LINUX_SCHED_IDLE_H

#include <linux/sched.h>

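/*
 * Idleness of a CPU as seen by the scheduler's load-balancing paths:
 * they take one of these values to distinguish a busy CPU, an idle CPU,
 * and a CPU whose runqueue just became empty and is trying to pull work
 * (CPU_NEWLY_IDLE).
 */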
enum cpu_idle_type {
	CPU_IDLE,
	CPU_NOT_IDLE,
	CPU_NEWLY_IDLE,
	CPU_MAX_IDLE_TYPES
};

#ifdef CONFIG_SMP
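/*
 * Trigger a reschedule on @cpu if it is currently running its idle task,
 * making it drop out of the idle loop; a no-op otherwise. Without SMP
 * there is no remote CPU to kick, hence the empty stub below.
 */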
extern void wake_up_if_idle(int cpu);
#else
static inline void wake_up_if_idle(int cpu) { }
#endif

/*
 * Idle thread specific functions to determine the need_resched
 * polling state.
 *
 * While TIF_POLLING_NRFLAG is set, the idle task is polling
 * need_resched() itself, so resched_curr() only has to set
 * TIF_NEED_RESCHED and can skip the reschedule IPI.
 */
#ifdef TIF_POLLING_NRFLAG

#ifdef _ASM_GENERIC_BITOPS_INSTRUMENTED_ATOMIC_H

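/*
 * When the instrumented bitops wrappers are in use, call the raw arch_
 * variants directly: this keeps the instrumentation (KASAN and friends)
 * that set_bit()/clear_bit() would add out of the low-level idle path.
 */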
static __always_inline void __current_set_polling(void)
{
	arch_set_bit(TIF_POLLING_NRFLAG,
		     (unsigned long *)(&current_thread_info()->flags));
}

static __always_inline void __current_clr_polling(void)
{
	arch_clear_bit(TIF_POLLING_NRFLAG,
		       (unsigned long *)(&current_thread_info()->flags));
}

#else

static __always_inline void __current_set_polling(void)
{
	set_bit(TIF_POLLING_NRFLAG,
		(unsigned long *)(&current_thread_info()->flags));
}

static __always_inline void __current_clr_polling(void)
{
	clear_bit(TIF_POLLING_NRFLAG,
		  (unsigned long *)(&current_thread_info()->flags));
}

#endif /* _ASM_GENERIC_BITOPS_INSTRUMENTED_ATOMIC_H */

static __always_inline bool __must_check current_set_polling_and_test(void)
{
	__current_set_polling();

	/*
	 * Polling state must be visible before we test NEED_RESCHED,
	 * paired with resched_curr().
	 */
	smp_mb__after_atomic();

	return unlikely(tif_need_resched());
}

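/*
 * Usage sketch (illustrative, loosely modelled on x86's mwait_idle()):
 *
 *	if (!current_set_polling_and_test()) {
 *		__monitor(&current_thread_info()->flags, 0, 0);
 *		if (!need_resched())
 *			__mwait(0, 0);
 *	}
 *	__current_clr_polling();
 *
 * A true return value means a reschedule was already pending by the time
 * the polling bit was set, so the caller must not enter the idle state.
 */
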
static __always_inline bool __must_check current_clr_polling_and_test(void)
{
	__current_clr_polling();

	/*
	 * Polling state must be visible before we test NEED_RESCHED,
	 * paired with resched_curr().
	 */
	smp_mb__after_atomic();

	return unlikely(tif_need_resched());
}

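/*
 * Usage sketch (illustrative, loosely modelled on cpuidle_idle_call() in
 * kernel/sched/idle.c): drop the polling bit before entering a deeper,
 * non-polling idle state, and bail out if a reschedule sneaked in:
 *
 *	if (current_clr_polling_and_test()) {
 *		local_irq_enable();
 *		return;
 *	}
 *	... enter the cpuidle state ...
 */
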
#else
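/*
 * Without TIF_POLLING_NRFLAG there is no polling state to maintain:
 * resched_curr() cannot rely on a polling idle loop noticing
 * TIF_NEED_RESCHED, so only the need_resched() test remains here.
 */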
static inline void __current_set_polling(void) { }
static inline void __current_clr_polling(void) { }

static inline bool __must_check current_set_polling_and_test(void)
{
	return unlikely(tif_need_resched());
}
static inline bool __must_check current_clr_polling_and_test(void)
{
	return unlikely(tif_need_resched());
}
#endif /* TIF_POLLING_NRFLAG */

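/*
 * Leave polling state for good. If TIF_NEED_RESCHED was set while we
 * were polling, no IPI was sent and nothing folded it into the preempt
 * count; do that here so preemption happens promptly.
 */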
static __always_inline void current_clr_polling(void)
{
	__current_clr_polling();

	/*
	 * Ensure we check TIF_NEED_RESCHED after we clear the polling bit.
	 * Once the bit is cleared, we'll get IPIs with every new
	 * TIF_NEED_RESCHED and the IPI handler, scheduler_ipi(), will also
	 * fold it.
	 */
	smp_mb(); /* paired with resched_curr() */

	preempt_fold_need_resched();
}

#endif /* _LINUX_SCHED_IDLE_H */