#include <xen/cpumask.h>
#include <xen/cpu.h>
#include <xen/event.h>
#include <xen/init.h>
#include <xen/sched.h>
#include <xen/stop_machine.h>
#include <xen/rcupdate.h>

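/* Number of usable CPU ids; defaults to the build-time maximum (NR_CPUS). */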
unsigned int __read_mostly nr_cpu_ids = NR_CPUS;
#ifndef nr_cpumask_bits
unsigned int __read_mostly nr_cpumask_bits
    = BITS_TO_LONGS(NR_CPUS) * BITS_PER_LONG;
#endif

const cpumask_t cpumask_all = {
    .bits[0 ... (BITS_TO_LONGS(NR_CPUS) - 1)] = ~0UL
};

/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents all NR_CPUS-bit binary values of 1<<nr.
 *
 * It is used by cpumask_of() to get a constant address to a CPU
 * mask value that has a single bit set only.
 */
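/*
 * Roughly, cpumask_of(cpu) points at cpu_bit_bitmap[1 + cpu % BITS_PER_LONG]
 * and steps back by cpu / BITS_PER_LONG longs, so the single set bit of that
 * row lands at bit position 'cpu' of the returned mask.  The words "backed
 * into" this way all belong to earlier rows and are zero.
 */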

/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x) [(x) + 1][0] = 1UL << (x)
#define MASK_DECLARE_2(x) MASK_DECLARE_1(x), MASK_DECLARE_1((x) + 1)
#define MASK_DECLARE_4(x) MASK_DECLARE_2(x), MASK_DECLARE_2((x) + 2)
#define MASK_DECLARE_8(x) MASK_DECLARE_4(x), MASK_DECLARE_4((x) + 4)

const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

    MASK_DECLARE_8(0), MASK_DECLARE_8(8),
    MASK_DECLARE_8(16), MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
    MASK_DECLARE_8(32), MASK_DECLARE_8(40),
    MASK_DECLARE_8(48), MASK_DECLARE_8(56),
#endif
};

#undef MASK_DECLARE_8
#undef MASK_DECLARE_4
#undef MASK_DECLARE_2
#undef MASK_DECLARE_1

static DEFINE_RWLOCK(cpu_add_remove_lock);

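/*
 * Read side of the hotplug lock: callers which must not race with a CPU
 * coming or going take a reference via get_cpu_maps() and drop it with
 * put_cpu_maps().  get_cpu_maps() fails (returns false) if a hotplug
 * operation is currently in progress.
 */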
bool get_cpu_maps(void)
{
    return read_trylock(&cpu_add_remove_lock);
}

void put_cpu_maps(void)
{
    read_unlock(&cpu_add_remove_lock);
}

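/*
 * Write side of the hotplug lock, bracketing a single CPU up/down
 * operation.  The rcu_barrier() lets pending RCU callbacks complete
 * before the hotplug operation starts.
 */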
void cpu_hotplug_begin(void)
{
    rcu_barrier();
    write_lock(&cpu_add_remove_lock);
}

void cpu_hotplug_done(void)
{
    write_unlock(&cpu_add_remove_lock);
}

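/* Whether the current CPU holds the hotplug (write) lock, i.e. is itself
 * performing a CPU hotplug operation. */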
bool cpu_in_hotplug_context(void)
{
    return rw_is_write_locked_by_me(&cpu_add_remove_lock);
}

static NOTIFIER_HEAD(cpu_chain);

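/*
 * Notifier registration is boot time only (__init), but still takes the
 * hotplug lock to serialise against CPU bring-up already in progress.
 */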
void __init register_cpu_notifier(struct notifier_block *nb)
{
    write_lock(&cpu_add_remove_lock);
    notifier_chain_register(&cpu_chain, nb);
    write_unlock(&cpu_add_remove_lock);
}

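/*
 * Run the CPU notifier chain for 'action'.  On failure the cursor *nb (if
 * non-NULL) records how far the chain got, so the caller can notify the
 * already-called entries of the failure.  Actions which must not fail
 * (nofail) have no way to recover, hence the BUG_ON().
 */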
static int cpu_notifier_call_chain(unsigned int cpu, unsigned long action,
                                   struct notifier_block **nb, bool nofail)
{
    void *hcpu = (void *)(long)cpu;
    int notifier_rc = notifier_call_chain(&cpu_chain, action, hcpu, nb);
    int ret = notifier_to_errno(notifier_rc);

    BUG_ON(ret && nofail);

    return ret;
}

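/*
 * Executed on the CPU going offline, either in stop_machine context or,
 * during early boot / resume, via a synchronous IPI: tell notifiers the
 * CPU is dying, then disable it.
 */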
static void cf_check _take_cpu_down(void *unused)
{
    cpu_notifier_call_chain(smp_processor_id(), CPU_DYING, NULL, true);
    __cpu_disable();
}

static int cf_check take_cpu_down(void *arg)
{
    _take_cpu_down(arg);
    return 0;
}

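/*
 * Take a single CPU offline: CPU_DOWN_PREPARE notification, then
 * take_cpu_down() on the target CPU (via stop_machine, or an IPI while
 * booting/resuming), wait for it to die, and finally CPU_DEAD.  CPU0
 * can never be taken offline.
 */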
int cpu_down(unsigned int cpu)
{
    int err;
    struct notifier_block *nb = NULL;

    cpu_hotplug_begin();

    err = -EINVAL;
    if ( (cpu >= nr_cpu_ids) || (cpu == 0) )
        goto out;

    err = -EEXIST;
    if ( !cpu_online(cpu) )
        goto out;

    err = cpu_notifier_call_chain(cpu, CPU_DOWN_PREPARE, &nb, false);
    if ( err )
        goto fail;

    if ( system_state < SYS_STATE_active || system_state == SYS_STATE_resume )
        on_selected_cpus(cpumask_of(cpu), _take_cpu_down, NULL, true);
    else if ( (err = stop_machine_run(take_cpu_down, NULL, cpu)) < 0 )
        goto fail;

    __cpu_die(cpu);
    err = cpu_online(cpu);
    BUG_ON(err);

    cpu_notifier_call_chain(cpu, CPU_DEAD, NULL, true);

    send_global_virq(VIRQ_PCPU_STATE);
    cpu_hotplug_done();
    return 0;

 fail:
    cpu_notifier_call_chain(cpu, CPU_DOWN_FAILED, &nb, true);
 out:
    cpu_hotplug_done();
    return err;
}

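/*
 * Bring a present but offline CPU online: CPU_UP_PREPARE notification,
 * arch __cpu_up(), then CPU_ONLINE once the new CPU is running.
 */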
int cpu_up(unsigned int cpu)
{
    int err;
    struct notifier_block *nb = NULL;

    cpu_hotplug_begin();

    err = -EINVAL;
    if ( (cpu >= nr_cpu_ids) || !cpu_present(cpu) )
        goto out;

    err = -EEXIST;
    if ( cpu_online(cpu) )
        goto out;

    err = cpu_notifier_call_chain(cpu, CPU_UP_PREPARE, &nb, false);
    if ( err )
        goto fail;

    err = __cpu_up(cpu);
    if ( err < 0 )
        goto fail;

    cpu_notifier_call_chain(cpu, CPU_ONLINE, NULL, true);

    send_global_virq(VIRQ_PCPU_STATE);

    cpu_hotplug_done();
    return 0;

 fail:
    cpu_notifier_call_chain(cpu, CPU_UP_CANCELED, &nb, true);
 out:
    cpu_hotplug_done();
    return err;
}

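/* Called on the incoming CPU itself, early during its bring-up. */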
void notify_cpu_starting(unsigned int cpu)
{
    cpu_notifier_call_chain(cpu, CPU_STARTING, NULL, true);
}

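/* CPUs taken down by disable_nonboot_cpus(), to be revived later. */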
static cpumask_t frozen_cpus;

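/*
 * Take every CPU except the boot CPU (CPU0) offline, recording in
 * frozen_cpus which ones were online, e.g. in preparation for suspend.
 */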
int disable_nonboot_cpus(void)
{
    int cpu, error = 0;

    BUG_ON(smp_processor_id() != 0);

    cpumask_clear(&frozen_cpus);

    printk("Disabling non-boot CPUs ...\n");

    for_each_online_cpu ( cpu )
    {
        if ( cpu == 0 )
            continue;

        if ( (error = cpu_down(cpu)) )
        {
            printk("Error taking CPU%d down: %d\n", cpu, error);
            BUG_ON(error == -EBUSY);
            break;
        }

        __cpumask_set_cpu(cpu, &frozen_cpus);
    }

    BUG_ON(!error && (num_online_cpus() != 1));
    return error;
}

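/*
 * Undo disable_nonboot_cpus(): bring the CPUs in frozen_cpus back online
 * (with CPU parking enabled, other present CPUs are brought up and then
 * re-offlined).  CPUs which could not be revived get a CPU_RESUME_FAILED
 * notification.
 */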
void enable_nonboot_cpus(void)
{
    int cpu, error;

    printk("Enabling non-boot CPUs ...\n");

    for_each_present_cpu ( cpu )
    {
        if ( park_offline_cpus ? cpu == smp_processor_id()
                               : !cpumask_test_cpu(cpu, &frozen_cpus) )
            continue;
        if ( (error = cpu_up(cpu)) )
        {
            printk("Error bringing CPU%d up: %d\n", cpu, error);
            BUG_ON(error == -EBUSY);
        }
        else if ( !__cpumask_test_and_clear_cpu(cpu, &frozen_cpus) &&
                  (error = cpu_down(cpu)) )
            printk("Error re-offlining CPU%d: %d\n", cpu, error);
    }

    for_each_cpu ( cpu, &frozen_cpus )
        cpu_notifier_call_chain(cpu, CPU_RESUME_FAILED, NULL, true);

    cpumask_clear(&frozen_cpus);
}