#include <xen/cpumask.h>
#include <xen/cpu.h>
#include <xen/event.h>
#include <xen/init.h>
#include <xen/sched.h>
#include <xen/stop_machine.h>

unsigned int __read_mostly nr_cpu_ids = NR_CPUS;
#ifndef nr_cpumask_bits
unsigned int __read_mostly nr_cpumask_bits
    = BITS_TO_LONGS(NR_CPUS) * BITS_PER_LONG;
#endif
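
/*
 * nr_cpu_ids starts out as NR_CPUS; arch boot code may lower it once
 * the number of usable CPUs is known (an expectation about callers,
 * not something enforced in this file).
 */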

/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents all NR_CPUS single-bit bitmap values 1<<nr.
 *
 * It is used by cpumask_of() to get a constant address to a CPU
 * mask value that has a single bit set only.
 */

/* cpu_bit_bitmap[0] is all zeroes - so cpumask_of() can back into it. */
#define MASK_DECLARE_1(x) [x+1][0] = 1UL << (x)
#define MASK_DECLARE_2(x) MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x) MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x) MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

    MASK_DECLARE_8(0), MASK_DECLARE_8(8),
    MASK_DECLARE_8(16), MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
    MASK_DECLARE_8(32), MASK_DECLARE_8(40),
    MASK_DECLARE_8(48), MASK_DECLARE_8(56),
#endif
};
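
/*
 * How cpumask_of() uses the table, roughly (the authoritative
 * definition lives in the cpumask header, not here): row
 * 1 + cpu % BITS_PER_LONG holds the single word
 * 1UL << (cpu % BITS_PER_LONG), and stepping the row pointer back by
 * cpu / BITS_PER_LONG words lets the zero words of the preceding rows
 * (ultimately the all-zero row 0) serve as the mask's leading zero
 * words:
 *
 *     const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
 *     return (const cpumask_t *)(p - cpu / BITS_PER_LONG);
 */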

static DEFINE_SPINLOCK(cpu_add_remove_lock);

bool_t get_cpu_maps(void)
{
    return spin_trylock_recursive(&cpu_add_remove_lock);
}

void put_cpu_maps(void)
{
    spin_unlock_recursive(&cpu_add_remove_lock);
}
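
/*
 * Usage sketch for the reader side; do_something() is a hypothetical
 * helper. A caller pairs get_cpu_maps()/put_cpu_maps() to keep the
 * online map stable while walking it:
 *
 *     if ( get_cpu_maps() )
 *     {
 *         unsigned int cpu;
 *
 *         for_each_online_cpu ( cpu )
 *             do_something(cpu);
 *         put_cpu_maps();
 *     }
 *
 * cpu_hotplug_begin()/cpu_hotplug_done() below take the same recursive
 * lock around an entire hotplug operation, serialising it against both
 * other hotplug operations and such readers.
 */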

bool_t cpu_hotplug_begin(void)
{
    return get_cpu_maps();
}

void cpu_hotplug_done(void)
{
    put_cpu_maps();
}

static NOTIFIER_HEAD(cpu_chain);

void __init register_cpu_notifier(struct notifier_block *nb)
{
    if ( !spin_trylock(&cpu_add_remove_lock) )
        BUG(); /* Should never fail as we are called only during boot. */
    notifier_chain_register(&cpu_chain, nb);
    spin_unlock(&cpu_add_remove_lock);
}
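
/*
 * Registration sketch; foo_cpu_callback, foo_nfb and the foo_*()
 * helpers are hypothetical names, but the callback shape matches the
 * notifier calls made in this file:
 *
 *     static int foo_cpu_callback(
 *         struct notifier_block *nfb, unsigned long action, void *hcpu)
 *     {
 *         unsigned int cpu = (unsigned long)hcpu;
 *
 *         switch ( action )
 *         {
 *         case CPU_UP_PREPARE:
 *             return notifier_from_errno(foo_alloc(cpu));
 *         case CPU_DEAD:
 *             foo_free(cpu);
 *             break;
 *         }
 *
 *         return NOTIFY_DONE;
 *     }
 *
 *     static struct notifier_block foo_nfb = {
 *         .notifier_call = foo_cpu_callback
 *     };
 *
 *     ... at boot: register_cpu_notifier(&foo_nfb);
 */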

static int take_cpu_down(void *unused)
{
    void *hcpu = (void *)(long)smp_processor_id();
    int notifier_rc = notifier_call_chain(&cpu_chain, CPU_DYING, hcpu, NULL);
    BUG_ON(notifier_rc != NOTIFY_DONE);
    __cpu_disable();
    return 0;
}
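
/*
 * cpu_down() drives the sequence: CPU_DOWN_PREPARE is sent while the
 * machine is still running; take_cpu_down() above then runs on the
 * target CPU itself via stop_machine_run(), sending CPU_DYING and
 * disabling the CPU; finally CPU_DEAD (or CPU_DOWN_FAILED on error) is
 * sent from the CPU driving the operation.
 */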

int cpu_down(unsigned int cpu)
{
    int err, notifier_rc;
    void *hcpu = (void *)(long)cpu;
    struct notifier_block *nb = NULL;

    if ( !cpu_hotplug_begin() )
        return -EBUSY;

    if ( (cpu >= nr_cpu_ids) || (cpu == 0) || !cpu_online(cpu) )
    {
        cpu_hotplug_done();
        return -EINVAL;
    }

    notifier_rc = notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE, hcpu, &nb);
    if ( notifier_rc != NOTIFY_DONE )
    {
        err = notifier_to_errno(notifier_rc);
        goto fail;
    }

    if ( (err = stop_machine_run(take_cpu_down, NULL, cpu)) < 0 )
        goto fail;

    __cpu_die(cpu);
    BUG_ON(cpu_online(cpu));

    notifier_rc = notifier_call_chain(&cpu_chain, CPU_DEAD, hcpu, NULL);
    BUG_ON(notifier_rc != NOTIFY_DONE);

    send_global_virq(VIRQ_PCPU_STATE);
    cpu_hotplug_done();
    return 0;

 fail:
    notifier_rc = notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED, hcpu, &nb);
    BUG_ON(notifier_rc != NOTIFY_DONE);
    cpu_hotplug_done();
    return err;
}
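
/*
 * cpu_up() mirrors cpu_down(): CPU_UP_PREPARE is sent first, __cpu_up()
 * starts the target CPU (whose bringup path is expected to call
 * notify_cpu_starting() below, raising CPU_STARTING on itself), and
 * CPU_ONLINE (or CPU_UP_CANCELED on failure) follows.
 */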

int cpu_up(unsigned int cpu)
{
    int notifier_rc, err = 0;
    void *hcpu = (void *)(long)cpu;
    struct notifier_block *nb = NULL;

    if ( !cpu_hotplug_begin() )
        return -EBUSY;

    if ( (cpu >= nr_cpu_ids) || cpu_online(cpu) || !cpu_present(cpu) )
    {
        cpu_hotplug_done();
        return -EINVAL;
    }

    notifier_rc = notifier_call_chain(&cpu_chain, CPU_UP_PREPARE, hcpu, &nb);
    if ( notifier_rc != NOTIFY_DONE )
    {
        err = notifier_to_errno(notifier_rc);
        goto fail;
    }

    err = __cpu_up(cpu);
    if ( err < 0 )
        goto fail;

    notifier_rc = notifier_call_chain(&cpu_chain, CPU_ONLINE, hcpu, NULL);
    BUG_ON(notifier_rc != NOTIFY_DONE);

    send_global_virq(VIRQ_PCPU_STATE);

    cpu_hotplug_done();
    return 0;

 fail:
    notifier_rc = notifier_call_chain(&cpu_chain, CPU_UP_CANCELED, hcpu, &nb);
    BUG_ON(notifier_rc != NOTIFY_DONE);
    cpu_hotplug_done();
    return err;
}
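
/*
 * Called from the arch bringup path on the newly started CPU itself,
 * so CPU_STARTING notifiers run on that CPU.
 */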

void notify_cpu_starting(unsigned int cpu)
{
    void *hcpu = (void *)(long)cpu;
    int notifier_rc = notifier_call_chain(
        &cpu_chain, CPU_STARTING, hcpu, NULL);
    BUG_ON(notifier_rc != NOTIFY_DONE);
}
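
/*
 * Suspend/resume support: disable_nonboot_cpus() records in
 * frozen_cpus exactly which CPUs it took down, so enable_nonboot_cpus()
 * brings back those CPUs and no others.
 */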

static cpumask_t frozen_cpus;

int disable_nonboot_cpus(void)
{
    int cpu, error = 0;

    BUG_ON(smp_processor_id() != 0);

    cpumask_clear(&frozen_cpus);

    printk("Disabling non-boot CPUs ...\n");

    for_each_online_cpu ( cpu )
    {
        if ( cpu == 0 )
            continue;

        if ( (error = cpu_down(cpu)) )
        {
            printk("Error taking CPU%d down: %d\n", cpu, error);
            BUG_ON(error == -EBUSY);
            break;
        }

        __cpumask_set_cpu(cpu, &frozen_cpus);
    }

    BUG_ON(!error && (num_online_cpus() != 1));
    return error;
}

void enable_nonboot_cpus(void)
{
    int cpu, error;

    printk("Enabling non-boot CPUs ...\n");

    for_each_cpu ( cpu, &frozen_cpus )
    {
        if ( (error = cpu_up(cpu)) )
        {
            printk("Error bringing CPU%d up: %d\n", cpu, error);
            BUG_ON(error == -EBUSY);
        }
    }

    cpumask_clear(&frozen_cpus);
}