1 #include <xen/cpumask.h>
2 #include <xen/cpu.h>
3 #include <xen/event.h>
4 #include <xen/init.h>
5 #include <xen/sched.h>
6 #include <xen/sections.h>
7 #include <xen/stop_machine.h>
8 #include <xen/rcupdate.h>
9 
/* Highest usable CPU id + 1; defaults to the build-time NR_CPUS limit. */
unsigned int __read_mostly nr_cpu_ids = NR_CPUS;
#ifndef nr_cpumask_bits
/*
 * Effective width of cpumask bitmaps, rounded up to a whole number of
 * unsigned longs so bitmap operations never touch a partial word.
 */
unsigned int __read_mostly nr_cpumask_bits
    = BITS_TO_LONGS(NR_CPUS) * BITS_PER_LONG;
#endif
15 
/* Constant mask with every possible CPU bit set. */
const cpumask_t cpumask_all = {
    .bits[0 ... (BITS_TO_LONGS(NR_CPUS) - 1)] = ~0UL
};
19 
/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents all NR_CPUS bits binary values of 1<<nr.
 *
 * It is used by cpumask_of() to get a constant address to a CPU
 * mask value that has a single bit set only.
 */

/* cpu_bit_bitmap[0] is empty - so we can back into it */
/*
 * Only word [0] of each row needs initialising: row x+1 encodes 1UL << x
 * and x < BITS_PER_LONG, so the set bit always lives in the first word.
 */
#define MASK_DECLARE_1(x) [(x) + 1][0] = 1UL << (x)
#define MASK_DECLARE_2(x) MASK_DECLARE_1(x), MASK_DECLARE_1((x) + 1)
#define MASK_DECLARE_4(x) MASK_DECLARE_2(x), MASK_DECLARE_2((x) + 2)
#define MASK_DECLARE_8(x) MASK_DECLARE_4(x), MASK_DECLARE_4((x) + 4)

const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

    MASK_DECLARE_8(0),  MASK_DECLARE_8(8),
    MASK_DECLARE_8(16), MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
    MASK_DECLARE_8(32), MASK_DECLARE_8(40),
    MASK_DECLARE_8(48), MASK_DECLARE_8(56),
#endif
};

/* The helper macros are only needed for the table above. */
#undef MASK_DECLARE_8
#undef MASK_DECLARE_4
#undef MASK_DECLARE_2
#undef MASK_DECLARE_1
48 
/*
 * Serialises CPU hotplug: readers (get_cpu_maps) merely exclude concurrent
 * online/offline operations, the writer (cpu_hotplug_begin) performs them.
 */
static DEFINE_RWLOCK(cpu_add_remove_lock);
50 
get_cpu_maps(void)51 bool get_cpu_maps(void)
52 {
53     return read_trylock(&cpu_add_remove_lock);
54 }
55 
/* Release the hotplug exclusion taken by a successful get_cpu_maps(). */
void put_cpu_maps(void)
{
    read_unlock(&cpu_add_remove_lock);
}
60 
/*
 * Enter a CPU hotplug operation, excluding all get_cpu_maps() users.
 * The rcu_barrier() runs before taking the write lock; presumably this
 * flushes outstanding RCU callbacks that might themselves need the CPU
 * maps - NOTE(review): confirm the exact deadlock being avoided.
 */
void cpu_hotplug_begin(void)
{
    rcu_barrier();
    write_lock(&cpu_add_remove_lock);
}
66 
/* Leave a CPU hotplug operation started by cpu_hotplug_begin(). */
void cpu_hotplug_done(void)
{
    write_unlock(&cpu_add_remove_lock);
}
71 
/*
 * Report whether the current CPU is inside a hotplug operation, i.e.
 * whether it holds cpu_add_remove_lock for writing.
 */
bool cpu_in_hotplug_context(void)
{
    return rw_is_write_locked_by_me(&cpu_add_remove_lock);
}
76 
/* Notifier chain invoked for every CPU state transition (CPU_* actions). */
static NOTIFIER_HEAD(cpu_chain);
78 
/*
 * Add @nb to the CPU notifier chain.  __init: registration is only
 * supported during boot, under the hotplug lock to serialise against
 * any concurrent chain traversal.
 */
void __init register_cpu_notifier(struct notifier_block *nb)
{
    write_lock(&cpu_add_remove_lock);
    notifier_chain_register(&cpu_chain, nb);
    write_unlock(&cpu_add_remove_lock);
}
85 
/*
 * Invoke the CPU notifier chain for @action on behalf of @cpu.
 *
 * @nb, when non-NULL, lets the caller learn where a partial traversal
 * stopped so a later call can resume/unwind from that point.
 * @nofail: the caller cannot tolerate failure; any error is fatal.
 *
 * Returns 0 or a -errno value derived from the first failing notifier.
 */
static int cpu_notifier_call_chain(unsigned int cpu, unsigned long action,
                                   struct notifier_block **nb, bool nofail)
{
    int rc;

    rc = notifier_to_errno(notifier_call_chain(&cpu_chain, action,
                                               (void *)(long)cpu, nb));

    BUG_ON(nofail && rc);

    return rc;
}
97 
/*
 * Executed on the CPU being offlined: run the (mandatory) CPU_DYING
 * notifications, then remove the CPU from service via __cpu_disable().
 */
static void cf_check _take_cpu_down(void *unused)
{
    cpu_notifier_call_chain(smp_processor_id(), CPU_DYING, NULL, true);
    __cpu_disable();
}
103 
/* stop_machine_run() adapter around _take_cpu_down(); always succeeds. */
static int cf_check take_cpu_down(void *arg)
{
    _take_cpu_down(arg);
    return 0;
}
109 
/*
 * Take @cpu offline.
 *
 * Returns 0 on success, -EINVAL for an invalid/boot CPU, -EEXIST if the
 * CPU is already offline, or an error from a CPU_DOWN_PREPARE notifier /
 * stop_machine_run().
 */
int cpu_down(unsigned int cpu)
{
    int err;
    struct notifier_block *nb = NULL;

    cpu_hotplug_begin();

    /* CPU0 (the boot CPU) can never be taken offline. */
    err = -EINVAL;
    if ( (cpu >= nr_cpu_ids) || (cpu == 0) )
        goto out;

    err = -EEXIST;
    if ( !cpu_online(cpu) )
        goto out;

    /* Give subsystems a chance to veto; &nb records how far we got. */
    err = cpu_notifier_call_chain(cpu, CPU_DOWN_PREPARE, &nb, false);
    if ( err )
        goto fail;

    /*
     * Before the system is fully up (or during resume) a plain IPI to the
     * target CPU suffices; otherwise all CPUs must be brought to a
     * controlled state via stop_machine.
     */
    if ( system_state < SYS_STATE_active || system_state == SYS_STATE_resume )
        on_selected_cpus(cpumask_of(cpu), _take_cpu_down, NULL, true);
    else if ( (err = stop_machine_run(take_cpu_down, NULL, cpu)) < 0 )
        goto fail;

    __cpu_die(cpu);
    /* The CPU must have left the online map by now. */
    err = cpu_online(cpu);
    BUG_ON(err);

    cpu_notifier_call_chain(cpu, CPU_DEAD, NULL, true);

    send_global_virq(VIRQ_PCPU_STATE);
    cpu_hotplug_done();
    return 0;

 fail:
    /* Unwind the notifiers which had already seen CPU_DOWN_PREPARE. */
    cpu_notifier_call_chain(cpu, CPU_DOWN_FAILED, &nb, true);
 out:
    cpu_hotplug_done();
    return err;
}
150 
/*
 * Bring @cpu online.
 *
 * Returns 0 on success, -EINVAL for an invalid/absent CPU, -EEXIST if it
 * is already online, or an error from a CPU_UP_PREPARE notifier /
 * __cpu_up().
 */
int cpu_up(unsigned int cpu)
{
    int err;
    struct notifier_block *nb = NULL;

    cpu_hotplug_begin();

    err = -EINVAL;
    if ( (cpu >= nr_cpu_ids) || !cpu_present(cpu) )
        goto out;

    err = -EEXIST;
    if ( cpu_online(cpu) )
        goto out;

    /* Let subsystems allocate per-CPU state; &nb records how far we got. */
    err = cpu_notifier_call_chain(cpu, CPU_UP_PREPARE, &nb, false);
    if ( err )
        goto fail;

    err = __cpu_up(cpu);
    if ( err < 0 )
        goto fail;

    cpu_notifier_call_chain(cpu, CPU_ONLINE, NULL, true);

    send_global_virq(VIRQ_PCPU_STATE);

    cpu_hotplug_done();
    return 0;

 fail:
    /* Unwind the notifiers which had already seen CPU_UP_PREPARE. */
    cpu_notifier_call_chain(cpu, CPU_UP_CANCELED, &nb, true);
 out:
    cpu_hotplug_done();
    return err;
}
187 
/* Run the (mandatory) CPU_STARTING notifications for a coming-up CPU. */
void notify_cpu_starting(unsigned int cpu)
{
    cpu_notifier_call_chain(cpu, CPU_STARTING, NULL, true);
}
192 
/* CPUs offlined by disable_nonboot_cpus(), to be revived on resume. */
static cpumask_t frozen_cpus;
194 
/*
 * Offline every online CPU except CPU0, recording the victims in
 * frozen_cpus so enable_nonboot_cpus() can restore them.  Returns 0 on
 * success or the first cpu_down() error (leaving later CPUs online).
 */
int disable_nonboot_cpus(void)
{
    int cpu, error = 0;

    /* Only the boot CPU may orchestrate suspending the others. */
    BUG_ON(smp_processor_id() != 0);

    cpumask_clear(&frozen_cpus);

    printk("Disabling non-boot CPUs ...\n");

    for_each_online_cpu ( cpu )
    {
        if ( cpu == 0 )
            continue;

        if ( (error = cpu_down(cpu)) )
        {
            printk("Error taking CPU%d down: %d\n", cpu, error);
            /* -EBUSY would indicate an unexpected concurrent hotplug op. */
            BUG_ON(error == -EBUSY);
            break;
        }

        /* Remember this CPU so it is brought back up on resume. */
        __cpumask_set_cpu(cpu, &frozen_cpus);
    }

    BUG_ON(!error && (num_online_cpus() != 1));
    return error;
}
223 
/*
 * Re-online the CPUs recorded in frozen_cpus by disable_nonboot_cpus().
 * With park_offline_cpus, every present CPU (bar the current one) is
 * cycled through cpu_up(), and those not in frozen_cpus - i.e. parked
 * before suspend - are offlined again afterwards.
 */
void enable_nonboot_cpus(void)
{
    int cpu, error;

    printk("Enabling non-boot CPUs  ...\n");

    for_each_present_cpu ( cpu )
    {
        if ( park_offline_cpus ? cpu == smp_processor_id()
                               : !cpumask_test_cpu(cpu, &frozen_cpus) )
            continue;
        if ( (error = cpu_up(cpu)) )
        {
            printk("Error bringing CPU%d up: %d\n", cpu, error);
            BUG_ON(error == -EBUSY);
        }
        /* CPU came up but wasn't frozen: it was parked, so re-offline it. */
        else if ( !__cpumask_test_and_clear_cpu(cpu, &frozen_cpus) &&
                  (error = cpu_down(cpu)) )
            printk("Error re-offlining CPU%d: %d\n", cpu, error);
    }

    /* Anything still in frozen_cpus failed to come back up. */
    for_each_cpu ( cpu, &frozen_cpus )
        cpu_notifier_call_chain(cpu, CPU_RESUME_FAILED, NULL, true);

    cpumask_clear(&frozen_cpus);
}
250