#ifndef __XEN_CPUMASK_H
#define __XEN_CPUMASK_H

/*
 * Cpumasks provide a bitmap suitable for representing the
 * set of CPUs in a system, one bit position per CPU number.
 *
 * See detailed comments in the file xen/bitmap.h describing the
 * data type on which these cpumasks are based.
 *
 * For details of cpumask_scnprintf() and cpulist_scnprintf(),
 * see bitmap_scnprintf() and bitmap_scnlistprintf() in lib/bitmap.c.
 *
 * The available cpumask operations are:
 *
 * void cpumask_set_cpu(cpu, mask)             turn on bit 'cpu' in mask
 * void cpumask_clear_cpu(cpu, mask)           turn off bit 'cpu' in mask
 * void cpumask_setall(mask)                   set all bits
 * void cpumask_clear(mask)                    clear all bits
 * int cpumask_test_cpu(cpu, mask)             true iff bit 'cpu' set in mask
 * int cpumask_test_and_set_cpu(cpu, mask)     test and set bit 'cpu' in mask
 * int cpumask_test_and_clear_cpu(cpu, mask)   test and clear bit 'cpu' in mask
 *
 * void cpumask_and(dst, src1, src2)           dst = src1 & src2  [intersection]
 * void cpumask_or(dst, src1, src2)            dst = src1 | src2  [union]
 * void cpumask_xor(dst, src1, src2)           dst = src1 ^ src2
 * void cpumask_andnot(dst, src1, src2)        dst = src1 & ~src2
 * void cpumask_complement(dst, src)           dst = ~src
 *
 * int cpumask_equal(mask1, mask2)             Does mask1 == mask2?
 * int cpumask_intersects(mask1, mask2)        Do mask1 and mask2 intersect?
 * int cpumask_subset(mask1, mask2)            Is mask1 a subset of mask2?
 * int cpumask_empty(mask)                     Is mask empty (no bits set)?
 * int cpumask_full(mask)                      Is mask full (all bits set)?
 * int cpumask_weight(mask)                    Hamming weight - number of set bits
 *
 * void cpumask_shift_right(dst, src, n)       Shift right
 * void cpumask_shift_left(dst, src, n)        Shift left
 *
 * int cpumask_first(mask)                     Number of lowest set bit, or nr_cpu_ids
 * int cpumask_next(cpu, mask)                 Next cpu past 'cpu', or nr_cpu_ids
 * int cpumask_last(mask)                      Number of highest set bit, or nr_cpu_ids
 * int cpumask_any(mask)                       Any cpu in mask, or nr_cpu_ids
 * int cpumask_cycle(cpu, mask)                Next cpu cycling from 'cpu', or nr_cpu_ids
 *
 * const cpumask_t *cpumask_of(cpu)            Return cpumask with bit 'cpu' set
 * unsigned long *cpumask_bits(mask)           Array of unsigned longs in mask
 *
 * int cpumask_scnprintf(buf, len, mask)       Format cpumask for printing
 * int cpulist_scnprintf(buf, len, mask)       Format cpumask as list for printing
 *
 * for_each_cpu(cpu, mask)                     for-loop cpu over mask
 *
 * int num_online_cpus()                       Number of online CPUs
 * int num_possible_cpus()                     Number of all possible CPUs
 * int num_present_cpus()                      Number of present CPUs
 *
 * int cpu_online(cpu)                         Is this cpu online?
 * int cpu_possible(cpu)                       Is this cpu possible?
 * int cpu_present(cpu)                        Is this cpu present (can schedule)?
 *
 * for_each_possible_cpu(cpu)                  for-loop cpu over cpu_possible_map
 * for_each_online_cpu(cpu)                    for-loop cpu over cpu_online_map
 * for_each_present_cpu(cpu)                   for-loop cpu over cpu_present_map
 *
 * Subtlety:
 * 1) The 'type-checked' form of cpumask_test_cpu() causes gcc (3.3.2, anyway)
 *    to generate slightly worse code.  Note for example the additional
 *    40 lines of assembly code compiling the "for each possible cpu"
 *    loops buried in the disk_stat_read() macro calls when compiling
 *    drivers/block/genhd.c (arch i386, CONFIG_SMP=y).  So use a simple
 *    one-line #define for cpumask_test_cpu(), instead of wrapping an inline
 *    inside a macro, the way we do the other calls.
 */
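
/*
 * A minimal usage sketch (an illustration only: 'mask', 'cpu' and the
 * do_work() helper are hypothetical, not part of this API):
 *
 *     cpumask_t mask;
 *     unsigned int cpu;
 *
 *     cpumask_clear(&mask);
 *     cpumask_set_cpu(2, &mask);
 *     cpumask_set_cpu(5, &mask);
 *     for_each_cpu(cpu, &mask)
 *         do_work(cpu);
 *
 * The loop visits cpus 2 and 5, in increasing order.
 */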

#include <xen/bitmap.h>
#include <xen/kernel.h>
#include <xen/random.h>

typedef struct cpumask { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t;

extern unsigned int nr_cpu_ids;

#if NR_CPUS > 4 * BITS_PER_LONG
/* Assuming NR_CPUS is huge, a runtime limit is more efficient.  Also,
 * not all bits may be allocated. */
extern unsigned int nr_cpumask_bits;
#else
# define nr_cpumask_bits (BITS_TO_LONGS(NR_CPUS) * BITS_PER_LONG)
#endif
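
/*
 * A worked example of the static case: with NR_CPUS == 255 and
 * BITS_PER_LONG == 64, BITS_TO_LONGS(255) == 4, so nr_cpumask_bits is
 * 4 * 64 == 256, i.e. the bitmap size rounded up to whole longs.
 */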

/* verify cpu argument to cpumask_* operators */
static inline unsigned int cpumask_check(unsigned int cpu)
{
    ASSERT(cpu < nr_cpu_ids);
    return cpu;
}

static inline void cpumask_set_cpu(int cpu, volatile cpumask_t *dstp)
{
    set_bit(cpumask_check(cpu), dstp->bits);
}

static inline void __cpumask_set_cpu(int cpu, cpumask_t *dstp)
{
    __set_bit(cpumask_check(cpu), dstp->bits);
}

static inline void cpumask_clear_cpu(int cpu, volatile cpumask_t *dstp)
{
    clear_bit(cpumask_check(cpu), dstp->bits);
}

static inline void __cpumask_clear_cpu(int cpu, cpumask_t *dstp)
{
    __clear_bit(cpumask_check(cpu), dstp->bits);
}

static inline void cpumask_setall(cpumask_t *dstp)
{
    bitmap_fill(dstp->bits, nr_cpumask_bits);
}

static inline void cpumask_clear(cpumask_t *dstp)
{
    bitmap_zero(dstp->bits, nr_cpumask_bits);
}

/* No static inline type checking - see Subtlety (1) above. */
#define cpumask_test_cpu(cpu, cpumask) \
    test_bit(cpumask_check(cpu), (cpumask)->bits)

static inline int cpumask_test_and_set_cpu(int cpu, volatile cpumask_t *addr)
{
    return test_and_set_bit(cpumask_check(cpu), addr->bits);
}

static inline int __cpumask_test_and_set_cpu(int cpu, cpumask_t *addr)
{
    return __test_and_set_bit(cpumask_check(cpu), addr->bits);
}

static inline int cpumask_test_and_clear_cpu(int cpu, volatile cpumask_t *addr)
{
    return test_and_clear_bit(cpumask_check(cpu), addr->bits);
}

static inline int __cpumask_test_and_clear_cpu(int cpu, cpumask_t *addr)
{
    return __test_and_clear_bit(cpumask_check(cpu), addr->bits);
}

static inline void cpumask_and(cpumask_t *dstp, const cpumask_t *src1p,
                               const cpumask_t *src2p)
{
    bitmap_and(dstp->bits, src1p->bits, src2p->bits, nr_cpumask_bits);
}

static inline void cpumask_or(cpumask_t *dstp, const cpumask_t *src1p,
                              const cpumask_t *src2p)
{
    bitmap_or(dstp->bits, src1p->bits, src2p->bits, nr_cpumask_bits);
}

static inline void cpumask_xor(cpumask_t *dstp, const cpumask_t *src1p,
                               const cpumask_t *src2p)
{
    bitmap_xor(dstp->bits, src1p->bits, src2p->bits, nr_cpumask_bits);
}

static inline void cpumask_andnot(cpumask_t *dstp, const cpumask_t *src1p,
                                  const cpumask_t *src2p)
{
    bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nr_cpumask_bits);
}

static inline void cpumask_complement(cpumask_t *dstp, const cpumask_t *srcp)
{
    bitmap_complement(dstp->bits, srcp->bits, nr_cpumask_bits);
}

static inline int cpumask_equal(const cpumask_t *src1p,
                                const cpumask_t *src2p)
{
    return bitmap_equal(src1p->bits, src2p->bits, nr_cpu_ids);
}

static inline int cpumask_intersects(const cpumask_t *src1p,
                                     const cpumask_t *src2p)
{
    return bitmap_intersects(src1p->bits, src2p->bits, nr_cpu_ids);
}

static inline int cpumask_subset(const cpumask_t *src1p,
                                 const cpumask_t *src2p)
{
    return bitmap_subset(src1p->bits, src2p->bits, nr_cpu_ids);
}

static inline int cpumask_empty(const cpumask_t *srcp)
{
    return bitmap_empty(srcp->bits, nr_cpu_ids);
}

static inline int cpumask_full(const cpumask_t *srcp)
{
    return bitmap_full(srcp->bits, nr_cpu_ids);
}

static inline int cpumask_weight(const cpumask_t *srcp)
{
    return bitmap_weight(srcp->bits, nr_cpu_ids);
}

static inline void cpumask_copy(cpumask_t *dstp, const cpumask_t *srcp)
{
    bitmap_copy(dstp->bits, srcp->bits, nr_cpumask_bits);
}

static inline void cpumask_shift_right(cpumask_t *dstp,
                                       const cpumask_t *srcp, int n)
{
    bitmap_shift_right(dstp->bits, srcp->bits, n, nr_cpumask_bits);
}

static inline void cpumask_shift_left(cpumask_t *dstp,
                                      const cpumask_t *srcp, int n)
{
    bitmap_shift_left(dstp->bits, srcp->bits, n, nr_cpumask_bits);
}

static inline int cpumask_first(const cpumask_t *srcp)
{
    return min_t(int, nr_cpu_ids, find_first_bit(srcp->bits, nr_cpu_ids));
}

static inline int cpumask_next(int n, const cpumask_t *srcp)
{
    /* -1 is a legal arg here. */
    if (n != -1)
        cpumask_check(n);

    return min_t(int, nr_cpu_ids,
                 find_next_bit(srcp->bits, nr_cpu_ids, n + 1));
}
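
/* Highest set bit in the mask, found by walking all set bits via
 * cpumask_next(); returns nr_cpu_ids if the mask is empty. */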
static inline int cpumask_last(const cpumask_t *srcp)
{
    int cpu, pcpu = nr_cpu_ids;

    for (cpu = cpumask_first(srcp);
         cpu < nr_cpu_ids;
         cpu = cpumask_next(cpu, srcp))
        pcpu = cpu;
    return pcpu;
}

static inline int cpumask_cycle(int n, const cpumask_t *srcp)
{
    int nxt = cpumask_next(n, srcp);

    if (nxt == nr_cpu_ids)
        nxt = cpumask_first(srcp);
    return nxt;
}

static inline int cpumask_test_or_cycle(int n, const cpumask_t *srcp)
{
    if ( cpumask_test_cpu(n, srcp) )
        return n;

    return cpumask_cycle(n, srcp);
}
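
/* Return a (pseudo-)randomly chosen CPU from the mask rather than always
 * the first one, so repeated callers spread across the set bits; returns
 * nr_cpu_ids if the mask is empty. */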
static inline unsigned int cpumask_any(const cpumask_t *srcp)
{
    unsigned int cpu = cpumask_first(srcp);
    unsigned int w = cpumask_weight(srcp);

    if ( w > 1 && cpu < nr_cpu_ids )
        for ( w = get_random() % w; w--; )
        {
            unsigned int next = cpumask_next(cpu, srcp);

            if ( next >= nr_cpu_ids )
                break;
            cpu = next;
        }

    return cpu;
}

/*
 * Special-case data structure for "single bit set only" constant CPU masks.
 *
 * We pre-generate all the 64 (or 32) possible bit positions, with enough
 * padding to the left and the right, and return the constant pointer
 * appropriately offset.
 */
extern const unsigned long
    cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)];

static inline const cpumask_t *cpumask_of(unsigned int cpu)
{
    const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
    return (const cpumask_t *)(p - cpu / BITS_PER_LONG);
}
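
/*
 * Worked example of the offset trick (assuming BITS_PER_LONG == 64 and
 * that row r of cpu_bit_bitmap has bit r - 1 set in its first long):
 * for cpu == 70, p points at row 1 + 70 % 64 == 7, whose first long has
 * bit 6 set.  Stepping p back by 70 / 64 == 1 long makes that long word
 * index 1 of the returned mask, i.e. bit 64 + 6 == 70, while the zero
 * padding of the preceding row is read as word 0.
 */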

#define cpumask_bits(maskp) ((maskp)->bits)

static inline int cpumask_scnprintf(char *buf, int len,
                                    const cpumask_t *srcp)
{
    return bitmap_scnprintf(buf, len, srcp->bits, nr_cpu_ids);
}

static inline int cpulist_scnprintf(char *buf, int len,
                                    const cpumask_t *srcp)
{
    return bitmap_scnlistprintf(buf, len, srcp->bits, nr_cpu_ids);
}

/*
 * cpumask_var_t: struct cpumask for stack usage.
 *
 * Oh, the wicked games we play!  In order to make kernel coding a
 * little more difficult, we typedef cpumask_var_t to an array or a
 * pointer: doing &mask on an array is a no-op, so it still works.
 *
 * i.e.
 *     cpumask_var_t tmpmask;
 *     if (!alloc_cpumask_var(&tmpmask))
 *         return -ENOMEM;
 *
 *     ... use 'tmpmask' like a normal struct cpumask * ...
 *
 *     free_cpumask_var(tmpmask);
 */
#if NR_CPUS > 2 * BITS_PER_LONG
#include <xen/xmalloc.h>

typedef cpumask_t *cpumask_var_t;

static inline bool_t alloc_cpumask_var(cpumask_var_t *mask)
{
    *(void **)mask = _xmalloc(nr_cpumask_bits / 8, sizeof(long));
    return *mask != NULL;
}

static inline bool_t zalloc_cpumask_var(cpumask_var_t *mask)
{
    *(void **)mask = _xzalloc(nr_cpumask_bits / 8, sizeof(long));
    return *mask != NULL;
}

static inline void free_cpumask_var(cpumask_var_t mask)
{
    xfree(mask);
}
#else
typedef cpumask_t cpumask_var_t[1];

static inline bool_t alloc_cpumask_var(cpumask_var_t *mask)
{
    return 1;
}

static inline bool_t zalloc_cpumask_var(cpumask_var_t *mask)
{
    cpumask_clear(*mask);
    return 1;
}

static inline void free_cpumask_var(cpumask_var_t mask)
{
}
#endif
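
/*
 * Note the split above: with a large NR_CPUS the mask is heap-allocated
 * to keep stack frames small, while with a small NR_CPUS cpumask_var_t
 * is a one-element on-stack array and allocation is a no-op.  Either way
 * &mask yields a usable cpumask_t *, and only zalloc_cpumask_var()
 * guarantees a zeroed mask.
 */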

#if NR_CPUS > 1
#define for_each_cpu(cpu, mask)           \
    for ((cpu) = cpumask_first(mask);     \
         (cpu) < nr_cpu_ids;              \
         (cpu) = cpumask_next(cpu, mask))
#else /* NR_CPUS == 1 */
#define for_each_cpu(cpu, mask) \
    for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)(mask))
#endif /* NR_CPUS */

/*
 * The following particular system cpumasks and operations manage
 * possible, present and online cpus.  Each of them is a fixed size
 * bitmap of size NR_CPUS.
 *
 *  #ifdef CONFIG_HOTPLUG_CPU
 *     cpu_possible_map - has bit 'cpu' set iff cpu is populatable
 *     cpu_present_map  - has bit 'cpu' set iff cpu is populated
 *     cpu_online_map   - has bit 'cpu' set iff cpu available to scheduler
 *  #else
 *     cpu_possible_map - has bit 'cpu' set iff cpu is populated
 *     cpu_present_map  - copy of cpu_possible_map
 *     cpu_online_map   - has bit 'cpu' set iff cpu available to scheduler
 *  #endif
 *
 * In either case, NR_CPUS is fixed at compile time, as the static
 * size of these bitmaps.  The cpu_possible_map is fixed at boot
 * time, as the set of CPU ids that might ever be plugged in at any
 * time during the life of that system boot.  The cpu_present_map is
 * dynamic(*), representing which CPUs are currently plugged in.  And
 * cpu_online_map is the dynamic subset of cpu_present_map, indicating
 * those CPUs available for scheduling.
 *
 * If HOTPLUG is enabled, then cpu_possible_map is forced to have
 * all NR_CPUS bits set, otherwise it is just the set of CPUs that
 * ACPI reports present at boot.
 *
 * If HOTPLUG is enabled, then cpu_present_map varies dynamically,
 * depending on what ACPI reports as currently plugged in, otherwise
 * cpu_present_map is just a copy of cpu_possible_map.
 *
 * (*) Well, cpu_present_map is dynamic in the hotplug case.  If not
 *     hotplug, it's a copy of cpu_possible_map, hence fixed at boot.
 *
 * Subtleties:
 * 1) UP arches (NR_CPUS == 1, CONFIG_SMP not defined) hardcode the
 *    assumption that their single CPU is online.  The UP
 *    cpu_{online,possible,present}_maps are placebos.  Changing them
 *    will have no useful effect on the following num_*_cpus()
 *    and cpu_*() macros in the UP case.  This ugliness is a UP
 *    optimization - don't waste any instructions or memory references
 *    asking if you're online or how many CPUs there are if there is
 *    only one CPU.
 * 2) Most SMP arches #define some of these maps to be some
 *    other map specific to that arch.  Therefore, the following
 *    must be #define macros, not inlines.  To see why, examine
 *    the assembly code produced by the following.  Note that
 *    set1() writes phys_x_map, but set2() writes x_map:
 *        int x_map, phys_x_map;
 *        #define set1(a) x_map = a
 *        inline void set2(int a) { x_map = a; }
 *        #define x_map phys_x_map
 *        main(){ set1(3); set2(5); }
 */

extern cpumask_t cpu_possible_map;
extern cpumask_t cpu_online_map;
extern cpumask_t cpu_present_map;

#if NR_CPUS > 1
#define num_online_cpus()   cpumask_weight(&cpu_online_map)
#define num_possible_cpus() cpumask_weight(&cpu_possible_map)
#define num_present_cpus()  cpumask_weight(&cpu_present_map)
#define cpu_online(cpu)     cpumask_test_cpu(cpu, &cpu_online_map)
#define cpu_possible(cpu)   cpumask_test_cpu(cpu, &cpu_possible_map)
#define cpu_present(cpu)    cpumask_test_cpu(cpu, &cpu_present_map)
#else
#define num_online_cpus()   1
#define num_possible_cpus() 1
#define num_present_cpus()  1
#define cpu_online(cpu)     ((cpu) == 0)
#define cpu_possible(cpu)   ((cpu) == 0)
#define cpu_present(cpu)    ((cpu) == 0)
#endif

#define for_each_possible_cpu(cpu) for_each_cpu(cpu, &cpu_possible_map)
#define for_each_online_cpu(cpu)   for_each_cpu(cpu, &cpu_online_map)
#define for_each_present_cpu(cpu)  for_each_cpu(cpu, &cpu_present_map)
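
/*
 * A minimal iteration sketch (an illustration; 'cpu' and the
 * init_one_cpu() helper are hypothetical):
 *
 *     unsigned int cpu;
 *
 *     for_each_online_cpu(cpu)
 *         init_one_cpu(cpu);
 *
 * This walks cpu_online_map via for_each_cpu(), visiting each online
 * CPU id in increasing order.
 */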

/* Copy to/from cpumap provided by control tools. */
struct xenctl_bitmap;
int cpumask_to_xenctl_bitmap(struct xenctl_bitmap *, const cpumask_t *);
int xenctl_bitmap_to_cpumask(cpumask_var_t *, const struct xenctl_bitmap *);

#endif /* __XEN_CPUMASK_H */