/******************************************************************************
 * cpupool.c
 *
 * Generic cpupool-handling functions.
 *
 * Cpupools are a feature to have configurable scheduling domains. Each
 * cpupool runs its own scheduler on a dedicated set of physical cpus.
 * A domain is bound to one cpupool at any time, but it can be moved to
 * another cpupool.
 *
 * (C) 2009, Juergen Gross, Fujitsu Technology Solutions
 */

#include <xen/lib.h>
#include <xen/init.h>
#include <xen/cpumask.h>
#include <xen/percpu.h>
#include <xen/sched.h>
#include <xen/sched-if.h>
#include <xen/keyhandler.h>
#include <xen/cpu.h>

#define for_each_cpupool(ptr)    \
    for ((ptr) = &cpupool_list; *(ptr) != NULL; (ptr) = &((*(ptr))->next))
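
/*
 * Note: for_each_cpupool() hands back a pointer to the link field rather
 * than to the element itself, so callers can insert or unlink an entry in
 * place (as cpupool_create() and cpupool_destroy() below do) without
 * re-walking the list.
 */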

struct cpupool *cpupool0;                /* Initial cpupool with Dom0 */
cpumask_t cpupool_free_cpus;             /* cpus not in any cpupool */

static struct cpupool *cpupool_list;     /* linked list, sorted by poolid */

static int cpupool_moving_cpu = -1;
static struct cpupool *cpupool_cpu_moving = NULL;
static cpumask_t cpupool_locked_cpus;

static DEFINE_SPINLOCK(cpupool_lock);

DEFINE_PER_CPU(struct cpupool *, cpupool);

#define cpupool_dprintk(x...) ((void)0)

static struct cpupool *alloc_cpupool_struct(void)
{
    struct cpupool *c = xzalloc(struct cpupool);

    if ( !c || !zalloc_cpumask_var(&c->cpu_valid) )
    {
        xfree(c);
        c = NULL;
    }
    else if ( !zalloc_cpumask_var(&c->cpu_suspended) )
    {
        free_cpumask_var(c->cpu_valid);
        xfree(c);
        c = NULL;
    }

    return c;
}

static void free_cpupool_struct(struct cpupool *c)
{
    if ( c )
    {
        free_cpumask_var(c->cpu_suspended);
        free_cpumask_var(c->cpu_valid);
    }
    xfree(c);
}

/*
 * Find a cpupool by its id. To be called with the cpupool lock held.
 * If exact is not specified, the first cpupool with an id larger than or
 * equal to the searched id is returned.
 * Returns NULL if not found.
 */
static struct cpupool *__cpupool_find_by_id(int id, int exact)
{
    struct cpupool **q;

    ASSERT(spin_is_locked(&cpupool_lock));

    for_each_cpupool(q)
        if ( (*q)->cpupool_id >= id )
            break;

    return (!exact || (*q == NULL) || ((*q)->cpupool_id == id)) ? *q : NULL;
}

static struct cpupool *cpupool_find_by_id(int poolid)
{
    return __cpupool_find_by_id(poolid, 1);
}

static struct cpupool *__cpupool_get_by_id(int poolid, int exact)
{
    struct cpupool *c;
    spin_lock(&cpupool_lock);
    c = __cpupool_find_by_id(poolid, exact);
    if ( c != NULL )
        atomic_inc(&c->refcnt);
    spin_unlock(&cpupool_lock);
    return c;
}

struct cpupool *cpupool_get_by_id(int poolid)
{
    return __cpupool_get_by_id(poolid, 1);
}

static struct cpupool *cpupool_get_next_by_id(int poolid)
{
    return __cpupool_get_by_id(poolid, 0);
}

void cpupool_put(struct cpupool *pool)
{
    if ( !atomic_dec_and_test(&pool->refcnt) )
        return;
    scheduler_free(pool->sched);
    free_cpupool_struct(pool);
}
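
/*
 * Reference-counting sketch (illustrative only, mirroring how the sysctl
 * handlers later in this file use the API): a looked-up pool stays valid
 * until the matching cpupool_put(), even if it is destroyed in the meantime.
 *
 *     struct cpupool *c = cpupool_get_by_id(poolid);
 *
 *     if ( c == NULL )
 *         return -ENOENT;
 *     ... operate on c; the reference keeps the structure alive ...
 *     cpupool_put(c);
 */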

/*
 * create a new cpupool with specified poolid and scheduler
 * returns a pointer to the new cpupool structure on success, NULL otherwise
 * possible failures:
 * - no memory
 * - poolid already used
 * - unknown scheduler
 */
static struct cpupool *cpupool_create(
    int poolid, unsigned int sched_id, int *perr)
{
    struct cpupool *c;
    struct cpupool **q;
    int last = 0;

    *perr = -ENOMEM;
    if ( (c = alloc_cpupool_struct()) == NULL )
        return NULL;

    /* One reference for caller, one reference for cpupool_destroy(). */
    atomic_set(&c->refcnt, 2);

    cpupool_dprintk("cpupool_create(pool=%d,sched=%u)\n", poolid, sched_id);

    spin_lock(&cpupool_lock);

    for_each_cpupool(q)
    {
        last = (*q)->cpupool_id;
        if ( (poolid != CPUPOOLID_NONE) && (last >= poolid) )
            break;
    }
    if ( *q != NULL )
    {
        if ( (*q)->cpupool_id == poolid )
        {
            spin_unlock(&cpupool_lock);
            free_cpupool_struct(c);
            *perr = -EEXIST;
            return NULL;
        }
        c->next = *q;
    }

    c->cpupool_id = (poolid == CPUPOOLID_NONE) ? (last + 1) : poolid;
    if ( poolid == 0 )
    {
        c->sched = scheduler_get_default();
    }
    else
    {
        c->sched = scheduler_alloc(sched_id, perr);
        if ( c->sched == NULL )
        {
            spin_unlock(&cpupool_lock);
            free_cpupool_struct(c);
            return NULL;
        }
    }

    *q = c;

    spin_unlock(&cpupool_lock);

    cpupool_dprintk("Created cpupool %d with scheduler %s (%s)\n",
                    c->cpupool_id, c->sched->name, c->sched->opt_name);

    *perr = 0;
    return c;
}
/*
 * destroys the given cpupool
 * returns 0 on success, a negative error code otherwise
 * possible failures:
 * - pool still in use
 * - cpus still assigned to pool
 * - pool not in list
 */
static int cpupool_destroy(struct cpupool *c)
{
    struct cpupool **q;

    spin_lock(&cpupool_lock);
    for_each_cpupool(q)
        if ( *q == c )
            break;
    if ( *q != c )
    {
        spin_unlock(&cpupool_lock);
        return -ENOENT;
    }
    if ( (c->n_dom != 0) || cpumask_weight(c->cpu_valid) )
    {
        spin_unlock(&cpupool_lock);
        return -EBUSY;
    }
    *q = c->next;
    spin_unlock(&cpupool_lock);

    cpupool_put(c);

    cpupool_dprintk("cpupool_destroy(pool=%d)\n", c->cpupool_id);
    return 0;
}

/*
 * Move domain to another cpupool
 */
static int cpupool_move_domain_locked(struct domain *d, struct cpupool *c)
{
    int ret;

    if ( unlikely(d->cpupool == c) )
        return 0;

    d->cpupool->n_dom--;
    ret = sched_move_domain(d, c);
    if ( ret )
        d->cpupool->n_dom++;
    else
        c->n_dom++;

    return ret;
}

int cpupool_move_domain(struct domain *d, struct cpupool *c)
{
    int ret;

    spin_lock(&cpupool_lock);

    ret = cpupool_move_domain_locked(d, c);

    spin_unlock(&cpupool_lock);

    return ret;
}

/*
 * assign a specific cpu to a cpupool
 * cpupool_lock must be held
 */
static int cpupool_assign_cpu_locked(struct cpupool *c, unsigned int cpu)
{
    int ret;
    struct domain *d;

    if ( (cpupool_moving_cpu == cpu) && (c != cpupool_cpu_moving) )
        return -EADDRNOTAVAIL;
    ret = schedule_cpu_switch(cpu, c);
    if ( ret )
        return ret;

    cpumask_clear_cpu(cpu, &cpupool_free_cpus);
    if ( cpupool_moving_cpu == cpu )
    {
        cpupool_moving_cpu = -1;
        cpupool_put(cpupool_cpu_moving);
        cpupool_cpu_moving = NULL;
    }
    cpumask_set_cpu(cpu, c->cpu_valid);

    rcu_read_lock(&domlist_read_lock);
    for_each_domain_in_cpupool(d, c)
    {
        domain_update_node_affinity(d);
    }
    rcu_read_unlock(&domlist_read_lock);

    return 0;
}

static long cpupool_unassign_cpu_helper(void *info)
{
    int cpu = cpupool_moving_cpu;
    struct cpupool *c = info;
    struct domain *d;
    long ret;

    cpupool_dprintk("cpupool_unassign_cpu(pool=%d,cpu=%d)\n",
                    cpupool_cpu_moving->cpupool_id, cpu);

    spin_lock(&cpupool_lock);
    if ( c != cpupool_cpu_moving )
    {
        ret = -EADDRNOTAVAIL;
        goto out;
    }

    /*
     * We need this for scanning the domain list, both in
     * cpu_disable_scheduler(), and at the bottom of this function.
     */
    rcu_read_lock(&domlist_read_lock);
    ret = cpu_disable_scheduler(cpu);
    cpumask_set_cpu(cpu, &cpupool_free_cpus);

    /*
     * cpu_disable_scheduler() returning an error doesn't require resetting
     * cpupool_free_cpus' cpu bit. All error cases should be of temporary
     * nature and tools will retry the operation. Even if the number of
     * retries may be limited, the in-between state can easily be repaired
     * by adding the cpu to the cpupool again.
     */
    if ( !ret )
    {
        ret = schedule_cpu_switch(cpu, NULL);
        if ( ret )
            cpumask_clear_cpu(cpu, &cpupool_free_cpus);
        else
        {
            cpupool_moving_cpu = -1;
            cpupool_put(cpupool_cpu_moving);
            cpupool_cpu_moving = NULL;
        }
    }

    for_each_domain_in_cpupool(d, c)
    {
        domain_update_node_affinity(d);
    }
    rcu_read_unlock(&domlist_read_lock);
 out:
    spin_unlock(&cpupool_lock);
    cpupool_dprintk("cpupool_unassign_cpu ret=%ld\n", ret);
    return ret;
}

/*
 * unassign a specific cpu from a cpupool
 * We must be sure not to run on the cpu being unassigned! To achieve this,
 * the main functionality is performed via continue_hypercall_on_cpu() on a
 * different cpu.
 * If the cpu to be removed is the last one of the cpupool, no active domain
 * may remain bound to the cpupool. Dying domains are moved to cpupool0 as
 * they might be zombies.
 * possible failures:
 * - last cpu and still active domains in cpupool
 * - cpu just being unplugged
 */
static int cpupool_unassign_cpu(struct cpupool *c, unsigned int cpu)
{
    int work_cpu;
    int ret;
    struct domain *d;

    cpupool_dprintk("cpupool_unassign_cpu(pool=%d,cpu=%d)\n",
                    c->cpupool_id, cpu);

    spin_lock(&cpupool_lock);
    ret = -EADDRNOTAVAIL;
    if ( (cpupool_moving_cpu != -1) && (cpu != cpupool_moving_cpu) )
        goto out;
    if ( cpumask_test_cpu(cpu, &cpupool_locked_cpus) )
        goto out;

    ret = 0;
    if ( !cpumask_test_cpu(cpu, c->cpu_valid) && (cpu != cpupool_moving_cpu) )
        goto out;

    if ( (c->n_dom > 0) && (cpumask_weight(c->cpu_valid) == 1) &&
         (cpu != cpupool_moving_cpu) )
    {
        rcu_read_lock(&domlist_read_lock);
        for_each_domain_in_cpupool(d, c)
        {
            if ( !d->is_dying )
            {
                ret = -EBUSY;
                break;
            }
            ret = cpupool_move_domain_locked(d, cpupool0);
            if ( ret )
                break;
        }
        rcu_read_unlock(&domlist_read_lock);
        if ( ret )
            goto out;
    }
    cpupool_moving_cpu = cpu;
    atomic_inc(&c->refcnt);
    cpupool_cpu_moving = c;
    cpumask_clear_cpu(cpu, c->cpu_valid);
    spin_unlock(&cpupool_lock);

    work_cpu = smp_processor_id();
    if ( work_cpu == cpu )
    {
        work_cpu = cpumask_first(cpupool0->cpu_valid);
        if ( work_cpu == cpu )
            work_cpu = cpumask_next(cpu, cpupool0->cpu_valid);
    }
    return continue_hypercall_on_cpu(work_cpu, cpupool_unassign_cpu_helper, c);

 out:
    spin_unlock(&cpupool_lock);
    cpupool_dprintk("cpupool_unassign_cpu(pool=%d,cpu=%d) ret %d\n",
                    c->cpupool_id, cpu, ret);
    return ret;
}
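
/*
 * Note on the two-phase removal above: cpupool_unassign_cpu() only marks the
 * cpu as moving (cpupool_moving_cpu / cpupool_cpu_moving, taking an extra
 * pool reference) and clears it from cpu_valid; the actual scheduler switch
 * happens in cpupool_unassign_cpu_helper(), which continue_hypercall_on_cpu()
 * runs on a cpu other than the one being removed.
 */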

/*
 * add a new domain to a cpupool
 * possible failures:
 * - pool does not exist
 * - no cpu assigned to pool
 */
int cpupool_add_domain(struct domain *d, int poolid)
{
    struct cpupool *c;
    int rc;
    int n_dom = 0;

    if ( poolid == CPUPOOLID_NONE )
        return 0;
    spin_lock(&cpupool_lock);
    c = cpupool_find_by_id(poolid);
    if ( c == NULL )
        rc = -ESRCH;
    else if ( !cpumask_weight(c->cpu_valid) )
        rc = -ENODEV;
    else
    {
        c->n_dom++;
        n_dom = c->n_dom;
        d->cpupool = c;
        rc = 0;
    }
    spin_unlock(&cpupool_lock);
    cpupool_dprintk("cpupool_add_domain(dom=%d,pool=%d) n_dom %d rc %d\n",
                    d->domain_id, poolid, n_dom, rc);
    return rc;
}

/*
 * remove a domain from a cpupool
 */
void cpupool_rm_domain(struct domain *d)
{
    int cpupool_id;
    int n_dom;

    if ( d->cpupool == NULL )
        return;
    spin_lock(&cpupool_lock);
    cpupool_id = d->cpupool->cpupool_id;
    d->cpupool->n_dom--;
    n_dom = d->cpupool->n_dom;
    d->cpupool = NULL;
    spin_unlock(&cpupool_lock);
    cpupool_dprintk("cpupool_rm_domain(dom=%d,pool=%d) n_dom %d\n",
                    d->domain_id, cpupool_id, n_dom);
    return;
}

/*
 * Called to add a cpu to a pool. CPUs being hot-plugged are added to pool0,
 * as they must have been in there when unplugged.
 *
 * If, on the other hand, we are adding CPUs because we are resuming (e.g.,
 * after ACPI S3), we put the cpu back in the pool it was in prior to
 * suspending.
 */
static int cpupool_cpu_add(unsigned int cpu)
{
    int ret = 0;

    spin_lock(&cpupool_lock);
    cpumask_clear_cpu(cpu, &cpupool_locked_cpus);
    cpumask_set_cpu(cpu, &cpupool_free_cpus);

    if ( system_state == SYS_STATE_resume )
    {
        struct cpupool **c;

        for_each_cpupool(c)
        {
            if ( cpumask_test_cpu(cpu, (*c)->cpu_suspended) )
            {
                ret = cpupool_assign_cpu_locked(*c, cpu);
                if ( ret )
                    goto out;
                cpumask_clear_cpu(cpu, (*c)->cpu_suspended);
                break;
            }
        }

        /*
         * Either cpu has been found as suspended in a pool, and added back
         * there, or it stayed free (if it did not belong to any pool when
         * suspending), and we don't want to do anything.
         */
        ASSERT(cpumask_test_cpu(cpu, &cpupool_free_cpus) ||
               cpumask_test_cpu(cpu, (*c)->cpu_valid));
    }
    else
    {
        /*
         * If we are not resuming, we are hot-plugging a cpu, in which case
         * we add it to pool0, as it certainly was there when hot-unplugged
         * (or unplugging would have failed) and that is the default behavior
         * anyway.
         */
        ret = cpupool_assign_cpu_locked(cpupool0, cpu);
    }
 out:
    spin_unlock(&cpupool_lock);

    return ret;
}

/*
 * Called to remove a CPU from a pool. The CPU is locked, to forbid removing
 * it from pool0. In fact, if we want to hot-unplug a CPU, it must belong to
 * pool0, or we fail.
 *
 * However, if we are suspending (e.g., to ACPI S3), we mark the CPU in such
 * a way that it can be put back in its pool when resuming.
 */
static int cpupool_cpu_remove(unsigned int cpu)
{
    int ret = -ENODEV;

    spin_lock(&cpupool_lock);
    if ( system_state == SYS_STATE_suspend )
    {
        struct cpupool **c;

        for_each_cpupool(c)
        {
            if ( cpumask_test_cpu(cpu, (*c)->cpu_valid) )
            {
                cpumask_set_cpu(cpu, (*c)->cpu_suspended);
                cpumask_clear_cpu(cpu, (*c)->cpu_valid);
                break;
            }
        }

        /*
         * Either we found cpu in a pool, or it must be free (if it has been
         * hot-unplugged, then we must have found it in pool0). It is, of
         * course, fine to suspend or shutdown with CPUs not assigned to a
         * pool, and (in case of suspend) they will stay free when resuming.
         */
        ASSERT(cpumask_test_cpu(cpu, &cpupool_free_cpus) ||
               cpumask_test_cpu(cpu, (*c)->cpu_suspended));
        ASSERT(cpumask_test_cpu(cpu, &cpu_online_map) ||
               cpumask_test_cpu(cpu, cpupool0->cpu_suspended));
        ret = 0;
    }
    else if ( cpumask_test_cpu(cpu, cpupool0->cpu_valid) )
    {
        /*
         * If we are not suspending, we are hot-unplugging a cpu, and that is
         * allowed only for CPUs in pool0.
         */
        cpumask_clear_cpu(cpu, cpupool0->cpu_valid);
        ret = 0;
    }

    if ( !ret )
        cpumask_set_cpu(cpu, &cpupool_locked_cpus);
    spin_unlock(&cpupool_lock);

    return ret;
}
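
/*
 * Note: once cpupool_cpu_remove() has marked a cpu in cpupool_locked_cpus,
 * cpupool_unassign_cpu() refuses to touch it; the bit is cleared again by
 * cpupool_cpu_add() when the cpu comes back online (or its removal fails).
 */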

/*
 * do cpupool related sysctl operations
 */
int cpupool_do_sysctl(struct xen_sysctl_cpupool_op *op)
{
    int ret;
    struct cpupool *c;

    switch ( op->op )
    {

    case XEN_SYSCTL_CPUPOOL_OP_CREATE:
    {
        int poolid;

        poolid = (op->cpupool_id == XEN_SYSCTL_CPUPOOL_PAR_ANY) ?
            CPUPOOLID_NONE: op->cpupool_id;
        c = cpupool_create(poolid, op->sched_id, &ret);
        if ( c != NULL )
        {
            op->cpupool_id = c->cpupool_id;
            cpupool_put(c);
        }
    }
    break;

    case XEN_SYSCTL_CPUPOOL_OP_DESTROY:
    {
        c = cpupool_get_by_id(op->cpupool_id);
        ret = -ENOENT;
        if ( c == NULL )
            break;
        ret = cpupool_destroy(c);
        cpupool_put(c);
    }
    break;

    case XEN_SYSCTL_CPUPOOL_OP_INFO:
    {
        c = cpupool_get_next_by_id(op->cpupool_id);
        ret = -ENOENT;
        if ( c == NULL )
            break;
        op->cpupool_id = c->cpupool_id;
        op->sched_id = c->sched->sched_id;
        op->n_dom = c->n_dom;
        ret = cpumask_to_xenctl_bitmap(&op->cpumap, c->cpu_valid);
        cpupool_put(c);
    }
    break;

    case XEN_SYSCTL_CPUPOOL_OP_ADDCPU:
    {
        unsigned cpu;

        cpu = op->cpu;
        cpupool_dprintk("cpupool_assign_cpu(pool=%d,cpu=%d)\n",
                        op->cpupool_id, cpu);
        spin_lock(&cpupool_lock);
        if ( cpu == XEN_SYSCTL_CPUPOOL_PAR_ANY )
            cpu = cpumask_first(&cpupool_free_cpus);
        ret = -EINVAL;
        if ( cpu >= nr_cpu_ids )
            goto addcpu_out;
        ret = -ENODEV;
        if ( !cpumask_test_cpu(cpu, &cpupool_free_cpus) )
            goto addcpu_out;
        c = cpupool_find_by_id(op->cpupool_id);
        ret = -ENOENT;
        if ( c == NULL )
            goto addcpu_out;
        ret = cpupool_assign_cpu_locked(c, cpu);
    addcpu_out:
        spin_unlock(&cpupool_lock);
        cpupool_dprintk("cpupool_assign_cpu(pool=%d,cpu=%d) ret %d\n",
                        op->cpupool_id, cpu, ret);
    }
    break;

    case XEN_SYSCTL_CPUPOOL_OP_RMCPU:
    {
        unsigned cpu;

        c = cpupool_get_by_id(op->cpupool_id);
        ret = -ENOENT;
        if ( c == NULL )
            break;
        cpu = op->cpu;
        if ( cpu == XEN_SYSCTL_CPUPOOL_PAR_ANY )
            cpu = cpumask_last(c->cpu_valid);
        ret = (cpu < nr_cpu_ids) ? cpupool_unassign_cpu(c, cpu) : -EINVAL;
        cpupool_put(c);
    }
    break;

    case XEN_SYSCTL_CPUPOOL_OP_MOVEDOMAIN:
    {
        struct domain *d;

        ret = rcu_lock_remote_domain_by_id(op->domid, &d);
        if ( ret )
            break;
        if ( d->cpupool == NULL )
        {
            ret = -EINVAL;
            rcu_unlock_domain(d);
            break;
        }
        if ( op->cpupool_id == d->cpupool->cpupool_id )
        {
            ret = 0;
            rcu_unlock_domain(d);
            break;
        }
        cpupool_dprintk("cpupool move_domain(dom=%d)->pool=%d\n",
                        d->domain_id, op->cpupool_id);
        ret = -ENOENT;
        spin_lock(&cpupool_lock);

        c = cpupool_find_by_id(op->cpupool_id);
        if ( (c != NULL) && cpumask_weight(c->cpu_valid) )
            ret = cpupool_move_domain_locked(d, c);

        spin_unlock(&cpupool_lock);
        cpupool_dprintk("cpupool move_domain(dom=%d)->pool=%d ret %d\n",
                        d->domain_id, op->cpupool_id, ret);
        rcu_unlock_domain(d);
    }
    break;

    case XEN_SYSCTL_CPUPOOL_OP_FREEINFO:
    {
        ret = cpumask_to_xenctl_bitmap(
            &op->cpumap, &cpupool_free_cpus);
    }
    break;

    default:
        ret = -ENOSYS;
        break;
    }

    return ret;
}
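
/*
 * Illustrative sketch only (not part of this file; the real callers sit in
 * the toolstack's sysctl path): creating a pool via the handler above and
 * then handing it one free cpu. "my_sched_id" is a placeholder for a valid
 * scheduler id. On success, op.cpupool_id is updated to the new pool's id,
 * so the ADDCPU call targets the pool just created.
 *
 *     struct xen_sysctl_cpupool_op op = {
 *         .op         = XEN_SYSCTL_CPUPOOL_OP_CREATE,
 *         .cpupool_id = XEN_SYSCTL_CPUPOOL_PAR_ANY,
 *         .sched_id   = my_sched_id,
 *     };
 *
 *     if ( cpupool_do_sysctl(&op) == 0 )
 *     {
 *         op.op  = XEN_SYSCTL_CPUPOOL_OP_ADDCPU;
 *         op.cpu = XEN_SYSCTL_CPUPOOL_PAR_ANY;
 *         (void)cpupool_do_sysctl(&op);
 *     }
 */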

static void print_cpumap(const char *str, const cpumask_t *map)
{
    cpulist_scnprintf(keyhandler_scratch, sizeof(keyhandler_scratch), map);
    printk("%s: %s\n", str, keyhandler_scratch);
}

void dump_runq(unsigned char key)
{
    unsigned long    flags;
    s_time_t         now = NOW();
    struct cpupool **c;

    spin_lock(&cpupool_lock);
    local_irq_save(flags);

    printk("sched_smt_power_savings: %s\n",
            sched_smt_power_savings? "enabled":"disabled");
    printk("NOW=%"PRI_stime"\n", now);

    print_cpumap("Online Cpus", &cpu_online_map);
    if ( !cpumask_empty(&cpupool_free_cpus) )
    {
        print_cpumap("Free Cpus", &cpupool_free_cpus);
        schedule_dump(NULL);
    }

    for_each_cpupool(c)
    {
        printk("Cpupool %d:\n", (*c)->cpupool_id);
        print_cpumap("Cpus", (*c)->cpu_valid);
        schedule_dump(*c);
    }

    local_irq_restore(flags);
    spin_unlock(&cpupool_lock);
}

static int cpu_callback(
    struct notifier_block *nfb, unsigned long action, void *hcpu)
{
    unsigned int cpu = (unsigned long)hcpu;
    int rc = 0;

    switch ( action )
    {
    case CPU_DOWN_FAILED:
    case CPU_ONLINE:
        rc = cpupool_cpu_add(cpu);
        break;
    case CPU_DOWN_PREPARE:
        rc = cpupool_cpu_remove(cpu);
        break;
    default:
        break;
    }

    return !rc ? NOTIFY_DONE : notifier_from_errno(rc);
}

static struct notifier_block cpu_nfb = {
    .notifier_call = cpu_callback
};

static int __init cpupool_presmp_init(void)
{
    int err;
    void *cpu = (void *)(long)smp_processor_id();
    cpupool0 = cpupool_create(0, 0, &err);
    BUG_ON(cpupool0 == NULL);
    cpupool_put(cpupool0);
    cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
    register_cpu_notifier(&cpu_nfb);
    return 0;
}
presmp_initcall(cpupool_presmp_init);

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */