1 /******************************************************************************
2 * sched_arinc653.c
3 *
4 * An ARINC653-compatible scheduling algorithm for use in Xen.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to
8 * deal in the Software without restriction, including without limitation the
9 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10 * sell copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 *
24 * Copyright (c) 2010, DornerWorks, Ltd. <DornerWorks.com>
25 */
26
27 #include <xen/lib.h>
28 #include <xen/sched.h>
29 #include <xen/sched-if.h>
30 #include <xen/timer.h>
31 #include <xen/softirq.h>
32 #include <xen/time.h>
33 #include <xen/errno.h>
34 #include <xen/list.h>
35 #include <xen/guest_access.h>
36 #include <public/sysctl.h>
37
38 /**************************************************************************
39 * Private Macros *
40 **************************************************************************/
41
/**
 * Default timeslice for domain 0.  Also used by do_schedule as the idle
 * slice length when no schedule entries have been installed yet.
 */
#define DEFAULT_TIMESLICE MILLISECS(10)

/**
 * Retrieve the idle VCPU for a given physical CPU
 */
#define IDLETASK(cpu) (idle_vcpu[cpu])

/**
 * Return a pointer to the ARINC 653-specific scheduler data information
 * associated with the given VCPU (vc).  May be NULL before alloc_vdata
 * has run for that VCPU.
 */
#define AVCPU(vc) ((arinc653_vcpu_t *)(vc)->sched_priv)

/**
 * Return the global scheduler private data given the scheduler ops pointer
 */
#define SCHED_PRIV(s) ((a653sched_priv_t *)((s)->sched_data))
62
63 /**************************************************************************
64 * Private Type Definitions *
65 **************************************************************************/
66
/**
 * The arinc653_vcpu_t structure holds ARINC 653-scheduler-specific
 * information for all non-idle VCPUs
 */
typedef struct arinc653_vcpu_s
{
    /* vc points to Xen's struct vcpu so we can get to it from an
     * arinc653_vcpu_t pointer. */
    struct vcpu *       vc;
    /* awake holds whether the VCPU has been woken with vcpu_wake().
     * NOTE(review): written by vcpu_wake/vcpu_sleep without taking the
     * scheduler lock, but read under it in do_schedule -- confirm this
     * relies on the flag being a single aligned store. */
    bool_t              awake;
    /* list holds the linked list information for the list this VCPU
     * is stored in (the scheduler's vcpu_list) */
    struct list_head    list;
} arinc653_vcpu_t;
82
/**
 * The sched_entry_t structure holds a single entry of the
 * ARINC 653 schedule.
 */
typedef struct sched_entry_s
{
    /* dom_handle holds the handle ("UUID") for the domain that this
     * schedule entry refers to. */
    xen_domain_handle_t dom_handle;
    /* vcpu_id holds the VCPU number for the VCPU that this schedule
     * entry refers to. */
    int                 vcpu_id;
    /* runtime holds the number of nanoseconds that the VCPU for this
     * schedule entry should be allowed to run per major frame. */
    s_time_t            runtime;
    /* vc holds a pointer to the Xen VCPU structure.  This is a cached
     * resolution of (dom_handle, vcpu_id), kept in sync by
     * update_schedule_vcpus(); NULL when no matching VCPU exists. */
    struct vcpu *       vc;
} sched_entry_t;
101
/**
 * This structure defines data that is global to an instance of the scheduler
 */
typedef struct a653sched_priv_s
{
    /* lock for the whole pluggable scheduler, nests inside cpupool_lock */
    spinlock_t lock;

    /**
     * This array holds the active ARINC 653 schedule.
     *
     * When the system tries to start a new VCPU, this schedule is scanned
     * to look for a matching (handle, VCPU #) pair. If both the handle (UUID)
     * and VCPU number match, then the VCPU is allowed to run. Its run time
     * (per major frame) is given in the third entry of the schedule.
     */
    sched_entry_t schedule[ARINC653_MAX_DOMAINS_PER_SCHEDULE];

    /**
     * This variable holds the number of entries that are valid in
     * the arinc653_schedule table.
     *
     * This is not necessarily the same as the number of domains in the
     * schedule. A domain could be listed multiple times within the schedule,
     * or a domain with multiple VCPUs could have a different
     * schedule entry for each VCPU.
     *
     * Entries in schedule[] at or beyond this index are not valid (they
     * may contain stale data from a previous, larger schedule).
     */
    unsigned int num_schedule_entries;

    /**
     * the major frame time for the ARINC 653 schedule.
     */
    s_time_t major_frame;

    /**
     * the time that the next major frame starts
     */
    s_time_t next_major_frame;

    /**
     * pointers to all Xen VCPU structures for iterating through
     */
    struct list_head vcpu_list;
} a653sched_priv_t;
146
147 /**************************************************************************
148 * Helper functions *
149 **************************************************************************/
150
151 /**
152 * This function compares two domain handles.
153 *
154 * @param h1 Pointer to handle 1
155 * @param h2 Pointer to handle 2
156 *
157 * @return <ul>
158 * <li> <0: handle 1 is less than handle 2
159 * <li> 0: handle 1 is equal to handle 2
160 * <li> >0: handle 1 is greater than handle 2
161 * </ul>
162 */
dom_handle_cmp(const xen_domain_handle_t h1,const xen_domain_handle_t h2)163 static int dom_handle_cmp(const xen_domain_handle_t h1,
164 const xen_domain_handle_t h2)
165 {
166 return memcmp(h1, h2, sizeof(xen_domain_handle_t));
167 }
168
169 /**
170 * This function searches the vcpu list to find a VCPU that matches
171 * the domain handle and VCPU ID specified.
172 *
173 * @param ops Pointer to this instance of the scheduler structure
174 * @param handle Pointer to handler
175 * @param vcpu_id VCPU ID
176 *
177 * @return <ul>
178 * <li> Pointer to the matching VCPU if one is found
179 * <li> NULL otherwise
180 * </ul>
181 */
find_vcpu(const struct scheduler * ops,xen_domain_handle_t handle,int vcpu_id)182 static struct vcpu *find_vcpu(
183 const struct scheduler *ops,
184 xen_domain_handle_t handle,
185 int vcpu_id)
186 {
187 arinc653_vcpu_t *avcpu;
188
189 /* loop through the vcpu_list looking for the specified VCPU */
190 list_for_each_entry ( avcpu, &SCHED_PRIV(ops)->vcpu_list, list )
191 if ( (dom_handle_cmp(avcpu->vc->domain->handle, handle) == 0)
192 && (vcpu_id == avcpu->vc->vcpu_id) )
193 return avcpu->vc;
194
195 return NULL;
196 }
197
198 /**
199 * This function updates the pointer to the Xen VCPU structure for each entry
200 * in the ARINC 653 schedule.
201 *
202 * @param ops Pointer to this instance of the scheduler structure
203 * @return <None>
204 */
update_schedule_vcpus(const struct scheduler * ops)205 static void update_schedule_vcpus(const struct scheduler *ops)
206 {
207 unsigned int i, n_entries = SCHED_PRIV(ops)->num_schedule_entries;
208
209 for ( i = 0; i < n_entries; i++ )
210 SCHED_PRIV(ops)->schedule[i].vc =
211 find_vcpu(ops,
212 SCHED_PRIV(ops)->schedule[i].dom_handle,
213 SCHED_PRIV(ops)->schedule[i].vcpu_id);
214 }
215
216 /**
217 * This function is called by the adjust_global scheduler hook to put
218 * in place a new ARINC653 schedule.
219 *
220 * @param ops Pointer to this instance of the scheduler structure
221 *
222 * @return <ul>
223 * <li> 0 = success
224 * <li> !0 = error
225 * </ul>
226 */
227 static int
arinc653_sched_set(const struct scheduler * ops,struct xen_sysctl_arinc653_schedule * schedule)228 arinc653_sched_set(
229 const struct scheduler *ops,
230 struct xen_sysctl_arinc653_schedule *schedule)
231 {
232 a653sched_priv_t *sched_priv = SCHED_PRIV(ops);
233 s_time_t total_runtime = 0;
234 unsigned int i;
235 unsigned long flags;
236 int rc = -EINVAL;
237
238 spin_lock_irqsave(&sched_priv->lock, flags);
239
240 /* Check for valid major frame and number of schedule entries. */
241 if ( (schedule->major_frame <= 0)
242 || (schedule->num_sched_entries < 1)
243 || (schedule->num_sched_entries > ARINC653_MAX_DOMAINS_PER_SCHEDULE) )
244 goto fail;
245
246 for ( i = 0; i < schedule->num_sched_entries; i++ )
247 {
248 /* Check for a valid run time. */
249 if ( schedule->sched_entries[i].runtime <= 0 )
250 goto fail;
251
252 /* Add this entry's run time to total run time. */
253 total_runtime += schedule->sched_entries[i].runtime;
254 }
255
256 /*
257 * Error if the major frame is not large enough to run all entries as
258 * indicated by comparing the total run time to the major frame length.
259 */
260 if ( total_runtime > schedule->major_frame )
261 goto fail;
262
263 /* Copy the new schedule into place. */
264 sched_priv->num_schedule_entries = schedule->num_sched_entries;
265 sched_priv->major_frame = schedule->major_frame;
266 for ( i = 0; i < schedule->num_sched_entries; i++ )
267 {
268 memcpy(sched_priv->schedule[i].dom_handle,
269 schedule->sched_entries[i].dom_handle,
270 sizeof(sched_priv->schedule[i].dom_handle));
271 sched_priv->schedule[i].vcpu_id =
272 schedule->sched_entries[i].vcpu_id;
273 sched_priv->schedule[i].runtime =
274 schedule->sched_entries[i].runtime;
275 }
276 update_schedule_vcpus(ops);
277
278 /*
279 * The newly-installed schedule takes effect immediately. We do not even
280 * wait for the current major frame to expire.
281 *
282 * Signal a new major frame to begin. The next major frame is set up by
283 * the do_schedule callback function when it is next invoked.
284 */
285 sched_priv->next_major_frame = NOW();
286
287 rc = 0;
288
289 fail:
290 spin_unlock_irqrestore(&sched_priv->lock, flags);
291 return rc;
292 }
293
294 /**
295 * This function is called by the adjust_global scheduler hook to read the
296 * current ARINC 653 schedule
297 *
298 * @param ops Pointer to this instance of the scheduler structure
299 * @return <ul>
300 * <li> 0 = success
301 * <li> !0 = error
302 * </ul>
303 */
304 static int
arinc653_sched_get(const struct scheduler * ops,struct xen_sysctl_arinc653_schedule * schedule)305 arinc653_sched_get(
306 const struct scheduler *ops,
307 struct xen_sysctl_arinc653_schedule *schedule)
308 {
309 a653sched_priv_t *sched_priv = SCHED_PRIV(ops);
310 unsigned int i;
311 unsigned long flags;
312
313 spin_lock_irqsave(&sched_priv->lock, flags);
314
315 schedule->num_sched_entries = sched_priv->num_schedule_entries;
316 schedule->major_frame = sched_priv->major_frame;
317 for ( i = 0; i < sched_priv->num_schedule_entries; i++ )
318 {
319 memcpy(schedule->sched_entries[i].dom_handle,
320 sched_priv->schedule[i].dom_handle,
321 sizeof(sched_priv->schedule[i].dom_handle));
322 schedule->sched_entries[i].vcpu_id = sched_priv->schedule[i].vcpu_id;
323 schedule->sched_entries[i].runtime = sched_priv->schedule[i].runtime;
324 }
325
326 spin_unlock_irqrestore(&sched_priv->lock, flags);
327
328 return 0;
329 }
330
331 /**************************************************************************
332 * Scheduler callback functions *
333 **************************************************************************/
334
335 /**
336 * This function performs initialization for an instance of the scheduler.
337 *
338 * @param ops Pointer to this instance of the scheduler structure
339 *
340 * @return <ul>
341 * <li> 0 = success
342 * <li> !0 = error
343 * </ul>
344 */
345 static int
a653sched_init(struct scheduler * ops)346 a653sched_init(struct scheduler *ops)
347 {
348 a653sched_priv_t *prv;
349
350 prv = xzalloc(a653sched_priv_t);
351 if ( prv == NULL )
352 return -ENOMEM;
353
354 ops->sched_data = prv;
355
356 prv->next_major_frame = 0;
357 spin_lock_init(&prv->lock);
358 INIT_LIST_HEAD(&prv->vcpu_list);
359
360 return 0;
361 }
362
363 /**
364 * This function performs deinitialization for an instance of the scheduler
365 *
366 * @param ops Pointer to this instance of the scheduler structure
367 */
368 static void
a653sched_deinit(struct scheduler * ops)369 a653sched_deinit(struct scheduler *ops)
370 {
371 xfree(SCHED_PRIV(ops));
372 ops->sched_data = NULL;
373 }
374
375 /**
376 * This function allocates scheduler-specific data for a VCPU
377 *
378 * @param ops Pointer to this instance of the scheduler structure
379 *
380 * @return Pointer to the allocated data
381 */
382 static void *
a653sched_alloc_vdata(const struct scheduler * ops,struct vcpu * vc,void * dd)383 a653sched_alloc_vdata(const struct scheduler *ops, struct vcpu *vc, void *dd)
384 {
385 a653sched_priv_t *sched_priv = SCHED_PRIV(ops);
386 arinc653_vcpu_t *svc;
387 unsigned int entry;
388 unsigned long flags;
389
390 /*
391 * Allocate memory for the ARINC 653-specific scheduler data information
392 * associated with the given VCPU (vc).
393 */
394 svc = xmalloc(arinc653_vcpu_t);
395 if ( svc == NULL )
396 return NULL;
397
398 spin_lock_irqsave(&sched_priv->lock, flags);
399
400 /*
401 * Add every one of dom0's vcpus to the schedule, as long as there are
402 * slots available.
403 */
404 if ( vc->domain->domain_id == 0 )
405 {
406 entry = sched_priv->num_schedule_entries;
407
408 if ( entry < ARINC653_MAX_DOMAINS_PER_SCHEDULE )
409 {
410 sched_priv->schedule[entry].dom_handle[0] = '\0';
411 sched_priv->schedule[entry].vcpu_id = vc->vcpu_id;
412 sched_priv->schedule[entry].runtime = DEFAULT_TIMESLICE;
413 sched_priv->schedule[entry].vc = vc;
414
415 sched_priv->major_frame += DEFAULT_TIMESLICE;
416 ++sched_priv->num_schedule_entries;
417 }
418 }
419
420 /*
421 * Initialize our ARINC 653 scheduler-specific information for the VCPU.
422 * The VCPU starts "asleep." When Xen is ready for the VCPU to run, it
423 * will call the vcpu_wake scheduler callback function and our scheduler
424 * will mark the VCPU awake.
425 */
426 svc->vc = vc;
427 svc->awake = 0;
428 if ( !is_idle_vcpu(vc) )
429 list_add(&svc->list, &SCHED_PRIV(ops)->vcpu_list);
430 update_schedule_vcpus(ops);
431
432 spin_unlock_irqrestore(&sched_priv->lock, flags);
433
434 return svc;
435 }
436
437 /**
438 * This function frees scheduler-specific VCPU data
439 *
440 * @param ops Pointer to this instance of the scheduler structure
441 */
442 static void
a653sched_free_vdata(const struct scheduler * ops,void * priv)443 a653sched_free_vdata(const struct scheduler *ops, void *priv)
444 {
445 arinc653_vcpu_t *av = priv;
446
447 if (av == NULL)
448 return;
449
450 if ( !is_idle_vcpu(av->vc) )
451 list_del(&av->list);
452
453 xfree(av);
454 update_schedule_vcpus(ops);
455 }
456
/**
 * This function allocates scheduler-specific data for a domain
 *
 * We do not actually make use of any per-domain data but the hypervisor
 * expects a non-NULL return value
 *
 * @param ops Pointer to this instance of the scheduler structure
 *
 * @return Pointer to the allocated data
 */
static void *
a653sched_alloc_domdata(const struct scheduler *ops, struct domain *dom)
{
    /*
     * return a non-NULL value to keep schedule.c happy.  This is the
     * shared scheduler-private data (owned by init/deinit), so
     * free_domdata must NOT free it -- see a653sched_free_domdata.
     */
    return SCHED_PRIV(ops);
}
473
/**
 * This function frees scheduler-specific data for a domain
 *
 * Intentionally a no-op: alloc_domdata hands out the shared scheduler
 * private data (not a per-domain allocation), which is freed only in
 * a653sched_deinit.
 *
 * @param ops Pointer to this instance of the scheduler structure
 */
static void
a653sched_free_domdata(const struct scheduler *ops, void *data)
{
    /* nop */
}
484
485 /**
486 * Xen scheduler callback function to sleep a VCPU
487 *
488 * @param ops Pointer to this instance of the scheduler structure
489 * @param vc Pointer to the VCPU structure for the current domain
490 */
491 static void
a653sched_vcpu_sleep(const struct scheduler * ops,struct vcpu * vc)492 a653sched_vcpu_sleep(const struct scheduler *ops, struct vcpu *vc)
493 {
494 if ( AVCPU(vc) != NULL )
495 AVCPU(vc)->awake = 0;
496
497 /*
498 * If the VCPU being put to sleep is the same one that is currently
499 * running, raise a softirq to invoke the scheduler to switch domains.
500 */
501 if ( per_cpu(schedule_data, vc->processor).curr == vc )
502 cpu_raise_softirq(vc->processor, SCHEDULE_SOFTIRQ);
503 }
504
505 /**
506 * Xen scheduler callback function to wake up a VCPU
507 *
508 * @param ops Pointer to this instance of the scheduler structure
509 * @param vc Pointer to the VCPU structure for the current domain
510 */
511 static void
a653sched_vcpu_wake(const struct scheduler * ops,struct vcpu * vc)512 a653sched_vcpu_wake(const struct scheduler *ops, struct vcpu *vc)
513 {
514 if ( AVCPU(vc) != NULL )
515 AVCPU(vc)->awake = 1;
516
517 cpu_raise_softirq(vc->processor, SCHEDULE_SOFTIRQ);
518 }
519
/**
 * Xen scheduler callback function to select a VCPU to run.
 * This is the main scheduler routine.
 *
 * Walks the fixed ARINC 653 schedule for the current major frame and
 * returns the entry whose time window covers 'now' (or the idle VCPU if
 * that entry is asleep/unrunnable, tasklet work is pending, or running
 * the entry here would migrate it).
 *
 * @param ops Pointer to this instance of the scheduler structure
 * @param now Current time
 *
 * @return Address of the VCPU structure scheduled to be run next
 *         Amount of time to execute the returned VCPU
 *         Flag for whether the VCPU was migrated
 */
static struct task_slice
a653sched_do_schedule(
    const struct scheduler *ops,
    s_time_t now,
    bool_t tasklet_work_scheduled)
{
    struct task_slice ret;                      /* hold the chosen domain */
    struct vcpu * new_task = NULL;
    /*
     * NOTE(review): these function-statics are shared by every pCPU and
     * every instance of this scheduler (e.g. multiple ARINC 653
     * cpupools).  That is only safe while a single instance runs on a
     * single pCPU -- confirm, or move this state into a653sched_priv_t.
     */
    static unsigned int sched_index = 0;
    static s_time_t next_switch_time;
    a653sched_priv_t *sched_priv = SCHED_PRIV(ops);
    const unsigned int cpu = smp_processor_id();
    unsigned long flags;

    spin_lock_irqsave(&sched_priv->lock, flags);

    /* With no schedule installed, just idle for a default timeslice. */
    if ( sched_priv->num_schedule_entries < 1 )
        sched_priv->next_major_frame = now + DEFAULT_TIMESLICE;
    else if ( now >= sched_priv->next_major_frame )
    {
        /* time to enter a new major frame
         * the first time this function is called, this will be true */
        /* start with the first domain in the schedule */
        sched_index = 0;
        sched_priv->next_major_frame = now + sched_priv->major_frame;
        next_switch_time = now + sched_priv->schedule[0].runtime;
    }
    else
    {
        while ( (now >= next_switch_time)
                && (sched_index < sched_priv->num_schedule_entries) )
        {
            /* time to switch to the next domain in this major frame */
            sched_index++;
            /*
             * When sched_index reaches num_schedule_entries this reads
             * one slot past the valid entries (still inside the
             * fixed-size array); the value is discarded by the fix-up
             * just below.
             */
            next_switch_time += sched_priv->schedule[sched_index].runtime;
        }
    }

    /*
     * If we exhausted the domains in the schedule and still have time left
     * in the major frame then switch next at the next major frame.
     */
    if ( sched_index >= sched_priv->num_schedule_entries )
        next_switch_time = sched_priv->next_major_frame;

    /*
     * If there are more domains to run in the current major frame, set
     * new_task equal to the address of next domain's VCPU structure.
     * Otherwise, set new_task equal to the address of the idle task's VCPU
     * structure.
     */
    new_task = (sched_index < sched_priv->num_schedule_entries)
        ? sched_priv->schedule[sched_index].vc
        : IDLETASK(cpu);

    /* Check to see if the new task can be run (awake & runnable). */
    if ( !((new_task != NULL)
           && (AVCPU(new_task) != NULL)
           && AVCPU(new_task)->awake
           && vcpu_runnable(new_task)) )
        new_task = IDLETASK(cpu);
    BUG_ON(new_task == NULL);

    /*
     * Check to make sure we did not miss a major frame.
     * This is a good test for robust partitioning.
     */
    BUG_ON(now >= sched_priv->next_major_frame);

    spin_unlock_irqrestore(&sched_priv->lock, flags);

    /* Tasklet work (which runs in idle VCPU context) overrides all else. */
    if ( tasklet_work_scheduled )
        new_task = IDLETASK(cpu);

    /* Running this task would result in a migration */
    if ( !is_idle_vcpu(new_task)
         && (new_task->processor != cpu) )
        new_task = IDLETASK(cpu);

    /*
     * Return the amount of time the next domain has to run and the address
     * of the selected task's VCPU structure.
     */
    ret.time = next_switch_time - now;
    ret.task = new_task;
    ret.migrated = 0;

    BUG_ON(ret.time <= 0);

    return ret;
}
623
624 /**
625 * Xen scheduler callback function to select a CPU for the VCPU to run on
626 *
627 * @param ops Pointer to this instance of the scheduler structure
628 * @param v Pointer to the VCPU structure for the current domain
629 *
630 * @return Number of selected physical CPU
631 */
632 static int
a653sched_pick_cpu(const struct scheduler * ops,struct vcpu * vc)633 a653sched_pick_cpu(const struct scheduler *ops, struct vcpu *vc)
634 {
635 cpumask_t *online;
636 unsigned int cpu;
637
638 /*
639 * If present, prefer vc's current processor, else
640 * just find the first valid vcpu .
641 */
642 online = cpupool_domain_cpumask(vc->domain);
643
644 cpu = cpumask_first(online);
645
646 if ( cpumask_test_cpu(vc->processor, online)
647 || (cpu >= nr_cpu_ids) )
648 cpu = vc->processor;
649
650 return cpu;
651 }
652
/**
 * Xen scheduler callback to change the scheduler of a cpu
 *
 * @param new_ops Pointer to this instance of the scheduler structure
 * @param cpu     The cpu that is changing scheduler
 * @param pdata   scheduler specific PCPU data (we don't have any)
 * @param vdata   scheduler specific VCPU data of the idle vcpu
 */
static void
a653_switch_sched(struct scheduler *new_ops, unsigned int cpu,
                  void *pdata, void *vdata)
{
    struct schedule_data *sd = &per_cpu(schedule_data, cpu);
    arinc653_vcpu_t *svc = vdata;

    /* This scheduler has no pdata; vdata must be the idle VCPU's. */
    ASSERT(!pdata && svc && is_idle_vcpu(svc->vc));

    /* Hand the idle VCPU its scheduler-private data first. */
    idle_vcpu[cpu]->sched_priv = vdata;

    per_cpu(scheduler, cpu) = new_ops;
    per_cpu(schedule_data, cpu).sched_priv = NULL; /* no pdata */

    /*
     * (Re?)route the lock to its default location. We actually do not use
     * it, but if we leave it pointing to where it does now (i.e., the
     * runqueue lock for this PCPU in the default scheduler), we'd be
     * causing unnecessary contention on that lock (in cases where it is
     * shared among multiple PCPUs, like in Credit2 and RTDS).
     */
    sd->schedule_lock = &sd->_lock;
}
684
685 /**
686 * Xen scheduler callback function to perform a global (not domain-specific)
687 * adjustment. It is used by the ARINC 653 scheduler to put in place a new
688 * ARINC 653 schedule or to retrieve the schedule currently in place.
689 *
690 * @param ops Pointer to this instance of the scheduler structure
691 * @param sc Pointer to the scheduler operation specified by Domain 0
692 */
693 static int
a653sched_adjust_global(const struct scheduler * ops,struct xen_sysctl_scheduler_op * sc)694 a653sched_adjust_global(const struct scheduler *ops,
695 struct xen_sysctl_scheduler_op *sc)
696 {
697 struct xen_sysctl_arinc653_schedule local_sched;
698 int rc = -EINVAL;
699
700 switch ( sc->cmd )
701 {
702 case XEN_SYSCTL_SCHEDOP_putinfo:
703 if ( copy_from_guest(&local_sched, sc->u.sched_arinc653.schedule, 1) )
704 {
705 rc = -EFAULT;
706 break;
707 }
708
709 rc = arinc653_sched_set(ops, &local_sched);
710 break;
711 case XEN_SYSCTL_SCHEDOP_getinfo:
712 memset(&local_sched, -1, sizeof(local_sched));
713 rc = arinc653_sched_get(ops, &local_sched);
714 if ( rc )
715 break;
716
717 if ( copy_to_guest(sc->u.sched_arinc653.schedule, &local_sched, 1) )
718 rc = -EFAULT;
719 break;
720 }
721
722 return rc;
723 }
724
/**
 * This structure defines our scheduler for Xen.
 * The entries tell Xen where to find our scheduler-specific
 * callback functions.
 * The symbol must be visible to the rest of Xen at link time.
 *
 * Hooks left NULL are simply not implemented by this scheduler.
 */
static const struct scheduler sched_arinc653_def = {
    .name           = "ARINC 653 Scheduler",
    .opt_name       = "arinc653",
    .sched_id       = XEN_SCHEDULER_ARINC653,
    .sched_data     = NULL,

    .init           = a653sched_init,
    .deinit         = a653sched_deinit,

    .free_vdata     = a653sched_free_vdata,
    .alloc_vdata    = a653sched_alloc_vdata,

    .free_domdata   = a653sched_free_domdata,
    .alloc_domdata  = a653sched_alloc_domdata,

    .init_domain    = NULL,
    .destroy_domain = NULL,

    .insert_vcpu    = NULL,
    .remove_vcpu    = NULL,

    .sleep          = a653sched_vcpu_sleep,
    .wake           = a653sched_vcpu_wake,
    .yield          = NULL,
    .context_saved  = NULL,

    .do_schedule    = a653sched_do_schedule,

    .pick_cpu       = a653sched_pick_cpu,

    .switch_sched   = a653_switch_sched,

    .adjust         = NULL,
    .adjust_global  = a653sched_adjust_global,

    .dump_settings  = NULL,
    .dump_cpu_state = NULL,

    .tick_suspend   = NULL,
    .tick_resume    = NULL,
};

/* Make the scheduler selectable at boot via its opt_name. */
REGISTER_SCHEDULER(sched_arinc653_def);
774
775 /*
776 * Local variables:
777 * mode: C
778 * c-file-style: "BSD"
779 * c-basic-offset: 4
780 * tab-width: 4
781 * indent-tabs-mode: nil
782 * End:
783 */
784