/******************************************************************************
 * vm_event.c
 *
 * VM event support.
 *
 * Copyright (c) 2009 Citrix Systems, Inc. (Patrick Colp)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; If not, see <http://www.gnu.org/licenses/>.
 */


#include <xen/sched.h>
#include <xen/event.h>
#include <xen/wait.h>
#include <xen/vm_event.h>
#include <xen/mem_access.h>
#include <asm/p2m.h>
#include <asm/monitor.h>
#include <asm/vm_event.h>

#ifdef CONFIG_MEM_SHARING
#include <asm/mem_sharing.h>
#endif

#include <xsm/xsm.h>
#include <public/hvm/params.h>

/* for public/io/ring.h macros */
#define xen_mb()  smp_mb()
#define xen_rmb() smp_rmb()
#define xen_wmb() smp_wmb()
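
/*
 * The rings below use the standard Xen shared-ring layout from
 * public/io/ring.h: Xen is the request producer and response consumer
 * (front end), while the out-of-guest agent that mapped the ring page is
 * the request consumer and response producer.  Rough lifecycle, as a
 * summary of the functions in this file:
 *
 *   vm_event_enable()        map the ring page, FRONT_RING_INIT(), and
 *                            allocate an unbound event channel
 *   vm_event_put_request()   RING_GET_REQUEST() + RING_PUSH_REQUESTS()
 *   vm_event_get_response()  consume responses pushed by the agent
 *   vm_event_disable()       tear down the ring and the event channel
 */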

static int vm_event_enable(
    struct domain *d,
    struct xen_domctl_vm_event_op *vec,
    struct vm_event_domain **p_ved,
    int pause_flag,
    int param,
    xen_event_channel_notification_t notification_fn)
{
    int rc;
    unsigned long ring_gfn = d->arch.hvm.params[param];
    struct vm_event_domain *ved;

    /*
     * Only one connected agent at a time. If the helper crashed, the ring is
     * in an undefined state, and the guest is most likely unrecoverable.
     */
    if ( *p_ved != NULL )
        return -EBUSY;

    /* No chosen ring GFN? Nothing we can do. */
    if ( ring_gfn == 0 )
        return -EOPNOTSUPP;

    ved = xzalloc(struct vm_event_domain);
    if ( !ved )
        return -ENOMEM;

    /* Trivial setup. */
    spin_lock_init(&ved->lock);
    init_waitqueue_head(&ved->wq);
    ved->pause_flag = pause_flag;

    rc = vm_event_init_domain(d);
    if ( rc < 0 )
        goto err;

    rc = prepare_ring_for_helper(d, ring_gfn, &ved->ring_pg_struct,
                                 &ved->ring_page);
    if ( rc < 0 )
        goto err;

    FRONT_RING_INIT(&ved->front_ring,
                    (vm_event_sring_t *)ved->ring_page,
                    PAGE_SIZE);

    rc = alloc_unbound_xen_event_channel(d, 0, current->domain->domain_id,
                                         notification_fn);
    if ( rc < 0 )
        goto err;

    ved->xen_port = vec->u.enable.port = rc;

    /* Success. Fill in the domain's appropriate ved. */
    *p_ved = ved;

    return 0;

 err:
    destroy_ring_for_helper(&ved->ring_page, ved->ring_pg_struct);
    xfree(ved);

    return rc;
}

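/*
 * Number of request slots still available to new producers: free ring
 * entries minus reservations already handed out by vm_event_grab_slot()
 * and not yet consumed by vm_event_put_request() or returned via
 * vm_event_cancel_slot().
 */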
static unsigned int vm_event_ring_available(struct vm_event_domain *ved)
{
    int avail_req = RING_FREE_REQUESTS(&ved->front_ring);

    avail_req -= ved->target_producers;
    avail_req -= ved->foreign_producers;

    BUG_ON(avail_req < 0);

    return avail_req;
}

/*
 * vm_event_wake_blocked() will wake up vCPUs waiting for room in the
 * ring. These vCPUs were paused on their way out after placing an event,
 * but need to be resumed once the ring is capable of processing at least
 * one event from them.
 */
static void vm_event_wake_blocked(struct domain *d, struct vm_event_domain *ved)
{
    struct vcpu *v;
    unsigned int i, j, k, avail_req = vm_event_ring_available(ved);

    if ( avail_req == 0 || ved->blocked == 0 )
        return;

    /*
     * We remember which vCPU last woke up to avoid always scanning linearly
     * from zero and starving higher-numbered vCPUs under high load.
     */
    for ( i = ved->last_vcpu_wake_up + 1, j = 0; j < d->max_vcpus; i++, j++ )
    {
        k = i % d->max_vcpus;
        v = d->vcpu[k];
        if ( !v )
            continue;

        if ( !ved->blocked || avail_req == 0 )
            break;

        if ( test_and_clear_bit(ved->pause_flag, &v->pause_flags) )
        {
            vcpu_unpause(v);
            avail_req--;
            ved->blocked--;
            ved->last_vcpu_wake_up = k;
        }
    }
}

/*
 * In the event that a vCPU attempted to place an event in the ring and
 * was unable to do so, it is queued on a wait queue. These are woken as
 * needed, and take precedence over the blocked vCPUs.
 */
static void vm_event_wake_queued(struct domain *d, struct vm_event_domain *ved)
{
    unsigned int avail_req = vm_event_ring_available(ved);

    if ( avail_req > 0 )
        wake_up_nr(&ved->wq, avail_req);
}

/*
 * vm_event_wake() will wake up all vCPUs waiting for the ring to
 * become available. If we have queued vCPUs, they get top priority. We
 * are guaranteed that they will go through code paths that will eventually
 * call vm_event_wake() again, ensuring that any blocked vCPUs will get
 * unpaused once all the queued vCPUs have made it through.
 */
static void vm_event_wake(struct domain *d, struct vm_event_domain *ved)
{
    if ( !list_empty(&ved->wq.list) )
        vm_event_wake_queued(d, ved);
    else
        vm_event_wake_blocked(d, ved);
}

static int vm_event_disable(struct domain *d, struct vm_event_domain **p_ved)
{
    struct vm_event_domain *ved = *p_ved;

    if ( vm_event_check_ring(ved) )
    {
        struct vcpu *v;

        spin_lock(&ved->lock);

        if ( !list_empty(&ved->wq.list) )
        {
            spin_unlock(&ved->lock);
            return -EBUSY;
        }

        /* Free domU's event channel and leave the other one unbound */
        free_xen_event_channel(d, ved->xen_port);

        /* Unblock all vCPUs */
        for_each_vcpu ( d, v )
        {
            if ( test_and_clear_bit(ved->pause_flag, &v->pause_flags) )
            {
                vcpu_unpause(v);
                ved->blocked--;
            }
        }

        destroy_ring_for_helper(&ved->ring_page, ved->ring_pg_struct);

        vm_event_cleanup_domain(d);

        spin_unlock(&ved->lock);
    }

    xfree(ved);
    *p_ved = NULL;

    return 0;
}

static void vm_event_release_slot(struct domain *d,
                                  struct vm_event_domain *ved)
{
    /* Update the accounting */
    if ( current->domain == d )
        ved->target_producers--;
    else
        ved->foreign_producers--;

    /* Kick any waiters */
    vm_event_wake(d, ved);
}

/*
 * vm_event_mark_and_pause() tags the vCPU and puts it to sleep.
 * The vCPU will resume execution in vm_event_wake_blocked().
 */
static void vm_event_mark_and_pause(struct vcpu *v, struct vm_event_domain *ved)
{
    if ( !test_and_set_bit(ved->pause_flag, &v->pause_flags) )
    {
        vcpu_pause_nosync(v);
        ved->blocked++;
    }
}

/*
 * This must be preceded by a call to claim_slot(), and is guaranteed to
 * succeed. As a side-effect however, the vCPU may be paused if the ring is
 * overly full and its continued execution would cause stalling and excessive
 * waiting. The vCPU will be automatically unpaused when the ring clears.
 */
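/*
 * Illustrative caller sequence (a sketch only, not lifted from a specific
 * event source; vm_event_claim_slot() is assumed to be the xen/vm_event.h
 * wrapper around __vm_event_claim_slot()):
 *
 *     if ( vm_event_claim_slot(d, d->vm_event_monitor) == 0 )
 *     {
 *         vm_event_request_t req = {
 *             .reason  = VM_EVENT_REASON_MEM_ACCESS,
 *             .vcpu_id = v->vcpu_id,
 *         };
 *
 *         vm_event_put_request(d, d->vm_event_monitor, &req);
 *     }
 *
 * A caller that claims a slot but then sends nothing must release it with
 * vm_event_cancel_slot() instead.
 */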
void vm_event_put_request(struct domain *d,
                          struct vm_event_domain *ved,
                          vm_event_request_t *req)
{
    vm_event_front_ring_t *front_ring;
    int free_req;
    unsigned int avail_req;
    RING_IDX req_prod;
    struct vcpu *curr = current;

    if ( !vm_event_check_ring(ved) )
        return;

    if ( curr->domain != d )
    {
        req->flags |= VM_EVENT_FLAG_FOREIGN;

        if ( !(req->flags & VM_EVENT_FLAG_VCPU_PAUSED) )
            gdprintk(XENLOG_WARNING, "d%dv%d was not paused.\n",
                     d->domain_id, req->vcpu_id);
    }

    req->version = VM_EVENT_INTERFACE_VERSION;

    spin_lock(&ved->lock);

    /* Due to the reservations, this step must succeed. */
    front_ring = &ved->front_ring;
    free_req = RING_FREE_REQUESTS(front_ring);
    ASSERT(free_req > 0);

    /* Copy request */
    req_prod = front_ring->req_prod_pvt;
    memcpy(RING_GET_REQUEST(front_ring, req_prod), req, sizeof(*req));
    req_prod++;

    /* Update ring */
    front_ring->req_prod_pvt = req_prod;
    RING_PUSH_REQUESTS(front_ring);

    /* We've actually *used* our reservation, so release the slot. */
    vm_event_release_slot(d, ved);

    /*
     * Give this vCPU a black eye if necessary, on the way out.  See the
     * comments above vm_event_wake_blocked() for more information on how
     * this mechanism works to avoid waiting.
     */
    avail_req = vm_event_ring_available(ved);
    if ( curr->domain == d && avail_req < d->max_vcpus &&
         !atomic_read(&curr->vm_event_pause_count) )
        vm_event_mark_and_pause(curr, ved);

    spin_unlock(&ved->lock);

    notify_via_xen_event_channel(d, ved->xen_port);
}

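/*
 * Pull a single response off the ring, if one is available.  Returns 1 when
 * a response was copied into *rsp and 0 when the ring was empty; consuming
 * an entry also wakes any vCPUs waiting for ring space.
 */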
static int vm_event_get_response(struct domain *d, struct vm_event_domain *ved,
                                 vm_event_response_t *rsp)
{
    vm_event_front_ring_t *front_ring;
    RING_IDX rsp_cons;
    int rc = 0;

    spin_lock(&ved->lock);

    front_ring = &ved->front_ring;
    rsp_cons = front_ring->rsp_cons;

    if ( !RING_HAS_UNCONSUMED_RESPONSES(front_ring) )
        goto out;

    /* Copy response */
    memcpy(rsp, RING_GET_RESPONSE(front_ring, rsp_cons), sizeof(*rsp));
    rsp_cons++;

    /* Update ring */
    front_ring->rsp_cons = rsp_cons;
    front_ring->sring->rsp_event = rsp_cons + 1;

    /*
     * Kick any waiters -- since we've just consumed an event, there may be
     * additional space available in the ring.
     */
    vm_event_wake(d, ved);

    rc = 1;

 out:
    spin_unlock(&ved->lock);

    return rc;
}

/*
 * Pull all responses from the given ring and unpause the corresponding vCPU
 * if required. Based on the response type, here we can also call custom
 * handlers.
 *
 * Note: responses are handled the same way regardless of which ring they
 * arrive on.
 */
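/*
 * Illustrative shape of a response as a user-space consumer might fill it
 * in before kicking the event channel (a sketch only; see public/vm_event.h
 * for the full structure):
 *
 *     vm_event_response_t rsp = {
 *         .version = VM_EVENT_INTERFACE_VERSION,
 *         .vcpu_id = req->vcpu_id,
 *         .reason  = req->reason,
 *         .flags   = req->flags & VM_EVENT_FLAG_VCPU_PAUSED,
 *     };
 */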
static int vm_event_resume(struct domain *d, struct vm_event_domain *ved)
{
    vm_event_response_t rsp;

    /*
     * vm_event_resume() runs in either XEN_DOMCTL_VM_EVENT_OP_*, or
     * EVTCHN_send context from the introspection consumer. Both contexts
     * are guaranteed not to be the subject of vm_event responses.
     * While we could ASSERT(v != current) for each VCPU in d in the loop
     * below, this covers the case where we would need to iterate over all
     * of them more succinctly.
     */
    ASSERT(d != current->domain);

    if ( unlikely(!vm_event_check_ring(ved)) )
        return -ENODEV;

    /* Pull all responses off the ring. */
    while ( vm_event_get_response(d, ved, &rsp) )
    {
        struct vcpu *v;

        if ( rsp.version != VM_EVENT_INTERFACE_VERSION )
        {
            printk(XENLOG_G_WARNING "vm_event interface version mismatch\n");
            continue;
        }

        /* Validate the vcpu_id in the response. */
        v = domain_vcpu(d, rsp.vcpu_id);
        if ( !v )
            continue;

        /*
         * In some cases the response type needs extra handling, so here
         * we call the appropriate handlers.
         */

        /* Check flags which apply only when the vCPU is paused */
        if ( atomic_read(&v->vm_event_pause_count) )
        {
#ifdef CONFIG_MEM_PAGING
            if ( rsp.reason == VM_EVENT_REASON_MEM_PAGING )
                p2m_mem_paging_resume(d, &rsp);
#endif
#ifdef CONFIG_MEM_SHARING
            if ( mem_sharing_is_fork(d) )
            {
                bool reset_state = rsp.flags & VM_EVENT_FLAG_RESET_FORK_STATE;
                bool reset_mem = rsp.flags & VM_EVENT_FLAG_RESET_FORK_MEMORY;

                if ( (reset_state || reset_mem) &&
                     mem_sharing_fork_reset(d, reset_state, reset_mem) )
                    ASSERT_UNREACHABLE();
            }
#endif

            /*
             * Check emulation flags in the arch-specific handler only, as it
             * has to set arch-specific flags when supported, and to avoid
             * bitmask overhead when it isn't supported.
             */
            vm_event_emulate_check(v, &rsp);

            /*
             * Check in arch-specific handler to avoid bitmask overhead when
             * not supported.
             */
            vm_event_register_write_resume(v, &rsp);

            /*
             * Check in arch-specific handler to avoid bitmask overhead when
             * not supported.
             */
            vm_event_toggle_singlestep(d, v, &rsp);

            /* Check for altp2m switch */
            if ( rsp.flags & VM_EVENT_FLAG_ALTERNATE_P2M )
                p2m_altp2m_check(v, rsp.altp2m_idx);

            if ( rsp.flags & VM_EVENT_FLAG_SET_REGISTERS )
                vm_event_set_registers(v, &rsp);

            if ( rsp.flags & VM_EVENT_FLAG_GET_NEXT_INTERRUPT )
                vm_event_monitor_next_interrupt(v);

            if ( rsp.flags & VM_EVENT_FLAG_RESET_VMTRACE )
                vm_event_reset_vmtrace(v);

            if ( rsp.flags & VM_EVENT_FLAG_VCPU_PAUSED )
                vm_event_vcpu_unpause(v);
        }
    }

    return 0;
}

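/*
 * Give back a slot that was reserved via claim_slot() but will not be used,
 * and wake anyone waiting for ring space.
 */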
void vm_event_cancel_slot(struct domain *d, struct vm_event_domain *ved)
{
    if ( !vm_event_check_ring(ved) )
        return;

    spin_lock(&ved->lock);
    vm_event_release_slot(d, ved);
    spin_unlock(&ved->lock);
}

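/*
 * Reserve a request slot by bumping the relevant producer count under the
 * ring lock.  The reservation is later consumed by vm_event_put_request()
 * or returned via vm_event_cancel_slot().
 */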
static int vm_event_grab_slot(struct vm_event_domain *ved, int foreign)
{
    unsigned int avail_req;
    int rc;

    if ( !ved->ring_page )
        return -EOPNOTSUPP;

    spin_lock(&ved->lock);

    avail_req = vm_event_ring_available(ved);

    rc = -EBUSY;
    if ( avail_req == 0 )
        goto out;

    if ( !foreign )
        ved->target_producers++;
    else
        ved->foreign_producers++;

    rc = 0;

 out:
    spin_unlock(&ved->lock);

    return rc;
}

/* Simple try_grab wrapper for use in the wait_event() macro. */
static int vm_event_wait_try_grab(struct vm_event_domain *ved, int *rc)
{
    *rc = vm_event_grab_slot(ved, 0);

    return *rc;
}

/* Call vm_event_grab_slot() until a slot is available or the ring goes away. */
static int vm_event_wait_slot(struct vm_event_domain *ved)
{
    int rc = -EBUSY;

    wait_event(ved->wq, vm_event_wait_try_grab(ved, &rc) != -EBUSY);

    return rc;
}

bool vm_event_check_ring(struct vm_event_domain *ved)
{
    return ved && ved->ring_page;
}

/*
 * Determines whether or not the current vCPU belongs to the target domain,
 * and calls the appropriate wait function. If it is a guest vCPU and sleeping
 * is allowed, then we use vm_event_wait_slot() to reserve a slot. As long as
 * there is a ring, this function will always return 0 for a guest. For a
 * non-guest, we check for space and return -EBUSY if the ring is not
 * available.
 *
 * Return codes: -EOPNOTSUPP: the ring is not yet configured
 *               -EBUSY: the ring is busy
 *               0: a spot has been reserved
 */
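/*
 * Callers normally use the vm_event_claim_slot() and
 * vm_event_claim_slot_nosleep() wrappers (assumed to be provided by
 * xen/vm_event.h), which fix allow_sleep to true and false respectively.
 */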
int __vm_event_claim_slot(struct domain *d, struct vm_event_domain *ved,
                          bool allow_sleep)
{
    if ( !vm_event_check_ring(ved) )
        return -EOPNOTSUPP;

    if ( (current->domain == d) && allow_sleep )
        return vm_event_wait_slot(ved);
    else
        return vm_event_grab_slot(ved, current->domain != d);
}

#ifdef CONFIG_MEM_PAGING
/* Registered with Xen-bound event channel for incoming notifications. */
static void cf_check mem_paging_notification(struct vcpu *v, unsigned int port)
{
    vm_event_resume(v->domain, v->domain->vm_event_paging);
}
#endif

/* Registered with Xen-bound event channel for incoming notifications. */
static void cf_check monitor_notification(struct vcpu *v, unsigned int port)
{
    vm_event_resume(v->domain, v->domain->vm_event_monitor);
}

#ifdef CONFIG_MEM_SHARING
/* Registered with Xen-bound event channel for incoming notifications. */
static void cf_check mem_sharing_notification(struct vcpu *v, unsigned int port)
{
    vm_event_resume(v->domain, v->domain->vm_event_share);
}
#endif

/* Clean up on domain destruction */
void vm_event_cleanup(struct domain *d)
{
#ifdef CONFIG_MEM_PAGING
    if ( vm_event_check_ring(d->vm_event_paging) )
    {
        /*
         * Destroying the wait queue head means waking up all
         * queued vcpus. This will drain the list, allowing
         * the disable routine to complete. It will also drop
         * all domain refs the wait-queued vcpus are holding.
         * Finally, because this code path involves previously
         * pausing the domain (domain_kill), unpausing the
         * vcpus causes no harm.
         */
        destroy_waitqueue_head(&d->vm_event_paging->wq);
        (void)vm_event_disable(d, &d->vm_event_paging);
    }
#endif
    if ( vm_event_check_ring(d->vm_event_monitor) )
    {
        destroy_waitqueue_head(&d->vm_event_monitor->wq);
        (void)vm_event_disable(d, &d->vm_event_monitor);
    }
#ifdef CONFIG_MEM_SHARING
    if ( vm_event_check_ring(d->vm_event_share) )
    {
        destroy_waitqueue_head(&d->vm_event_share->wq);
        (void)vm_event_disable(d, &d->vm_event_share);
    }
#endif
}

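/*
 * Top-level handler for XEN_DOMCTL_vm_event_op: dispatches enable, disable
 * and resume requests for the paging, monitor and sharing rings on behalf
 * of the toolstack or introspection agent.
 */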
int vm_event_domctl(struct domain *d, struct xen_domctl_vm_event_op *vec)
{
    int rc;

    if ( vec->op == XEN_VM_EVENT_GET_VERSION )
    {
        vec->u.version = VM_EVENT_INTERFACE_VERSION;
        return 0;
    }

    /* All other subops need to target a real domain. */
    if ( unlikely(d == NULL) )
        return -ESRCH;

    rc = xsm_vm_event_control(XSM_PRIV, d, vec->mode, vec->op);
    if ( rc )
        return rc;

    if ( unlikely(d == current->domain) ) /* no domain_pause() */
    {
        gdprintk(XENLOG_INFO, "Tried to do a memory event op on itself.\n");
        return -EINVAL;
    }

    if ( unlikely(d->is_dying) )
    {
        gdprintk(XENLOG_INFO, "Ignoring memory event op on dying domain %u\n",
                 d->domain_id);
        return 0;
    }

    if ( unlikely(d->vcpu == NULL) || unlikely(d->vcpu[0] == NULL) )
    {
        gdprintk(XENLOG_INFO,
                 "Memory event op on a domain (%u) with no vcpus\n",
                 d->domain_id);
        return -EINVAL;
    }

    rc = -ENOSYS;

    switch ( vec->mode )
    {
#ifdef CONFIG_MEM_PAGING
    case XEN_DOMCTL_VM_EVENT_OP_PAGING:
    {
        rc = -EINVAL;

        switch ( vec->op )
        {
        case XEN_VM_EVENT_ENABLE:
        {
            rc = -EOPNOTSUPP;
            /* hvm fixme: p2m_is_foreign types need addressing */
            if ( is_hvm_domain(hardware_domain) )
                break;

            rc = -ENODEV;
            /* Only HAP is supported */
            if ( !hap_enabled(d) )
                break;

            /* No paging if iommu is used */
            rc = -EMLINK;
            if ( unlikely(is_iommu_enabled(d)) )
                break;

            rc = -EXDEV;
            /* Disallow paging in a PoD guest */
            if ( p2m_pod_active(d) )
                break;

            /* domain_pause() not required here, see XSA-99 */
            rc = vm_event_enable(d, vec, &d->vm_event_paging, _VPF_mem_paging,
                                 HVM_PARAM_PAGING_RING_PFN,
                                 mem_paging_notification);
        }
        break;

        case XEN_VM_EVENT_DISABLE:
            if ( vm_event_check_ring(d->vm_event_paging) )
            {
                domain_pause(d);
                rc = vm_event_disable(d, &d->vm_event_paging);
                domain_unpause(d);
            }
            break;

        case XEN_VM_EVENT_RESUME:
            rc = vm_event_resume(d, d->vm_event_paging);
            break;

        default:
            rc = -ENOSYS;
            break;
        }
    }
    break;
#endif

    case XEN_DOMCTL_VM_EVENT_OP_MONITOR:
    {
        rc = -EINVAL;

        switch ( vec->op )
        {
        case XEN_VM_EVENT_ENABLE:
            /* domain_pause() not required here, see XSA-99 */
            rc = arch_monitor_init_domain(d);
            if ( rc )
                break;
            rc = vm_event_enable(d, vec, &d->vm_event_monitor, _VPF_mem_access,
                                 HVM_PARAM_MONITOR_RING_PFN,
                                 monitor_notification);
            break;

        case XEN_VM_EVENT_DISABLE:
            if ( vm_event_check_ring(d->vm_event_monitor) )
            {
                domain_pause(d);
                rc = vm_event_disable(d, &d->vm_event_monitor);
                arch_monitor_cleanup_domain(d);
                domain_unpause(d);
            }
            break;

        case XEN_VM_EVENT_RESUME:
            rc = vm_event_resume(d, d->vm_event_monitor);
            break;

        default:
            rc = -ENOSYS;
            break;
        }
    }
    break;

#ifdef CONFIG_MEM_SHARING
    case XEN_DOMCTL_VM_EVENT_OP_SHARING:
    {
        rc = -EINVAL;

        switch ( vec->op )
        {
        case XEN_VM_EVENT_ENABLE:
            rc = -EOPNOTSUPP;
            /* hvm fixme: p2m_is_foreign types need addressing */
            if ( is_hvm_domain(hardware_domain) )
                break;

            rc = -ENODEV;
            /* Only HAP is supported */
            if ( !hap_enabled(d) )
                break;

            /* domain_pause() not required here, see XSA-99 */
            rc = vm_event_enable(d, vec, &d->vm_event_share, _VPF_mem_sharing,
                                 HVM_PARAM_SHARING_RING_PFN,
                                 mem_sharing_notification);
            break;

        case XEN_VM_EVENT_DISABLE:
            if ( vm_event_check_ring(d->vm_event_share) )
            {
                domain_pause(d);
                rc = vm_event_disable(d, &d->vm_event_share);
                domain_unpause(d);
            }
            break;

        case XEN_VM_EVENT_RESUME:
            rc = vm_event_resume(d, d->vm_event_share);
            break;

        default:
            rc = -ENOSYS;
            break;
        }
    }
    break;
#endif

    default:
        rc = -ENOSYS;
        break;
    }

    return rc;
}

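/*
 * Pause the calling vCPU while a vm_event request for it is outstanding.
 * The matching unpause arrives via a response carrying
 * VM_EVENT_FLAG_VCPU_PAUSED, handled in vm_event_resume().
 */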
void vm_event_vcpu_pause(struct vcpu *v)
{
    ASSERT(v == current);

    atomic_inc(&v->vm_event_pause_count);
    vcpu_pause_nosync(v);
}

void vm_event_vcpu_unpause(struct vcpu *v)
{
    int old, new, prev = v->vm_event_pause_count.counter;

    /*
     * All unpause requests come as a result of toolstack responses.
     * Prevent underflow of the vcpu pause count.
     */
    do
    {
        old = prev;
        new = old - 1;

        if ( new < 0 )
        {
            printk(XENLOG_G_WARNING
                   "%pv vm_event: Too many unpause attempts\n", v);
            return;
        }

        prev = cmpxchg(&v->vm_event_pause_count.counter, old, new);
    } while ( prev != old );

    vcpu_unpause(v);
}

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * indent-tabs-mode: nil
 * End:
 */