Lines matching refs: h
1428 void init_hvm_data(struct hvm_data *h, struct vcpu_data *v) { in init_hvm_data() argument
1431 if(h->init) in init_hvm_data()
1434 h->v = v; in init_hvm_data()
1436 h->init = 1; in init_hvm_data()
1439 h->exit_reason_max = HVM_SVM_EXIT_REASON_MAX; in init_hvm_data()
1440 h->exit_reason_name = hvm_svm_exit_reason_name; in init_hvm_data()
1442 h->exit_reason_max = HVM_VMX_EXIT_REASON_MAX; in init_hvm_data()
1443 h->exit_reason_name = hvm_vmx_exit_reason_name; in init_hvm_data()
1449 h->summary.extint_histogram = malloc(size); in init_hvm_data()
1450 if(h->summary.extint_histogram) in init_hvm_data()
1451 bzero(h->summary.extint_histogram, size); in init_hvm_data()
1460 h->summary.guest_interrupt[i].count=0; in init_hvm_data()
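The init_hvm_data() lines above show a lazy, once-only initialization keyed on h->init: on the first VMEXIT the vcpu pointer is stored, the SVM or VMX exit-reason name table is selected, and the external-interrupt histogram is allocated and zeroed. A minimal sketch of that pattern (this listing appears to come from Xen's xenalyze; all my_* names below are hypothetical stand-ins, and calloc() stands in for the malloc()+bzero() pair at 1449-1451):

    #include <stdlib.h>

    /* Hypothetical stand-ins for the real exit-reason tables. */
    #define MY_SVM_EXIT_MAX 2
    #define MY_VMX_EXIT_MAX 3
    static const char *my_svm_names[MY_SVM_EXIT_MAX] = { "NPF", "VMMCALL" };
    static const char *my_vmx_names[MY_VMX_EXIT_MAX] = { "EXC_NMI", "EXT_INT", "CPUID" };

    struct my_hvm_data {
        int init;
        int exit_reason_max;
        const char **exit_reason_name;
        unsigned long long *extint_histogram;
    };

    /* Initialize once, on the first VMEXIT seen for this vcpu. */
    static void my_init_hvm_data(struct my_hvm_data *h, int is_svm)
    {
        if (h->init)
            return;
        h->init = 1;
        if (is_svm) {
            h->exit_reason_max = MY_SVM_EXIT_MAX;
            h->exit_reason_name = my_svm_names;
        } else {
            h->exit_reason_max = MY_VMX_EXIT_MAX;
            h->exit_reason_name = my_vmx_names;
        }
        /* calloc() gives the zeroed histogram the listing builds by hand. */
        h->extint_histogram = calloc(256, sizeof(*h->extint_histogram));
    }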
2663 void interval_domain_guest_interrupt(struct hvm_data *h, int vector) { in interval_domain_guest_interrupt() argument
2664 struct domain_data *d = h->v->d; in interval_domain_guest_interrupt()
2929 void hvm_update_short_summary(struct hvm_data *h, int element) { in hvm_update_short_summary() argument
2930 struct vcpu_data *v = h->v; in hvm_update_short_summary()
2933 update_cycles(&v->cr3.data->hvm.s[element], h->arc_cycles); in hvm_update_short_summary()
2935 update_cycles(&v->d->hvm_short.s[element], h->arc_cycles); in hvm_update_short_summary()
2937 h->short_summary_done=1; in hvm_update_short_summary()
2965 int __hvm_set_summary_handler(struct hvm_data *h, void (*s)(struct hvm_data *h, void*d), void*d) { in __hvm_set_summary_handler() argument
2967 if(h->exit_reason < h->exit_reason_max) in __hvm_set_summary_handler()
2973 q=&h->exit_reason_summary_handler_list[h->exit_reason]; in __hvm_set_summary_handler()
3005 void hvm_generic_postprocess(struct hvm_data *h);
3007 static int hvm_set_postprocess(struct hvm_data *h, void (*s)(struct hvm_data *h)) in hvm_set_postprocess() argument
3009 if ( h->post_process == NULL in hvm_set_postprocess()
3010 || h->post_process == hvm_generic_postprocess ) in hvm_set_postprocess()
3012 h->post_process = s; in hvm_set_postprocess()
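hvm_set_postprocess() at 3007-3012 installs a per-exit postprocess hook only when none is set or when only the generic fallback is in place, so the first specific handler wins and later records cannot clobber it. A sketch of that guard, with hypothetical my_* names:

    struct my_hvm;
    typedef void (*my_postprocess_fn)(struct my_hvm *);

    struct my_hvm {
        my_postprocess_fn post_process;
    };

    static void my_generic_postprocess(struct my_hvm *h) { (void)h; }

    /* Install fn unless a specific handler already won; the generic
     * fallback counts as "unset". Returns nonzero on refusal, which is
     * why callers in the listing warn when this is true. */
    static int my_set_postprocess(struct my_hvm *h, my_postprocess_fn fn)
    {
        if (h->post_process == NULL
            || h->post_process == my_generic_postprocess) {
            h->post_process = fn;
            return 0;
        }
        return 1;
    }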
3029 void hvm_pf_xen_summary(struct hvm_data *h, void *d) { in hvm_pf_xen_summary() argument
3037 PRINT_SUMMARY(h->summary.pf_xen[i], in hvm_pf_xen_summary()
3042 PRINT_SUMMARY(h->summary.pf_xen[i], in hvm_pf_xen_summary()
3048 PRINT_SUMMARY(h->summary.pf_xen_non_emul[j], in hvm_pf_xen_summary()
3053 PRINT_SUMMARY(h->summary.pf_xen_emul[j], in hvm_pf_xen_summary()
3058 PRINT_SUMMARY(h->summary.pf_xen_emul_early_unshadow[k], in hvm_pf_xen_summary()
3066 PRINT_SUMMARY(h->summary.pf_xen_fixup[j], in hvm_pf_xen_summary()
3070 PRINT_SUMMARY(h->summary.pf_xen_fixup_unsync_resync[k], in hvm_pf_xen_summary()
3073 PRINT_SUMMARY(h->summary.pf_xen_fixup_unsync_resync[k], in hvm_pf_xen_summary()
3229 void hvm_pf_xen_preprocess(unsigned event, struct hvm_data *h) { in hvm_pf_xen_preprocess() argument
3230 struct pf_xen_extra *e = &h->inflight.pf_xen; in hvm_pf_xen_preprocess()
3231 struct mmio_info *m = &h->inflight.mmio; in hvm_pf_xen_preprocess()
3232 struct hvm_pf_xen_record *r = (typeof(r))h->d; in hvm_pf_xen_preprocess()
3252 pf_preprocess(e, h->v->guest_paging_levels); in hvm_pf_xen_preprocess()
3282 void hvm_pf_xen_postprocess(struct hvm_data *h) { in hvm_pf_xen_postprocess() argument
3283 struct pf_xen_extra *e = &h->inflight.pf_xen; in hvm_pf_xen_postprocess()
3287 update_cycles(&h->summary.pf_xen[e->pf_case], in hvm_pf_xen_postprocess()
3288 h->arc_cycles); in hvm_pf_xen_postprocess()
3294 update_eip(&h->v->d->emulate_eip_list, in hvm_pf_xen_postprocess()
3295 h->rip, in hvm_pf_xen_postprocess()
3296 h->arc_cycles, in hvm_pf_xen_postprocess()
3300 if(is_kernel(h->v->guest_paging_levels, h->rip)) in hvm_pf_xen_postprocess()
3301 update_cycles(&h->summary.pf_xen_non_emul[PF_XEN_NON_EMUL_EIP_KERNEL], in hvm_pf_xen_postprocess()
3302 h->arc_cycles); in hvm_pf_xen_postprocess()
3304 update_cycles(&h->summary.pf_xen_non_emul[PF_XEN_NON_EMUL_EIP_USER], in hvm_pf_xen_postprocess()
3305 h->arc_cycles); in hvm_pf_xen_postprocess()
3306 if(is_kernel(h->v->guest_paging_levels, e->va)) in hvm_pf_xen_postprocess()
3307 update_cycles(&h->summary.pf_xen_non_emul[PF_XEN_NON_EMUL_VA_KERNEL], in hvm_pf_xen_postprocess()
3308 h->arc_cycles); in hvm_pf_xen_postprocess()
3311 update_cycles(&h->summary.pf_xen_non_emul[PF_XEN_NON_EMUL_VA_USER], in hvm_pf_xen_postprocess()
3312 h->arc_cycles); in hvm_pf_xen_postprocess()
3316 hvm_set_summary_handler(h, hvm_pf_xen_summary, NULL); in hvm_pf_xen_postprocess()
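The postprocess lines above bucket non-emulated page faults four ways, by whether the faulting RIP and the faulting VA each lie in kernel or user space. A sketch of an is_kernel()-style test, assuming a conventional high/low address-space split; the real cutoffs are keyed off guest_paging_levels and may differ:

    #include <stdio.h>

    /* Assumption: top half of the canonical 64-bit space is kernel,
     * with a classic 3G/1G split for narrower guests. */
    static int my_is_kernel(int paging_levels, unsigned long long addr)
    {
        unsigned long long split = (paging_levels == 4)
            ? 0xffff800000000000ULL
            : 0xc0000000ULL;
        return addr >= split;
    }

    int main(void)
    {
        printf("%d\n", my_is_kernel(4, 0xffffffff81000000ULL)); /* 1: kernel */
        printf("%d\n", my_is_kernel(4, 0x00007f0012340000ULL)); /* 0: user   */
        return 0;
    }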
3320 void hvm_pf_xen_process(struct record_info *ri, struct hvm_data *h) { in hvm_pf_xen_process() argument
3321 struct pf_xen_extra *e = &h->inflight.pf_xen; in hvm_pf_xen_process()
3324 && h->v->guest_paging_levels != 4) in hvm_pf_xen_process()
3326 h->v->guest_paging_levels); in hvm_pf_xen_process()
3328 && h->v->guest_paging_levels == 4) in hvm_pf_xen_process()
3330 h->v->guest_paging_levels); in hvm_pf_xen_process()
3332 hvm_pf_xen_preprocess(ri->event, h); in hvm_pf_xen_process()
3351 if ( hvm_set_postprocess(h, hvm_pf_xen_postprocess) ) in hvm_pf_xen_process()
3392 update_cycles(&h->summary.ipi_latency, lat); in hvm_vlapic_vmentry_cleanup()
3393 h->summary.ipi_count[vla->outstanding_ipis]++; in hvm_vlapic_vmentry_cleanup()
3474 void hvm_vlapic_icr_handler(struct hvm_data *h) in hvm_vlapic_icr_handler() argument
3476 struct mmio_info *m = &h->inflight.mmio; in hvm_vlapic_icr_handler()
3495 h->v->d->did, h->v->vid, in hvm_vlapic_icr_handler()
3502 struct vcpu_data *ov, *v = h->v; in hvm_vlapic_icr_handler()
3526 h->v->d->did, h->v->vid, in hvm_vlapic_icr_handler()
3551 void hvm_vlapic_eoi_handler(struct hvm_data *h) { in hvm_vlapic_eoi_handler() argument
3554 h->v->d->did, h->v->vid); in hvm_vlapic_eoi_handler()
3557 void hvm_vlapic_handler(struct hvm_data *h) in hvm_vlapic_handler() argument
3559 struct mmio_info *m = &h->inflight.mmio; in hvm_vlapic_handler()
3563 hvm_vlapic_icr_handler(h); in hvm_vlapic_handler()
3566 hvm_vlapic_eoi_handler(h); in hvm_vlapic_handler()
3575 void enumerate_mmio(struct hvm_data *h) in enumerate_mmio() argument
3577 struct mmio_info *m = &h->inflight.mmio; in enumerate_mmio()
3589 update_io_address(&h->summary.io.mmio, m->gpa, m->is_write, h->arc_cycles, m->va); in enumerate_mmio()
3592 void hvm_mmio_summary(struct hvm_data *h, void *data) in hvm_mmio_summary() argument
3596 PRINT_SUMMARY(h->summary.mmio[reason], in hvm_mmio_summary()
3600 void hvm_mmio_assist_postprocess(struct hvm_data *h) in hvm_mmio_assist_postprocess() argument
3604 switch(h->exit_reason) in hvm_mmio_assist_postprocess()
3609 hvm_set_summary_handler(h, hvm_mmio_summary, (void *)reason); in hvm_mmio_assist_postprocess()
3613 hvm_set_summary_handler(h, hvm_mmio_summary, (void *)reason); in hvm_mmio_assist_postprocess()
3621 __func__, h->exit_reason); in hvm_mmio_assist_postprocess()
3625 hvm_set_summary_handler(h, hvm_mmio_summary, (void *)reason); in hvm_mmio_assist_postprocess()
3632 update_cycles(&h->summary.mmio[reason], in hvm_mmio_assist_postprocess()
3633 h->arc_cycles); in hvm_mmio_assist_postprocess()
3637 enumerate_mmio(h); in hvm_mmio_assist_postprocess()
3641 void hvm_mmio_assist_process(struct record_info *ri, struct hvm_data *h) in hvm_mmio_assist_process() argument
3643 struct mmio_info *m = &h->inflight.mmio; in hvm_mmio_assist_process()
3653 } *r = (typeof(r))h->d; in hvm_mmio_assist_process()
3691 hvm_vlapic_handler(h); in hvm_mmio_assist_process()
3695 hvm_set_postprocess(h, hvm_mmio_assist_postprocess); in hvm_mmio_assist_process()
3698 void hvm_inj_virq_process(struct record_info *ri, struct hvm_data *h) { in hvm_inj_virq_process() argument
3701 } *r = (typeof(r))h->d; in hvm_inj_virq_process()
3715 h->summary.guest_interrupt[vector].count++; in hvm_inj_virq_process()
3718 interval_domain_guest_interrupt(h, vector); in hvm_inj_virq_process()
3724 if ( h->w2h.waking && h->w2h.vector == 0 ) { in hvm_inj_virq_process()
3725 if(h->summary.guest_interrupt[vector].start_tsc) { in hvm_inj_virq_process()
3727 h->v->d->did, h->v->vid, in hvm_inj_virq_process()
3729 h->summary.guest_interrupt[vector].start_tsc); in hvm_inj_virq_process()
3732 if(h->w2h.interrupts) in hvm_inj_virq_process()
3734 h->w2h.interrupts); in hvm_inj_virq_process()
3738 h->v->d->did, h->v->vid, vector); in hvm_inj_virq_process()
3742 h->w2h.vector = FAKE_VECTOR; in hvm_inj_virq_process()
3744 h->w2h.vector = vector; in hvm_inj_virq_process()
3745 h->summary.guest_interrupt[vector].is_wake = 1; in hvm_inj_virq_process()
3748 if( h->summary.guest_interrupt[vector].start_tsc == 0 ) { in hvm_inj_virq_process()
3750 h->summary.guest_interrupt[vector].start_tsc = 1; in hvm_inj_virq_process()
3751 h->w2h.interrupts_wanting_tsc++; in hvm_inj_virq_process()
3752 h->w2h.interrupts++; in hvm_inj_virq_process()
3756 h->v->d->did, h->v->vid, vector); in hvm_inj_virq_process()
3760 hvm_vlapic_inject(h->v, r->vector); in hvm_inj_virq_process()
3823 void hvm_io_write_postprocess(struct hvm_data *h) in hvm_io_write_postprocess() argument
3826 update_io_address(&h->summary.io.pio, h->inflight.io.port, 1, h->arc_cycles, 0); in hvm_io_write_postprocess()
3829 void hvm_io_read_postprocess(struct hvm_data *h) in hvm_io_read_postprocess() argument
3832 update_io_address(&h->summary.io.pio, h->inflight.io.port, 0, h->arc_cycles, 0); in hvm_io_read_postprocess()
3833 if(opt.scatterplot_io && h->inflight.io.port == opt.scatterplot_io_port) in hvm_io_read_postprocess()
3834 scatterplot_vs_time(h->exit_tsc, P.now - h->exit_tsc); in hvm_io_read_postprocess()
3837 void hvm_io_assist_process(struct record_info *ri, struct hvm_data *h) in hvm_io_assist_process() argument
3844 } *r = (typeof(r))h->d; in hvm_io_assist_process()
3861 h->inflight.io.port = r->x32.port; in hvm_io_assist_process()
3862 h->inflight.io.val = r->x32.data; in hvm_io_assist_process()
3865 h->inflight.io.is_write = 1; in hvm_io_assist_process()
3866 if ( hvm_set_postprocess(h, hvm_io_write_postprocess) ) in hvm_io_assist_process()
3869 h->inflight.io.is_write = 0; in hvm_io_assist_process()
3870 if ( hvm_set_postprocess(h, hvm_io_read_postprocess) ) in hvm_io_assist_process()
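hvm_io_write_postprocess() and hvm_io_read_postprocess() both feed update_io_address() with the port, a read/write flag, and the cycles spent in the exit. A sketch of a per-port accumulator in that spirit; the fixed table and my_* names are assumptions, the real update_io_address() keeps richer per-address state:

    #define MY_MAX_IO 128

    struct my_io_stat {
        unsigned port;
        unsigned long long reads, writes, cycles;
    };

    /* Accumulate one PIO event into a small zero-initialized table. */
    static void my_update_io(struct my_io_stat tab[MY_MAX_IO],
                             unsigned port, int is_write,
                             unsigned long long cycles)
    {
        for (int i = 0; i < MY_MAX_IO; i++) {
            if (tab[i].reads == 0 && tab[i].writes == 0)
                tab[i].port = port;            /* claim an empty slot */
            if (tab[i].port == port) {
                if (is_write)
                    tab[i].writes++;
                else
                    tab[i].reads++;
                tab[i].cycles += cycles;
                return;
            }
        }
    }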
3886 void cr3_switch(unsigned long long val, struct hvm_data *h) { in cr3_switch() argument
3887 struct vcpu_data *v = h->v; in cr3_switch()
3892 if ( !h->init ) in cr3_switch()
3949 h->inflight.cr_write.repromote = 1; in cr3_switch()
3963 scatterplot_vs_time(h->exit_tsc, in cr3_switch()
3968 scatterplot_vs_time(h->exit_tsc, gmfn); in cr3_switch()
4073 void hvm_cr3_write_summary(struct hvm_data *h) { in hvm_cr3_write_summary() argument
4077 PRINT_SUMMARY(h->summary.cr3_write_resyncs[j], in hvm_cr3_write_summary()
4079 PRINT_SUMMARY(h->summary.cr3_write_resyncs[j], in hvm_cr3_write_summary()
4083 void hvm_cr_write_summary(struct hvm_data *h, void *data) in hvm_cr_write_summary() argument
4087 PRINT_SUMMARY(h->summary.cr_write[cr], in hvm_cr_write_summary()
4090 hvm_cr3_write_summary(h); in hvm_cr_write_summary()
4093 void hvm_cr_write_postprocess(struct hvm_data *h) in hvm_cr_write_postprocess() argument
4095 if(h->inflight.cr_write.cr == 3) { in hvm_cr_write_postprocess()
4096 struct vcpu_data *v = h->v; in hvm_cr_write_postprocess()
4097 unsigned long long new_val = h->inflight.cr_write.val; in hvm_cr_write_postprocess()
4115 int resyncs = h->resyncs; in hvm_cr_write_postprocess()
4120 update_cycles(&h->summary.cr3_write_resyncs[resyncs], in hvm_cr_write_postprocess()
4121 h->arc_cycles); in hvm_cr_write_postprocess()
4123 update_cycles(&h->summary.cr_write[3], in hvm_cr_write_postprocess()
4124 h->arc_cycles); in hvm_cr_write_postprocess()
4126 hvm_update_short_summary(h, HVM_SHORT_SUMMARY_CR3); in hvm_cr_write_postprocess()
4130 cr3_switch(new_val, h); in hvm_cr_write_postprocess()
4134 if(h->inflight.cr_write.cr < CR_MAX) in hvm_cr_write_postprocess()
4135 update_cycles(&h->summary.cr_write[h->inflight.cr_write.cr], in hvm_cr_write_postprocess()
4136 h->arc_cycles); in hvm_cr_write_postprocess()
4143 if(h->exit_reason < h->exit_reason_max) in hvm_cr_write_postprocess()
4146 switch(h->inflight.cr_write.cr) in hvm_cr_write_postprocess()
4150 hvm_set_summary_handler(h, hvm_cr_write_summary, (void *)(_x)); \ in hvm_cr_write_postprocess()
4170 fprintf(stderr, "Unexpected cr: %d\n", h->inflight.cr_write.cr); in hvm_cr_write_postprocess()
4177 void hvm_cr_write_process(struct record_info *ri, struct hvm_data *h) in hvm_cr_write_process() argument
4188 } *r = (typeof(r))h->d; in hvm_cr_write_process()
4193 h->inflight.cr_write.cr = cr = r->x64.cr; in hvm_cr_write_process()
4194 h->inflight.cr_write.val = val = r->x64.val; in hvm_cr_write_process()
4196 h->inflight.cr_write.cr = cr = r->x32.cr; in hvm_cr_write_process()
4197 h->inflight.cr_write.val = val = r->x32.val; in hvm_cr_write_process()
4202 if ( hvm_set_postprocess(h, hvm_cr_write_postprocess) ) in hvm_cr_write_process()
4208 if(cr == 3 && h->v->cr3.val) { in hvm_cr_write_process()
4212 h->v->cr3.val, in hvm_cr_write_process()
4213 (h->v->cr3.val == val)?"flush":"switch"); in hvm_cr_write_process()
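The check at 4208-4213 distinguishes a CR3 write that reloads the current value (a TLB flush) from one that installs a new value (an address-space switch), and only fires once a previous CR3 value has been recorded. A sketch:

    #include <stdio.h>

    /* Same value written back = TLB flush; new value = switch.
     * "unknown" covers the first write, before any CR3 is recorded. */
    static const char *my_cr3_kind(unsigned long long cur,
                                   unsigned long long val)
    {
        if (!cur)
            return "unknown";
        return (cur == val) ? "flush" : "switch";
    }

    int main(void)
    {
        printf("%s\n", my_cr3_kind(0x1000, 0x1000)); /* flush  */
        printf("%s\n", my_cr3_kind(0x1000, 0x2000)); /* switch */
        return 0;
    }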
4225 void hvm_msr_write_summary(struct hvm_data *h, void *d) in hvm_msr_write_summary() argument
4229 void hvm_msr_write_postprocess(struct hvm_data *h) in hvm_msr_write_postprocess() argument
4235 hvm_set_summary_handler(h, hvm_msr_write_summary, NULL); in hvm_msr_write_postprocess()
4238 void hvm_msr_write_process(struct record_info *ri, struct hvm_data *h) in hvm_msr_write_process() argument
4243 } __attribute__((packed)) *r = (typeof(r))h->d; in hvm_msr_write_process()
4248 h->inflight.msr.addr = r->addr; in hvm_msr_write_process()
4249 h->inflight.msr.val = r->val; in hvm_msr_write_process()
4258 if ( hvm_set_postprocess(h, hvm_msr_write_postprocess) ) in hvm_msr_write_process()
4263 void hvm_msr_read_summary(struct hvm_data *h, void *d) in hvm_msr_read_summary() argument
4267 void hvm_msr_read_postprocess(struct hvm_data *h) in hvm_msr_read_postprocess() argument
4273 hvm_set_summary_handler(h, hvm_msr_read_summary, NULL); in hvm_msr_read_postprocess()
4276 void hvm_msr_read_process(struct record_info *ri, struct hvm_data *h) in hvm_msr_read_process() argument
4281 } __attribute__((packed)) *r = (typeof(r))h->d; in hvm_msr_read_process()
4286 h->inflight.msr.addr = r->addr; in hvm_msr_read_process()
4287 h->inflight.msr.val = r->val; in hvm_msr_read_process()
4296 if ( hvm_set_postprocess(h, hvm_msr_read_postprocess) ) in hvm_msr_read_process()
4300 void hvm_vmcall_summary(struct hvm_data *h, void *d) in hvm_vmcall_summary() argument
4306 PRINT_SUMMARY(h->summary.vmcall[i], in hvm_vmcall_summary()
4309 PRINT_SUMMARY(h->summary.vmcall[HYPERCALL_MAX], in hvm_vmcall_summary()
4313 void hvm_vmcall_postprocess(struct hvm_data *h) in hvm_vmcall_postprocess() argument
4315 unsigned eax = h->inflight.vmcall.eax ; in hvm_vmcall_postprocess()
4320 update_cycles(&h->summary.vmcall[eax], in hvm_vmcall_postprocess()
4321 h->arc_cycles); in hvm_vmcall_postprocess()
4323 update_cycles(&h->summary.vmcall[HYPERCALL_MAX], in hvm_vmcall_postprocess()
4324 h->arc_cycles); in hvm_vmcall_postprocess()
4325 hvm_set_summary_handler(h, hvm_vmcall_summary, NULL); in hvm_vmcall_postprocess()
4329 void hvm_vmcall_process(struct record_info *ri, struct hvm_data *h) in hvm_vmcall_process() argument
4333 } *r = (typeof(r))h->d; in hvm_vmcall_process()
4347 h->inflight.vmcall.eax = r->eax; in hvm_vmcall_process()
4349 if ( hvm_set_postprocess(h, hvm_vmcall_postprocess) ) in hvm_vmcall_process()
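hvm_vmcall_postprocess() buckets cycles by the hypercall number in eax, lumping anything at or above HYPERCALL_MAX into a single overflow bucket (4320-4324). A sketch of that bucketing, with a hypothetical MY_HYPERCALL_MAX:

    #define MY_HYPERCALL_MAX 64   /* hypothetical; not Xen's value */

    struct my_vmcall_summary {
        unsigned long long cycles[MY_HYPERCALL_MAX + 1]; /* +1 = overflow */
    };

    /* Credit one vmcall's cycles to its eax bucket, or to the shared
     * overflow bucket when eax is out of range. */
    static void my_account_vmcall(struct my_vmcall_summary *s,
                                  unsigned eax, unsigned long long cycles)
    {
        unsigned idx = (eax < MY_HYPERCALL_MAX) ? eax : MY_HYPERCALL_MAX;
        s->cycles[idx] += cycles;
    }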
4353 void hvm_inj_exc_process(struct record_info *ri, struct hvm_data *h) in hvm_inj_exc_process() argument
4357 } *r = (typeof(r))h->d; in hvm_inj_exc_process()
4373 void hvm_intr_summary(struct hvm_data *h, void *d) in hvm_intr_summary() argument
4378 if(h->summary.extint[i]) in hvm_intr_summary()
4384 h->summary.extint[i]); in hvm_intr_summary()
4388 h->summary.extint[i]); in hvm_intr_summary()
4390 if(h->summary.extint[EXTERNAL_INTERRUPT_MAX]) in hvm_intr_summary()
4392 h->summary.extint[EXTERNAL_INTERRUPT_MAX]); in hvm_intr_summary()
4396 void hvm_intr_process(struct record_info *ri, struct hvm_data *h) in hvm_intr_process() argument
4398 unsigned vec = *(unsigned *)h->d; in hvm_intr_process()
4404 if( (h->rip >> ADDR_SPACE_BITS) != 00 in hvm_intr_process()
4405 && (h->rip >> ADDR_SPACE_BITS) != ((0ULL-1)>> ADDR_SPACE_BITS) ) { in hvm_intr_process()
4407 h->rip, in hvm_intr_process()
4408 h->rip >> ADDR_SPACE_BITS); in hvm_intr_process()
4413 h->inflight.intr.vec = vec; in hvm_intr_process()
4433 unsigned long long rip = h->rip & ((1ULL << ADDR_SPACE_BITS)-1); in hvm_intr_process()
4436 abs_cycles_to_time(h->exit_tsc, &t); in hvm_intr_process()
4438 h->v->d->did, h->v->vid, in hvm_intr_process()
4447 unsigned long long rip = h->rip & ((1ULL << ADDR_SPACE_BITS)-1); in hvm_intr_process()
4450 h->summary.extint_histogram[index]++; in hvm_intr_process()
4457 unsigned long long rip = h->rip & ((1ULL << ADDR_SPACE_BITS)-1); in hvm_intr_process()
4460 update_eip(&h->v->d->interrupt_eip_list, rip, 0, 0, NULL); in hvm_intr_process()
4465 h->post_process = NULL; in hvm_intr_process()
4469 hvm_set_summary_handler(h, hvm_intr_summary, NULL); in hvm_intr_process()
4472 h->summary.extint[vec]++; in hvm_intr_process()
4474 h->summary.extint[EXTERNAL_INTERRUPT_MAX]++; in hvm_intr_process()
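The test at 4404-4405 is a canonical-address check: every bit of the RIP above ADDR_SPACE_BITS must be all-zero (user half) or all-one (kernel half), and later lines mask the RIP down to ADDR_SPACE_BITS before using it as a histogram or EIP-list key. A sketch, with a hypothetical bit width:

    #define MY_ADDR_SPACE_BITS 48   /* hypothetical width */

    /* Canonical check: high bits all zero or all one, as at 4404-4405. */
    static int my_rip_is_canonical(unsigned long long rip)
    {
        unsigned long long high = rip >> MY_ADDR_SPACE_BITS;
        return high == 0 || high == ((0ULL - 1) >> MY_ADDR_SPACE_BITS);
    }

    /* The usable key, masked as at 4433/4447/4457. */
    static unsigned long long my_rip_key(unsigned long long rip)
    {
        return rip & ((1ULL << MY_ADDR_SPACE_BITS) - 1);
    }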
4479 void hvm_intr_window_process(struct record_info *ri, struct hvm_data *h) in hvm_intr_window_process() argument
4485 } *r = (typeof(r))h->d; in hvm_intr_window_process()
4512 void hvm_pf_inject_process(struct record_info *ri, struct hvm_data *h) in hvm_pf_inject_process() argument
4523 } *r = (typeof(r))h->d; in hvm_pf_inject_process()
4546 void hvm_generic_postprocess_init(struct record_info *ri, struct hvm_data *h);
4548 void hvm_npf_process(struct record_info *ri, struct hvm_data *h) in hvm_npf_process() argument
4555 } *r = (typeof(r))h->d; in hvm_npf_process()
4564 hvm_generic_postprocess_init(ri, h); in hvm_npf_process()
4567 void hvm_rdtsc_process(struct record_info *ri, struct hvm_data *h) in hvm_rdtsc_process() argument
4571 } *r = (typeof(r))h->d; in hvm_rdtsc_process()
4578 h->last_rdtsc > r->tsc ? "BACKWARDS" : ""); in hvm_rdtsc_process()
4587 h->v->d->did, h->v->vid, in hvm_rdtsc_process()
4592 h->last_rdtsc = r->tsc; in hvm_rdtsc_process()
4595 void hvm_generic_summary(struct hvm_data *h, void *data) in hvm_generic_summary() argument
4601 PRINT_SUMMARY(h->summary.generic[evt], in hvm_generic_summary()
4606 void hvm_generic_postprocess_init(struct record_info *ri, struct hvm_data *h) in hvm_generic_postprocess_init() argument
4608 if ( h->post_process != hvm_generic_postprocess ) in hvm_generic_postprocess_init()
4611 h->inflight.generic.event = ri->event; in hvm_generic_postprocess_init()
4612 bcopy(h->d, h->inflight.generic.d, sizeof(unsigned int) * 4); in hvm_generic_postprocess_init()
4615 void hvm_generic_postprocess(struct hvm_data *h) in hvm_generic_postprocess() argument
4620 if ( h->inflight.generic.event ) in hvm_generic_postprocess()
4621 evt = (h->inflight.generic.event - TRC_HVM_HANDLER) in hvm_generic_postprocess()
4631 switch(h->exit_reason) in hvm_generic_postprocess()
4643 if ( !warned[h->exit_reason] ) in hvm_generic_postprocess()
4647 __func__, h->exit_reason, in hvm_generic_postprocess()
4648 (h->exit_reason > h->exit_reason_max) in hvm_generic_postprocess()
4650 : h->exit_reason_name[h->exit_reason]); in hvm_generic_postprocess()
4651 warned[h->exit_reason]=1; in hvm_generic_postprocess()
4658 __func__, evt, h->inflight.generic.event); in hvm_generic_postprocess()
4664 update_cycles(&h->summary.generic[evt], in hvm_generic_postprocess()
4665 h->arc_cycles); in hvm_generic_postprocess()
4671 if ( registered[evt] != h->exit_reason+1 && !warned[h->exit_reason]) in hvm_generic_postprocess()
4674 __func__, evt, registered[evt]-1, h->exit_reason); in hvm_generic_postprocess()
4675 warned[h->exit_reason]=1; in hvm_generic_postprocess()
4681 if((ret=__hvm_set_summary_handler(h, hvm_generic_summary, (void *)evt))) in hvm_generic_postprocess()
4684 registered[evt]=h->exit_reason+1; in hvm_generic_postprocess()
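hvm_generic_postprocess() registers one summary handler per generic event, remembers which exit reason registered it (stored as exit_reason+1 so zero can mean "not yet"), and warns at most once per exit reason about mismatches (4671-4684). A sketch of that bookkeeping, my_* names and bounds assumed:

    #include <stdio.h>

    #define MY_EVT_MAX 32
    #define MY_EXIT_MAX 64

    static void my_register_generic(int evt, int exit_reason)
    {
        static int registered[MY_EVT_MAX];  /* exit_reason+1; 0 = unset */
        static int warned[MY_EXIT_MAX];     /* one complaint per reason */

        if (registered[evt]) {
            if (registered[evt] != exit_reason + 1 && !warned[exit_reason]) {
                fprintf(stderr,
                        "event %d now under exit %d, first seen under %d\n",
                        evt, exit_reason, registered[evt] - 1);
                warned[exit_reason] = 1;
            }
            return;
        }
        /* ...__hvm_set_summary_handler() would be called here... */
        registered[evt] = exit_reason + 1;
    }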
4730 void hvm_handler_process(struct record_info *ri, struct hvm_data *h) { in hvm_handler_process() argument
4732 if(!h->init) in hvm_handler_process()
4739 h->d = ri->d; in hvm_handler_process()
4748 hvm_pf_inject_process(ri, h); in hvm_handler_process()
4755 *(unsigned*)h->d); in hvm_handler_process()
4759 hvm_inj_exc_process(ri, h); in hvm_handler_process()
4762 hvm_inj_virq_process(ri, h); in hvm_handler_process()
4765 hvm_intr_window_process(ri, h); in hvm_handler_process()
4768 if(h->v->cr3.data) { in hvm_handler_process()
4769 struct cr3_value_struct *cur = h->v->cr3.data; in hvm_handler_process()
4780 ri->t.s, ri->t.ns, pcpu_string(ri->cpu), h->v->cr3.val); in hvm_handler_process()
4789 if(!h->vmexit_valid) in hvm_handler_process()
4797 h->event_handler = ri->event - TRC_HVM_HANDLER; in hvm_handler_process()
4802 hvm_intr_process(ri, h); in hvm_handler_process()
4806 hvm_pf_xen_process(ri, h); in hvm_handler_process()
4810 hvm_io_assist_process(ri, h); in hvm_handler_process()
4816 hvm_mmio_assist_process(ri, h); in hvm_handler_process()
4820 hvm_cr_write_process(ri, h); in hvm_handler_process()
4823 hvm_msr_write_process(ri, h); in hvm_handler_process()
4826 hvm_msr_read_process(ri, h); in hvm_handler_process()
4829 hvm_vmcall_process(ri, h); in hvm_handler_process()
4832 hvm_npf_process(ri, h); in hvm_handler_process()
4835 hvm_rdtsc_process(ri, h); in hvm_handler_process()
4858 hvm_generic_postprocess_init(ri, h); in hvm_handler_process()
5032 void hvm_vmexit_process(struct record_info *ri, struct hvm_data *h, in hvm_vmexit_process() argument
5060 if(!h->init) in hvm_vmexit_process()
5061 init_hvm_data(h, v); in hvm_vmexit_process()
5063 h->vmexit_valid=1; in hvm_vmexit_process()
5064 bzero(&h->inflight, sizeof(h->inflight)); in hvm_vmexit_process()
5077 h->rip = r->x64.rip; in hvm_vmexit_process()
5078 h->exit_reason = r->x64.exit_reason; in hvm_vmexit_process()
5093 h->rip = r->x32.eip; in hvm_vmexit_process()
5094 h->exit_reason = r->x32.exit_reason; in hvm_vmexit_process()
5098 scatterplot_vs_time(ri->tsc, h->rip); in hvm_vmexit_process()
5100 if(h->exit_reason > h->exit_reason_max) in hvm_vmexit_process()
5103 (unsigned int)h->exit_reason, in hvm_vmexit_process()
5104 (unsigned int)h->exit_reason_max); in hvm_vmexit_process()
5110 if ( h->exit_reason < h->exit_reason_max in hvm_vmexit_process()
5111 && h->exit_reason_name[h->exit_reason] != NULL) in hvm_vmexit_process()
5114 h->exit_reason_name[h->exit_reason], in hvm_vmexit_process()
5115 h->rip, in hvm_vmexit_process()
5116 find_symbol(h->rip)); in hvm_vmexit_process()
5120 h->exit_reason, in hvm_vmexit_process()
5121 h->rip, in hvm_vmexit_process()
5122 find_symbol(h->rip)); in hvm_vmexit_process()
5125 if(h->v->cr3.data && h->entry_tsc) { in hvm_vmexit_process()
5126 update_cycles(&h->v->cr3.data->guest_time, in hvm_vmexit_process()
5127 ri->tsc - h->entry_tsc); in hvm_vmexit_process()
5128 h->v->cr3.data->run_time += (ri->tsc - h->entry_tsc); in hvm_vmexit_process()
5131 h->exit_tsc = ri->tsc; in hvm_vmexit_process()
5132 h->entry_tsc = 0; in hvm_vmexit_process()
5133 h->resyncs = 0; in hvm_vmexit_process()
5134 h->prealloc_unpin = 0; in hvm_vmexit_process()
5135 h->wrmap_bf = 0; in hvm_vmexit_process()
5136 h->short_summary_done = 0; in hvm_vmexit_process()
5138 h->post_process = hvm_generic_postprocess; in hvm_vmexit_process()
5139 h->inflight.generic.event = 0; in hvm_vmexit_process()
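hvm_vmexit_process() pulls the RIP and exit reason out of either a 32-bit or a 64-bit record layout, then resets the per-arc state (resyncs, prealloc_unpin, wrmap_bf, short_summary_done) and arms the generic postprocess hook. A sketch of the record parsing; this layout is a hypothetical stand-in, not the Xen trace ABI's actual definition:

    #include <stdint.h>

    struct my_vmexit_rec {
        union {
            struct { uint32_t exit_reason; uint32_t eip; } x32;
            struct { uint32_t exit_reason; uint64_t rip; } x64;
        };
    };

    /* Pick the width by guest mode, as the x32/x64 branches above do. */
    static void my_parse_vmexit(const struct my_vmexit_rec *r, int is_64bit,
                                uint64_t *rip, uint32_t *exit_reason)
    {
        if (is_64bit) {
            *rip = r->x64.rip;
            *exit_reason = r->x64.exit_reason;
        } else {
            *rip = r->x32.eip;
            *exit_reason = r->x32.exit_reason;
        }
    }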
5142 void hvm_close_vmexit(struct hvm_data *h, tsc_t tsc) { in hvm_close_vmexit() argument
5144 if(h->exit_tsc) { in hvm_close_vmexit()
5145 if(h->exit_tsc > tsc) in hvm_close_vmexit()
5146 h->arc_cycles = 0; in hvm_close_vmexit()
5148 h->arc_cycles = tsc - h->exit_tsc; in hvm_close_vmexit()
5151 update_cycles(&h->summary.exit_reason[h->exit_reason], in hvm_close_vmexit()
5152 h->arc_cycles); in hvm_close_vmexit()
5153 h->summary_info = 1; in hvm_close_vmexit()
5157 && h->exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT in hvm_close_vmexit()
5158 && h->inflight.intr.vec == opt.scatterplot_extint_cycles_vector ) in hvm_close_vmexit()
5165 h->v->d->did, in hvm_close_vmexit()
5166 h->v->vid, in hvm_close_vmexit()
5168 h->arc_cycles); in hvm_close_vmexit()
5173 if(h->post_process) in hvm_close_vmexit()
5174 (h->post_process)(h); in hvm_close_vmexit()
5176 if(h->arc_cycles) { in hvm_close_vmexit()
5177 if(opt.summary_info && !h->short_summary_done) { in hvm_close_vmexit()
5178 switch(h->event_handler) { in hvm_close_vmexit()
5180 hvm_update_short_summary(h, HVM_SHORT_SUMMARY_VMCALL); in hvm_close_vmexit()
5183 hvm_update_short_summary(h, HVM_SHORT_SUMMARY_INTERRUPT); in hvm_close_vmexit()
5186 hvm_update_short_summary(h, HVM_SHORT_SUMMARY_HLT); in hvm_close_vmexit()
5189 hvm_update_short_summary(h, HVM_SHORT_SUMMARY_OTHER); in hvm_close_vmexit()
5195 if(h->v->cr3.data) { in hvm_close_vmexit()
5196 h->v->cr3.data->run_time += h->arc_cycles; in hvm_close_vmexit()
5199 update_cycles(&h->v->cr3.data->hv_time, in hvm_close_vmexit()
5200 h->arc_cycles); in hvm_close_vmexit()
5204 h->exit_tsc = 0; in hvm_close_vmexit()
5205 h->vmexit_valid = 0; in hvm_close_vmexit()
5206 h->post_process = NULL; in hvm_close_vmexit()
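hvm_close_vmexit() converts the exit/entry TSC pair into an "arc": a TSC that appears to run backwards is clamped to zero cycles (5144-5148), the cycles are credited to the exit reason's summary, the per-exit post_process hook runs, and the in-flight state is cleared. A sketch of the skeleton:

    typedef unsigned long long tsc_t;

    struct my_arc {
        tsc_t exit_tsc;
        tsc_t arc_cycles;
        int vmexit_valid;
    };

    /* Close the exit->entry arc at 'now'; clamp clock skew to zero
     * rather than letting the subtraction wrap. */
    static void my_close_vmexit(struct my_arc *h, tsc_t now)
    {
        if (h->exit_tsc) {
            h->arc_cycles = (h->exit_tsc > now) ? 0 : now - h->exit_tsc;
            /* ...credit arc_cycles, run the post_process hook... */
        }
        h->exit_tsc = 0;
        h->vmexit_valid = 0;
    }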
5210 void hvm_vmentry_process(struct record_info *ri, struct hvm_data *h) { in hvm_vmentry_process() argument
5211 if(!h->init) in hvm_vmentry_process()
5222 hvm_vlapic_vmentry_cleanup(h->v, ri->tsc); in hvm_vmentry_process()
5224 if(h->w2h.waking && opt.dump_all) in hvm_vmentry_process()
5226 h->v->d->did, h->v->vid); in hvm_vmentry_process()
5228 h->w2h.waking = 0; in hvm_vmentry_process()
5230 if ( h->w2h.interrupts_wanting_tsc ) { in hvm_vmentry_process()
5234 if ( h->summary.guest_interrupt[i].start_tsc == 1 ) in hvm_vmentry_process()
5238 h->v->d->did, h->v->vid, i, ri->tsc); in hvm_vmentry_process()
5239 h->summary.guest_interrupt[i].start_tsc = ri->tsc; in hvm_vmentry_process()
5240 h->w2h.interrupts_wanting_tsc--; in hvm_vmentry_process()
5241 if ( h->w2h.interrupts_wanting_tsc == 0 ) in hvm_vmentry_process()
5247 if(!h->vmexit_valid) in hvm_vmentry_process()
5256 unsigned long long arc_cycles = ri->tsc - h->exit_tsc; in hvm_vmentry_process()
5261 hvm_close_vmexit(h, ri->tsc); in hvm_vmentry_process()
5262 h->entry_tsc = ri->tsc; in hvm_vmentry_process()
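The wake-to-halt machinery uses start_tsc as a three-state field: 0 means idle, 1 is a placeholder set at injection time (3748-3752), and the real timestamp is patched in at the next VMENTRY (5230-5241), since the latency of interest runs from injection to delivery. A sketch, my_* names assumed:

    typedef unsigned long long tsc_t;
    #define MY_NVEC 256

    struct my_w2h {
        tsc_t start_tsc[MY_NVEC];   /* 0 = idle, 1 = wants tsc, else tsc */
        int interrupts_wanting_tsc;
    };

    /* At injection: mark the vector as wanting a timestamp. */
    static void my_inject(struct my_w2h *w, int vec)
    {
        if (w->start_tsc[vec] == 0) {
            w->start_tsc[vec] = 1;
            w->interrupts_wanting_tsc++;
        }
    }

    /* At the next vmentry: replace the placeholder with the real TSC. */
    static void my_vmentry(struct my_w2h *w, tsc_t now)
    {
        for (int i = 0; i < MY_NVEC && w->interrupts_wanting_tsc; i++) {
            if (w->start_tsc[i] == 1) {
                w->start_tsc[i] = now;
                w->interrupts_wanting_tsc--;
            }
        }
    }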
5269 struct hvm_data *h = &v->hvm; in hvm_process() local
5279 hvm_handler_process(ri, h); in hvm_process()
5288 hvm_vmexit_process(ri, h, v); in hvm_process()
5300 void hvm_summary(struct hvm_data *h) { in hvm_summary() argument
5303 if(!h->summary_info) in hvm_summary()
5307 for(i=0; i<h->exit_reason_max; i++) { in hvm_summary()
5310 if ( h->exit_reason_name[i] ) in hvm_summary()
5311 PRINT_SUMMARY(h->summary.exit_reason[i], in hvm_summary()
5312 " %-20s ", h->exit_reason_name[i]); in hvm_summary()
5314 PRINT_SUMMARY(h->summary.exit_reason[i], in hvm_summary()
5317 p=h->exit_reason_summary_handler_list[i]; in hvm_summary()
5320 p->handler(h, p->data); in hvm_summary()
5327 if(h->summary.guest_interrupt[i].count) { in hvm_summary()
5330 i, h->summary.guest_interrupt[i].count); in hvm_summary()
5334 print_cycle_summary(h->summary.guest_interrupt[i].runtime+j, desc); in hvm_summary()
5337 if(h->summary.guest_interrupt[i].count) in hvm_summary()
5339 i, h->summary.guest_interrupt[i].count); in hvm_summary()
5346 if(h->summary.extint_histogram[i]) in hvm_summary()
5351 h->summary.extint_histogram[i]); in hvm_summary()
5355 PRINT_SUMMARY(h->summary.ipi_latency, in hvm_summary()
5358 if(h->summary.ipi_count[i]) in hvm_summary()
5360 i, h->summary.ipi_count[i]); in hvm_summary()
5361 hvm_io_address_summary(h->summary.io.pio, "IO address summary:"); in hvm_summary()
5362 hvm_io_address_summary(h->summary.io.mmio, "MMIO address summary:"); in hvm_summary()
5416 void shadow_emulate_postprocess(struct hvm_data *h) in shadow_emulate_postprocess() argument
5418 struct pf_xen_extra *e = &h->inflight.pf_xen; in shadow_emulate_postprocess()
5422 update_eip(&h->v->d->emulate_eip_list, in shadow_emulate_postprocess()
5423 h->rip, in shadow_emulate_postprocess()
5424 h->arc_cycles, in shadow_emulate_postprocess()
5426 update_cycles(&h->summary.pf_xen[PF_XEN_EMULATE], h->arc_cycles); in shadow_emulate_postprocess()
5427 update_cycles(&h->summary.pf_xen_emul[e->pt_level], h->arc_cycles); in shadow_emulate_postprocess()
5428 if(h->prealloc_unpin) in shadow_emulate_postprocess()
5429 update_cycles(&h->summary.pf_xen_emul[PF_XEN_EMUL_PREALLOC_UNPIN], h->arc_cycles); in shadow_emulate_postprocess()
5431 update_cycles(&h->summary.pf_xen_emul[PF_XEN_EMUL_PREALLOC_UNHOOK], h->arc_cycles); in shadow_emulate_postprocess()
5433 update_cycles(&h->summary.pf_xen_emul[PF_XEN_EMUL_EARLY_UNSHADOW], h->arc_cycles); in shadow_emulate_postprocess()
5435 update_cycles(&h->summary.pf_xen_emul[PF_XEN_EMUL_SET_CHANGED], h->arc_cycles); in shadow_emulate_postprocess()
5437 update_cycles(&h->summary.pf_xen_emul[PF_XEN_EMUL_SET_UNCHANGED], h->arc_cycles); in shadow_emulate_postprocess()
5439 update_cycles(&h->summary.pf_xen_emul[PF_XEN_EMUL_SET_FLUSH], h->arc_cycles); in shadow_emulate_postprocess()
5441 update_cycles(&h->summary.pf_xen_emul[PF_XEN_EMUL_SET_ERROR], h->arc_cycles); in shadow_emulate_postprocess()
5443 update_cycles(&h->summary.pf_xen_emul[PF_XEN_EMUL_PROMOTE], h->arc_cycles); in shadow_emulate_postprocess()
5445 update_cycles(&h->summary.pf_xen_emul[PF_XEN_EMUL_DEMOTE], h->arc_cycles); in shadow_emulate_postprocess()
5448 hvm_update_short_summary(h, HVM_SHORT_SUMMARY_EMULATE); in shadow_emulate_postprocess()
5453 scatterplot_vs_time(h->exit_tsc, -10); in shadow_emulate_postprocess()
5454 if(h->prealloc_unpin) in shadow_emulate_postprocess()
5455 scatterplot_vs_time(h->exit_tsc, 0); in shadow_emulate_postprocess()
5458 if(h->v->cr3.data) in shadow_emulate_postprocess()
5459 scatterplot_vs_time(h->exit_tsc, h->v->cr3.data->cr3_id); in shadow_emulate_postprocess()
5461 scatterplot_vs_time(h->exit_tsc, 2); in shadow_emulate_postprocess()
5468 void shadow_emulate_process(struct record_info *ri, struct hvm_data *h) in shadow_emulate_process() argument
5470 struct pf_xen_extra *e = &h->inflight.pf_xen; in shadow_emulate_process()
5494 if ( rec_gpl != h->v->guest_paging_levels ) in shadow_emulate_process()
5497 __func__, rec_gpl, h->v->guest_paging_levels); in shadow_emulate_process()
5498 h->v->guest_paging_levels = rec_gpl; in shadow_emulate_process()
5508 __func__, sizeof(r->gpl2), h->v->guest_paging_levels, in shadow_emulate_process()
5523 __func__, sizeof(r->gpl3), h->v->guest_paging_levels, in shadow_emulate_process()
5537 __func__, sizeof(r->gpl4), h->v->guest_paging_levels, in shadow_emulate_process()
5559 if ( hvm_set_postprocess(h, shadow_emulate_postprocess) ) in shadow_emulate_process()
5571 struct hvm_data *h) { in shadow_parse_other() argument
5595 if ( rec_gpl != h->v->guest_paging_levels ) in shadow_parse_other()
5598 __func__, rec_gpl, h->v->guest_paging_levels); in shadow_parse_other()
5599 h->v->guest_paging_levels = rec_gpl; in shadow_parse_other()
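shadow_parse_other(), like the emulate/fixup/mmio/propagate parsers around it, picks a record layout by the guest's paging levels, and when the record disagrees with the vcpu's current value it warns and adopts the record's value (5595-5599). A sketch of that convention with a two-layout union; the actual gpl2/gpl3/gpl4 layouts differ:

    #include <stdio.h>

    /* Hypothetical layouts: 2-level guests log 32-bit VAs, wider
     * guests 64-bit ones. */
    union my_pf_rec {
        struct { unsigned int va, flags; } gpl2;
        struct { unsigned long long va; unsigned int flags; } gpl4;
    };

    /* On mismatch, warn and trust the record, as the listing does. */
    static unsigned long long my_parse_va(const union my_pf_rec *r,
                                          int *gpl, int rec_gpl)
    {
        if (rec_gpl != *gpl) {
            fprintf(stderr, "paging levels changed: %d -> %d\n",
                    *gpl, rec_gpl);
            *gpl = rec_gpl;
        }
        return (rec_gpl == 2) ? r->gpl2.va : r->gpl4.va;
    }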
5649 void shadow_unsync_postprocess(struct hvm_data *h)
5651 struct pf_xen_extra *e = &h->inflight.pf_xen;
5653 if(h->resyncs > 1)
5655 h->resyncs);
5658 update_cycles(&h->summary.pf_xen[PF_XEN_EMULATE_UNSYNC],
5659 h->arc_cycles);
5660 if(h->resyncs <= 1)
5661 update_cycles(&h->summary.pf_xen_unsync[h->resyncs],
5662 h->arc_cycles);
5667 void shadow_unsync_process(struct record_info *ri, struct hvm_data *h)
5669 struct pf_xen_extra *e = &h->inflight.pf_xen;
5672 shadow_parse_other(ri, &r, h);
5677 pf_preprocess(e, h->v->guest_paging_levels);
5687 if ( hvm_set_postprocess(h, shadow_unsync_postprocess) )
5692 void shadow_fault_generic_postprocess(struct hvm_data *h);
5694 void shadow_emulate_other_process(struct record_info *ri, struct hvm_data *h) in shadow_emulate_other_process() argument
5696 struct pf_xen_extra *e = &h->inflight.pf_xen; in shadow_emulate_other_process()
5700 shadow_parse_other(ri, &r, h); in shadow_emulate_other_process()
5706 pf_preprocess(e, h->v->guest_paging_levels); in shadow_emulate_other_process()
5715 if ( hvm_set_postprocess(h, shadow_fault_generic_postprocess) ) in shadow_emulate_other_process()
5719 void shadow_fixup_postprocess(struct hvm_data *h) in shadow_fixup_postprocess() argument
5721 struct pf_xen_extra *e = &h->inflight.pf_xen; in shadow_fixup_postprocess()
5725 update_cycles(&h->summary.pf_xen[PF_XEN_FIXUP], h->arc_cycles); in shadow_fixup_postprocess()
5726 if(h->prealloc_unpin) { in shadow_fixup_postprocess()
5727 update_cycles(&h->summary.pf_xen_fixup[PF_XEN_FIXUP_PREALLOC_UNPIN], h->arc_cycles); in shadow_fixup_postprocess()
5730 update_cycles(&h->summary.pf_xen_fixup[PF_XEN_FIXUP_UNSYNC], h->arc_cycles); in shadow_fixup_postprocess()
5731 if(h->resyncs < PF_XEN_FIXUP_UNSYNC_RESYNC_MAX) in shadow_fixup_postprocess()
5732 update_cycles(&h->summary.pf_xen_fixup_unsync_resync[h->resyncs], in shadow_fixup_postprocess()
5733 h->arc_cycles); in shadow_fixup_postprocess()
5735 update_cycles(&h->summary.pf_xen_fixup_unsync_resync[PF_XEN_FIXUP_UNSYNC_RESYNC_MAX], in shadow_fixup_postprocess()
5736 h->arc_cycles); in shadow_fixup_postprocess()
5739 update_cycles(&h->summary.pf_xen_fixup[PF_XEN_FIXUP_OOS_ADD], h->arc_cycles); in shadow_fixup_postprocess()
5741 update_cycles(&h->summary.pf_xen_fixup[PF_XEN_FIXUP_OOS_EVICT], h->arc_cycles); in shadow_fixup_postprocess()
5743 update_cycles(&h->summary.pf_xen_fixup[PF_XEN_FIXUP_PROMOTE], h->arc_cycles); in shadow_fixup_postprocess()
5745 update_cycles(&h->summary.pf_xen_fixup[PF_XEN_FIXUP_WRMAP], h->arc_cycles); in shadow_fixup_postprocess()
5746 if(e->flag_wrmap_brute_force || h->wrmap_bf) in shadow_fixup_postprocess()
5747 update_cycles(&h->summary.pf_xen_fixup[PF_XEN_FIXUP_BRUTE_FORCE], h->arc_cycles); in shadow_fixup_postprocess()
5748 } else if(e->flag_wrmap_brute_force || h->wrmap_bf) { in shadow_fixup_postprocess()
5753 if(!(e->flag_promote || h->prealloc_unpin || e->flag_unsync)) in shadow_fixup_postprocess()
5754 update_cycles(&h->summary.pf_xen_fixup[PF_XEN_FIXUP_UPDATE_ONLY], h->arc_cycles); in shadow_fixup_postprocess()
5758 hvm_update_short_summary(h, HVM_SHORT_SUMMARY_UNSYNC); in shadow_fixup_postprocess()
5760 hvm_update_short_summary(h, HVM_SHORT_SUMMARY_FIXUP); in shadow_fixup_postprocess()
5764 if(h->prealloc_unpin) in shadow_fixup_postprocess()
5765 scatterplot_vs_time(h->exit_tsc, 0); in shadow_fixup_postprocess()
5768 if(h->v->cr3.data) in shadow_fixup_postprocess()
5769 scatterplot_vs_time(h->exit_tsc, h->v->cr3.data->cr3_id); in shadow_fixup_postprocess()
5771 scatterplot_vs_time(h->exit_tsc, 2); in shadow_fixup_postprocess()
5776 void shadow_fixup_process(struct record_info *ri, struct hvm_data *h) in shadow_fixup_process() argument
5778 struct pf_xen_extra *e = &h->inflight.pf_xen; in shadow_fixup_process()
5797 if ( rec_gpl != h->v->guest_paging_levels ) in shadow_fixup_process()
5800 __func__, rec_gpl, h->v->guest_paging_levels); in shadow_fixup_process()
5801 h->v->guest_paging_levels = rec_gpl; in shadow_fixup_process()
5810 __func__, sizeof(r->gpl2), h->v->guest_paging_levels, in shadow_fixup_process()
5823 __func__, sizeof(r->gpl3), h->v->guest_paging_levels, in shadow_fixup_process()
5836 __func__, sizeof(r->gpl4), h->v->guest_paging_levels, in shadow_fixup_process()
5865 if ( hvm_set_postprocess(h, shadow_fixup_postprocess) ) in shadow_fixup_process()
5869 void shadow_mmio_postprocess(struct hvm_data *h) in shadow_mmio_postprocess() argument
5871 struct pf_xen_extra *e = &h->inflight.pf_xen; in shadow_mmio_postprocess()
5875 update_cycles(&h->summary.pf_xen[e->pf_case], in shadow_mmio_postprocess()
5876 h->arc_cycles); in shadow_mmio_postprocess()
5880 hvm_update_short_summary(h, HVM_SHORT_SUMMARY_MMIO); in shadow_mmio_postprocess()
5884 enumerate_mmio(h); in shadow_mmio_postprocess()
5887 void shadow_mmio_process(struct record_info *ri, struct hvm_data *h) in shadow_mmio_process() argument
5889 struct pf_xen_extra *e = &h->inflight.pf_xen; in shadow_mmio_process()
5890 struct mmio_info *m = &h->inflight.mmio; in shadow_mmio_process()
5904 if ( rec_gpl != h->v->guest_paging_levels ) in shadow_mmio_process()
5907 __func__, rec_gpl, h->v->guest_paging_levels); in shadow_mmio_process()
5908 h->v->guest_paging_levels = rec_gpl; in shadow_mmio_process()
5918 __func__, sizeof(r->gpl2), h->v->guest_paging_levels, in shadow_mmio_process()
5929 __func__, sizeof(r->gpl4), h->v->guest_paging_levels, in shadow_mmio_process()
5944 if ( hvm_set_postprocess(h, shadow_mmio_postprocess) ) in shadow_mmio_process()
5948 void shadow_propagate_postprocess(struct hvm_data *h) in shadow_propagate_postprocess() argument
5950 struct pf_xen_extra *e = &h->inflight.pf_xen; in shadow_propagate_postprocess()
5955 update_cycles(&h->summary.pf_xen[e->pf_case], in shadow_propagate_postprocess()
5956 h->arc_cycles); in shadow_propagate_postprocess()
5960 hvm_update_short_summary(h, HVM_SHORT_SUMMARY_PROPAGATE); in shadow_propagate_postprocess()
5964 void shadow_propagate_process(struct record_info *ri, struct hvm_data *h) in shadow_propagate_process() argument
5966 struct pf_xen_extra *e = &h->inflight.pf_xen; in shadow_propagate_process()
5985 if ( rec_gpl != h->v->guest_paging_levels ) in shadow_propagate_process()
5988 __func__, rec_gpl, h->v->guest_paging_levels); in shadow_propagate_process()
5989 h->v->guest_paging_levels = rec_gpl; in shadow_propagate_process()
5998 __func__, sizeof(r->gpl2), h->v->guest_paging_levels, in shadow_propagate_process()
6011 __func__, sizeof(r->gpl3), h->v->guest_paging_levels, in shadow_propagate_process()
6024 __func__, sizeof(r->gpl4), h->v->guest_paging_levels, in shadow_propagate_process()
6041 if ( hvm_set_postprocess(h, shadow_propagate_postprocess) ) in shadow_propagate_process()
6076 void shadow_fault_generic_postprocess(struct hvm_data *h) in shadow_fault_generic_postprocess() argument
6078 struct pf_xen_extra *e = &h->inflight.pf_xen; in shadow_fault_generic_postprocess()
6087 update_cycles(&h->summary.pf_xen[e->pf_case], in shadow_fault_generic_postprocess()
6088 h->arc_cycles); in shadow_fault_generic_postprocess()
6090 hvm_update_short_summary(h, HVM_SHORT_SUMMARY_PROPAGATE); in shadow_fault_generic_postprocess()
6094 void shadow_fault_generic_process(struct record_info *ri, struct hvm_data *h) in shadow_fault_generic_process() argument
6099 h->inflight.generic.event = ri->event; in shadow_fault_generic_process()
6100 bcopy(ri->d, h->inflight.generic.d, sizeof(unsigned int) * 4); in shadow_fault_generic_process()
6103 shadow_fault_generic_dump(h->inflight.generic.event, in shadow_fault_generic_process()
6104 h->inflight.generic.d, in shadow_fault_generic_process()
6107 h->inflight.pf_xen.pf_case = sevt.minor; in shadow_fault_generic_process()
6108 if ( hvm_set_postprocess(h, shadow_fault_generic_postprocess) ) in shadow_fault_generic_process()
6112 void shadow_resync_process(struct record_info *ri, struct hvm_data *h) in shadow_resync_process() argument
6124 h->resyncs++; in shadow_resync_process()
6127 void shadow_prealloc_unpin_process(struct record_info *ri, struct hvm_data *h) { in shadow_prealloc_unpin_process() argument
6136 if(h->prealloc_unpin) in shadow_prealloc_unpin_process()
6139 h->prealloc_unpin = 1; in shadow_prealloc_unpin_process()
6142 cr3_prealloc_unpin(h->v, r->gfn); in shadow_prealloc_unpin_process()
6145 void shadow_wrmap_bf_process(struct record_info *ri, struct hvm_data *h) { in shadow_wrmap_bf_process() argument
6154 h->wrmap_bf = 1; in shadow_wrmap_bf_process()
6160 struct hvm_data *h; in shadow_process() local
6170 h = &p->current->hvm; in shadow_process()
6172 if(!h->init || !h->vmexit_valid) in shadow_process()
6191 h->inflight.pf_xen.pf_case = sevt.minor; in shadow_process()
6193 hvm_set_summary_handler(h, hvm_pf_xen_summary, NULL); in shadow_process()
6201 shadow_propagate_process(ri, h); in shadow_process()
6204 shadow_emulate_process(ri, h); in shadow_process()
6207 shadow_fixup_process(ri, h); in shadow_process()
6211 shadow_mmio_process(ri, h); in shadow_process()
6216 shadow_emulate_other_process(ri, h); in shadow_process()
6220 shadow_unsync_process(ri, h); in shadow_process()
6225 shadow_resync_process(ri, h); in shadow_process()
6228 shadow_prealloc_unpin_process(ri, h); in shadow_process()
6231 shadow_wrmap_bf_process(ri, h); in shadow_process()
6235 shadow_fault_generic_process(ri, h); in shadow_process()
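shadow_process() closes the section: it looks up the current vcpu's hvm_data, ignores shadow records when no VMEXIT is in flight (6172), stores the minor event number as the page-fault case (6191), and dispatches to the per-subtype handlers listed above. A sketch of that dispatch, with hypothetical minor-event numbers; the real sevt.minor values come from the Xen trace headers:

    enum my_sh_evt { MY_SH_PROPAGATE = 1, MY_SH_EMULATE, MY_SH_FIXUP,
                     MY_SH_MMIO };

    struct my_h { int init, vmexit_valid, pf_case; };

    static void my_shadow_process(struct my_h *h, int minor)
    {
        if (!h->init || !h->vmexit_valid)
            return;                  /* no VMEXIT in flight: drop it */
        h->pf_case = minor;
        switch (minor) {
        case MY_SH_PROPAGATE: /* shadow_propagate_process(ri, h) */ break;
        case MY_SH_EMULATE:   /* shadow_emulate_process(ri, h)   */ break;
        case MY_SH_FIXUP:     /* shadow_fixup_process(ri, h)     */ break;
        case MY_SH_MMIO:      /* shadow_mmio_process(ri, h)      */ break;
        default:              /* shadow_fault_generic_process    */ break;
        }
    }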