/xen-4.10.0-shim-comet/xen/include/asm-x86/hvm/
hvm.h
    106  void (*vcpu_destroy)(struct vcpu *v);
    140  void (*fpu_leave)(struct vcpu *v);
    228  void (*setup)(struct vcpu *v);
    246  int hvm_vcpu_initialise(struct vcpu *v);
    247  void hvm_vcpu_destroy(struct vcpu *v);
    248  void hvm_vcpu_down(struct vcpu *v);
    314  hvm_guest_x86_mode(struct vcpu *v)  in hvm_guest_x86_mode()
    321  hvm_update_host_cr3(struct vcpu *v)  in hvm_update_host_cr3()
    357  hvm_get_cpl(struct vcpu *v)  in hvm_get_cpl()
    397  void hvm_do_resume(struct vcpu *v);
    [all …]
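hvm_vcpu_initialise() and hvm_vcpu_destroy() bracket the lifetime of the HVM-specific state hanging off a vcpu. A minimal sketch of the call pairing, assuming the prototypes listed above; arch_vcpu_setup() and later_setup_step() are hypothetical names, not functions in the tree:

    #include <xen/sched.h>          /* struct vcpu */
    #include <asm/hvm/hvm.h>        /* hvm_vcpu_initialise()/hvm_vcpu_destroy() */

    static int later_setup_step(struct vcpu *v) { return 0; }  /* hypothetical */

    static int arch_vcpu_setup(struct vcpu *v)
    {
        int rc = hvm_vcpu_initialise(v);   /* allocate HVM-specific vcpu state */

        if ( rc )
            return rc;

        rc = later_setup_step(v);
        if ( rc )
            hvm_vcpu_destroy(v);           /* unwind on a later failure */

        return rc;
    }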
vpt.h
    36   typedef void time_cb(struct vcpu *v, void *opaque);
    49   struct vcpu *vcpu;   /* vcpu timer interrupt delivers to */  member
    123  struct vcpu *vcpu;   /* Keeps sync with this vcpu's guest-time */  member
    143  void pt_save_timer(struct vcpu *v);
    144  void pt_restore_timer(struct vcpu *v);
    145  int pt_update_irq(struct vcpu *v);
    147  void pt_migrate(struct vcpu *v);
    149  void pt_adjust_global_vcpu_target(struct vcpu *v);
    153  (d)->vcpu ? (d)->vcpu[0] : NULL)
    181  void rtc_migrate_timers(struct vcpu *v);
    [all …]
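A virtual periodic timer delivers to the vcpu recorded in its vcpu member, and pt_save_timer()/pt_restore_timer() are meant to bracket the period during which that vcpu is off the CPU. A hedged sketch of the callback shape and the save/restore pairing; my_timer_fired() and the ctxt_switch_* hooks are illustrative names only:

    #include <xen/sched.h>
    #include <asm/hvm/vpt.h>        /* time_cb, pt_save_timer(), pt_restore_timer() */

    static void my_timer_fired(struct vcpu *v, void *opaque)
    {
        /* shape matches: typedef void time_cb(struct vcpu *v, void *opaque); */
    }

    static void ctxt_switch_from(struct vcpu *prev)
    {
        pt_save_timer(prev);        /* stop accounting ticks while prev is descheduled */
    }

    static void ctxt_switch_to(struct vcpu *next)
    {
        pt_restore_timer(next);     /* re-arm next's periodic timers */
    }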
nestedhvm.h
    39   int nestedhvm_vcpu_initialise(struct vcpu *v);
    40   void nestedhvm_vcpu_destroy(struct vcpu *v);
    41   void nestedhvm_vcpu_reset(struct vcpu *v);
    42   bool_t nestedhvm_vcpu_in_guestmode(struct vcpu *v);
    56   int nestedhvm_hap_nested_page_fault(struct vcpu *v, paddr_t *L2_gpa,
    59   int nestedhap_walk_L1_p2m(struct vcpu *v, paddr_t L2_gpa, paddr_t *L1_gpa,
    73   bool_t nestedhvm_is_n2(struct vcpu *v);
    75   static inline void nestedhvm_set_cr(struct vcpu *v, unsigned int cr,  in nestedhvm_set_cr()
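nestedhvm_vcpu_in_guestmode() tells a caller whether the vcpu is currently executing its nested (L2) guest rather than the L1 hypervisor it hosts. A hedged sketch of how an exit handler might branch on it; dispatch_exit() and the two handlers are hypothetical:

    #include <xen/sched.h>
    #include <asm/hvm/nestedhvm.h>

    static void handle_exit_for_l1(struct vcpu *v) { /* ordinary exit handling */ }
    static void handle_exit_for_l2(struct vcpu *v) { /* may reflect the exit to L1 */ }

    static void dispatch_exit(struct vcpu *v)
    {
        if ( nestedhvm_vcpu_in_guestmode(v) )
            handle_exit_for_l2(v);      /* exit taken while L2 was running */
        else
            handle_exit_for_l1(v);
    }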
/xen-4.10.0-shim-comet/xen/include/asm-x86/hvm/vmx/
vvmx.h
    108  int nvmx_vcpu_initialise(struct vcpu *v);
    109  void nvmx_vcpu_destroy(struct vcpu *v);
    110  int nvmx_vcpu_reset(struct vcpu *v);
    117  bool_t nvmx_ept_enabled(struct vcpu *v);
    193  #define get_vvmcs(vcpu, encoding) \  argument
    195      get_vvmcs_real(vcpu, encoding) : \
    198  #define set_vvmcs(vcpu, encoding, val) \  argument
    200      set_vvmcs_real(vcpu, encoding, val) : \
    213  uint64_t get_shadow_eptp(struct vcpu *v);
    215  void nvmx_destroy_vmcs(struct vcpu *v);
    [all …]
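The get_vvmcs()/set_vvmcs() macros pick between a hardware accessor (get_vvmcs_real(), via VMCS shadowing) and a software copy of the L1-provided VMCS, so callers read and write virtual-VMCS fields without caring which backing is in use. A hedged sketch of typical use; the wrapper function is illustrative and GUEST_RIP stands for a VMCS field encoding defined elsewhere:

    #include <xen/sched.h>
    #include <asm/hvm/vmx/vmcs.h>   /* VMCS field encodings such as GUEST_RIP */
    #include <asm/hvm/vmx/vvmx.h>   /* get_vvmcs()/set_vvmcs() */

    static void skip_l2_instruction(struct vcpu *v, unsigned long insn_len)
    {
        uint64_t rip = get_vvmcs(v, GUEST_RIP);     /* read from the virtual VMCS */

        set_vvmcs(v, GUEST_RIP, rip + insn_len);    /* write the updated value back */
    }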
vmcs.h
    24   extern void vmcs_dump_vcpu(struct vcpu *v);
    176  int vmx_create_vmcs(struct vcpu *v);
    177  void vmx_destroy_vmcs(struct vcpu *v);
    178  void vmx_vmcs_enter(struct vcpu *v);
    180  void vmx_vmcs_exit(struct vcpu *v);
    181  void vmx_vmcs_reload(struct vcpu *v);
    553  void virtual_vmcs_enter(const struct vcpu *);
    554  void virtual_vmcs_exit(const struct vcpu *);
    574  int vmx_vcpu_enable_pml(struct vcpu *v);
    575  void vmx_vcpu_disable_pml(struct vcpu *v);
    [all …]
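VMCS fields of a vcpu, in particular a remote one, may only be accessed while that vcpu's VMCS is current on the executing pCPU, which is what the vmx_vmcs_enter()/vmx_vmcs_exit() pair arranges. A hedged sketch of the usual bracketing; the wrapper and the chosen field are illustrative, while __vmwrite() and TSC_OFFSET are assumed from the rest of the VMX code:

    #include <xen/sched.h>
    #include <asm/hvm/vmx/vmcs.h>

    static void set_tsc_offset(struct vcpu *v, uint64_t offset)
    {
        vmx_vmcs_enter(v);                 /* make v's VMCS current (and pin v) */
        __vmwrite(TSC_OFFSET, offset);     /* plain VMWRITE while it is loaded */
        vmx_vmcs_exit(v);                  /* release it again */
    }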
/xen-4.10.0-shim-comet/xen/include/asm-x86/hvm/svm/
nestedsvm.h
    99   int nestedsvm_vmcb_map(struct vcpu *v, uint64_t vmcbaddr);
    100  void nestedsvm_vmexit_defer(struct vcpu *v,
    109  void nsvm_vcpu_destroy(struct vcpu *v);
    110  int nsvm_vcpu_initialise(struct vcpu *v);
    111  int nsvm_vcpu_reset(struct vcpu *v);
    112  int nsvm_vcpu_vmrun(struct vcpu *v, struct cpu_user_regs *regs);
    114  uint64_t nsvm_vcpu_hostcr3(struct vcpu *v);
    116      struct vcpu *v, unsigned int vector, int errcode);
    117  bool_t nsvm_vmcb_hap_enabled(struct vcpu *v);
    118  enum hvm_intblk nsvm_intr_blocked(struct vcpu *v);
    [all …]
/xen-4.10.0-shim-comet/xen/arch/x86/x86_64/
asm-offsets.c
    60   OFFSET(VCPU_processor, struct vcpu, processor);  in __dummy__()
    61   OFFSET(VCPU_domain, struct vcpu, domain);  in __dummy__()
    62   OFFSET(VCPU_vcpu_info, struct vcpu, vcpu_info);  in __dummy__()
    68   OFFSET(VCPU_failsafe_addr, struct vcpu,  in __dummy__()
    70   OFFSET(VCPU_failsafe_sel, struct vcpu,  in __dummy__()
    72   OFFSET(VCPU_syscall_addr, struct vcpu,  in __dummy__()
    74   OFFSET(VCPU_syscall32_addr, struct vcpu,  in __dummy__()
    76   OFFSET(VCPU_syscall32_sel, struct vcpu,  in __dummy__()
    80   OFFSET(VCPU_sysenter_addr, struct vcpu,  in __dummy__()
    82   OFFSET(VCPU_sysenter_sel, struct vcpu,  in __dummy__()
    [all …]
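These OFFSET() lines are how struct vcpu field offsets reach assembly code: __dummy__() is compiled but never run, each OFFSET() expands to an inline-asm string carrying offsetof(...) as an immediate, and the build scrapes those strings out of the object file to generate asm-offsets.h (so entry code can write VCPU_processor(%rbx) instead of a magic number). A hedged sketch of the mechanism with a toy structure, since the exact macro bodies and the scraping script may differ from what is shown here:

    #include <stddef.h>                        /* offsetof() */

    struct toy_vcpu { int vcpu_id; int processor; };

    #define DEFINE(sym, val) \
        asm volatile ( "\n.ascii \"==> #define " #sym " %0 <==\"" :: "i" (val) )
    #define OFFSET(sym, str, mem)  DEFINE(sym, offsetof(str, mem))

    void __dummy__(void)
    {
        /* Never executed; the .ascii payload ends up in the object file and is
         * turned into "#define TOY_processor 4" in a generated header. */
        OFFSET(TOY_processor, struct toy_vcpu, processor);
    }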
/xen-4.10.0-shim-comet/xen/include/xen/
domain.h
    16   struct vcpu *alloc_vcpu(
    18   struct vcpu *alloc_dom0_vcpu0(struct domain *dom0);
    19   int vcpu_reset(struct vcpu *);
    20   int vcpu_up(struct vcpu *v);
    36   struct vcpu *alloc_vcpu_struct(void);
    37   void free_vcpu_struct(struct vcpu *v);
    52   int vcpu_initialise(struct vcpu *v);
    53   void vcpu_destroy(struct vcpu *v);
    56   void unmap_vcpu_info(struct vcpu *v);
    79   void arch_dump_vcpu_info(struct vcpu *v);
    [all …]
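alloc_vcpu() is the common constructor (internally it builds on alloc_vcpu_struct() plus vcpu_initialise(), with vcpu_destroy()/free_vcpu_struct() as the teardown path). A hedged sketch of creating a domain's vcpus; the (d, vcpu_id, cpu_id) argument list is an assumption, since line 16 above truncates the prototype, and bring_up_vcpus() is an illustrative name:

    #include <xen/sched.h>
    #include <xen/domain.h>
    #include <xen/errno.h>

    static int bring_up_vcpus(struct domain *d, unsigned int count)
    {
        unsigned int i;

        for ( i = 0; i < count; i++ )
        {
            /* Third argument: initial pcpu placement (assumed meaning). */
            struct vcpu *v = alloc_vcpu(d, i, i);

            if ( v == NULL )
                return -ENOMEM;
        }

        return vcpu_up(d->vcpu[0]);   /* only the boot vcpu starts out runnable */
    }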
sched-if.h
    38   struct vcpu *curr;   /* current task */
    92   sched_lock(vcpu, const struct vcpu *v, v->processor, )
    94   sched_lock(vcpu, const struct vcpu *v, v->processor, _irq)  in sched_lock()
    96   sched_unlock(vcpu, const struct vcpu *v, v->processor, )  in sched_lock()
    98   sched_unlock(vcpu, const struct vcpu *v, v->processor, _irq)  in sched_lock()
    104  sched_lock(vcpu, const struct vcpu *v, v->processor, _irqsave, *flags)  in sched_lock()
    107  sched_unlock(vcpu, const struct vcpu *v, v->processor, _irqrestore, flags)  in sched_lock()
    126  struct vcpu *task;
    143  void * (*alloc_vdata) (const struct scheduler *, struct vcpu *,
    246  static inline int has_soft_affinity(const struct vcpu *v,  in has_soft_affinity()
    [all …]
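The sched_lock()/sched_unlock() template invocations above stamp out inline helpers (vcpu_schedule_lock(), vcpu_schedule_lock_irq(), vcpu_schedule_lock_irqsave(), and the matching unlock variants) that take the scheduler lock of the runqueue v->processor currently points at, re-checking after acquisition in case the vcpu moved. A hedged sketch of the calling convention; the generated helper names follow the templates, and poke_vcpu() is illustrative:

    #include <xen/sched.h>
    #include <xen/sched-if.h>

    static void poke_vcpu(struct vcpu *v)
    {
        spinlock_t *lock = vcpu_schedule_lock_irq(v);   /* from sched_lock(vcpu, ..., _irq) */

        /* ... inspect or adjust v's scheduling state under the lock ... */

        vcpu_schedule_unlock_irq(lock, v);              /* from sched_unlock(vcpu, ..., _irq) */
    }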
sched.h
    142  struct vcpu  struct
    152      struct vcpu *next_in_list;  argument
    310      struct vcpu **vcpu;  member
    655  void vcpu_wake(struct vcpu *v);
    678      struct vcpu *prev,
    679      struct vcpu *next);
    691      struct vcpu *same);
    764      for ( (_v) = (_d)->vcpu ? (_d)->vcpu[0] : NULL; \
    807  void vcpu_unblock(struct vcpu *v);
    808  void vcpu_pause(struct vcpu *v);
    [all …]
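Line 310 is the domain's array of vcpu pointers and line 764 is part of the for_each_vcpu() iterator built on it (start at d->vcpu[0], follow next_in_list). A hedged sketch of walking that list; pause_all_vcpus() is an illustrative wrapper around the listed vcpu_pause():

    #include <xen/sched.h>

    static void pause_all_vcpus(struct domain *d)
    {
        struct vcpu *v;

        for_each_vcpu ( d, v )    /* NULL-safe: the macro handles d->vcpu == NULL */
            vcpu_pause(v);        /* returns once each vcpu is descheduled */
    }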
/xen-4.10.0-shim-comet/xen/arch/x86/oprofile/
backtrace.c
    33   dump_hypervisor_backtrace(struct vcpu *vcpu, const struct frame_head *head,  in dump_hypervisor_backtrace()  argument
    36       if (!xenoprof_add_trace(vcpu, head->ret, mode))  in dump_hypervisor_backtrace()
    47   static inline int is_32bit_vcpu(struct vcpu *vcpu)  in is_32bit_vcpu()  argument
    49       if (is_hvm_vcpu(vcpu))  in is_32bit_vcpu()
    50           return !hvm_long_mode_active(vcpu);  in is_32bit_vcpu()
    52       return is_pv_32bit_vcpu(vcpu);  in is_32bit_vcpu()
    56   dump_guest_backtrace(struct vcpu *vcpu, const struct frame_head *head,  in dump_guest_backtrace()  argument
    61       if ( is_32bit_vcpu(vcpu) )  in dump_guest_backtrace()
    90       if (!xenoprof_add_trace(vcpu, bufhead.ret, mode))  in dump_guest_backtrace()
    150  void xenoprof_backtrace(struct vcpu *vcpu, const struct cpu_user_regs *regs,  in xenoprof_backtrace()  argument
    [all …]
/xen-4.10.0-shim-comet/xen/include/asm-x86/
vpmu.h
    26   #define vcpu_vpmu(vcpu)   (&(vcpu)->arch.vpmu)  argument
    28   #define vpmu_available(vcpu)  vpmu_is_set(vcpu_vpmu(vcpu), VPMU_AVAILABLE)  argument
    46   void (*arch_vpmu_destroy)(struct vcpu *v);
    49   void (*arch_vpmu_dump)(const struct vcpu *);
    53   int vmx_vpmu_initialise(struct vcpu *);
    55   int svm_vpmu_initialise(struct vcpu *);
    110  void vpmu_initialise(struct vcpu *v);
    111  void vpmu_destroy(struct vcpu *v);
    112  void vpmu_save(struct vcpu *v);
    113  int vpmu_load(struct vcpu *v, bool_t from_guest);
    [all …]
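vcpu_vpmu() reaches the per-vcpu PMU state and vpmu_save()/vpmu_load() move the hardware counter context when vcpus change on a pCPU. A hedged sketch of the pairing; switch_pmu_context() is illustrative, and passing 0 for the from_guest flag (i.e. not a guest-initiated flush) is an assumption about that parameter:

    #include <xen/sched.h>
    #include <asm/vpmu.h>

    static void switch_pmu_context(struct vcpu *prev, struct vcpu *next)
    {
        if ( vpmu_available(prev) )
            vpmu_save(prev);        /* stash prev's PMU MSRs/counters */

        if ( vpmu_available(next) )
            vpmu_load(next, 0);     /* reload next's context; 0 = not from guest */
    }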
paging.h
    84   void (*detach_old_tables )(struct vcpu *v);
    116  unsigned long (*gva_to_gfn )(struct vcpu *v,
    120  unsigned long (*p2m_ga_to_gfn )(struct vcpu *v,
    193  void paging_vcpu_init(struct vcpu *v);
    221  const struct paging_mode *paging_get_mode(struct vcpu *v);
    222  void paging_update_nestedmode(struct vcpu *v);
    236      struct vcpu *v = current;  in paging_fault()
    241  void paging_invlpg(struct vcpu *v, unsigned long va);
    254  unsigned long paging_gva_to_gfn(struct vcpu *v,
    279  static inline void paging_update_cr3(struct vcpu *v)  in paging_update_cr3()
    [all …]
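paging_gva_to_gfn() is the mode-independent entry point that dispatches to the per-mode gva_to_gfn hook. A hedged sketch of a walk; the (va, pfec) tail of the argument list is an assumption since line 254 truncates the prototype, and va_is_mapped() is an illustrative wrapper:

    #include <xen/sched.h>
    #include <asm/paging.h>

    static bool va_is_mapped(struct vcpu *v, unsigned long va)
    {
        uint32_t pfec = PFEC_page_present;                /* access type to emulate */
        unsigned long gfn = paging_gva_to_gfn(v, va, &pfec);

        return gfn != gfn_x(INVALID_GFN);                 /* failed walks return INVALID_GFN */
    }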
/xen-4.10.0-shim-comet/xen/common/
sched_null.c
    90   struct vcpu *vcpu;  member
    99   struct vcpu *vcpu;  member
    211      nvc->vcpu = v;  in null_alloc_vdata()
    316      if ( likely((per_cpu(npc, cpu).vcpu == NULL || per_cpu(npc, cpu).vcpu == v)  in pick_cpu()
    354      d.vcpu = v->vcpu_id;  in pick_cpu()
    516           !has_soft_affinity(wvc->vcpu, wvc->vcpu->cpu_hard_affinity) )  in _vcpu_remove()
    698      BUG_ON(nvc->vcpu != v);  in null_vcpu_check()
    745          d.vcpu = per_cpu(npc, cpu).vcpu->vcpu_id;  in null_schedule()
    785           !has_soft_affinity(wvc->vcpu, wvc->vcpu->cpu_hard_affinity) )  in null_schedule()
    865      struct vcpu *v;  in null_dump()
    [all …]
sched_credit2.c
    688      struct vcpu *v = svc->vcpu;  in get_fallback_cpu()
    957      d.vcpu = svc->vcpu->vcpu_id;  in _runq_assign()
    1246     d.vcpu = svc->vcpu->vcpu_id;  in update_svc_load()
    1303     d.vcpu = svc->vcpu->vcpu_id;  in runq_insert()
    1403     d.vcpu = cur->vcpu->vcpu_id;  in tickle_score()
    1450     d.vcpu = new->vcpu->vcpu_id;  in runq_tickle()
    1729     d.vcpu = svc->vcpu->vcpu_id;  in burn_credits()
    1745     struct vcpu *v = svc->vcpu;  in park_vcpu()
    2457     d.vcpu = svc->vcpu->vcpu_id;  in migrate()
    2516     struct vcpu *v = svc->vcpu;  in vcpu_is_migrateable()
    [all …]
/xen-4.10.0-shim-comet/xen/include/asm-arm/
vgic.h
    123  uint8_t vcpu[32];  member
    139  int (*vcpu_init)(struct vcpu *v);
    203  extern int vcpu_vgic_init(struct vcpu *v);
    204  extern struct vcpu *vgic_get_target_vcpu(struct vcpu *v, unsigned int virq);
    205  extern void vgic_vcpu_inject_irq(struct vcpu *v, unsigned int virq);
    207  extern void vgic_clear_pending_irqs(struct vcpu *v);
    214  extern void vgic_disable_irqs(struct vcpu *v, uint32_t r, int n);
    215  extern void vgic_enable_irqs(struct vcpu *v, uint32_t r, int n);
    221  extern int vcpu_vgic_free(struct vcpu *v);
    222  extern bool vgic_to_sgi(struct vcpu *v, register_t sgir,
    [all …]
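On Arm, which vcpu a virtual interrupt is routed to is recorded per rank (the uint8_t vcpu[32] member above); vgic_get_target_vcpu() resolves that and vgic_vcpu_inject_irq() queues the interrupt for the chosen vcpu. A hedged sketch of delivering a virtual SPI; deliver_virq() is illustrative and starting the lookup from d->vcpu[0] is just a convenient anchor:

    #include <xen/sched.h>
    #include <asm/vgic.h>

    static void deliver_virq(struct domain *d, unsigned int virq)
    {
        struct vcpu *target = vgic_get_target_vcpu(d->vcpu[0], virq);

        vgic_vcpu_inject_irq(target, virq);    /* mark pending and kick the target */
    }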
/xen-4.10.0-shim-comet/xen/arch/x86/pv/
traps.c
    48       struct vcpu *curr = current;  in pv_inject_event()
    121      struct vcpu *curr = current;  in set_guest_machinecheck_trapbounce()
    136      struct vcpu *curr = current;  in set_guest_nmi_trapbounce()
    145  void init_int80_direct_trap(struct vcpu *v)  in init_int80_direct_trap()
    161      struct vcpu *vcpu;   /* vcpu to inject trap */  member
    172      BUG_ON(st->vcpu == NULL);  in nmi_mce_softirq()
    198      vcpu_kick(st->vcpu);  in nmi_mce_softirq()
    199      st->vcpu = NULL;  in nmi_mce_softirq()
    222      if ( cmpxchgptr(&st->vcpu, NULL, v) )  in pv_raise_interrupt()
    233      st->vcpu = NULL;  in pv_raise_interrupt()
    [all …]
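The matches at lines 161/222/198 outline a claim-and-hand-off pattern: pv_raise_interrupt() claims a per-CPU slot by atomically swapping its vcpu pointer from NULL to the target with cmpxchgptr(), and nmi_mce_softirq() later kicks that vcpu and clears the slot. A hedged sketch of the pattern in isolation; the slot structure and function names below are illustrative, only cmpxchgptr() and vcpu_kick() are taken from the listing:

    #include <xen/sched.h>
    #include <xen/errno.h>

    struct trap_slot { struct vcpu *vcpu; };      /* stand-in for the per-CPU state */

    static int claim_slot(struct trap_slot *st, struct vcpu *v)
    {
        if ( cmpxchgptr(&st->vcpu, NULL, v) != NULL )
            return -EBUSY;        /* a previous injection is still pending */

        /* ... record which trap to inject, then raise the softirq ... */
        return 0;
    }

    static void slot_softirq(struct trap_slot *st)
    {
        vcpu_kick(st->vcpu);      /* make the target vcpu notice the pending event */
        st->vcpu = NULL;          /* free the slot for the next injection */
    }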
/xen-4.10.0-shim-comet/xen/arch/x86/cpu/mcheck/
vmce.h
    9        && hardware_domain->vcpu[0] \
    10       && guest_enabled_event(hardware_domain->vcpu[0], VIRQ_MCA))
    14   int vmce_intel_rdmsr(const struct vcpu *, uint32_t msr, uint64_t *val);
    15   int vmce_intel_wrmsr(struct vcpu *, uint32_t msr, uint64_t val);
    16   int vmce_amd_rdmsr(const struct vcpu *, uint32_t msr, uint64_t *val);
    17   int vmce_amd_wrmsr(struct vcpu *, uint32_t msr, uint64_t val);
    23   int inject_vmce(struct domain *d, int vcpu);
/xen-4.10.0-shim-comet/xen/arch/arm/
vgic.c
    93       write_atomic(&rank->vcpu[i], vcpu);  in vgic_rank_init()
    197  int vcpu_vgic_init(struct vcpu *v)  in vcpu_vgic_init()
    221  int vcpu_vgic_free(struct vcpu *v)  in vcpu_vgic_free()
    227  struct vcpu *vgic_get_target_vcpu(struct vcpu *v, unsigned int virq)  in vgic_get_target_vcpu()
    231      return v->domain->vcpu[target];  in vgic_get_target_vcpu()
    246  bool vgic_migrate_irq(struct vcpu *old, struct vcpu *new, unsigned int irq)  in vgic_migrate_irq()
    304      struct vcpu *v_target;  in arch_move_irqs()
    336      struct vcpu *v_target;  in vgic_disable_irqs()
    385      struct vcpu *v_target;  in vgic_enable_irqs()
    573      struct vcpu *v;  in vgic_vcpu_inject_spi()
    [all …]
/xen-4.10.0-shim-comet/xen/arch/x86/mm/shadow/
multi.h
    25       struct vcpu *v, mfn_t gl1mfn, void *new_gl1p, u32 size);
    28       struct vcpu *v, mfn_t gl2mfn, void *new_gl2p, u32 size);
    31       struct vcpu *v, mfn_t gl2mfn, void *new_gl2p, u32 size);
    34       struct vcpu *v, mfn_t gl3mfn, void *new_gl3p, u32 size);
    86       (struct vcpu *v, mfn_t sl1mfn, mfn_t x);
    89       (struct vcpu *v, mfn_t sl1mfn, mfn_t x);
    92       (struct vcpu *v, mfn_t sl2mfn, mfn_t x);
    95       (struct vcpu *v, mfn_t sl3mfn, mfn_t x);
    98       (struct vcpu *v, mfn_t sl4mfn, mfn_t x);
    107      (struct vcpu *v, mfn_t gmfn, mfn_t snpmfn);
    [all …]
/xen-4.10.0-shim-comet/xen/include/asm-x86/pv/
domain.h
    26   void pv_vcpu_destroy(struct vcpu *v);
    27   int pv_vcpu_initialise(struct vcpu *v);
    36   static inline void pv_vcpu_destroy(struct vcpu *v) {}  in pv_vcpu_destroy()
    37   static inline int pv_vcpu_initialise(struct vcpu *v) { return -EOPNOTSUPP; }  in pv_vcpu_initialise()
    47   void paravirt_ctxt_switch_from(struct vcpu *v);
    48   void paravirt_ctxt_switch_to(struct vcpu *v);
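The duplicate declarations here are the two halves of a config-stub pattern: the real prototypes (lines 26-27) are used when PV support is built in, while the inline stubs (lines 36-37) satisfy common-code callers otherwise, so no #ifdef is needed at the call sites. A hedged paraphrase of that layout; CONFIG_PV is assumed to be the guard, matching Xen's Kconfig naming:

    #ifdef CONFIG_PV
    void pv_vcpu_destroy(struct vcpu *v);
    int  pv_vcpu_initialise(struct vcpu *v);
    #else
    static inline void pv_vcpu_destroy(struct vcpu *v) {}
    static inline int  pv_vcpu_initialise(struct vcpu *v) { return -EOPNOTSUPP; }
    #endif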
/xen-4.10.0-shim-comet/xen/arch/x86/hvm/
vpt.c
    80       struct vcpu *v = pt->vcpu;  in pt_irq_vector()
    109      struct vcpu *v = pt->vcpu;  in pt_irq_masked()
    139      struct vcpu *v;  in pt_lock()
    143      v = pt->vcpu;  in pt_lock()
    245      vcpu_kick(pt->vcpu);  in pt_timer_fn()
    441      pt->vcpu = v;  in create_periodic_time()
    480      if ( pt->vcpu == NULL )  in destroy_periodic_time()
    503      if ( pt->vcpu == NULL )  in pt_adjust_vcpu()
    514      pt->vcpu = v;  in pt_adjust_vcpu()
    555      if ( pt->vcpu == NULL )  in pt_resume()
    [all …]
/xen-4.10.0-shim-comet/xen/arch/x86/mm/hap/
private.h
    27   unsigned long hap_gva_to_gfn_2_levels(struct vcpu *v,
    31   unsigned long hap_gva_to_gfn_3_levels(struct vcpu *v,
    35   unsigned long hap_gva_to_gfn_4_levels(struct vcpu *v,
    40   unsigned long hap_p2m_ga_to_gfn_2_levels(struct vcpu *v,
    43   unsigned long hap_p2m_ga_to_gfn_3_levels(struct vcpu *v,
    46   unsigned long hap_p2m_ga_to_gfn_4_levels(struct vcpu *v,
/xen-4.10.0-shim-comet/xen/arch/x86/cpu/
vpmu.c
    352      struct vcpu *v = (struct vcpu *)arg;  in vpmu_save_force()
    603      struct vcpu *v;  in pvpmu_init()
    608      if ( (params->vcpu >= d->max_vcpus) || (d->vcpu[params->vcpu] == NULL) )  in pvpmu_init()
    611      v = d->vcpu[params->vcpu];  in pvpmu_init()
    654      struct vcpu *v;  in pvpmu_finish()
    659      if ( (params->vcpu >= d->max_vcpus) || (d->vcpu[params->vcpu] == NULL) )  in pvpmu_finish()
    662      v = d->vcpu[params->vcpu];  in pvpmu_finish()
    699      struct vcpu *curr;  in do_xenpmu_op()
    845      struct vcpu *vcpu = per_cpu(last_vcpu, cpu);  in cpu_callback()  local
    848      if ( !vcpu )  in cpu_callback()
    [all …]
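Lines 608 and 659 show the standard validation of a guest-supplied vcpu index: range-check it against d->max_vcpus and reject slots that were never populated before dereferencing d->vcpu[]. A hedged sketch of that check factored into a helper; lookup_vcpu() is an illustrative name:

    #include <xen/sched.h>

    static struct vcpu *lookup_vcpu(struct domain *d, unsigned int idx)
    {
        if ( idx >= d->max_vcpus || d->vcpu[idx] == NULL )
            return NULL;          /* out of range, or that vcpu was never created */

        return d->vcpu[idx];
    }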
/xen-4.10.0-shim-comet/tools/libxc/
xc_sr_restore_x86_pv.c
    255      SET_FIELD(&vcpu, flags,  in process_vcpu_basic()
    385      memcpy(&domctl.u.ext_vcpucontext, vcpu->extd, vcpu->extdsz);  in process_vcpu_extended()
    413              vcpu->xsavesz);  in process_vcpu_xsave()
    423      memcpy(buffer, vcpu->xsave, vcpu->xsavesz);  in process_vcpu_xsave()
    451              vcpu->msrsz);  in process_vcpu_msrs()
    461      memcpy(buffer, vcpu->msr, vcpu->msrsz);  in process_vcpu_msrs()
    486      if ( vcpu->basic )  in update_vcpu_context()
    498      if ( vcpu->extd )  in update_vcpu_context()
    505      if ( vcpu->xsave )  in update_vcpu_context()
    512      if ( vcpu->msr )  in update_vcpu_context()
    [all …]