/******************************************************************************
 * hvm/emulate.h
 *
 * HVM instruction emulation. Used for MMIO and VMX real mode.
 *
 * Copyright (c) 2008 Citrix Systems, Inc.
 *
 * Authors:
 *    Keir Fraser <keir@xen.org>
 */

#ifndef __ASM_X86_HVM_EMULATE_H__
#define __ASM_X86_HVM_EMULATE_H__

#include <xen/err.h>
#include <xen/mm.h>
#include <xen/sched.h>
#include <xen/xvmalloc.h>
#include <asm/hvm/hvm.h>
#include <asm/x86_emulate.h>

typedef bool hvm_emulate_validate_t(const struct x86_emulate_state *state,
                                    const struct x86_emulate_ctxt *ctxt);

struct hvm_emulate_ctxt {
    struct x86_emulate_ctxt ctxt;

    /*
     * validate: Post-decode, pre-emulate hook allowing caller-controlled
     * filtering of decoded instructions.
     */
    hvm_emulate_validate_t *validate;

    /* Cache of 16 bytes of instruction. */
    uint8_t insn_buf[16];
    unsigned long insn_buf_eip;
    unsigned int insn_buf_bytes;

    struct segment_register seg_reg[10];
    unsigned int seg_reg_accessed;
    unsigned int seg_reg_dirty;

    /*
     * MFNs behind temporary mappings in the write callback.  The length is
     * arbitrary, and can be increased if writes longer than PAGE_SIZE+1 are
     * needed.
     */
    mfn_t mfn[2];

    uint32_t intr_shadow;

    bool is_mem_access;

    bool set_context;
};
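
/*
 * Illustrative sketch only: a validate hook is a post-decode, pre-execute
 * filter; returning false rejects the instruction instead of emulating it.
 * The hypothetical hook below simply reuses x86_insn_is_mem_access(), the
 * same filter handle_mmio() passes further down:
 *
 *   static bool my_validate(const struct x86_emulate_state *state,
 *                           const struct x86_emulate_ctxt *ctxt)
 *   {
 *       return x86_insn_is_mem_access(state, ctxt);
 *   }
 */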

enum emul_kind {
    EMUL_KIND_NORMAL,
    EMUL_KIND_NOWRITE,
    EMUL_KIND_SET_CONTEXT_DATA,
    EMUL_KIND_SET_CONTEXT_INSN
};

bool __nonnull(1, 2) hvm_emulate_one_insn(
    hvm_emulate_validate_t *validate,
    const char *descr);
int hvm_emulate_one(
    struct hvm_emulate_ctxt *hvmemul_ctxt,
    enum vio_completion completion);
void hvm_emulate_one_vm_event(enum emul_kind kind,
    unsigned int trapnr,
    unsigned int errcode);
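
/*
 * Illustrative only: a VM-event responder wanting the current insn emulated
 * without committing its memory write might request
 *
 *   hvm_emulate_one_vm_event(EMUL_KIND_NOWRITE, X86_EXC_UD, X86_EVENT_NO_EC);
 *
 * where trapnr/errcode describe the exception to inject should emulation
 * fail (X86_EXC_UD and X86_EVENT_NO_EC are assumed constants here, not
 * defined by this header).
 */
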
/* Must be called once to set up hvmemul state. */
void hvm_emulate_init_once(
    struct hvm_emulate_ctxt *hvmemul_ctxt,
    hvm_emulate_validate_t *validate,
    struct cpu_user_regs *regs);
/* Must be called once before each instruction emulated. */
void hvm_emulate_init_per_insn(
    struct hvm_emulate_ctxt *hvmemul_ctxt,
    const unsigned char *insn_buf,
    unsigned int insn_bytes);
void hvm_emulate_writeback(
    struct hvm_emulate_ctxt *hvmemul_ctxt);
void hvmemul_cancel(struct vcpu *v);
struct segment_register *hvmemul_get_seg_reg(
    enum x86_segment seg,
    struct hvm_emulate_ctxt *hvmemul_ctxt);
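
/*
 * Sketch of a typical caller, per the init/writeback comments above
 * (illustrative only; VIO_no_completion, guest_cpu_user_regs() and
 * X86EMUL_OKAY are assumed from elsewhere, not defined in this header):
 *
 *   struct hvm_emulate_ctxt ctxt;
 *
 *   hvm_emulate_init_once(&ctxt, NULL, guest_cpu_user_regs());
 *   if ( hvm_emulate_one(&ctxt, VIO_no_completion) == X86EMUL_OKAY )
 *       hvm_emulate_writeback(&ctxt);
 */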
89 
handle_mmio(void)90 static inline bool handle_mmio(void)
91 {
92     return hvm_emulate_one_insn(x86_insn_is_mem_access, "MMIO");
93 }

int cf_check hvmemul_insn_fetch(
    unsigned long offset, void *p_data, unsigned int bytes,
    struct x86_emulate_ctxt *ctxt);
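
/*
 * Sketch: the declaration above matches the shape of the .insn_fetch hook
 * of a struct x86_emulate_ops, so a hypothetical ops table could contain
 * (field set abridged):
 *
 *   static const struct x86_emulate_ops my_ops = {
 *       .insn_fetch = hvmemul_insn_fetch,
 *       ...
 *   };
 */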

int hvmemul_do_pio_buffer(uint16_t port,
                          unsigned int size,
                          uint8_t dir,
                          void *buffer);
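
/*
 * Example (illustrative): read one byte from guest I/O port 0x71 into a
 * local buffer, assuming IOREQ_READ as the direction value and X86EMUL_OKAY
 * as the success code:
 *
 *   uint8_t val;
 *   int rc = hvmemul_do_pio_buffer(0x71, sizeof(val), IOREQ_READ, &val);
 *
 *   if ( rc != X86EMUL_OKAY )
 *       gdprintk(XENLOG_WARNING, "PIO read failed: %d\n", rc);
 */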

#ifdef CONFIG_HVM
/*
 * The cache controlled by the functions below is not like an ordinary CPU
 * cache, i.e. one aiming to improve performance, but a "secret store" which
 * is needed for correctness.  The issue it helps to address is the need for
 * re-execution of an insn (after data was provided by a device model) to
 * observe the exact same memory state, i.e. to specifically not observe any
 * updates made in the meantime by other agents.
 * Therefore this cache gets
 * - enabled when emulation of an insn starts,
 * - disabled across processing secondary things like a hypercall resulting
 *   from insn emulation,
 * - disabled again when an emulated insn is known to not require any
 *   further re-execution.
 */
int __must_check hvmemul_cache_init(struct vcpu *v);
static inline void hvmemul_cache_destroy(struct vcpu *v)
{
    unsigned int i;

    for ( i = 0; i < ARRAY_SIZE(v->arch.hvm.hvm_io.mmio_cache); ++i )
        XVFREE(v->arch.hvm.hvm_io.mmio_cache[i]);
    XVFREE(v->arch.hvm.hvm_io.cache);
}
bool hvmemul_read_cache(const struct vcpu *v, paddr_t gpa,
                        void *buffer, unsigned int size);
void hvmemul_write_cache(const struct vcpu *v, paddr_t gpa,
                         const void *buffer, unsigned int size);
unsigned int hvmemul_cache_disable(struct vcpu *v);
void hvmemul_cache_restore(struct vcpu *v, unsigned int token);
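
/*
 * Sketch of the intended disable/restore pairing around secondary work,
 * per the description above (illustrative only):
 *
 *   unsigned int token = hvmemul_cache_disable(v);
 *
 *   ... process hypercall or other secondary operation ...
 *
 *   hvmemul_cache_restore(v, token);
 */
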
/*
 * For use in ASSERT()s only.  Note that this is not a tautology: two
 * back-to-back hvmemul_cache_disable() calls can only return equal tokens
 * if the cache was already disabled, the first call having disabled it as
 * a side effect.
 */
static inline bool hvmemul_cache_disabled(struct vcpu *v)
{
    return hvmemul_cache_disable(v) == hvmemul_cache_disable(v);
}
#else
static inline bool hvmemul_read_cache(const struct vcpu *v, paddr_t gpa,
                                      void *buf,
                                      unsigned int size) { return false; }
static inline void hvmemul_write_cache(const struct vcpu *v, paddr_t gpa,
                                       const void *buf, unsigned int size) {}
#endif

void hvm_dump_emulation_state(const char *loglvl, const char *prefix,
                              struct hvm_emulate_ctxt *hvmemul_ctxt, int rc);

/* For PVH dom0: signal whether to attempt fixup of p2m page-faults. */
extern bool opt_dom0_pf_fixup;

#endif /* __ASM_X86_HVM_EMULATE_H__ */

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */