1 /******************************************************************************
2  * x86_emulate.h
3  *
4  * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
5  *
6  * Copyright (c) 2005-2007 Keir Fraser
7  * Copyright (c) 2005-2007 XenSource Inc.
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License as published by
11  * the Free Software Foundation; either version 2 of the License, or
12  * (at your option) any later version.
13  *
14  * This program is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17  * GNU General Public License for more details.
18  *
19  * You should have received a copy of the GNU General Public License
20  * along with this program; If not, see <http://www.gnu.org/licenses/>.
21  */
22 
23 #ifndef __X86_EMULATE_H__
24 #define __X86_EMULATE_H__
25 
26 #define MAX_INST_LEN 15
27 
28 struct x86_emulate_ctxt;
29 
/*
 * Comprehensive enumeration of x86 segment registers.  Various bits of code
 * rely on this order (general purpose before system, tr at the beginning of
 * system), e.g. the is_x86_user_segment() / is_x86_system_segment() helpers
 * below.
 */
enum x86_segment {
    /* General purpose.  Matches the SReg3 encoding in opcode/ModRM bytes. */
    x86_seg_es,
    x86_seg_cs,
    x86_seg_ss,
    x86_seg_ds,
    x86_seg_fs,
    x86_seg_gs,
    /* System: Valid to use for implicit table references. */
    x86_seg_tr,
    x86_seg_ldtr,
    x86_seg_gdtr,
    x86_seg_idtr,
    /* No Segment: For accesses which are already linear. */
    x86_seg_none
};
51 
is_x86_user_segment(enum x86_segment seg)52 static inline bool is_x86_user_segment(enum x86_segment seg)
53 {
54     unsigned int idx = seg;
55 
56     return idx <= x86_seg_gs;
57 }
is_x86_system_segment(enum x86_segment seg)58 static inline bool is_x86_system_segment(enum x86_segment seg)
59 {
60     return seg >= x86_seg_tr && seg < x86_seg_none;
61 }
62 
/*
 * x86 event types. This enumeration is valid for:
 *  Intel VMX: {VM_ENTRY,VM_EXIT,IDT_VECTORING}_INTR_INFO[10:8]
 *  AMD SVM: eventinj[10:8] and exitintinfo[10:8] (types 0-4 only)
 */
enum x86_event_type {
    X86_EVENTTYPE_EXT_INTR,         /* External interrupt */
    /* Type 1 is deliberately skipped - not a valid encoding in the hardware
     * formats named above. */
    X86_EVENTTYPE_NMI = 2,          /* NMI */
    X86_EVENTTYPE_HW_EXCEPTION,     /* Hardware exception */
    X86_EVENTTYPE_SW_INTERRUPT,     /* Software interrupt (CD nn) */
    X86_EVENTTYPE_PRI_SW_EXCEPTION, /* ICEBP (F1) */
    X86_EVENTTYPE_SW_EXCEPTION,     /* INT3 (CC), INTO (CE) */
};
#define X86_EVENT_NO_EC (-1)        /* No error code. */
77 
/* Description of a pending event (exception/interrupt) to be injected. */
struct x86_event {
    int16_t       vector;       /* Vector number. */
    uint8_t       type;         /* X86_EVENTTYPE_* */
    uint8_t       insn_len;     /* Instruction length */
    int32_t       error_code;   /* X86_EVENT_NO_EC if n/a */
    unsigned long cr2;          /* Only for TRAP_page_fault h/w exception */
};
85 
/*
 * Full state of a segment register (visible and hidden portions).
 * Chosen to match the format of an AMD SVM VMCB.
 */
struct segment_register {
    uint16_t   sel;             /* Selector (visible portion). */
    union {
        uint16_t attr;          /* All attribute bits as one word. */
        struct {
            uint16_t type:4;    /* Segment type. */
            uint16_t s:   1;    /* S: 0 = system, 1 = code/data. */
            uint16_t dpl: 2;    /* Descriptor privilege level. */
            uint16_t p:   1;    /* Present. */
            uint16_t avl: 1;    /* Available for software use. */
            uint16_t l:   1;    /* Long: 64-bit code segment. */
            uint16_t db:  1;    /* Default operand size / Big. */
            uint16_t g:   1;    /* Granularity: limit scaled by 4K. */
            uint16_t pad: 4;
        };
    };
    uint32_t   limit;
    uint64_t   base;
};
109 
/* Auxiliary FPU state (FIP/FDP/FOP et al) as handed to the ->put_fpu() hook. */
struct x86_emul_fpu_aux {
    unsigned long ip, dp;       /* FIP / FDP: instruction and data pointers. */
    uint16_t cs, ds;            /* Matching code / data segment selectors. */
    unsigned int op:11;         /* FOP: 11-bit x87 opcode. */
    unsigned int dval:1;        /* Presumably "data pointer half valid" - confirm
                                   against the put_fpu() implementations. */
};
116 
/*
 * Return codes from state-accessor functions and from x86_emulate().
 */
 /* Completed successfully. State modified appropriately. */
#define X86EMUL_OKAY           0
 /* Unhandleable access or emulation. No state modified. */
#define X86EMUL_UNHANDLEABLE   1
 /* Exception raised and requires delivery. */
#define X86EMUL_EXCEPTION      2
 /* Retry the emulation for some reason. No state modified. */
#define X86EMUL_RETRY          3
 /*
  * Operation fully done by one of the hooks:
  * - validate(): operation completed (except common insn retire logic)
  * - read_segment(x86_seg_tr, ...): bypass I/O bitmap access
  * - read_io() / write_io(): bypass GPR update (non-string insns only)
  * Undefined behavior when used anywhere else.
  */
#define X86EMUL_DONE           4
 /*
  * Current instruction is not implemented by the emulator.
  * This value should only be returned by the core emulator when a valid
  * opcode is found but the execution logic for that instruction is missing.
  * It should NOT be returned by any of the x86_emulate_ops callbacks.
  */
#define X86EMUL_UNIMPLEMENTED  5
 /*
  * The current instruction's opcode is not valid.
  * If this error code is returned by a function, an #UD trap should be
  * raised by the final consumer of it.
  *
  * TODO: For the moment X86EMUL_UNRECOGNIZED and X86EMUL_UNIMPLEMENTED
  * can be used interchangeably therefore raising an #UD trap is not
  * strictly expected for now.
  */
#define X86EMUL_UNRECOGNIZED   X86EMUL_UNIMPLEMENTED
153 
/* FPU sub-types which may be requested via ->get_fpu(). */
enum x86_emulate_fpu_type {
    X86EMUL_FPU_fpu, /* Standard FPU coprocessor instruction set */
    X86EMUL_FPU_wait, /* WAIT/FWAIT instruction */
    X86EMUL_FPU_mmx, /* MMX instruction set (%mm0-%mm7) */
    X86EMUL_FPU_xmm, /* SSE instruction set (%xmm0-%xmm7, or 0-15 in 64-bit mode) */
    X86EMUL_FPU_ymm, /* AVX/XOP instruction set (%ymm0-%ymm7, or 0-15 in 64-bit mode) */
    /* This sentinel will never be passed to ->get_fpu(). */
    X86EMUL_FPU_none
};
164 
/* Output registers (%eax, %ebx, %ecx, %edx) of one CPUID leaf/subleaf. */
struct cpuid_leaf
{
    uint32_t a, b, c, d;
};
169 
170 struct x86_emulate_state;
171 
172 /*
173  * These operations represent the instruction emulator's interface to memory,
174  * I/O ports, privileged state... pretty much everything other than GPRs.
175  *
176  * NOTES:
177  *  1. If the access fails (cannot emulate, or a standard access faults) then
178  *     it is up to the memop to propagate the fault to the guest VM via
179  *     some out-of-band mechanism, unknown to the emulator. The memop signals
180  *     failure by returning X86EMUL_EXCEPTION to the emulator, which will
181  *     then immediately bail.
182  *  2. The emulator cannot handle 64-bit mode emulation on an x86/32 system.
183  */
struct x86_emulate_ops
{
    /*
     * All functions:
     *  @ctxt:  [IN ] Emulation context info as passed to the emulator.
     * All memory-access functions:
     *  @seg:   [IN ] Segment being dereferenced (specified as x86_seg_??).
     *  @offset:[IN ] Offset within segment.
     *  @p_data:[IN ] Pointer to i/o data buffer (length is @bytes)
     * Read functions:
     *  @val:   [OUT] Value read, zero-extended to 'ulong'.
     * Write functions:
     *  @val:   [IN ] Value to write (low-order bytes used as req'd).
     * Variable-length access functions:
     *  @bytes: [IN ] Number of bytes to read or write. Valid access sizes are
     *                1, 2, 4 and 8 (x86/64 only) bytes, unless otherwise
     *                stated.
     */

    /*
     * read: Emulate a memory read.
     *  @bytes: Access length (0 < @bytes < 4096).
     */
    int (*read)(
        enum x86_segment seg,
        unsigned long offset,
        void *p_data,
        unsigned int bytes,
        struct x86_emulate_ctxt *ctxt);

    /*
     * insn_fetch: Emulate fetch from instruction byte stream.
     *  Except for @bytes, all parameters are the same as for 'read'.
     *  @bytes: Access length (0 <= @bytes < 16, with zero meaning
     *  "validate address only").
     *  @seg is always x86_seg_cs.
     */
    int (*insn_fetch)(
        enum x86_segment seg,
        unsigned long offset,
        void *p_data,
        unsigned int bytes,
        struct x86_emulate_ctxt *ctxt);

    /*
     * write: Emulate a memory write.
     *  @bytes: Access length (0 < @bytes < 4096).
     */
    int (*write)(
        enum x86_segment seg,
        unsigned long offset,
        void *p_data,
        unsigned int bytes,
        struct x86_emulate_ctxt *ctxt);

    /*
     * cmpxchg: Emulate an atomic (LOCKed) CMPXCHG operation.
     *  @p_old: [IN ] Pointer to value expected to be current at @addr.
     *  @p_new: [IN ] Pointer to value to write to @addr.
     *  @bytes: [IN ] Operation size (up to 8 (x86/32) or 16 (x86/64) bytes).
     */
    int (*cmpxchg)(
        enum x86_segment seg,
        unsigned long offset,
        void *p_old,
        void *p_new,
        unsigned int bytes,
        struct x86_emulate_ctxt *ctxt);

    /*
     * validate: Post-decode, pre-emulate hook to allow caller controlled
     * filtering.
     */
    int (*validate)(
        const struct x86_emulate_state *state,
        struct x86_emulate_ctxt *ctxt);

    /*
     * rep_ins: Emulate INS: <src_port> -> <dst_seg:dst_offset>.
     *  @bytes_per_rep: [IN ] Bytes transferred per repetition.
     *  @reps:  [IN ] Maximum repetitions to be emulated.
     *          [OUT] Number of repetitions actually emulated.
     */
    int (*rep_ins)(
        uint16_t src_port,
        enum x86_segment dst_seg,
        unsigned long dst_offset,
        unsigned int bytes_per_rep,
        unsigned long *reps,
        struct x86_emulate_ctxt *ctxt);

    /*
     * rep_outs: Emulate OUTS: <src_seg:src_offset> -> <dst_port>.
     *  @bytes_per_rep: [IN ] Bytes transferred per repetition.
     *  @reps:  [IN ] Maximum repetitions to be emulated.
     *          [OUT] Number of repetitions actually emulated.
     */
    int (*rep_outs)(
        enum x86_segment src_seg,
        unsigned long src_offset,
        uint16_t dst_port,
        unsigned int bytes_per_rep,
        unsigned long *reps,
        struct x86_emulate_ctxt *ctxt);

    /*
     * rep_movs: Emulate MOVS: <src_seg:src_offset> -> <dst_seg:dst_offset>.
     *  @bytes_per_rep: [IN ] Bytes transferred per repetition.
     *  @reps:  [IN ] Maximum repetitions to be emulated.
     *          [OUT] Number of repetitions actually emulated.
     */
    int (*rep_movs)(
        enum x86_segment src_seg,
        unsigned long src_offset,
        enum x86_segment dst_seg,
        unsigned long dst_offset,
        unsigned int bytes_per_rep,
        unsigned long *reps,
        struct x86_emulate_ctxt *ctxt);

    /*
     * rep_stos: Emulate STOS: <*p_data> -> <seg:offset>.
     *  @bytes_per_rep: [IN ] Bytes transferred per repetition.
     *  @reps:  [IN ] Maximum repetitions to be emulated.
     *          [OUT] Number of repetitions actually emulated.
     */
    int (*rep_stos)(
        void *p_data,
        enum x86_segment seg,
        unsigned long offset,
        unsigned int bytes_per_rep,
        unsigned long *reps,
        struct x86_emulate_ctxt *ctxt);

    /*
     * read_segment: Emulate a read of full context of a segment register.
     *  @reg:   [OUT] Contents of segment register (visible and hidden state).
     */
    int (*read_segment)(
        enum x86_segment seg,
        struct segment_register *reg,
        struct x86_emulate_ctxt *ctxt);

    /*
     * write_segment: Emulate a write of full context of a segment register.
     *  @reg:   [IN ] Contents of segment register (visible and hidden state).
     */
    int (*write_segment)(
        enum x86_segment seg,
        const struct segment_register *reg,
        struct x86_emulate_ctxt *ctxt);

    /*
     * read_io: Read from I/O port(s).
     *  @port:  [IN ] Base port for access.
     */
    int (*read_io)(
        unsigned int port,
        unsigned int bytes,
        unsigned long *val,
        struct x86_emulate_ctxt *ctxt);

    /*
     * write_io: Write to I/O port(s).
     *  @port:  [IN ] Base port for access.
     */
    int (*write_io)(
        unsigned int port,
        unsigned int bytes,
        unsigned long val,
        struct x86_emulate_ctxt *ctxt);

    /*
     * read_cr: Read from control register.
     *  @reg:   [IN ] Register to read (0-15).
     */
    int (*read_cr)(
        unsigned int reg,
        unsigned long *val,
        struct x86_emulate_ctxt *ctxt);

    /*
     * write_cr: Write to control register.
     *  @reg:   [IN ] Register to write (0-15).
     */
    int (*write_cr)(
        unsigned int reg,
        unsigned long val,
        struct x86_emulate_ctxt *ctxt);

    /*
     * read_dr: Read from debug register.
     *  @reg:   [IN ] Register to read (0-15).
     */
    int (*read_dr)(
        unsigned int reg,
        unsigned long *val,
        struct x86_emulate_ctxt *ctxt);

    /*
     * write_dr: Write to debug register.
     *  @reg:   [IN ] Register to write (0-15).
     */
    int (*write_dr)(
        unsigned int reg,
        unsigned long val,
        struct x86_emulate_ctxt *ctxt);

    /*
     * read_msr: Read from model-specific register.
     *  @reg:   [IN ] Register to read.
     */
    int (*read_msr)(
        unsigned int reg,
        uint64_t *val,
        struct x86_emulate_ctxt *ctxt);

    /*
     * write_msr: Write to model-specific register.
     *  @reg:   [IN ] Register to write.
     */
    int (*write_msr)(
        unsigned int reg,
        uint64_t val,
        struct x86_emulate_ctxt *ctxt);

    /* wbinvd: Write-back and invalidate cache contents. */
    int (*wbinvd)(
        struct x86_emulate_ctxt *ctxt);

    /* cpuid: Emulate CPUID via given set of EAX-EDX inputs/outputs. */
    int (*cpuid)(
        uint32_t leaf,
        uint32_t subleaf,
        struct cpuid_leaf *res,
        struct x86_emulate_ctxt *ctxt);

    /*
     * get_fpu: Load emulated environment's FPU state onto processor.
     *  @exn_callback: On any FPU or SIMD exception, pass control to
     *                 (*exception_callback)(exception_callback_arg, regs).
     */
    int (*get_fpu)(
        void (*exception_callback)(void *, struct cpu_user_regs *),
        void *exception_callback_arg,
        enum x86_emulate_fpu_type type,
        struct x86_emulate_ctxt *ctxt);

    /*
     * put_fpu: Relinquish the FPU. Unhook from FPU/SIMD exception handlers.
     *  The handler, if installed, must be prepared to get called without
     *  the get_fpu one having got called before!
     * @backout: Undo updates to the specified register file (can, besides
     *           X86EMUL_FPU_none, only be X86EMUL_FPU_fpu at present);
     * @aux: Packaged up FIP/FDP/FOP values to load into FPU.
     */
    void (*put_fpu)(
        struct x86_emulate_ctxt *ctxt,
        enum x86_emulate_fpu_type backout,
        const struct x86_emul_fpu_aux *aux);

    /* invlpg: Invalidate paging structures which map addressed byte. */
    int (*invlpg)(
        enum x86_segment seg,
        unsigned long offset,
        struct x86_emulate_ctxt *ctxt);

    /* vmfunc: Emulate VMFUNC via given set of EAX ECX inputs */
    int (*vmfunc)(
        struct x86_emulate_ctxt *ctxt);
};
455 
456 struct cpu_user_regs;
457 
struct x86_emulate_ctxt
{
    /*
     * Input-only state:
     */

    /* CPU vendor (X86_VENDOR_UNKNOWN for "don't care") */
    unsigned char vendor;

    /* Set this if writes may have side effects. */
    bool force_writeback;

    /* Caller data that can be used by x86_emulate_ops' routines. */
    void *data;

    /*
     * Input/output state:
     */

    /* Register state before/after emulation. */
    struct cpu_user_regs *regs;

    /* Default address size in current execution mode (16, 32, or 64). */
    unsigned int addr_size;

    /* Stack pointer width in bits (16, 32 or 64). */
    unsigned int sp_size;

    /* Long mode active? */
    bool lma;

    /*
     * Output-only state:
     */

    /* Canonical opcode (see below) (valid only on X86EMUL_OKAY). */
    unsigned int opcode;

    /* Retirement state, set by the emulator (valid only on X86EMUL_OKAY). */
    union {
        uint8_t raw;             /* All retire flags, for bulk access. */
        struct {
            bool hlt:1;          /* Instruction HLTed. */
            bool mov_ss:1;       /* Instruction sets MOV-SS irq shadow. */
            bool sti:1;          /* Instruction sets STI irq shadow. */
            bool unblock_nmi:1;  /* Instruction clears NMI blocking. */
            bool singlestep:1;   /* Singlestepping was active. */
        };
    } retire;

    /* Pending event, if any; see x86_emul_hw_exception() et al below. */
    bool event_pending;
    struct x86_event event;
};
511 
512 /*
513  * Encode opcode extensions in the following way:
514  *     0x0xxxx for one byte opcodes
515  *    0x0fxxxx for 0f-prefixed opcodes (or their VEX/EVEX equivalents)
516  *  0x0f38xxxx for 0f38-prefixed opcodes (or their VEX/EVEX equivalents)
517  *  0x0f3axxxx for 0f3a-prefixed opcodes (or their VEX/EVEX equivalents)
518  *  0x8f08xxxx for 8f/8-prefixed XOP opcodes
519  *  0x8f09xxxx for 8f/9-prefixed XOP opcodes
520  *  0x8f0axxxx for 8f/a-prefixed XOP opcodes
 * The low byte represents the base opcode within the respective space,
522  * and some of bits 8..15 are used for encoding further information (see
523  * below).
524  * Hence no separate #define-s get added.
525  */
/* Opcode space (one byte / 0f / 0f38 / 0f3a / 8f08..8f0a) in bits 16-31. */
#define X86EMUL_OPC_EXT_MASK         0xffff0000
#define X86EMUL_OPC(ext, byte)       ((uint8_t)(byte) | \
                                      MASK_INSR((ext), X86EMUL_OPC_EXT_MASK))
/*
 * This includes the 66, F3, and F2 prefixes (see also below)
 * as well as VEX/EVEX:
 */
#define X86EMUL_OPC_MASK             (0x000000ff | X86EMUL_OPC_PFX_MASK | \
                                     X86EMUL_OPC_ENCODING_MASK)

/*
 * Note that prefixes 66, F2, and F3 get encoded only when semantically
 * meaningful, to reduce the complexity of interpreting this representation.
 */
#define X86EMUL_OPC_PFX_MASK         0x00000300
# define X86EMUL_OPC_66(ext, byte)   (X86EMUL_OPC(ext, byte) | 0x00000100)
# define X86EMUL_OPC_F3(ext, byte)   (X86EMUL_OPC(ext, byte) | 0x00000200)
# define X86EMUL_OPC_F2(ext, byte)   (X86EMUL_OPC(ext, byte) | 0x00000300)

/* How the opcode was expressed: legacy, VEX, or EVEX encoding. */
#define X86EMUL_OPC_ENCODING_MASK    0x00003000
#define X86EMUL_OPC_LEGACY_          0x00000000
#define X86EMUL_OPC_VEX_             0x00001000
# define X86EMUL_OPC_VEX(ext, byte) \
    (X86EMUL_OPC(ext, byte) | X86EMUL_OPC_VEX_)
# define X86EMUL_OPC_VEX_66(ext, byte) \
    (X86EMUL_OPC_66(ext, byte) | X86EMUL_OPC_VEX_)
# define X86EMUL_OPC_VEX_F3(ext, byte) \
    (X86EMUL_OPC_F3(ext, byte) | X86EMUL_OPC_VEX_)
# define X86EMUL_OPC_VEX_F2(ext, byte) \
    (X86EMUL_OPC_F2(ext, byte) | X86EMUL_OPC_VEX_)
#define X86EMUL_OPC_EVEX_            0x00002000
# define X86EMUL_OPC_EVEX(ext, byte) \
    (X86EMUL_OPC(ext, byte) | X86EMUL_OPC_EVEX_)
# define X86EMUL_OPC_EVEX_66(ext, byte) \
    (X86EMUL_OPC_66(ext, byte) | X86EMUL_OPC_EVEX_)
# define X86EMUL_OPC_EVEX_F3(ext, byte) \
    (X86EMUL_OPC_F3(ext, byte) | X86EMUL_OPC_EVEX_)
# define X86EMUL_OPC_EVEX_F2(ext, byte) \
    (X86EMUL_OPC_F2(ext, byte) | X86EMUL_OPC_EVEX_)

/* Convenience wrappers for the three XOP opcode maps (8f/8, 8f/9, 8f/a). */
#define X86EMUL_OPC_XOP(ext, byte)    X86EMUL_OPC(0x8f##ext, byte)
#define X86EMUL_OPC_XOP_66(ext, byte) X86EMUL_OPC_66(0x8f##ext, byte)
#define X86EMUL_OPC_XOP_F3(ext, byte) X86EMUL_OPC_F3(0x8f##ext, byte)
#define X86EMUL_OPC_XOP_F2(ext, byte) X86EMUL_OPC_F2(0x8f##ext, byte)
570 
/*
 * An executable stub buffer: a single instruction followed by a RET,
 * callable via @func (or addressable as an integer via @addr).
 */
struct x86_emulate_stub {
    union {
        void (*func)(void);
        uintptr_t addr;
    };
#ifdef __XEN__
    void *ptr;
#else
    /* Room for one insn and a (single byte) RET. */
    uint8_t buf[MAX_INST_LEN + 1];
#endif
};
583 
/*
 * x86_emulate: Emulate an instruction.
 * Returns X86EMUL_* constants.
 */
int
x86_emulate(
    struct x86_emulate_ctxt *ctxt,
    const struct x86_emulate_ops *ops);

#ifndef NDEBUG
/*
 * In debug builds, wrap x86_emulate() with some assertions about its expected
 * behaviour.
 */
int x86_emulate_wrapper(
    struct x86_emulate_ctxt *ctxt,
    const struct x86_emulate_ops *ops);
/* Route all callers through the checking wrapper. */
#define x86_emulate x86_emulate_wrapper
#endif
603 
/*
 * Given the 'reg' portion of a ModRM byte, and a register block, return a
 * pointer into the block that addresses the relevant register.
 * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
 */
void *
decode_register(
    uint8_t modrm_reg, struct cpu_user_regs *regs, int highbyte_regs);

/*
 * Unhandleable read, write or instruction fetch.  Signature-compatible with
 * the read/write/insn_fetch hooks, for use as a fallback.
 */
int
x86emul_unhandleable_rw(
    enum x86_segment seg,
    unsigned long offset,
    void *p_data,
    unsigned int bytes,
    struct x86_emulate_ctxt *ctxt);
621 
#ifdef __XEN__

/* Decode (without executing) the next instruction, via @insn_fetch. */
struct x86_emulate_state *
x86_decode_insn(
    struct x86_emulate_ctxt *ctxt,
    int (*insn_fetch)(
        enum x86_segment seg, unsigned long offset,
        void *p_data, unsigned int bytes,
        struct x86_emulate_ctxt *ctxt));

/* Accessors for properties of the decoded instruction in @state. */
unsigned int
x86_insn_opsize(const struct x86_emulate_state *state);
int
x86_insn_modrm(const struct x86_emulate_state *state,
               unsigned int *rm, unsigned int *reg);
unsigned long
x86_insn_operand_ea(const struct x86_emulate_state *state,
                    enum x86_segment *seg);
unsigned long
x86_insn_immediate(const struct x86_emulate_state *state,
                   unsigned int nr);
unsigned int
x86_insn_length(const struct x86_emulate_state *state,
                const struct x86_emulate_ctxt *ctxt);
/* Classification predicates for the decoded instruction. */
bool
x86_insn_is_mem_access(const struct x86_emulate_state *state,
                       const struct x86_emulate_ctxt *ctxt);
bool
x86_insn_is_mem_write(const struct x86_emulate_state *state,
                      const struct x86_emulate_ctxt *ctxt);
bool
x86_insn_is_portio(const struct x86_emulate_state *state,
                   const struct x86_emulate_ctxt *ctxt);
bool
x86_insn_is_cr_access(const struct x86_emulate_state *state,
                      const struct x86_emulate_ctxt *ctxt);
658 
#ifdef NDEBUG
/* In release (NDEBUG) builds, freeing decode state is a no-op. */
static inline void x86_emulate_free_state(struct x86_emulate_state *state) {}
#else
void x86_emulate_free_state(struct x86_emulate_state *state);
#endif
664 
665 #endif
666 
x86_emul_hw_exception(unsigned int vector,int error_code,struct x86_emulate_ctxt * ctxt)667 static inline void x86_emul_hw_exception(
668     unsigned int vector, int error_code, struct x86_emulate_ctxt *ctxt)
669 {
670     ASSERT(!ctxt->event_pending);
671 
672     ctxt->event.vector = vector;
673     ctxt->event.type = X86_EVENTTYPE_HW_EXCEPTION;
674     ctxt->event.error_code = error_code;
675 
676     ctxt->event_pending = true;
677 }
678 
x86_emul_pagefault(int error_code,unsigned long cr2,struct x86_emulate_ctxt * ctxt)679 static inline void x86_emul_pagefault(
680     int error_code, unsigned long cr2, struct x86_emulate_ctxt *ctxt)
681 {
682     ASSERT(!ctxt->event_pending);
683 
684     ctxt->event.vector = 14; /* TRAP_page_fault */
685     ctxt->event.type = X86_EVENTTYPE_HW_EXCEPTION;
686     ctxt->event.error_code = error_code;
687     ctxt->event.cr2 = cr2;
688 
689     ctxt->event_pending = true;
690 }
691 
x86_emul_reset_event(struct x86_emulate_ctxt * ctxt)692 static inline void x86_emul_reset_event(struct x86_emulate_ctxt *ctxt)
693 {
694     ctxt->event_pending = false;
695     ctxt->event = (struct x86_event){};
696 }
697 
698 #endif /* __X86_EMULATE_H__ */
699