/* SPDX-License-Identifier: GPL-2.0-or-later */
/******************************************************************************
 * arch/x86/pv/traps.c
 *
 * PV low level entry points.
 *
 * Copyright (c) 2017 Citrix Systems Ltd.
 */

#include <xen/event.h>
#include <xen/hypercall.h>
#include <xen/lib.h>
#include <xen/softirq.h>

#include <asm/debugreg.h>
#include <asm/idt.h>
#include <asm/irq-vectors.h>
#include <asm/pv/trace.h>
#include <asm/shared.h>
#include <asm/traps.h>

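/*
 * Inject an event (hardware exception or software interrupt) into the
 * current PV vCPU by setting up a bounce frame to the guest's registered
 * handler for that vector.
 *
 * Illustrative sketch only: callers normally go through thin wrappers
 * such as pv_inject_hw_exception() (used below for the MCE/NMI paths)
 * rather than building the event by hand, e.g.:
 *
 *     const struct x86_event event = {
 *         .vector     = X86_EXC_GP,
 *         .type       = X86_ET_HW_EXC,
 *         .error_code = 0,
 *     };
 *     pv_inject_event(&event);
 */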
void pv_inject_event(const struct x86_event *event)
{
    struct vcpu *curr = current;
    struct cpu_user_regs *regs = guest_cpu_user_regs();
    struct trap_bounce *tb;
    const struct trap_info *ti;
    const uint8_t vector = event->vector;
    unsigned int error_code = event->error_code;
    bool use_error_code;

    ASSERT(vector == event->vector); /* Confirm no truncation. */
    if ( event->type == X86_ET_HW_EXC )
    {
        ASSERT(vector < 32);
        use_error_code = X86_EXC_HAVE_EC & (1u << vector);
    }
    else
    {
        ASSERT(event->type == X86_ET_SW_INT);
        use_error_code = false;
    }
    if ( use_error_code )
        ASSERT(error_code != X86_EVENT_NO_EC);
    else
        ASSERT(error_code == X86_EVENT_NO_EC);

    tb = &curr->arch.pv.trap_bounce;
    ti = &curr->arch.pv.trap_ctxt[vector];

    tb->flags = TBF_EXCEPTION;
    tb->cs    = ti->cs;
    tb->eip   = ti->address;

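    /*
     * For software interrupts the expression below ORs the vector with
     * -1 (all bits set), so no exception-specific case can match and we
     * take the default path; hardware exceptions switch on the vector.
     */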
    switch ( vector | -(event->type == X86_ET_SW_INT) )
    {
    case X86_EXC_PF:
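        /*
         * Record the faulting address in the vcpu's virtual %cr2 and in
         * the guest-visible copy in the shared vcpu info area.
         */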
        curr->arch.pv.ctrlreg[2] = event->cr2;
        arch_set_cr2(curr, event->cr2);

        /* Re-set error_code.user flag appropriately for the guest. */
        error_code &= ~PFEC_user_mode;
        if ( !guest_kernel_mode(curr, regs) )
            error_code |= PFEC_user_mode;

        trace_pv_page_fault(event->cr2, error_code);
        break;

    case X86_EXC_DB:
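        /* Fold the pending debug status into the vcpu's virtual %dr6. */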
        curr->arch.dr6 = x86_merge_dr6(curr->domain->arch.cpu_policy,
                                       curr->arch.dr6, event->pending_dbg);
        fallthrough;
    default:
        trace_pv_trap(vector, regs->rip, use_error_code, error_code);
        break;
    }

    if ( use_error_code )
    {
        tb->flags |= TBF_EXCEPTION_ERRCODE;
        tb->error_code = error_code;
    }

    if ( TI_GET_IF(ti) )
        tb->flags |= TBF_INTERRUPT;

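    /*
     * A null callback means the guest never registered a handler for
     * this vector; there is nowhere to bounce to, so just log it.
     */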
    if ( unlikely(null_trap_bounce(curr, tb)) )
    {
        gprintk(XENLOG_ERR,
                "Unhandled: vec %u, %s[%04x]\n",
                vector, vector_name(vector), error_code);

        if ( vector == X86_EXC_PF )
            show_page_walk(event->cr2);
    }
}

/*
 * Called from asm to set up the MCE trapbounce info.
 * Returns false if no callback is set up, else true.
 */
bool set_guest_machinecheck_trapbounce(void)
{
    struct vcpu *curr = current;
    struct trap_bounce *tb = &curr->arch.pv.trap_bounce;

    pv_inject_hw_exception(X86_EXC_MC, X86_EVENT_NO_EC);
    tb->flags &= ~TBF_EXCEPTION; /* not needed for MCE delivery path */

    return !null_trap_bounce(curr, tb);
}

/*
 * Called from asm to set up the NMI trapbounce info.
 * Returns false if no callback is set up, else true.
 */
bool set_guest_nmi_trapbounce(void)
{
    struct vcpu *curr = current;
    struct trap_bounce *tb = &curr->arch.pv.trap_bounce;

    pv_inject_hw_exception(X86_EXC_NMI, X86_EVENT_NO_EC);
    tb->flags &= ~TBF_EXCEPTION; /* not needed for NMI delivery path */

    return !null_trap_bounce(curr, tb);
}

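/*
 * NMIs arrive in NMI context, where it is not safe to wake a vcpu, so
 * pv_raise_nmi() stashes the target vcpu in this per-CPU slot and the
 * wakeup is deferred to nmi_softirq() below.
 */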
static DEFINE_PER_CPU(struct vcpu *, softirq_nmi_vcpu);

static void cf_check nmi_softirq(void)
{
    struct vcpu **v_ptr = &this_cpu(softirq_nmi_vcpu);

    BUG_ON(*v_ptr == NULL);

    /*
     * Only used to defer wakeup of domain/vcpu to a safe (non-NMI)
     * context.
     */
    vcpu_kick(*v_ptr);
    *v_ptr = NULL;
}
static int __init cf_check pv_trap_init(void)
{
    open_softirq(NMI_SOFTIRQ, nmi_softirq);

    return 0;
}
__initcall(pv_trap_init);

/*
 * Deliver NMI to PV guest. Return 0 on success.
 * Called in NMI context, so no use of printk().
 */
int pv_raise_nmi(struct vcpu *v)
{
    struct vcpu **v_ptr = &per_cpu(softirq_nmi_vcpu, smp_processor_id());

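    /*
     * Claim the per-CPU slot: cmpxchgptr() returns the old value, so a
     * non-NULL result means another NMI is already pending delivery on
     * this CPU.
     */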
    if ( cmpxchgptr(v_ptr, NULL, v) )
        return -EBUSY;
    if ( !test_and_set_bool(v->arch.nmi_pending) )
    {
        /* Not safe to wake up a vcpu here */
        raise_softirq(NMI_SOFTIRQ);
        return 0;
    }
    *v_ptr = NULL;

    /* Delivery failed */
    return -EIO;
}

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */