/******************************************************************************
 * arch/x86/pv/traps.c
 *
 * PV low level entry points.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; If not, see <http://www.gnu.org/licenses/>.
 *
 * Copyright (c) 2017 Citrix Systems Ltd.
 */

#include <xen/event.h>
#include <xen/hypercall.h>
#include <xen/lib.h>
#include <xen/trace.h>
#include <xen/softirq.h>

#include <asm/apic.h>
#include <asm/shared.h>
#include <asm/traps.h>

/* Override macros from asm/page.h to make them work with mfn_t */
#undef mfn_to_page
#define mfn_to_page(mfn) __mfn_to_page(mfn_x(mfn))
#undef page_to_mfn
#define page_to_mfn(pg) _mfn(__page_to_mfn(pg))

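/* Top-level handler for the 32-bit PV guest hypercall vector (see pv_trap_init()). */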
void do_entry_int82(struct cpu_user_regs *regs)
{
    if ( unlikely(untrusted_msi) )
        check_for_unexpected_msi((uint8_t)regs->entry_vector);

    pv_hypercall(regs);
}

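/*
 * Deliver @event (a hardware exception or software interrupt) to the current
 * PV vcpu by setting up a trap bounce into the guest's registered handler.
 */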
void pv_inject_event(const struct x86_event *event)
{
    struct vcpu *curr = current;
    struct cpu_user_regs *regs = guest_cpu_user_regs();
    struct trap_bounce *tb;
    const struct trap_info *ti;
    const uint8_t vector = event->vector;
    unsigned int error_code = event->error_code;
    bool use_error_code;

    ASSERT(vector == event->vector); /* Confirm no truncation. */
    if ( event->type == X86_EVENTTYPE_HW_EXCEPTION )
    {
        ASSERT(vector < 32);
        use_error_code = TRAP_HAVE_EC & (1u << vector);
    }
    else
    {
        ASSERT(event->type == X86_EVENTTYPE_SW_INTERRUPT);
        use_error_code = false;
    }
    if ( use_error_code )
        ASSERT(error_code != X86_EVENT_NO_EC);
    else
        ASSERT(error_code == X86_EVENT_NO_EC);

    tb = &curr->arch.pv_vcpu.trap_bounce;
    ti = &curr->arch.pv_vcpu.trap_ctxt[vector];

    tb->flags = TBF_EXCEPTION;
    tb->cs    = ti->cs;
    tb->eip   = ti->address;

    if ( event->type == X86_EVENTTYPE_HW_EXCEPTION &&
         vector == TRAP_page_fault )
    {
        curr->arch.pv_vcpu.ctrlreg[2] = event->cr2;
        arch_set_cr2(curr, event->cr2);

        /* Re-set error_code.user flag appropriately for the guest. */
        error_code &= ~PFEC_user_mode;
        if ( !guest_kernel_mode(curr, regs) )
            error_code |= PFEC_user_mode;

        trace_pv_page_fault(event->cr2, error_code);
    }
    else
        trace_pv_trap(vector, regs->rip, use_error_code, error_code);

    if ( use_error_code )
    {
        tb->flags |= TBF_EXCEPTION_ERRCODE;
        tb->error_code = error_code;
    }

    if ( TI_GET_IF(ti) )
        tb->flags |= TBF_INTERRUPT;

    if ( unlikely(null_trap_bounce(curr, tb)) )
    {
        gprintk(XENLOG_WARNING,
                "Unhandled %s fault/trap [#%d, ec=%04x]\n",
                trapstr(vector), vector, error_code);

        if ( vector == TRAP_page_fault )
            show_page_walk(event->cr2);
    }
}

/*
 * Called from asm to set up the MCE trapbounce info.
 * Returns false if no callback is set up, else true.
 */
bool set_guest_machinecheck_trapbounce(void)
{
    struct vcpu *curr = current;
    struct trap_bounce *tb = &curr->arch.pv_vcpu.trap_bounce;

    pv_inject_hw_exception(TRAP_machine_check, X86_EVENT_NO_EC);
    tb->flags &= ~TBF_EXCEPTION; /* not needed for MCE delivery path */

    return !null_trap_bounce(curr, tb);
}

/*
 * Called from asm to set up the NMI trapbounce info.
 * Returns false if no callback is set up, else true.
 */
bool set_guest_nmi_trapbounce(void)
{
    struct vcpu *curr = current;
    struct trap_bounce *tb = &curr->arch.pv_vcpu.trap_bounce;

    pv_inject_hw_exception(TRAP_nmi, X86_EVENT_NO_EC);
    tb->flags &= ~TBF_EXCEPTION; /* not needed for NMI delivery path */

    return !null_trap_bounce(curr, tb);
}

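/* Refresh the int80 direct-trap bounce info from the guest's 0x80 handler. */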
void init_int80_direct_trap(struct vcpu *v)
{
    struct trap_info *ti = &v->arch.pv_vcpu.trap_ctxt[0x80];
    struct trap_bounce *tb = &v->arch.pv_vcpu.int80_bounce;

    tb->cs  = ti->cs;
    tb->eip = ti->address;

    if ( null_trap_bounce(v, tb) )
        tb->flags = 0;
    else
        tb->flags = TBF_EXCEPTION | (TI_GET_IF(ti) ? TBF_INTERRUPT : 0);
}

struct softirq_trap {
    struct domain *domain;   /* domain to inject trap */
    struct vcpu *vcpu;       /* vcpu to inject trap */
    unsigned int processor;  /* physical cpu to inject trap */
};

static DEFINE_PER_CPU(struct softirq_trap, softirq_trap);

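/*
 * Softirq handler performing the deferred vcpu wakeup on behalf of
 * pv_raise_interrupt(), moving the vcpu to the requested pcpu if necessary.
 */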
static void nmi_mce_softirq(void)
{
    unsigned int cpu = smp_processor_id();
    struct softirq_trap *st = &per_cpu(softirq_trap, cpu);

    BUG_ON(st->vcpu == NULL);

    /*
     * Set the tmp value unconditionally, so that the check in the iret
     * hypercall works.
     */
    cpumask_copy(st->vcpu->cpu_hard_affinity_tmp,
                 st->vcpu->cpu_hard_affinity);

    if ( (cpu != st->processor) ||
         (st->processor != st->vcpu->processor) )
    {
        /*
         * We are on a different physical cpu.  Make sure to wakeup the vcpu on
         * the specified processor.
         */
        vcpu_set_hard_affinity(st->vcpu, cpumask_of(st->processor));

        /* Affinity is restored in the iret hypercall. */
    }

    /*
     * Only used to defer wakeup of domain/vcpu to a safe (non-NMI/MCE)
     * context.
     */
    vcpu_kick(st->vcpu);
    st->vcpu = NULL;
}

void __init pv_trap_init(void)
{
    /* The 32-on-64 hypercall vector is only accessible from ring 1. */
    _set_gate(idt_table + HYPERCALL_VECTOR,
              SYS_DESC_trap_gate, 1, entry_int82);

    /* Fast trap for int80 (faster than taking the #GP-fixup path). */
    _set_gate(idt_table + LEGACY_SYSCALL_VECTOR, SYS_DESC_trap_gate, 3,
              &int80_direct_trap);

    open_softirq(NMI_MCE_SOFTIRQ, nmi_mce_softirq);
}

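/*
 * Mark an NMI or MCE as pending for @v.  Callers run in NMI/MCE context,
 * where waking the vcpu directly is not safe, so the actual wakeup is
 * deferred to nmi_mce_softirq() via NMI_MCE_SOFTIRQ.
 */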
int pv_raise_interrupt(struct vcpu *v, uint8_t vector)
{
    struct softirq_trap *st = &per_cpu(softirq_trap, smp_processor_id());

    switch ( vector )
    {
    case TRAP_nmi:
        if ( cmpxchgptr(&st->vcpu, NULL, v) )
            return -EBUSY;
        if ( !test_and_set_bool(v->nmi_pending) )
        {
            st->domain = v->domain;
            st->processor = v->processor;

            /* Not safe to wake up a vcpu here */
            raise_softirq(NMI_MCE_SOFTIRQ);
            return 0;
        }
        st->vcpu = NULL;
        break;

    case TRAP_machine_check:
        if ( cmpxchgptr(&st->vcpu, NULL, v) )
            return -EBUSY;

        /*
         * We are called by the machine check (exception or polling) handlers
         * on the physical CPU that reported a machine check error.
         */
        if ( !test_and_set_bool(v->mce_pending) )
        {
            st->domain = v->domain;
            st->processor = v->processor;

            /* Not safe to wake up a vcpu here */
            raise_softirq(NMI_MCE_SOFTIRQ);
            return 0;
        }
        st->vcpu = NULL;
        break;
    }

    /* Delivery failed */
    return -EIO;
}

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */