/*
 * arch/x86/hvm/monitor.c
 *
 * Arch-specific hardware virtual machine event abstractions.
 *
 * Copyright (c) 2004, Intel Corporation.
 * Copyright (c) 2005, International Business Machines Corporation.
 * Copyright (c) 2008, Citrix Systems, Inc.
 * Copyright (c) 2016, Bitdefender S.R.L.
 * Copyright (c) 2016, Tamas K Lengyel (tamas@tklengyel.com)
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; If not, see <http://www.gnu.org/licenses/>.
 */

#include <xen/vm_event.h>
#include <xen/monitor.h>
#include <asm/hvm/monitor.h>
#include <asm/monitor.h>
#include <asm/paging.h>
#include <asm/vm_event.h>
#include <public/vm_event.h>

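/*
 * Called on a guest write to a monitored control register.  An event is
 * sent only if monitoring is enabled for this register, the value actually
 * changed (when the on-change-only option is set), and none of the changed
 * bits are covered by write_ctrlreg_mask.  Returns true if an event was
 * forwarded to the monitor.
 */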
bool hvm_monitor_cr(unsigned int index, unsigned long value, unsigned long old)
{
    struct vcpu *curr = current;
    struct arch_domain *ad = &curr->domain->arch;
    unsigned int ctrlreg_bitmask = monitor_ctrlreg_bitmask(index);

    if ( (ad->monitor.write_ctrlreg_enabled & ctrlreg_bitmask) &&
         (!(ad->monitor.write_ctrlreg_onchangeonly & ctrlreg_bitmask) ||
          value != old) &&
         (!((value ^ old) & ad->monitor.write_ctrlreg_mask[index])) )
    {
        bool sync = ad->monitor.write_ctrlreg_sync & ctrlreg_bitmask;

        vm_event_request_t req = {
            .reason = VM_EVENT_REASON_WRITE_CTRLREG,
            .u.write_ctrlreg.index = index,
            .u.write_ctrlreg.new_value = value,
            .u.write_ctrlreg.old_value = old
        };

        if ( monitor_traps(curr, sync, &req) >= 0 )
            return true;
    }

    return false;
}

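/*
 * Notify the monitor that the emulator gave up on the current instruction.
 * The event is synchronous; returns true if emul-unimplemented monitoring
 * is enabled for the domain and the event was delivered.
 */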
bool hvm_monitor_emul_unimplemented(void)
{
    struct vcpu *curr = current;

    /*
     * Send a vm_event to the monitor to signal that the current
     * instruction couldn't be emulated.
     */
    vm_event_request_t req = {
        .reason = VM_EVENT_REASON_EMUL_UNIMPLEMENTED,
        .vcpu_id = curr->vcpu_id,
    };

    return curr->domain->arch.monitor.emul_unimplemented_enabled &&
        monitor_traps(curr, true, &req) == 1;
}

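/*
 * Send a synchronous MOV-TO-MSR event if writes to this MSR are monitored
 * for the domain.
 */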
void hvm_monitor_msr(unsigned int msr, uint64_t value)
{
    struct vcpu *curr = current;

    if ( monitored_msr(curr->domain, msr) )
    {
        vm_event_request_t req = {
            .reason = VM_EVENT_REASON_MOV_TO_MSR,
            .u.mov_to_msr.msr = msr,
            .u.mov_to_msr.value = value,
        };

        monitor_traps(curr, true, &req);
    }
}

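/*
 * Send a synchronous descriptor-table access event.  The raw exit
 * information is vendor specific: VMX instruction information plus exit
 * qualification, or the SVM exitinfo.
 */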
void hvm_monitor_descriptor_access(uint64_t exit_info,
                                   uint64_t vmx_exit_qualification,
                                   uint8_t descriptor, bool is_write)
{
    vm_event_request_t req = {
        .reason = VM_EVENT_REASON_DESCRIPTOR_ACCESS,
        .u.desc_access.descriptor = descriptor,
        .u.desc_access.is_write = is_write,
    };

    if ( cpu_has_vmx )
    {
        req.u.desc_access.arch.vmx.instr_info = exit_info;
        req.u.desc_access.arch.vmx.exit_qualification = vmx_exit_qualification;
    }
    else
    {
        req.u.desc_access.arch.svm.exitinfo = exit_info;
    }

    monitor_traps(current, true, &req);
}

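/*
 * Translate the linear address of the current instruction (CS base + rip)
 * into a guest frame number, performing an instruction-fetch walk with
 * user-mode rights when the vCPU executes in ring 3.
 */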
static inline unsigned long gfn_of_rip(unsigned long rip)
{
    struct vcpu *curr = current;
    struct segment_register sreg;
    uint32_t pfec = PFEC_page_present | PFEC_insn_fetch;

    if ( hvm_get_cpl(curr) == 3 )
        pfec |= PFEC_user_mode;

    hvm_get_segment_register(curr, x86_seg_cs, &sreg);

    return paging_gva_to_gfn(curr, sreg.base + rip, &pfec);
}

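/*
 * Forward a debug event (software breakpoint, singlestep, or debug
 * exception) to the monitor, provided the corresponding monitor option is
 * enabled for the domain.  Breakpoint and singlestep events are always
 * synchronous; debug exceptions honour the debug_exception_sync setting.
 * Returns 0 if the event type is not monitored, -EOPNOTSUPP for an unknown
 * type, and the monitor_traps() result otherwise.
 */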
int hvm_monitor_debug(unsigned long rip, enum hvm_monitor_debug_type type,
                      unsigned long trap_type, unsigned long insn_length)
{
    struct vcpu *curr = current;
    struct arch_domain *ad = &curr->domain->arch;
    vm_event_request_t req = {};
    bool sync;

    switch ( type )
    {
    case HVM_MONITOR_SOFTWARE_BREAKPOINT:
        if ( !ad->monitor.software_breakpoint_enabled )
            return 0;
        req.reason = VM_EVENT_REASON_SOFTWARE_BREAKPOINT;
        req.u.software_breakpoint.gfn = gfn_of_rip(rip);
        req.u.software_breakpoint.type = trap_type;
        req.u.software_breakpoint.insn_length = insn_length;
        sync = true;
        break;

    case HVM_MONITOR_SINGLESTEP_BREAKPOINT:
        if ( !ad->monitor.singlestep_enabled )
            return 0;
        req.reason = VM_EVENT_REASON_SINGLESTEP;
        req.u.singlestep.gfn = gfn_of_rip(rip);
        sync = true;
        break;

    case HVM_MONITOR_DEBUG_EXCEPTION:
        if ( !ad->monitor.debug_exception_enabled )
            return 0;
        req.reason = VM_EVENT_REASON_DEBUG_EXCEPTION;
        req.u.debug_exception.gfn = gfn_of_rip(rip);
        req.u.debug_exception.type = trap_type;
        req.u.debug_exception.insn_length = insn_length;
        sync = !!ad->monitor.debug_exception_sync;
        break;

    default:
        return -EOPNOTSUPP;
    }

    return monitor_traps(curr, sync, &req);
}

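/*
 * Send a synchronous CPUID event carrying the executed leaf/subleaf, if
 * CPUID monitoring is enabled for the domain.
 */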
int hvm_monitor_cpuid(unsigned long insn_length, unsigned int leaf,
                      unsigned int subleaf)
{
    struct vcpu *curr = current;
    struct arch_domain *ad = &curr->domain->arch;
    vm_event_request_t req = {};

    if ( !ad->monitor.cpuid_enabled )
        return 0;

    req.reason = VM_EVENT_REASON_CPUID;
    req.u.cpuid.insn_length = insn_length;
    req.u.cpuid.leaf = leaf;
    req.u.cpuid.subleaf = subleaf;

    return monitor_traps(curr, true, &req);
}

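/*
 * Send a synchronous INTERRUPT event describing the vector, type, error
 * code and %cr2 of an interrupt or exception to the monitor.
 */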
void hvm_monitor_interrupt(unsigned int vector, unsigned int type,
                           unsigned int err, uint64_t cr2)
{
    vm_event_request_t req = {
        .reason = VM_EVENT_REASON_INTERRUPT,
        .u.interrupt.x86.vector = vector,
        .u.interrupt.x86.type = type,
        .u.interrupt.x86.error_code = err,
        .u.interrupt.x86.cr2 = cr2,
    };

    monitor_traps(current, true, &req);
}

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */