1 /******************************************************************************
2  * arch/hvm/hypercall.c
3  *
4  * HVM hypercall dispatching routines
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation; either version 2 of the License, or
9  * (at your option) any later version.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  * GNU General Public License for more details.
15  *
16  * You should have received a copy of the GNU General Public License
17  * along with this program; If not, see <http://www.gnu.org/licenses/>.
18  *
19  * Copyright (c) 2017 Citrix Systems Ltd.
20  */
21 #include <xen/lib.h>
22 #include <xen/hypercall.h>
23 
24 #include <asm/hvm/support.h>
25 
/*
 * Memory-op wrapper for HVM guests.  Rejects sub-ops that are meaningless
 * for HVM, dispatches to the native or compat implementation depending on
 * the caller's ABI, and flags decrease_reservation so the device model's
 * mapcache can be invalidated once the hypercall completes.
 */
static long hvm_memory_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
{
    const struct vcpu *v = current;
    const unsigned int subop = cmd & MEMOP_CMD_MASK;
    long ret;

    /* Machine memory layout queries are not applicable to HVM guests. */
    if ( subop == XENMEM_machine_memory_map || subop == XENMEM_machphys_mapping )
        return -ENOSYS;

    ret = v->hcall_compat ? compat_memory_op(cmd, arg)
                          : do_memory_op(cmd, arg);

    /* Freed pages may still be mapped by qemu; request an invalidation. */
    if ( subop == XENMEM_decrease_reservation )
        v->domain->arch.hvm_domain.qemu_mapcache_invalidate = true;

    return ret;
}
48 
/*
 * Grant-table wrapper for HVM guests.  Only the whitelisted sub-ops below
 * are exposed; everything else is refused until it has been audited for
 * HVM use.  Dispatches to the native or compat handler as appropriate.
 */
static long hvm_grant_table_op(
    unsigned int cmd, XEN_GUEST_HANDLE_PARAM(void) uop, unsigned int count)
{
    switch ( cmd )
    {
    case GNTTABOP_query_size:
    case GNTTABOP_setup_table:
    case GNTTABOP_set_version:
    case GNTTABOP_get_version:
    case GNTTABOP_copy:
    case GNTTABOP_map_grant_ref:
    case GNTTABOP_unmap_grant_ref:
    case GNTTABOP_swap_grant_ref:
        return current->hcall_compat
               ? compat_grant_table_op(cmd, uop, count)
               : do_grant_table_op(cmd, uop, count);
    }

    /* All other commands need auditing before being made available. */
    return -ENOSYS;
}
73 
/*
 * Physdev-op wrapper for HVM guests.  The PIRQ-related sub-ops listed
 * below are available to any domain with emulated PIRQ support; all other
 * sub-ops are additionally restricted to the hardware domain.
 */
static long hvm_physdev_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
{
    const struct vcpu *v = current;
    bool whitelisted;

    switch ( cmd )
    {
    case PHYSDEVOP_map_pirq:
    case PHYSDEVOP_unmap_pirq:
    case PHYSDEVOP_eoi:
    case PHYSDEVOP_irq_status_query:
    case PHYSDEVOP_get_free_pirq:
        whitelisted = true;
        break;

    default:
        whitelisted = false;
        break;
    }

    /* Sub-ops outside the whitelist are hardware-domain only. */
    if ( !whitelisted && !is_hardware_domain(v->domain) )
        return -ENOSYS;

    /* Every permitted sub-op requires the domain to have emulated PIRQs. */
    if ( !has_pirq(v->domain) )
        return -ENOSYS;

    return v->hcall_compat ? compat_physdev_op(cmd, arg)
                           : do_physdev_op(cmd, arg);
}
99 
/*
 * Table-entry helpers.  Each expands to a { native, compat } pair of
 * handler pointers indexed by the hypercall number:
 *
 *  HYPERCALL(x)   - one do_x handler serves both 64-bit and compat guests.
 *  HVM_CALL(x)    - HVM-specific hvm_x wrapper (above) serves both ABIs.
 *  COMPAT_CALL(x) - separate do_x (native) and compat_x (32-bit) handlers.
 */
#define HYPERCALL(x)                                         \
    [ __HYPERVISOR_ ## x ] = { (hypercall_fn_t *) do_ ## x,  \
                               (hypercall_fn_t *) do_ ## x }

#define HVM_CALL(x)                                          \
    [ __HYPERVISOR_ ## x ] = { (hypercall_fn_t *) hvm_ ## x, \
                               (hypercall_fn_t *) hvm_ ## x }

#define COMPAT_CALL(x)                                       \
    [ __HYPERVISOR_ ## x ] = { (hypercall_fn_t *) do_ ## x,  \
                               (hypercall_fn_t *) compat_ ## x }

/* arch_1 is (ab)used to continue a preempted paging domctl. */
#define do_arch_1             paging_domctl_continuation

/* Dispatch table, sparsely indexed by hypercall number. */
static const hypercall_table_t hvm_hypercall_table[] = {
    HVM_CALL(memory_op),
    HVM_CALL(grant_table_op),
    COMPAT_CALL(vcpu_op),
    HVM_CALL(physdev_op),
    COMPAT_CALL(xen_version),
    HYPERCALL(console_io),
    HYPERCALL(event_channel_op),
    COMPAT_CALL(sched_op),
    COMPAT_CALL(set_timer_op),
    HYPERCALL(xsm_op),
    HYPERCALL(hvm_op),
    HYPERCALL(sysctl),
    HYPERCALL(domctl),
#ifdef CONFIG_TMEM
    HYPERCALL(tmem_op),
#endif
    COMPAT_CALL(platform_op),
    COMPAT_CALL(mmuext_op),
    HYPERCALL(xenpmu_op),
    COMPAT_CALL(dm_op),
    HYPERCALL(arch_1)
};

#undef do_arch_1

#undef HYPERCALL
#undef HVM_CALL
#undef COMPAT_CALL
143 
/*
 * Dispatch a hypercall issued by an HVM guest.
 *
 * Returns HVM_HCALL_completed when the call finished (result in regs->rax)
 * or HVM_HCALL_preempted when it must be restarted after a continuation.
 *
 * NOTE(review): control flow in the mode switch below is deliberately
 * unconventional (a default: label nested inside an if body, and
 * fall-through cascades in the debug clobber switches) — do not "clean up"
 * without understanding each path.
 */
int hvm_hypercall(struct cpu_user_regs *regs)
{
    struct vcpu *curr = current;
    struct domain *currd = curr->domain;
    int mode = hvm_guest_x86_mode(curr);
    unsigned long eax = regs->eax;

    switch ( mode )
    {
    case 8: /* 64-bit guest: hypercall number in full rax. */
        eax = regs->rax;
        /* Fallthrough to permission check. */
    case 4:
    case 2:
        /*
         * Userspace is normally denied, but when the monitor subsystem
         * has enabled userspace guest requests, allow exactly
         * HVMOP_guest_request_vm_event through regardless of CPL.
         */
        if ( currd->arch.monitor.guest_request_userspace_enabled &&
            eax == __HYPERVISOR_hvm_op &&
            (mode == 8 ? regs->rdi : regs->ebx) == HVMOP_guest_request_vm_event )
            break;

        if ( unlikely(hvm_get_cpl(curr)) )
        {
            /*
             * The default: label sits inside this if so that unrecognised
             * modes share the -EPERM path with non-CPL0 callers.
             */
    default:
            regs->rax = -EPERM;
            return HVM_HCALL_completed;
        }
    case 0: /* Real mode: no CPL check applicable. */
        break;
    }

    /* Numbers with the top bit set belong to the Viridian (Hyper-V) ABI. */
    if ( (eax & 0x80000000) && is_viridian_domain(currd) )
        return viridian_hypercall(regs);

    /* Every table entry must have a matching argument-count entry. */
    BUILD_BUG_ON(ARRAY_SIZE(hvm_hypercall_table) >
                 ARRAY_SIZE(hypercall_args_table));

    if ( (eax >= ARRAY_SIZE(hvm_hypercall_table)) ||
         !hvm_hypercall_table[eax].native )
    {
        regs->rax = -ENOSYS;
        return HVM_HCALL_completed;
    }

    /* Handlers set this if they create a continuation. */
    curr->hcall_preempted = false;

    if ( mode == 8 )
    {
        /* 64-bit calling convention: args in rdi/rsi/rdx/r10/r8/r9. */
        unsigned long rdi = regs->rdi;
        unsigned long rsi = regs->rsi;
        unsigned long rdx = regs->rdx;
        unsigned long r10 = regs->r10;
        unsigned long r8 = regs->r8;
        unsigned long r9 = regs->r9;

        HVM_DBG_LOG(DBG_LEVEL_HCALL, "hcall%lu(%lx, %lx, %lx, %lx, %lx, %lx)",
                    eax, rdi, rsi, rdx, r10, r8, r9);

#ifndef NDEBUG
        /*
         * Deliberately corrupt parameter regs not used by this hypercall.
         * Intentional fallthrough: case N poisons argument N+1 onwards,
         * catching guests that rely on unused registers being preserved.
         */
        switch ( hypercall_args_table[eax].native )
        {
        case 0: rdi = 0xdeadbeefdeadf00dUL;
        case 1: rsi = 0xdeadbeefdeadf00dUL;
        case 2: rdx = 0xdeadbeefdeadf00dUL;
        case 3: r10 = 0xdeadbeefdeadf00dUL;
        case 4: r8 = 0xdeadbeefdeadf00dUL;
        case 5: r9 = 0xdeadbeefdeadf00dUL;
        }
#endif

        regs->rax = hvm_hypercall_table[eax].native(rdi, rsi, rdx, r10, r8,
                                                    r9);

#ifndef NDEBUG
        if ( !curr->hcall_preempted )
        {
            /*
             * Deliberately corrupt parameter regs used by this hypercall
             * (fallthrough cascade, mirror image of the one above).  Skipped
             * on preemption so the continuation still sees its arguments.
             */
            switch ( hypercall_args_table[eax].native )
            {
            case 6: regs->r9  = 0xdeadbeefdeadf00dUL;
            case 5: regs->r8  = 0xdeadbeefdeadf00dUL;
            case 4: regs->r10 = 0xdeadbeefdeadf00dUL;
            case 3: regs->rdx = 0xdeadbeefdeadf00dUL;
            case 2: regs->rsi = 0xdeadbeefdeadf00dUL;
            case 1: regs->rdi = 0xdeadbeefdeadf00dUL;
            }
        }
#endif
    }
    else
    {
        /* 32-bit (compat) convention: args in ebx/ecx/edx/esi/edi/ebp. */
        unsigned int ebx = regs->ebx;
        unsigned int ecx = regs->ecx;
        unsigned int edx = regs->edx;
        unsigned int esi = regs->esi;
        unsigned int edi = regs->edi;
        unsigned int ebp = regs->ebp;

        HVM_DBG_LOG(DBG_LEVEL_HCALL, "hcall%lu(%x, %x, %x, %x, %x, %x)", eax,
                    ebx, ecx, edx, esi, edi, ebp);

#ifndef NDEBUG
        /* Deliberately corrupt parameter regs not used by this hypercall. */
        switch ( hypercall_args_table[eax].compat )
        {
        case 0: ebx = 0xdeadf00d;
        case 1: ecx = 0xdeadf00d;
        case 2: edx = 0xdeadf00d;
        case 3: esi = 0xdeadf00d;
        case 4: edi = 0xdeadf00d;
        case 5: ebp = 0xdeadf00d;
        }
#endif

        /* Tell nested handlers (e.g. hvm_memory_op) to take the compat path. */
        curr->hcall_compat = true;
        regs->rax = hvm_hypercall_table[eax].compat(ebx, ecx, edx, esi, edi,
                                                    ebp);
        curr->hcall_compat = false;

#ifndef NDEBUG
        if ( !curr->hcall_preempted )
        {
            /* Deliberately corrupt parameter regs used by this hypercall. */
            switch ( hypercall_args_table[eax].compat )
            {
            case 6: regs->rbp = 0xdeadf00d;
            case 5: regs->rdi = 0xdeadf00d;
            case 4: regs->rsi = 0xdeadf00d;
            case 3: regs->rdx = 0xdeadf00d;
            case 2: regs->rcx = 0xdeadf00d;
            case 1: regs->rbx = 0xdeadf00d;
            }
        }
#endif
    }

    HVM_DBG_LOG(DBG_LEVEL_HCALL, "hcall%lu -> %lx", eax, regs->rax);

    if ( curr->hcall_preempted )
        return HVM_HCALL_preempted;

    /* Flush qemu's mapcache if a handler (e.g. decrease_reservation) asked. */
    if ( unlikely(currd->arch.hvm_domain.qemu_mapcache_invalidate) &&
         test_and_clear_bool(currd->arch.hvm_domain.qemu_mapcache_invalidate) )
        send_invalidate_req();

    return HVM_HCALL_completed;
}
290 
291 /*
292  * Local variables:
293  * mode: C
294  * c-file-style: "BSD"
295  * c-basic-offset: 4
296  * tab-width: 4
297  * indent-tabs-mode: nil
298  * End:
299  */
300