/*
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation;
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; If not, see <http://www.gnu.org/licenses/>.
 */

#include "xg_private.h"
#include "xenguest.h"

#if defined(__i386__) || defined(__x86_64__)

#include <xen/foreign/x86_32.h>
#include <xen/foreign/x86_64.h>
#include <xen/hvm/params.h>
#include "xg_core.h"

/*
 * Rewrite the guest-visible return value of vCPU0's pending
 * SCHEDOP_shutdown:SHUTDOWN_suspend hypercall (held in eax/rax) to 1,
 * signalling to a cooperative guest that its suspend was cancelled
 * rather than completed in a new domain context.
 *
 * The domain must already be suspended; otherwise we would be
 * corrupting live vCPU state.
 *
 * Returns 0 on success, -1 (with errno set) or a negative
 * xc_vcpu_{get,set}context() error on failure.
 */
static int modify_returncode(xc_interface *xch, uint32_t domid)
{
    vcpu_guest_context_any_t ctxt;
    xc_domaininfo_t info;
    xen_capabilities_info_t caps;
    struct domain_info_context _dinfo = {};
    struct domain_info_context *dinfo = &_dinfo;
    int rc;

    if ( xc_domain_getinfo_single(xch, domid, &info) < 0 )
    {
        PERROR("Could not get info for dom%u", domid);
        return -1;
    }

    if ( !dominfo_shutdown_with(&info, SHUTDOWN_suspend) )
    {
        /*
         * Print the shutdown flag as a 0/1 indicator rather than the raw
         * masked bit, and use %u for the unsigned domid.
         */
        ERROR("Dom %u not suspended: (shutdown %d, reason %d)", domid,
              !!(info.flags & XEN_DOMINF_shutdown),
              dominfo_shutdown_reason(&info));
        errno = EINVAL;
        return -1;
    }

    if ( info.flags & XEN_DOMINF_hvm_guest )
    {
        /*
         * HVM guests without PV drivers have no return code to modify.
         * NOTE(review): a failed xc_hvm_param_get() leaves irq == 0 and is
         * deliberately treated the same as "no PV drivers present".
         */
        uint64_t irq = 0;
        xc_hvm_param_get(xch, domid, HVM_PARAM_CALLBACK_IRQ, &irq);
        if ( !irq )
            return 0;

        /* HVM guests have host address width. */
        if ( xc_version(xch, XENVER_capabilities, &caps) != 0 )
        {
            PERROR("Could not get Xen capabilities");
            return -1;
        }
        dinfo->guest_width = strstr(caps, "x86_64") ? 8 : 4;
    }
    else
    {
        /* Probe PV guest address width. */
        if ( xc_domain_get_guest_width(xch, domid, &dinfo->guest_width) )
            return -1;
    }

    if ( (rc = xc_vcpu_getcontext(xch, domid, 0, &ctxt)) != 0 )
        return rc;

    /* eax/rax is the hypercall return value the resumed guest observes. */
    SET_FIELD(&ctxt, user_regs.eax, 1, dinfo->guest_width);

    if ( (rc = xc_vcpu_setcontext(xch, domid, 0, &ctxt)) != 0 )
        return rc;

    return 0;
}

#else

/* Architectures without x86-style vCPU registers: nothing to rewrite. */
static int modify_returncode(xc_interface *xch, uint32_t domid)
{
    return 0;
}

#endif

/*
 * Resume a suspended domain in place: patch the guest-visible suspend
 * return code first, then unpause all vCPUs via the resume domctl.
 */
static int xc_domain_resume_cooperative(xc_interface *xch, uint32_t domid)
{
    struct xen_domctl domctl = {
        .cmd = XEN_DOMCTL_resumedomain,
        .domain = domid,
    };
    int rc;

    /*
     * Set hypercall return code to indicate that suspend is cancelled
     * (rather than resuming in a new domain context).
     */
    rc = modify_returncode(xch, domid);
    if ( rc != 0 )
        return rc;

    return do_domctl(xch, &domctl);
}

#if defined(__i386__) || defined(__x86_64__)
static int xc_domain_resume_hvm(xc_interface *xch, uint32_t domid)
{
    /*
     * XEN_DOMCTL_resumedomain unpauses every vCPU; after the domctl the
     * guest runs.
     *
     * A PVHVM guest got here by calling the hypercall
     *   SCHEDOP_shutdown:SHUTDOWN_suspend
     * to suspend itself.  We leave the return code unmodified, so its PV
     * drivers will disconnect and reconnect.
     *
     * A plain HVM guest will simply continue running.
     */
    struct xen_domctl domctl = {
        .cmd = XEN_DOMCTL_resumedomain,
        .domain = domid,
    };

    return do_domctl(xch, &domctl);
}
#endif

/*
 * Reset a suspended domain's environment and resume it, so the guest
 * believes it is resuming in a new domain context — its
 * SCHEDOP_shutdown(SHUTDOWN_suspend) hypercall will return 0.
 *
 * On x86 PV guests this rewrites store_mfn and console_mfn in the
 * start_info page back to MFNs (the suspend path converted them to
 * PFNs), then resets all secondary vCPU state before issuing the
 * resume domctl.  x86 HVM guests are handed off to
 * xc_domain_resume_hvm() instead.
 *
 * Returns 0 on success, -1 (or a do_domctl() error value) on failure.
 */
static int xc_domain_resume_any(xc_interface *xch, uint32_t domid)
{
    struct xen_domctl domctl = {};
    xc_domaininfo_t info;
    unsigned int i;      /* vCPU index; max_vcpu_id is unsigned, so avoid
                          * a signed/unsigned comparison in the loop. */
    int rc = -1;
#if defined(__i386__) || defined(__x86_64__)
    struct domain_info_context _dinfo = { .guest_width = 0,
                                          .p2m_size = 0 };
    struct domain_info_context *dinfo = &_dinfo;
    xen_pfn_t mfn, store_mfn, console_mfn;
    vcpu_guest_context_any_t ctxt;
    start_info_any_t *start_info;
    shared_info_any_t *shinfo = NULL;
    xen_pfn_t *p2m = NULL;
#endif

    if ( xc_domain_getinfo_single(xch, domid, &info) < 0 )
    {
        PERROR("Could not get domain info");
        return rc;
    }

    /*
     * (x86 only) Rewrite store_mfn and console_mfn back to MFN (from PFN).
     */
#if defined(__i386__) || defined(__x86_64__)
    if ( info.flags & XEN_DOMINF_hvm_guest )
        return xc_domain_resume_hvm(xch, domid);

    if ( xc_domain_get_guest_width(xch, domid, &dinfo->guest_width) != 0 )
    {
        PERROR("Could not get domain width");
        return rc;
    }

    /* Map the shared info frame */
    shinfo = xc_map_foreign_range(xch, domid, PAGE_SIZE,
                                  PROT_READ, info.shared_info_frame);
    if ( shinfo == NULL )
    {
        ERROR("Couldn't map shared info");
        goto out;
    }

    /* Map the p2m list */
    if ( xc_core_arch_map_p2m(xch, dinfo, &info, shinfo, &p2m) )
    {
        ERROR("Couldn't map p2m table");
        goto out;
    }

    if ( xc_vcpu_getcontext(xch, domid, 0, &ctxt) )
    {
        ERROR("Could not get vcpu context");
        goto out;
    }

    /* On suspend, edx holds the MFN of the guest's start_info page. */
    mfn = GET_FIELD(&ctxt, user_regs.edx, dinfo->guest_width);

    start_info = xc_map_foreign_range(xch, domid, PAGE_SIZE,
                                      PROT_READ | PROT_WRITE, mfn);
    if ( start_info == NULL )
    {
        ERROR("Couldn't map start_info");
        goto out;
    }

    /*
     * Translate the PFNs saved at suspend time back to MFNs via the
     * guest-width-sized p2m table.
     * NOTE(review): assumes the saved PFNs are within the mapped p2m —
     * no bounds check against dinfo->p2m_size here; confirm upstream.
     */
    store_mfn = GET_FIELD(start_info, store_mfn, dinfo->guest_width);
    console_mfn = GET_FIELD(start_info, console.domU.mfn, dinfo->guest_width);
    if ( dinfo->guest_width == 4 )
    {
        store_mfn = ((uint32_t *)p2m)[store_mfn];
        console_mfn = ((uint32_t *)p2m)[console_mfn];
    }
    else
    {
        store_mfn = ((uint64_t *)p2m)[store_mfn];
        console_mfn = ((uint64_t *)p2m)[console_mfn];
    }
    SET_FIELD(start_info, store_mfn, store_mfn, dinfo->guest_width);
    SET_FIELD(start_info, console.domU.mfn, console_mfn, dinfo->guest_width);

    munmap(start_info, PAGE_SIZE);
#endif /* defined(__i386__) || defined(__x86_64__) */

    /* Reset all secondary CPU states (vCPU0 keeps its context). */
    for ( i = 1; i <= info.max_vcpu_id; i++ )
        if ( xc_vcpu_setcontext(xch, domid, i, NULL) != 0 )
        {
            ERROR("Couldn't reset vcpu state");
            goto out;
        }

    /* Ready to resume domain execution now. */
    domctl.cmd = XEN_DOMCTL_resumedomain;
    domctl.domain = domid;
    rc = do_domctl(xch, &domctl);

 out:
#if defined(__i386__) || defined(__x86_64__)
    if (p2m)
        munmap(p2m, dinfo->p2m_frames * PAGE_SIZE);
    if (shinfo)
        munmap(shinfo, PAGE_SIZE);
#endif

    return rc;
}

/*
 * Resume execution of a domain after suspend shutdown.
 * This can happen in one of two ways:
 * 1. (fast=1) Resume the guest without resetting the domain environment.
 *    The guest's call to SCHEDOP_shutdown(SHUTDOWN_suspend) will return 1.
 *
 * 2. (fast=0) Reset guest environment so it believes it is resumed in a new
 *    domain context. The guest's call to SCHEDOP_shutdown(SHUTDOWN_suspend)
 *    will return 0.
 *
 * (1) should only be used for guests which can handle the special return
 * code. Also note that the insertion of the return code is quite interesting
 * and that the guest MUST be paused - otherwise we would be corrupting
 * the guest vCPU state.
 *
 * (2) should be used only for guests which cannot handle the special
 * new return code - and it is always safe (but slower).
 */
int xc_domain_resume(xc_interface *xch, uint32_t domid, int fast)
{
    /* fast path: cooperative resume; otherwise full environment reset. */
    if ( fast )
        return xc_domain_resume_cooperative(xch, domid);

    return xc_domain_resume_any(xch, domid);
}