/******************************************************************************
 * xc_misc.c
 *
 * Miscellaneous control interface functions.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation;
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; If not, see <http://www.gnu.org/licenses/>.
 */

#include "xc_bitops.h"
#include "xc_private.h"
#include <xen/hvm/hvm_op.h>

int xc_get_max_cpus(xc_interface *xch)
{
    static int max_cpus = 0;
    xc_physinfo_t physinfo;

    if ( max_cpus )
        return max_cpus;

    if ( !xc_physinfo(xch, &physinfo) )
    {
        max_cpus = physinfo.max_cpu_id + 1;
        return max_cpus;
    }

    return -1;
}

int xc_get_online_cpus(xc_interface *xch)
{
    xc_physinfo_t physinfo;

    if ( !xc_physinfo(xch, &physinfo) )
        return physinfo.nr_cpus;

    return -1;
}

int xc_get_max_nodes(xc_interface *xch)
{
    static int max_nodes = 0;
    xc_physinfo_t physinfo;

    if ( max_nodes )
        return max_nodes;

    if ( !xc_physinfo(xch, &physinfo) )
    {
        max_nodes = physinfo.max_node_id + 1;
        return max_nodes;
    }

    return -1;
}

int xc_get_cpumap_size(xc_interface *xch)
{
    int max_cpus = xc_get_max_cpus(xch);

    if ( max_cpus < 0 )
        return -1;
    return (max_cpus + 7) / 8;
}

int xc_get_nodemap_size(xc_interface *xch)
{
    int max_nodes = xc_get_max_nodes(xch);

    if ( max_nodes < 0 )
        return -1;
    return (max_nodes + 7) / 8;
}

xc_cpumap_t xc_cpumap_alloc(xc_interface *xch)
{
    int sz;

    sz = xc_get_cpumap_size(xch);
    if ( sz <= 0 )
        return NULL;
    return calloc(1, sz);
}

/*
 * xc_bitops.h has macros that do this as well - however they assume that
 * the bitmask is word aligned but xc_cpumap_t is only guaranteed to be
 * byte aligned and so we need byte versions for architectures which do
 * not support misaligned accesses (which is basically everyone
 * but x86, although even on x86 it can be inefficient).
 *
 * NOTE: The xc_bitops macros now use byte alignment.
 * TODO: Clean up the users of this interface.
 */
#define BITS_PER_CPUMAP(map) (sizeof(*map) * 8)
#define CPUMAP_ENTRY(cpu, map) ((map))[(cpu) / BITS_PER_CPUMAP(map)]
#define CPUMAP_SHIFT(cpu, map) ((cpu) % BITS_PER_CPUMAP(map))
void xc_cpumap_clearcpu(int cpu, xc_cpumap_t map)
{
    CPUMAP_ENTRY(cpu, map) &= ~(1U << CPUMAP_SHIFT(cpu, map));
}

void xc_cpumap_setcpu(int cpu, xc_cpumap_t map)
{
    CPUMAP_ENTRY(cpu, map) |= (1U << CPUMAP_SHIFT(cpu, map));
}

int xc_cpumap_testcpu(int cpu, xc_cpumap_t map)
{
    return (CPUMAP_ENTRY(cpu, map) >> CPUMAP_SHIFT(cpu, map)) & 1;
}
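
/*
 * Illustrative sketch (not part of the original source): typical use of
 * the per-byte cpumap helpers above. Assumes an open handle from
 * xc_interface_open(); the map is byte granular, so the caller needs no
 * word-size or alignment assumptions.
 */
static inline void example_cpumap_usage(xc_interface *xch)
{
    xc_cpumap_t map = xc_cpumap_alloc(xch);

    if ( !map )
        return;

    xc_cpumap_setcpu(2, map);           /* mark CPU 2 */
    if ( xc_cpumap_testcpu(2, map) )    /* verify the bit is set */
        xc_cpumap_clearcpu(2, map);     /* and clear it again */

    free(map);  /* allocated with calloc(), so plain free() suffices */
}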

xc_nodemap_t xc_nodemap_alloc(xc_interface *xch)
{
    int sz;

    sz = xc_get_nodemap_size(xch);
    if ( sz <= 0 )
        return NULL;
    return calloc(1, sz);
}

int xc_readconsolering(xc_interface *xch,
                       char *buffer,
                       unsigned int *pnr_chars,
                       int clear, int incremental, uint32_t *pindex)
{
    int ret;
    unsigned int nr_chars = *pnr_chars;
    struct xen_sysctl sysctl = {};
    DECLARE_HYPERCALL_BOUNCE(buffer, nr_chars, XC_HYPERCALL_BUFFER_BOUNCE_OUT);

    if ( xc_hypercall_bounce_pre(xch, buffer) )
        return -1;

    sysctl.cmd = XEN_SYSCTL_readconsole;
    set_xen_guest_handle(sysctl.u.readconsole.buffer, buffer);
    sysctl.u.readconsole.count = nr_chars;
    sysctl.u.readconsole.clear = clear;
    sysctl.u.readconsole.incremental = 0;
    if ( pindex )
    {
        sysctl.u.readconsole.index = *pindex;
        sysctl.u.readconsole.incremental = incremental;
    }

    if ( (ret = do_sysctl(xch, &sysctl)) == 0 )
    {
        *pnr_chars = sysctl.u.readconsole.count;
        if ( pindex )
            *pindex = sysctl.u.readconsole.index;
    }

    xc_hypercall_bounce_post(xch, buffer);

    return ret;
}
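
/*
 * Illustrative sketch (an assumption about typical usage, not code from
 * the original file): draining the console ring incrementally. The index
 * cookie lets each call resume where the previous one stopped; the buffer
 * size and output handling here are arbitrary choices.
 */
static inline void example_drain_console(xc_interface *xch)
{
    char buf[16384];
    uint32_t index = 0;
    unsigned int nr = sizeof(buf);

    /* incremental=1 with a persistent 'index' resumes across calls. */
    while ( xc_readconsolering(xch, buf, &nr, 0, 1, &index) == 0 && nr )
    {
        fwrite(buf, 1, nr, stdout);     /* assumes <stdio.h> is available */
        nr = sizeof(buf);
    }
}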

int xc_send_debug_keys(xc_interface *xch, const char *keys)
{
    int ret, len = strlen(keys);
    struct xen_sysctl sysctl = {};
    DECLARE_HYPERCALL_BOUNCE_IN(keys, len);

    if ( xc_hypercall_bounce_pre(xch, keys) )
        return -1;

    sysctl.cmd = XEN_SYSCTL_debug_keys;
    set_xen_guest_handle(sysctl.u.debug_keys.keys, keys);
    sysctl.u.debug_keys.nr_keys = len;

    ret = do_sysctl(xch, &sysctl);

    xc_hypercall_bounce_post(xch, keys);

    return ret;
}

int xc_physinfo(xc_interface *xch,
                xc_physinfo_t *put_info)
{
    int ret;
    struct xen_sysctl sysctl = {};

    sysctl.cmd = XEN_SYSCTL_physinfo;

    if ( (ret = do_sysctl(xch, &sysctl)) != 0 )
        return ret;

    memcpy(put_info, &sysctl.u.physinfo, sizeof(*put_info));

    return 0;
}

int xc_microcode_update(xc_interface *xch, const void *buf,
                        size_t len, unsigned int flags)
{
    int ret;
    struct xen_platform_op platform_op = {};
    DECLARE_HYPERCALL_BUFFER(struct xenpf_microcode_update2, uc);

    uc = xc_hypercall_buffer_alloc(xch, uc, len);
    if ( uc == NULL )
        return -1;

    memcpy(uc, buf, len);

    platform_op.cmd = XENPF_microcode_update2;
    platform_op.u.microcode2.length = len;
    platform_op.u.microcode2.flags = flags;
    set_xen_guest_handle(platform_op.u.microcode2.data, uc);

    ret = do_platform_op(xch, &platform_op);

    xc_hypercall_buffer_free(xch, uc);

    return ret;
}

int xc_get_cpu_version(xc_interface *xch, struct xenpf_pcpu_version *cpu_ver)
{
    int ret;
    struct xen_platform_op op = {
        .cmd = XENPF_get_cpu_version,
        .u.pcpu_version.xen_cpuid = cpu_ver->xen_cpuid,
    };

    ret = do_platform_op(xch, &op);
    if ( ret != 0 )
        return ret;

    *cpu_ver = op.u.pcpu_version;

    return 0;
}

int xc_get_ucode_revision(xc_interface *xch,
                          struct xenpf_ucode_revision *ucode_rev)
{
    int ret;
    struct xen_platform_op op = {
        .cmd = XENPF_get_ucode_revision,
        .u.ucode_revision.cpu = ucode_rev->cpu,
    };

    ret = do_platform_op(xch, &op);
    if ( ret != 0 )
        return ret;

    *ucode_rev = op.u.ucode_revision;

    return 0;
}

int xc_cputopoinfo(xc_interface *xch, unsigned *max_cpus,
                   xc_cputopo_t *cputopo)
{
    int ret;
    struct xen_sysctl sysctl = {};
    DECLARE_HYPERCALL_BOUNCE(cputopo, *max_cpus * sizeof(*cputopo),
                             XC_HYPERCALL_BUFFER_BOUNCE_OUT);

    if ( (ret = xc_hypercall_bounce_pre(xch, cputopo)) )
        goto out;

    sysctl.u.cputopoinfo.num_cpus = *max_cpus;
    set_xen_guest_handle(sysctl.u.cputopoinfo.cputopo, cputopo);

    sysctl.cmd = XEN_SYSCTL_cputopoinfo;

    if ( (ret = do_sysctl(xch, &sysctl)) != 0 )
        goto out;

    *max_cpus = sysctl.u.cputopoinfo.num_cpus;

 out:
    xc_hypercall_bounce_post(xch, cputopo);

    return ret;
}
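
/*
 * Illustrative sketch: the usual two-step sizing pattern for
 * xc_cputopoinfo(). Passing a NULL buffer with *max_cpus == 0 is assumed
 * to only report the entry count, which then sizes the real allocation.
 */
static inline xc_cputopo_t *example_get_cputopo(xc_interface *xch,
                                                unsigned *nr)
{
    xc_cputopo_t *cputopo;

    *nr = 0;
    if ( xc_cputopoinfo(xch, nr, NULL) != 0 )   /* query the count only */
        return NULL;

    cputopo = calloc(*nr, sizeof(*cputopo));
    if ( cputopo && xc_cputopoinfo(xch, nr, cputopo) != 0 )
    {
        free(cputopo);
        cputopo = NULL;
    }

    return cputopo;     /* caller frees */
}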

int xc_numainfo(xc_interface *xch, unsigned *max_nodes,
                xc_meminfo_t *meminfo, uint32_t *distance)
{
    int ret;
    struct xen_sysctl sysctl = {};
    DECLARE_HYPERCALL_BOUNCE(meminfo, *max_nodes * sizeof(*meminfo),
                             XC_HYPERCALL_BUFFER_BOUNCE_OUT);
    DECLARE_HYPERCALL_BOUNCE(distance,
                             *max_nodes * *max_nodes * sizeof(*distance),
                             XC_HYPERCALL_BUFFER_BOUNCE_OUT);

    if ( (ret = xc_hypercall_bounce_pre(xch, meminfo)) )
        goto out;
    if ( (ret = xc_hypercall_bounce_pre(xch, distance)) )
        goto out;

    sysctl.u.numainfo.num_nodes = *max_nodes;
    set_xen_guest_handle(sysctl.u.numainfo.meminfo, meminfo);
    set_xen_guest_handle(sysctl.u.numainfo.distance, distance);

    sysctl.cmd = XEN_SYSCTL_numainfo;

    if ( (ret = do_sysctl(xch, &sysctl)) != 0 )
        goto out;

    *max_nodes = sysctl.u.numainfo.num_nodes;

 out:
    xc_hypercall_bounce_post(xch, meminfo);
    xc_hypercall_bounce_post(xch, distance);

    return ret;
}
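
/*
 * Illustrative sketch: 'distance' is a flattened nr_nodes x nr_nodes
 * matrix, so the hop cost from node i to node j lives at
 * distance[i * nr_nodes + j]. The NULL-buffer count query mirrors the
 * xc_cputopoinfo() pattern and is likewise an assumption here.
 */
static inline void example_numainfo(xc_interface *xch)
{
    unsigned nr = 0;
    xc_meminfo_t *meminfo = NULL;
    uint32_t *distance = NULL;

    if ( xc_numainfo(xch, &nr, NULL, NULL) != 0 )   /* query count only */
        return;

    meminfo = calloc(nr, sizeof(*meminfo));
    distance = calloc(nr * nr, sizeof(*distance));
    if ( meminfo && distance &&
         xc_numainfo(xch, &nr, meminfo, distance) == 0 && nr > 1 )
    {
        uint32_t node0_to_node1 = distance[0 * nr + 1];

        (void)node0_to_node1;   /* e.g. feed into placement decisions */
    }

    free(meminfo);
    free(distance);
}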

int xc_pcitopoinfo(xc_interface *xch, unsigned num_devs,
                   physdev_pci_device_t *devs,
                   uint32_t *nodes)
{
    int ret = 0;
    unsigned processed = 0;
    struct xen_sysctl sysctl = {};
    DECLARE_HYPERCALL_BOUNCE(devs, num_devs * sizeof(*devs),
                             XC_HYPERCALL_BUFFER_BOUNCE_IN);
    DECLARE_HYPERCALL_BOUNCE(nodes, num_devs * sizeof(*nodes),
                             XC_HYPERCALL_BUFFER_BOUNCE_BOTH);

    if ( (ret = xc_hypercall_bounce_pre(xch, devs)) )
        goto out;
    if ( (ret = xc_hypercall_bounce_pre(xch, nodes)) )
        goto out;

    sysctl.cmd = XEN_SYSCTL_pcitopoinfo;

    while ( processed < num_devs )
    {
        sysctl.u.pcitopoinfo.num_devs = num_devs - processed;
        set_xen_guest_handle_offset(sysctl.u.pcitopoinfo.devs, devs,
                                    processed);
        set_xen_guest_handle_offset(sysctl.u.pcitopoinfo.nodes, nodes,
                                    processed);

        if ( (ret = do_sysctl(xch, &sysctl)) != 0 )
            break;

        processed += sysctl.u.pcitopoinfo.num_devs;
    }

 out:
    xc_hypercall_bounce_post(xch, devs);
    xc_hypercall_bounce_post(xch, nodes);

    return ret;
}

int xc_sched_id(xc_interface *xch,
                int *sched_id)
{
    int ret;
    struct xen_sysctl sysctl = {};

    sysctl.cmd = XEN_SYSCTL_sched_id;

    if ( (ret = do_sysctl(xch, &sysctl)) != 0 )
        return ret;

    *sched_id = sysctl.u.sched_id.sched_id;

    return 0;
}

#if defined(__i386__) || defined(__x86_64__)
int xc_mca_op(xc_interface *xch, struct xen_mc *mc)
{
    int ret = 0;
    DECLARE_HYPERCALL_BOUNCE(mc, sizeof(*mc), XC_HYPERCALL_BUFFER_BOUNCE_BOTH);

    if ( xc_hypercall_bounce_pre(xch, mc) )
    {
        PERROR("Could not bounce xen_mc memory buffer");
        return -1;
    }
    mc->interface_version = XEN_MCA_INTERFACE_VERSION;

    ret = xencall1(xch->xcall, __HYPERVISOR_mca,
                   HYPERCALL_BUFFER_AS_ARG(mc));

    xc_hypercall_bounce_post(xch, mc);
    return ret;
}

int xc_mca_op_inject_v2(xc_interface *xch, unsigned int flags,
                        xc_cpumap_t cpumap, unsigned int nr_bits)
{
    int ret = -1;
    struct xen_mc mc_buf, *mc = &mc_buf;
    struct xen_mc_inject_v2 *inject = &mc->u.mc_inject_v2;

    DECLARE_HYPERCALL_BOUNCE(cpumap, 0, XC_HYPERCALL_BUFFER_BOUNCE_IN);
    DECLARE_HYPERCALL_BOUNCE(mc, sizeof(*mc), XC_HYPERCALL_BUFFER_BOUNCE_BOTH);

    memset(mc, 0, sizeof(*mc));

    if ( cpumap )
    {
        if ( !nr_bits )
        {
            errno = EINVAL;
            goto out;
        }

        HYPERCALL_BOUNCE_SET_SIZE(cpumap, (nr_bits + 7) / 8);
        if ( xc_hypercall_bounce_pre(xch, cpumap) )
        {
            PERROR("Could not bounce cpumap memory buffer");
            goto out;
        }
        set_xen_guest_handle(inject->cpumap.bitmap, cpumap);
        inject->cpumap.nr_bits = nr_bits;
    }

    inject->flags = flags;
    mc->cmd = XEN_MC_inject_v2;
    mc->interface_version = XEN_MCA_INTERFACE_VERSION;

    if ( xc_hypercall_bounce_pre(xch, mc) )
    {
        PERROR("Could not bounce xen_mc memory buffer");
        goto out_free_cpumap;
    }

    ret = xencall1(xch->xcall, __HYPERVISOR_mca, HYPERCALL_BUFFER_AS_ARG(mc));

    xc_hypercall_bounce_post(xch, mc);
 out_free_cpumap:
    if ( cpumap )
        xc_hypercall_bounce_post(xch, cpumap);
 out:
    return ret;
}
#endif /* __i386__ || __x86_64__ */

int xc_perfc_reset(xc_interface *xch)
{
    struct xen_sysctl sysctl = {};

    sysctl.cmd = XEN_SYSCTL_perfc_op;
    sysctl.u.perfc_op.cmd = XEN_SYSCTL_PERFCOP_reset;
    set_xen_guest_handle(sysctl.u.perfc_op.desc, HYPERCALL_BUFFER_NULL);
    set_xen_guest_handle(sysctl.u.perfc_op.val, HYPERCALL_BUFFER_NULL);

    return do_sysctl(xch, &sysctl);
}

int xc_perfc_query_number(xc_interface *xch,
                          int *nbr_desc,
                          int *nbr_val)
{
    int rc;
    struct xen_sysctl sysctl = {};

    sysctl.cmd = XEN_SYSCTL_perfc_op;
    sysctl.u.perfc_op.cmd = XEN_SYSCTL_PERFCOP_query;
    set_xen_guest_handle(sysctl.u.perfc_op.desc, HYPERCALL_BUFFER_NULL);
    set_xen_guest_handle(sysctl.u.perfc_op.val, HYPERCALL_BUFFER_NULL);

    rc = do_sysctl(xch, &sysctl);

    if ( nbr_desc )
        *nbr_desc = sysctl.u.perfc_op.nr_counters;
    if ( nbr_val )
        *nbr_val = sysctl.u.perfc_op.nr_vals;

    return rc;
}

int xc_perfc_query(xc_interface *xch,
                   struct xc_hypercall_buffer *desc,
                   struct xc_hypercall_buffer *val)
{
    struct xen_sysctl sysctl = {};
    DECLARE_HYPERCALL_BUFFER_ARGUMENT(desc);
    DECLARE_HYPERCALL_BUFFER_ARGUMENT(val);

    sysctl.cmd = XEN_SYSCTL_perfc_op;
    sysctl.u.perfc_op.cmd = XEN_SYSCTL_PERFCOP_query;
    set_xen_guest_handle(sysctl.u.perfc_op.desc, desc);
    set_xen_guest_handle(sysctl.u.perfc_op.val, val);

    return do_sysctl(xch, &sysctl);
}
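
/*
 * Illustrative sketch: the query-number-then-fetch pattern for the perf
 * counters. The arrays must live in hypercall-safe memory, hence the
 * explicit hypercall buffers; the xc_perfc_desc_t/xc_perfc_val_t type
 * names are assumptions based on the public libxc interface.
 */
static inline void example_perfc_fetch(xc_interface *xch)
{
    int nbr_desc = 0, nbr_val = 0;
    DECLARE_HYPERCALL_BUFFER(xc_perfc_desc_t, desc);
    DECLARE_HYPERCALL_BUFFER(xc_perfc_val_t, val);

    if ( xc_perfc_query_number(xch, &nbr_desc, &nbr_val) != 0 )
        return;

    desc = xc_hypercall_buffer_alloc(xch, desc, nbr_desc * sizeof(*desc));
    val = xc_hypercall_buffer_alloc(xch, val, nbr_val * sizeof(*val));

    if ( desc && val )
        xc_perfc_query(xch, HYPERCALL_BUFFER(desc), HYPERCALL_BUFFER(val));

    if ( desc )
        xc_hypercall_buffer_free(xch, desc);
    if ( val )
        xc_hypercall_buffer_free(xch, val);
}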

int xc_lockprof_reset(xc_interface *xch)
{
    struct xen_sysctl sysctl = {};

    sysctl.cmd = XEN_SYSCTL_lockprof_op;
    sysctl.u.lockprof_op.cmd = XEN_SYSCTL_LOCKPROF_reset;
    set_xen_guest_handle(sysctl.u.lockprof_op.data, HYPERCALL_BUFFER_NULL);

    return do_sysctl(xch, &sysctl);
}

int xc_lockprof_query_number(xc_interface *xch,
                             uint32_t *n_elems)
{
    int rc;
    struct xen_sysctl sysctl = {};

    sysctl.cmd = XEN_SYSCTL_lockprof_op;
    sysctl.u.lockprof_op.max_elem = 0;
    sysctl.u.lockprof_op.cmd = XEN_SYSCTL_LOCKPROF_query;
    set_xen_guest_handle(sysctl.u.lockprof_op.data, HYPERCALL_BUFFER_NULL);

    rc = do_sysctl(xch, &sysctl);

    *n_elems = sysctl.u.lockprof_op.nr_elem;

    return rc;
}

int xc_lockprof_query(xc_interface *xch,
                      uint32_t *n_elems,
                      uint64_t *time,
                      struct xc_hypercall_buffer *data)
{
    int rc;
    struct xen_sysctl sysctl = {};
    DECLARE_HYPERCALL_BUFFER_ARGUMENT(data);

    sysctl.cmd = XEN_SYSCTL_lockprof_op;
    sysctl.u.lockprof_op.cmd = XEN_SYSCTL_LOCKPROF_query;
    sysctl.u.lockprof_op.max_elem = *n_elems;
    set_xen_guest_handle(sysctl.u.lockprof_op.data, data);

    rc = do_sysctl(xch, &sysctl);

    *n_elems = sysctl.u.lockprof_op.nr_elem;

    return rc;
}

int xc_getcpuinfo(xc_interface *xch, int max_cpus,
                  xc_cpuinfo_t *info, int *nr_cpus)
{
    int rc;
    struct xen_sysctl sysctl = {};
    DECLARE_HYPERCALL_BOUNCE(info, max_cpus * sizeof(*info),
                             XC_HYPERCALL_BUFFER_BOUNCE_OUT);

    if ( xc_hypercall_bounce_pre(xch, info) )
        return -1;

    sysctl.cmd = XEN_SYSCTL_getcpuinfo;
    sysctl.u.getcpuinfo.max_cpus = max_cpus;
    set_xen_guest_handle(sysctl.u.getcpuinfo.info, info);

    rc = do_sysctl(xch, &sysctl);

    xc_hypercall_bounce_post(xch, info);

    if ( nr_cpus )
        *nr_cpus = sysctl.u.getcpuinfo.nr_cpus;

    return rc;
}

int xc_livepatch_upload(xc_interface *xch,
                        char *name,
                        unsigned char *payload,
                        uint32_t size,
                        bool force)
{
    int rc;
    struct xen_sysctl sysctl = {};
    DECLARE_HYPERCALL_BUFFER(char, local);
    DECLARE_HYPERCALL_BOUNCE(name, 0 /* adjusted later */, XC_HYPERCALL_BUFFER_BOUNCE_IN);
    struct xen_livepatch_name def_name = { };

    if ( !name || !payload )
    {
        errno = EINVAL;
        return -1;
    }

    def_name.size = strlen(name) + 1;
    if ( def_name.size > XEN_LIVEPATCH_NAME_SIZE )
    {
        errno = EINVAL;
        return -1;
    }

    HYPERCALL_BOUNCE_SET_SIZE(name, def_name.size);

    if ( xc_hypercall_bounce_pre(xch, name) )
        return -1;

    local = xc_hypercall_buffer_alloc(xch, local, size);
    if ( !local )
    {
        xc_hypercall_bounce_post(xch, name);
        return -1;
    }
    memcpy(local, payload, size);

    sysctl.cmd = XEN_SYSCTL_livepatch_op;
    sysctl.u.livepatch.cmd = XEN_SYSCTL_LIVEPATCH_UPLOAD;
    sysctl.u.livepatch.flags = force ? LIVEPATCH_FLAG_FORCE : 0;
    sysctl.u.livepatch.u.upload.size = size;
    set_xen_guest_handle(sysctl.u.livepatch.u.upload.payload, local);

    sysctl.u.livepatch.u.upload.name = def_name;
    set_xen_guest_handle(sysctl.u.livepatch.u.upload.name.name, name);

    rc = do_sysctl(xch, &sysctl);

    xc_hypercall_buffer_free(xch, local);
    xc_hypercall_bounce_post(xch, name);

    return rc;
}

int xc_livepatch_get(xc_interface *xch,
                     char *name,
                     struct xen_livepatch_status *status)
{
    int rc;
    struct xen_sysctl sysctl = {};
    DECLARE_HYPERCALL_BOUNCE(name, 0 /* adjusted later */, XC_HYPERCALL_BUFFER_BOUNCE_IN);
    struct xen_livepatch_name def_name = { };

    if ( !name )
    {
        errno = EINVAL;
        return -1;
    }

    def_name.size = strlen(name) + 1;
    if ( def_name.size > XEN_LIVEPATCH_NAME_SIZE )
    {
        errno = EINVAL;
        return -1;
    }

    HYPERCALL_BOUNCE_SET_SIZE(name, def_name.size);

    if ( xc_hypercall_bounce_pre(xch, name) )
        return -1;

    sysctl.cmd = XEN_SYSCTL_livepatch_op;
    sysctl.u.livepatch.cmd = XEN_SYSCTL_LIVEPATCH_GET;

    sysctl.u.livepatch.u.get.status.state = 0;
    sysctl.u.livepatch.u.get.status.rc = 0;

    sysctl.u.livepatch.u.get.name = def_name;
    set_xen_guest_handle(sysctl.u.livepatch.u.get.name.name, name);

    rc = do_sysctl(xch, &sysctl);

    xc_hypercall_bounce_post(xch, name);

    memcpy(status, &sysctl.u.livepatch.u.get.status, sizeof(*status));

    return rc;
}

/*
 * Get the number of available payloads and the actual total sizes of
 * the payloads' name and metadata arrays.
 *
 * This function is typically executed before xc_livepatch_list()
 * to obtain the sizes and correctly allocate all necessary data resources.
 *
 * The return value is zero if the hypercall completed successfully.
 *
 * If there was an error performing the sysctl operation, the return value
 * will contain the hypercall error code value.
 */
int xc_livepatch_list_get_sizes(xc_interface *xch, unsigned int *nr,
                                uint32_t *name_total_size,
                                uint32_t *metadata_total_size)
{
    struct xen_sysctl sysctl = {};
    int rc;

    if ( !nr || !name_total_size || !metadata_total_size )
    {
        errno = EINVAL;
        return -1;
    }

    sysctl.cmd = XEN_SYSCTL_livepatch_op;
    sysctl.u.livepatch.cmd = XEN_SYSCTL_LIVEPATCH_LIST;

    rc = do_sysctl(xch, &sysctl);
    if ( rc )
        return rc;

    *nr = sysctl.u.livepatch.u.list.nr;
    *name_total_size = sysctl.u.livepatch.u.list.name_total_size;
    *metadata_total_size = sysctl.u.livepatch.u.list.metadata_total_size;

    return 0;
}
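
/*
 * Illustrative sketch: sizing the arrays that xc_livepatch_list() (below)
 * fills in. Allocation-failure handling is trimmed for brevity.
 */
static inline void example_livepatch_sizes(xc_interface *xch)
{
    unsigned int nr;
    uint32_t name_size, meta_size;
    struct xen_livepatch_status *info;
    uint32_t *lens, *meta_lens;
    char *names, *metadata;

    if ( xc_livepatch_list_get_sizes(xch, &nr, &name_size, &meta_size) )
        return;

    info = calloc(nr, sizeof(*info));
    lens = calloc(nr, sizeof(*lens));
    meta_lens = calloc(nr, sizeof(*meta_lens));
    names = calloc(name_size, 1);
    metadata = calloc(meta_size, 1);

    /* ... hand the arrays to xc_livepatch_list(), see the sketch below ... */

    free(info); free(lens); free(meta_lens); free(names); free(metadata);
}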

/*
 * The heart of this function is to get an array of the following objects:
 * - xen_livepatch_status_t: states and return codes of payloads
 * - name: names of payloads
 * - len: lengths of the corresponding payloads' names
 * - metadata: payloads' metadata
 * - metadata_len: lengths of the corresponding payloads' metadata
 *
 * However it is complex because it has to deal with the hypervisor
 * returning only some of the requested data, or the data being stale
 * (another hypercall might alter the list).
 *
 * The parameters that the function expects to contain data from
 * the hypervisor are: 'info', 'name', 'len', 'metadata' and
 * 'metadata_len'. The 'done' and 'left' parameters are also updated,
 * with the number of entries filled out and the number of entries left
 * to get from the hypervisor, respectively.
 *
 * It is expected that the caller of this function will first issue
 * xc_livepatch_list_get_sizes() in order to obtain the total sizes of
 * names and all metadata as well as the current number of payload entries.
 * The total sizes are required and supplied via the 'name_total_size' and
 * 'metadata_total_size' parameters.
 *
 * The 'max' is to be provided by the caller with the maximum number of
 * entries that the 'info', 'name', 'len', 'metadata' and 'metadata_len'
 * arrays can be filled up with.
 *
 * Each entry in the 'info' array is expected to be of xen_livepatch_status_t
 * structure size.
 *
 * Each entry in the 'name' array may have an arbitrary size.
 *
 * Each entry in the 'len' array is expected to be of uint32_t size.
 *
 * Each entry in the 'metadata' array may have an arbitrary size.
 *
 * Each entry in the 'metadata_len' array is expected to be of uint32_t size.
 *
 * The return value is zero if the hypercall completed successfully.
 * Note that the return value is _not_ the number of entries filled
 * out - that is saved in 'done'.
 *
 * If there was an error performing the operation, the return value
 * will contain a negative -EXX type value. The 'done' and 'left'
 * parameters will contain the number of entries that had been successfully
 * retrieved (if any).
 */
int xc_livepatch_list(xc_interface *xch, const unsigned int max,
                      const unsigned int start,
                      struct xen_livepatch_status *info,
                      char *name, uint32_t *len,
                      const uint32_t name_total_size,
                      char *metadata, uint32_t *metadata_len,
                      const uint32_t metadata_total_size,
                      unsigned int *done, unsigned int *left)
{
    int rc;
    struct xen_sysctl sysctl = {};
    /* The sizes are adjusted later - hence zero. */
    DECLARE_HYPERCALL_BOUNCE(info, 0, XC_HYPERCALL_BUFFER_BOUNCE_OUT);
    DECLARE_HYPERCALL_BOUNCE(name, 0, XC_HYPERCALL_BUFFER_BOUNCE_OUT);
    DECLARE_HYPERCALL_BOUNCE(len, 0, XC_HYPERCALL_BUFFER_BOUNCE_OUT);
    DECLARE_HYPERCALL_BOUNCE(metadata, 0, XC_HYPERCALL_BUFFER_BOUNCE_OUT);
    DECLARE_HYPERCALL_BOUNCE(metadata_len, 0, XC_HYPERCALL_BUFFER_BOUNCE_OUT);
    uint32_t max_batch_sz, nr;
    uint32_t version = 0, retries = 0;
    uint32_t adjust = 0;
    uint32_t name_off = 0, metadata_off = 0;
    uint32_t name_sz, metadata_sz;

    if ( !max || !info || !name || !len ||
         !metadata || !metadata_len || !done || !left )
    {
        errno = EINVAL;
        return -1;
    }

    if ( name_total_size == 0 )
    {
        errno = ENOENT;
        return -1;
    }

    sysctl.cmd = XEN_SYSCTL_livepatch_op;
    sysctl.u.livepatch.cmd = XEN_SYSCTL_LIVEPATCH_LIST;
    sysctl.u.livepatch.u.list.idx = start;

    max_batch_sz = max;
    name_sz = name_total_size;
    metadata_sz = metadata_total_size;
    *done = 0;
    *left = 0;
    do {
        uint32_t _name_sz, _metadata_sz;

        /*
         * The first time we go around this loop our 'max' may be bigger
         * than what the hypervisor is comfortable with - hence the first
         * couple of iterations may adjust the number of entries we will
         * want filled (tracked by 'nr').
         *
         * N.B. This is a do { } while loop and the right hand side of
         * the loop conditional evaluates to false while adjusting (as
         * *left is set to zero before the loop). Hence we need this
         * 'adjust' flag to keep looping - even though we reset it at
         * the start of each iteration.
         */
        if ( adjust )
            adjust = 0; /* Used when adjusting 'max_batch_sz' or 'retries'. */

        nr = min(max - *done, max_batch_sz);

        sysctl.u.livepatch.u.list.nr = nr;
        /* Fix the sizes (they may vary between hypercalls). */
        HYPERCALL_BOUNCE_SET_SIZE(info, nr * sizeof(*info));
        HYPERCALL_BOUNCE_SET_SIZE(name, name_sz);
        HYPERCALL_BOUNCE_SET_SIZE(len, nr * sizeof(*len));
        HYPERCALL_BOUNCE_SET_SIZE(metadata, metadata_sz);
        HYPERCALL_BOUNCE_SET_SIZE(metadata_len, nr * sizeof(*metadata_len));
        /* Move the pointers to the proper offsets into the arrays. */
        (HYPERCALL_BUFFER(info))->ubuf = info + *done;
        (HYPERCALL_BUFFER(name))->ubuf = name + name_off;
        (HYPERCALL_BUFFER(len))->ubuf = len + *done;
        (HYPERCALL_BUFFER(metadata))->ubuf = metadata + metadata_off;
        (HYPERCALL_BUFFER(metadata_len))->ubuf = metadata_len + *done;
        /* Allocate memory. */
        rc = xc_hypercall_bounce_pre(xch, info);
        if ( rc )
            break;

        rc = xc_hypercall_bounce_pre(xch, name);
        if ( rc )
            break;

        rc = xc_hypercall_bounce_pre(xch, len);
        if ( rc )
            break;

        rc = xc_hypercall_bounce_pre(xch, metadata);
        if ( rc )
            break;

        rc = xc_hypercall_bounce_pre(xch, metadata_len);
        if ( rc )
            break;

        set_xen_guest_handle(sysctl.u.livepatch.u.list.status, info);
        set_xen_guest_handle(sysctl.u.livepatch.u.list.name, name);
        set_xen_guest_handle(sysctl.u.livepatch.u.list.len, len);
        set_xen_guest_handle(sysctl.u.livepatch.u.list.metadata, metadata);
        set_xen_guest_handle(sysctl.u.livepatch.u.list.metadata_len, metadata_len);

        sysctl.u.livepatch.u.list.name_total_size = name_sz;
        sysctl.u.livepatch.u.list.metadata_total_size = metadata_sz;

        rc = do_sysctl(xch, &sysctl);
        /*
         * From here on we MUST call xc_hypercall_bounce_post. If rc < 0 we
         * end up doing it (outside the loop), so using a break is OK.
         */
        if ( rc < 0 && errno == E2BIG )
        {
            if ( max_batch_sz <= 1 )
                break;
            max_batch_sz >>= 1;
            adjust = 1; /* For the loop conditional to let us loop again. */
            /* No memory leaks! */
            xc_hypercall_bounce_post(xch, info);
            xc_hypercall_bounce_post(xch, name);
            xc_hypercall_bounce_post(xch, len);
            xc_hypercall_bounce_post(xch, metadata);
            xc_hypercall_bounce_post(xch, metadata_len);
            continue;
        }

        if ( rc < 0 ) /* For all other errors we bail out. */
            break;

        if ( !version )
            version = sysctl.u.livepatch.u.list.version;

        if ( sysctl.u.livepatch.u.list.version != version )
        {
            /* We could make the retry limit configurable as a parameter. */
            if ( retries++ > 3 )
            {
                rc = -1;
                errno = EBUSY;
                break;
            }
            *done = 0; /* Retry from scratch. */
            version = sysctl.u.livepatch.u.list.version;
            adjust = 1; /* And make sure we continue in the loop. */
            /* No memory leaks. */
            xc_hypercall_bounce_post(xch, info);
            xc_hypercall_bounce_post(xch, name);
            xc_hypercall_bounce_post(xch, len);
            xc_hypercall_bounce_post(xch, metadata);
            xc_hypercall_bounce_post(xch, metadata_len);
            continue;
        }

        /* We should never hit this, but just in case. */
        if ( rc > nr )
        {
            errno = EOVERFLOW; /* Overflow! */
            rc = -1;
            break;
        }
        *left = sysctl.u.livepatch.u.list.nr; /* Total remaining count. */
        _name_sz = sysctl.u.livepatch.u.list.name_total_size; /* Total received name size. */
        _metadata_sz = sysctl.u.livepatch.u.list.metadata_total_size; /* Total received metadata size. */
        /* Copy only up to 'rc' entries of data - we could use min(rc, nr) if desired. */
        HYPERCALL_BOUNCE_SET_SIZE(info, rc * sizeof(*info));
        HYPERCALL_BOUNCE_SET_SIZE(name, _name_sz);
        HYPERCALL_BOUNCE_SET_SIZE(len, rc * sizeof(*len));
        HYPERCALL_BOUNCE_SET_SIZE(metadata, _metadata_sz);
        HYPERCALL_BOUNCE_SET_SIZE(metadata_len, rc * sizeof(*metadata_len));
        /* Bounce the data back and free the bounce buffers. */
        xc_hypercall_bounce_post(xch, info);
        xc_hypercall_bounce_post(xch, name);
        xc_hypercall_bounce_post(xch, len);
        xc_hypercall_bounce_post(xch, metadata);
        xc_hypercall_bounce_post(xch, metadata_len);

        name_sz -= _name_sz;
        name_off += _name_sz;
        metadata_sz -= _metadata_sz;
        metadata_off += _metadata_sz;

        /* And update how many elements of 'info' we have copied into. */
        *done += rc;
        /* Update idx. */
        sysctl.u.livepatch.u.list.idx = *done;
    } while ( adjust || (*done < max && *left != 0) );

    if ( rc < 0 )
    {
        xc_hypercall_bounce_post(xch, len);
        xc_hypercall_bounce_post(xch, name);
        xc_hypercall_bounce_post(xch, info);
        xc_hypercall_bounce_post(xch, metadata);
        xc_hypercall_bounce_post(xch, metadata_len);
    }

    return rc > 0 ? 0 : rc;
}
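
/*
 * Illustrative sketch: a single pass over the payload list using arrays
 * sized by xc_livepatch_list_get_sizes() (see the sketch above). A robust
 * caller would loop, advancing 'start', until *left drops to zero.
 */
static inline int example_livepatch_list(xc_interface *xch, unsigned int nr,
                                         struct xen_livepatch_status *info,
                                         char *names, uint32_t *lens,
                                         uint32_t name_size, char *metadata,
                                         uint32_t *meta_lens,
                                         uint32_t meta_size)
{
    unsigned int done = 0, left = 0;
    int rc = xc_livepatch_list(xch, nr, 0 /* start */, info, names, lens,
                               name_size, metadata, meta_lens, meta_size,
                               &done, &left);

    /* On success the first 'done' entries of each array are valid. */
    return rc;
}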

static int _xc_livepatch_action(xc_interface *xch,
                                char *name,
                                unsigned int action,
                                uint32_t timeout,
                                uint32_t flags)
{
    int rc;
    struct xen_sysctl sysctl = {};
    /* The size is figured out later from strlen(name). */
    DECLARE_HYPERCALL_BOUNCE(name, 0, XC_HYPERCALL_BUFFER_BOUNCE_IN);
    struct xen_livepatch_name def_name = { };

    def_name.size = strlen(name) + 1;

    if ( def_name.size > XEN_LIVEPATCH_NAME_SIZE )
    {
        errno = EINVAL;
        return -1;
    }

    HYPERCALL_BOUNCE_SET_SIZE(name, def_name.size);

    if ( xc_hypercall_bounce_pre(xch, name) )
        return -1;

    sysctl.cmd = XEN_SYSCTL_livepatch_op;
    sysctl.u.livepatch.cmd = XEN_SYSCTL_LIVEPATCH_ACTION;
    sysctl.u.livepatch.u.action.cmd = action;
    sysctl.u.livepatch.u.action.timeout = timeout;
    sysctl.u.livepatch.u.action.flags = flags;
    sysctl.u.livepatch.u.action.pad = 0;

    sysctl.u.livepatch.u.action.name = def_name;
    set_xen_guest_handle(sysctl.u.livepatch.u.action.name.name, name);

    rc = do_sysctl(xch, &sysctl);

    xc_hypercall_bounce_post(xch, name);

    return rc;
}

int xc_livepatch_apply(xc_interface *xch, char *name, uint32_t timeout,
                       uint32_t flags)
{
    return _xc_livepatch_action(xch, name, LIVEPATCH_ACTION_APPLY, timeout,
                                flags);
}

int xc_livepatch_revert(xc_interface *xch, char *name, uint32_t timeout,
                        uint32_t flags)
{
    return _xc_livepatch_action(xch, name, LIVEPATCH_ACTION_REVERT, timeout,
                                flags);
}

int xc_livepatch_unload(xc_interface *xch, char *name, uint32_t timeout,
                        uint32_t flags)
{
    return _xc_livepatch_action(xch, name, LIVEPATCH_ACTION_UNLOAD, timeout,
                                flags);
}

int xc_livepatch_replace(xc_interface *xch, char *name, uint32_t timeout,
                         uint32_t flags)
{
    return _xc_livepatch_action(xch, name, LIVEPATCH_ACTION_REPLACE, timeout,
                                flags);
}
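
/*
 * Illustrative sketch: a payload is first uploaded, then transitioned by
 * name. A timeout of 0 is assumed to let the hypervisor pick its default.
 */
static inline int example_livepatch_cycle(xc_interface *xch, char *name,
                                          unsigned char *payload,
                                          uint32_t size)
{
    if ( xc_livepatch_upload(xch, name, payload, size, false) )
        return -1;

    return xc_livepatch_apply(xch, name, 0 /* default timeout */, 0);
}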

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */