/******************************************************************************
 * xc_misc.c
 *
 * Miscellaneous control interface functions.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation;
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; If not, see <http://www.gnu.org/licenses/>.
 */

#include "xc_bitops.h"
#include "xc_private.h"
#include <xen/hvm/hvm_op.h>

int xc_get_max_cpus(xc_interface *xch)
{
    static int max_cpus = 0;
    xc_physinfo_t physinfo;

    if ( max_cpus )
        return max_cpus;

    if ( !xc_physinfo(xch, &physinfo) )
    {
        max_cpus = physinfo.max_cpu_id + 1;
        return max_cpus;
    }

    return -1;
}

int xc_get_online_cpus(xc_interface *xch)
{
    xc_physinfo_t physinfo;

    if ( !xc_physinfo(xch, &physinfo) )
        return physinfo.nr_cpus;

    return -1;
}

int xc_get_max_nodes(xc_interface *xch)
{
    static int max_nodes = 0;
    xc_physinfo_t physinfo;

    if ( max_nodes )
        return max_nodes;

    if ( !xc_physinfo(xch, &physinfo) )
    {
        max_nodes = physinfo.max_node_id + 1;
        return max_nodes;
    }

    return -1;
}

int xc_get_cpumap_size(xc_interface *xch)
{
    int max_cpus = xc_get_max_cpus(xch);

    if ( max_cpus < 0 )
        return -1;
    return (max_cpus + 7) / 8;
}

int xc_get_nodemap_size(xc_interface *xch)
{
    int max_nodes = xc_get_max_nodes(xch);

    if ( max_nodes < 0 )
        return -1;
    return (max_nodes + 7) / 8;
}

xc_cpumap_t xc_cpumap_alloc(xc_interface *xch)
{
    int sz;

    sz = xc_get_cpumap_size(xch);
    if ( sz <= 0 )
        return NULL;
    return calloc(1, sz);
}

/*
 * xc_bitops.h has macros that do this as well - however, they assume that
 * the bitmask is word aligned, whereas xc_cpumap_t is only guaranteed to be
 * byte aligned, so we need byte versions for architectures which do not
 * support misaligned accesses (which is basically everyone but x86,
 * although even on x86 misaligned accesses can be inefficient).
 *
 * NOTE: The xc_bitops macros now use byte alignment.
 * TODO: Clean up the users of this interface.
 */
#define BITS_PER_CPUMAP(map) (sizeof(*map) * 8)
#define CPUMAP_ENTRY(cpu, map) ((map))[(cpu) / BITS_PER_CPUMAP(map)]
#define CPUMAP_SHIFT(cpu, map) ((cpu) % BITS_PER_CPUMAP(map))

void xc_cpumap_clearcpu(int cpu, xc_cpumap_t map)
{
    CPUMAP_ENTRY(cpu, map) &= ~(1U << CPUMAP_SHIFT(cpu, map));
}

void xc_cpumap_setcpu(int cpu, xc_cpumap_t map)
{
    CPUMAP_ENTRY(cpu, map) |= (1U << CPUMAP_SHIFT(cpu, map));
}

int xc_cpumap_testcpu(int cpu, xc_cpumap_t map)
{
    return (CPUMAP_ENTRY(cpu, map) >> CPUMAP_SHIFT(cpu, map)) & 1;
}
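
/*
 * Typical usage of the cpumap helpers above - a minimal sketch, assuming a
 * valid xc_interface handle 'xch' is already open (the CPU number 0 used
 * here is purely illustrative):
 *
 *     xc_cpumap_t map = xc_cpumap_alloc(xch);
 *
 *     if ( map )
 *     {
 *         xc_cpumap_setcpu(0, map);
 *         if ( xc_cpumap_testcpu(0, map) )
 *             xc_cpumap_clearcpu(0, map);
 *         free(map);
 *     }
 */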

xc_nodemap_t xc_nodemap_alloc(xc_interface *xch)
{
    int sz;

    sz = xc_get_nodemap_size(xch);
    if ( sz <= 0 )
        return NULL;
    return calloc(1, sz);
}

int xc_readconsolering(xc_interface *xch,
                       char *buffer,
                       unsigned int *pnr_chars,
                       int clear, int incremental, uint32_t *pindex)
{
    int ret;
    unsigned int nr_chars = *pnr_chars;
    struct xen_sysctl sysctl = {};
    DECLARE_HYPERCALL_BOUNCE(buffer, nr_chars, XC_HYPERCALL_BUFFER_BOUNCE_OUT);

    if ( xc_hypercall_bounce_pre(xch, buffer) )
        return -1;

    sysctl.cmd = XEN_SYSCTL_readconsole;
    set_xen_guest_handle(sysctl.u.readconsole.buffer, buffer);
    sysctl.u.readconsole.count = nr_chars;
    sysctl.u.readconsole.clear = clear;
    sysctl.u.readconsole.incremental = 0;
    if ( pindex )
    {
        sysctl.u.readconsole.index = *pindex;
        sysctl.u.readconsole.incremental = incremental;
    }

    if ( (ret = do_sysctl(xch, &sysctl)) == 0 )
    {
        *pnr_chars = sysctl.u.readconsole.count;
        if ( pindex )
            *pindex = sysctl.u.readconsole.index;
    }

    xc_hypercall_bounce_post(xch, buffer);

    return ret;
}
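
/*
 * A minimal caller sketch for xc_readconsolering() - illustrative only,
 * assuming an open handle 'xch'; the buffer size and the polling pattern
 * are arbitrary choices, not requirements of the interface:
 *
 *     char buf[16384];
 *     unsigned int nr = sizeof(buf);
 *     uint32_t index = 0;
 *
 *     // clear=0 keeps the ring intact; incremental=1 with a persistent
 *     // 'index' returns only console output produced since the last call.
 *     if ( xc_readconsolering(xch, buf, &nr, 0, 1, &index) == 0 )
 *         fwrite(buf, 1, nr, stdout);
 */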

int xc_send_debug_keys(xc_interface *xch, const char *keys)
{
    int ret, len = strlen(keys);
    struct xen_sysctl sysctl = {};
    DECLARE_HYPERCALL_BOUNCE_IN(keys, len);

    if ( xc_hypercall_bounce_pre(xch, keys) )
        return -1;

    sysctl.cmd = XEN_SYSCTL_debug_keys;
    set_xen_guest_handle(sysctl.u.debug_keys.keys, keys);
    sysctl.u.debug_keys.nr_keys = len;

    ret = do_sysctl(xch, &sysctl);

    xc_hypercall_bounce_post(xch, keys);

    return ret;
}

int xc_physinfo(xc_interface *xch,
                xc_physinfo_t *put_info)
{
    int ret;
    struct xen_sysctl sysctl = {};

    sysctl.cmd = XEN_SYSCTL_physinfo;

    if ( (ret = do_sysctl(xch, &sysctl)) != 0 )
        return ret;

    memcpy(put_info, &sysctl.u.physinfo, sizeof(*put_info));

    return 0;
}

int xc_microcode_update(xc_interface *xch, const void *buf, size_t len)
{
    int ret;
    struct xen_platform_op platform_op = {};
    DECLARE_HYPERCALL_BUFFER(struct xenpf_microcode_update, uc);

    uc = xc_hypercall_buffer_alloc(xch, uc, len);
    if ( uc == NULL )
        return -1;

    memcpy(uc, buf, len);

    platform_op.cmd = XENPF_microcode_update;
    platform_op.u.microcode.length = len;
    set_xen_guest_handle(platform_op.u.microcode.data, uc);

    ret = do_platform_op(xch, &platform_op);

    xc_hypercall_buffer_free(xch, uc);

    return ret;
}

int xc_get_cpu_version(xc_interface *xch, struct xenpf_pcpu_version *cpu_ver)
{
    int ret;
    struct xen_platform_op op = {
        .cmd = XENPF_get_cpu_version,
        .u.pcpu_version.xen_cpuid = cpu_ver->xen_cpuid,
    };

    ret = do_platform_op(xch, &op);
    if ( ret != 0 )
        return ret;

    *cpu_ver = op.u.pcpu_version;

    return 0;
}

int xc_get_ucode_revision(xc_interface *xch,
                          struct xenpf_ucode_revision *ucode_rev)
{
    int ret;
    struct xen_platform_op op = {
        .cmd = XENPF_get_ucode_revision,
        .u.ucode_revision.cpu = ucode_rev->cpu,
    };

    ret = do_platform_op(xch, &op);
    if ( ret != 0 )
        return ret;

    *ucode_rev = op.u.ucode_revision;

    return 0;
}

int xc_cputopoinfo(xc_interface *xch, unsigned *max_cpus,
                   xc_cputopo_t *cputopo)
{
    int ret;
    struct xen_sysctl sysctl = {};
    DECLARE_HYPERCALL_BOUNCE(cputopo, *max_cpus * sizeof(*cputopo),
                             XC_HYPERCALL_BUFFER_BOUNCE_OUT);

    if ( (ret = xc_hypercall_bounce_pre(xch, cputopo)) )
        goto out;

    sysctl.u.cputopoinfo.num_cpus = *max_cpus;
    set_xen_guest_handle(sysctl.u.cputopoinfo.cputopo, cputopo);

    sysctl.cmd = XEN_SYSCTL_cputopoinfo;

    if ( (ret = do_sysctl(xch, &sysctl)) != 0 )
        goto out;

    *max_cpus = sysctl.u.cputopoinfo.num_cpus;

 out:
    xc_hypercall_bounce_post(xch, cputopo);

    return ret;
}
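
/*
 * A hypothetical caller sketch for xc_cputopoinfo(); sizing the array from
 * xc_get_max_cpus() and the reduced error handling are assumptions made for
 * brevity:
 *
 *     int max = xc_get_max_cpus(xch);
 *     unsigned num = (max > 0) ? max : 0;
 *     xc_cputopo_t *topo = num ? calloc(num, sizeof(*topo)) : NULL;
 *
 *     if ( topo && xc_cputopoinfo(xch, &num, topo) == 0 )
 *     {
 *         // On success 'num' holds the number of entries actually filled.
 *     }
 *     free(topo);
 */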

int xc_numainfo(xc_interface *xch, unsigned *max_nodes,
                xc_meminfo_t *meminfo, uint32_t *distance)
{
    int ret;
    struct xen_sysctl sysctl = {};
    DECLARE_HYPERCALL_BOUNCE(meminfo, *max_nodes * sizeof(*meminfo),
                             XC_HYPERCALL_BUFFER_BOUNCE_OUT);
    DECLARE_HYPERCALL_BOUNCE(distance,
                             *max_nodes * *max_nodes * sizeof(*distance),
                             XC_HYPERCALL_BUFFER_BOUNCE_OUT);

    if ( (ret = xc_hypercall_bounce_pre(xch, meminfo)) )
        goto out;
    if ( (ret = xc_hypercall_bounce_pre(xch, distance)) )
        goto out;

    sysctl.u.numainfo.num_nodes = *max_nodes;
    set_xen_guest_handle(sysctl.u.numainfo.meminfo, meminfo);
    set_xen_guest_handle(sysctl.u.numainfo.distance, distance);

    sysctl.cmd = XEN_SYSCTL_numainfo;

    if ( (ret = do_sysctl(xch, &sysctl)) != 0 )
        goto out;

    *max_nodes = sysctl.u.numainfo.num_nodes;

 out:
    xc_hypercall_bounce_post(xch, meminfo);
    xc_hypercall_bounce_post(xch, distance);

    return ret;
}

int xc_pcitopoinfo(xc_interface *xch, unsigned num_devs,
                   physdev_pci_device_t *devs,
                   uint32_t *nodes)
{
    int ret = 0;
    unsigned processed = 0;
    struct xen_sysctl sysctl = {};
    DECLARE_HYPERCALL_BOUNCE(devs, num_devs * sizeof(*devs),
                             XC_HYPERCALL_BUFFER_BOUNCE_IN);
    DECLARE_HYPERCALL_BOUNCE(nodes, num_devs * sizeof(*nodes),
                             XC_HYPERCALL_BUFFER_BOUNCE_BOTH);

    if ( (ret = xc_hypercall_bounce_pre(xch, devs)) )
        goto out;
    if ( (ret = xc_hypercall_bounce_pre(xch, nodes)) )
        goto out;

    sysctl.cmd = XEN_SYSCTL_pcitopoinfo;

    while ( processed < num_devs )
    {
        sysctl.u.pcitopoinfo.num_devs = num_devs - processed;
        set_xen_guest_handle_offset(sysctl.u.pcitopoinfo.devs, devs,
                                    processed);
        set_xen_guest_handle_offset(sysctl.u.pcitopoinfo.nodes, nodes,
                                    processed);

        if ( (ret = do_sysctl(xch, &sysctl)) != 0 )
            break;

        processed += sysctl.u.pcitopoinfo.num_devs;
    }

 out:
    xc_hypercall_bounce_post(xch, devs);
    xc_hypercall_bounce_post(xch, nodes);

    return ret;
}

int xc_sched_id(xc_interface *xch,
                int *sched_id)
{
    int ret;
    struct xen_sysctl sysctl = {};

    sysctl.cmd = XEN_SYSCTL_sched_id;

    if ( (ret = do_sysctl(xch, &sysctl)) != 0 )
        return ret;

    *sched_id = sysctl.u.sched_id.sched_id;

    return 0;
}

#if defined(__i386__) || defined(__x86_64__)
int xc_mca_op(xc_interface *xch, struct xen_mc *mc)
{
    int ret = 0;
    DECLARE_HYPERCALL_BOUNCE(mc, sizeof(*mc), XC_HYPERCALL_BUFFER_BOUNCE_BOTH);

    if ( xc_hypercall_bounce_pre(xch, mc) )
    {
        PERROR("Could not bounce xen_mc memory buffer");
        return -1;
    }
    mc->interface_version = XEN_MCA_INTERFACE_VERSION;

    ret = xencall1(xch->xcall, __HYPERVISOR_mca,
                   HYPERCALL_BUFFER_AS_ARG(mc));

    xc_hypercall_bounce_post(xch, mc);
    return ret;
}

int xc_mca_op_inject_v2(xc_interface *xch, unsigned int flags,
                        xc_cpumap_t cpumap, unsigned int nr_bits)
{
    int ret = -1;
    struct xen_mc mc_buf, *mc = &mc_buf;
    struct xen_mc_inject_v2 *inject = &mc->u.mc_inject_v2;

    DECLARE_HYPERCALL_BOUNCE(cpumap, 0, XC_HYPERCALL_BUFFER_BOUNCE_IN);
    DECLARE_HYPERCALL_BOUNCE(mc, sizeof(*mc), XC_HYPERCALL_BUFFER_BOUNCE_BOTH);

    memset(mc, 0, sizeof(*mc));

    if ( cpumap )
    {
        if ( !nr_bits )
        {
            errno = EINVAL;
            goto out;
        }

        HYPERCALL_BOUNCE_SET_SIZE(cpumap, (nr_bits + 7) / 8);
        if ( xc_hypercall_bounce_pre(xch, cpumap) )
        {
            PERROR("Could not bounce cpumap memory buffer");
            goto out;
        }
        set_xen_guest_handle(inject->cpumap.bitmap, cpumap);
        inject->cpumap.nr_bits = nr_bits;
    }

    inject->flags = flags;
    mc->cmd = XEN_MC_inject_v2;
    mc->interface_version = XEN_MCA_INTERFACE_VERSION;

    if ( xc_hypercall_bounce_pre(xch, mc) )
    {
        PERROR("Could not bounce xen_mc memory buffer");
        goto out_free_cpumap;
    }

    ret = xencall1(xch->xcall, __HYPERVISOR_mca, HYPERCALL_BUFFER_AS_ARG(mc));

    xc_hypercall_bounce_post(xch, mc);
 out_free_cpumap:
    if ( cpumap )
        xc_hypercall_bounce_post(xch, cpumap);
 out:
    return ret;
}
#endif /* __i386__ || __x86_64__ */

int xc_perfc_reset(xc_interface *xch)
{
    struct xen_sysctl sysctl = {};

    sysctl.cmd = XEN_SYSCTL_perfc_op;
    sysctl.u.perfc_op.cmd = XEN_SYSCTL_PERFCOP_reset;
    set_xen_guest_handle(sysctl.u.perfc_op.desc, HYPERCALL_BUFFER_NULL);
    set_xen_guest_handle(sysctl.u.perfc_op.val, HYPERCALL_BUFFER_NULL);

    return do_sysctl(xch, &sysctl);
}

int xc_perfc_query_number(xc_interface *xch,
                          int *nbr_desc,
                          int *nbr_val)
{
    int rc;
    struct xen_sysctl sysctl = {};

    sysctl.cmd = XEN_SYSCTL_perfc_op;
    sysctl.u.perfc_op.cmd = XEN_SYSCTL_PERFCOP_query;
    set_xen_guest_handle(sysctl.u.perfc_op.desc, HYPERCALL_BUFFER_NULL);
    set_xen_guest_handle(sysctl.u.perfc_op.val, HYPERCALL_BUFFER_NULL);

    rc = do_sysctl(xch, &sysctl);

    if ( nbr_desc )
        *nbr_desc = sysctl.u.perfc_op.nr_counters;
    if ( nbr_val )
        *nbr_val = sysctl.u.perfc_op.nr_vals;

    return rc;
}

int xc_perfc_query(xc_interface *xch,
                   struct xc_hypercall_buffer *desc,
                   struct xc_hypercall_buffer *val)
{
    struct xen_sysctl sysctl = {};
    DECLARE_HYPERCALL_BUFFER_ARGUMENT(desc);
    DECLARE_HYPERCALL_BUFFER_ARGUMENT(val);

    sysctl.cmd = XEN_SYSCTL_perfc_op;
    sysctl.u.perfc_op.cmd = XEN_SYSCTL_PERFCOP_query;
    set_xen_guest_handle(sysctl.u.perfc_op.desc, desc);
    set_xen_guest_handle(sysctl.u.perfc_op.val, val);

    return do_sysctl(xch, &sysctl);
}

int xc_lockprof_reset(xc_interface *xch)
{
    struct xen_sysctl sysctl = {};

    sysctl.cmd = XEN_SYSCTL_lockprof_op;
    sysctl.u.lockprof_op.cmd = XEN_SYSCTL_LOCKPROF_reset;
    set_xen_guest_handle(sysctl.u.lockprof_op.data, HYPERCALL_BUFFER_NULL);

    return do_sysctl(xch, &sysctl);
}

int xc_lockprof_query_number(xc_interface *xch,
                             uint32_t *n_elems)
{
    int rc;
    struct xen_sysctl sysctl = {};

    sysctl.cmd = XEN_SYSCTL_lockprof_op;
    sysctl.u.lockprof_op.max_elem = 0;
    sysctl.u.lockprof_op.cmd = XEN_SYSCTL_LOCKPROF_query;
    set_xen_guest_handle(sysctl.u.lockprof_op.data, HYPERCALL_BUFFER_NULL);

    rc = do_sysctl(xch, &sysctl);

    *n_elems = sysctl.u.lockprof_op.nr_elem;

    return rc;
}

int xc_lockprof_query(xc_interface *xch,
                      uint32_t *n_elems,
                      uint64_t *time,
                      struct xc_hypercall_buffer *data)
{
    int rc;
    struct xen_sysctl sysctl = {};
    DECLARE_HYPERCALL_BUFFER_ARGUMENT(data);

    sysctl.cmd = XEN_SYSCTL_lockprof_op;
    sysctl.u.lockprof_op.cmd = XEN_SYSCTL_LOCKPROF_query;
    sysctl.u.lockprof_op.max_elem = *n_elems;
    set_xen_guest_handle(sysctl.u.lockprof_op.data, data);

    rc = do_sysctl(xch, &sysctl);

    *n_elems = sysctl.u.lockprof_op.nr_elem;

    return rc;
}

int xc_getcpuinfo(xc_interface *xch, int max_cpus,
                  xc_cpuinfo_t *info, int *nr_cpus)
{
    int rc;
    struct xen_sysctl sysctl = {};
    DECLARE_HYPERCALL_BOUNCE(info, max_cpus * sizeof(*info),
                             XC_HYPERCALL_BUFFER_BOUNCE_OUT);

    if ( xc_hypercall_bounce_pre(xch, info) )
        return -1;

    sysctl.cmd = XEN_SYSCTL_getcpuinfo;
    sysctl.u.getcpuinfo.max_cpus = max_cpus;
    set_xen_guest_handle(sysctl.u.getcpuinfo.info, info);

    rc = do_sysctl(xch, &sysctl);

    xc_hypercall_bounce_post(xch, info);

    if ( nr_cpus )
        *nr_cpus = sysctl.u.getcpuinfo.nr_cpus;

    return rc;
}

int xc_livepatch_upload(xc_interface *xch,
                        char *name,
                        unsigned char *payload,
                        uint32_t size,
                        bool force)
{
    int rc;
    struct xen_sysctl sysctl = {};
    DECLARE_HYPERCALL_BUFFER(char, local);
    DECLARE_HYPERCALL_BOUNCE(name, 0 /* later */, XC_HYPERCALL_BUFFER_BOUNCE_IN);
    struct xen_livepatch_name def_name = { };

    if ( !name || !payload )
    {
        errno = EINVAL;
        return -1;
    }

    def_name.size = strlen(name) + 1;
    if ( def_name.size > XEN_LIVEPATCH_NAME_SIZE )
    {
        errno = EINVAL;
        return -1;
    }

    HYPERCALL_BOUNCE_SET_SIZE(name, def_name.size);

    if ( xc_hypercall_bounce_pre(xch, name) )
        return -1;

    local = xc_hypercall_buffer_alloc(xch, local, size);
    if ( !local )
    {
        xc_hypercall_bounce_post(xch, name);
        return -1;
    }
    memcpy(local, payload, size);

    sysctl.cmd = XEN_SYSCTL_livepatch_op;
    sysctl.u.livepatch.cmd = XEN_SYSCTL_LIVEPATCH_UPLOAD;
    sysctl.u.livepatch.flags = force ? LIVEPATCH_FLAG_FORCE : 0;
    sysctl.u.livepatch.u.upload.size = size;
    set_xen_guest_handle(sysctl.u.livepatch.u.upload.payload, local);

    sysctl.u.livepatch.u.upload.name = def_name;
    set_xen_guest_handle(sysctl.u.livepatch.u.upload.name.name, name);

    rc = do_sysctl(xch, &sysctl);

    xc_hypercall_buffer_free(xch, local);
    xc_hypercall_bounce_post(xch, name);

    return rc;
}

int xc_livepatch_get(xc_interface *xch,
                     char *name,
                     struct xen_livepatch_status *status)
{
    int rc;
    struct xen_sysctl sysctl = {};
    DECLARE_HYPERCALL_BOUNCE(name, 0 /* adjust later */, XC_HYPERCALL_BUFFER_BOUNCE_IN);
    struct xen_livepatch_name def_name = { };

    if ( !name )
    {
        errno = EINVAL;
        return -1;
    }

    def_name.size = strlen(name) + 1;
    if ( def_name.size > XEN_LIVEPATCH_NAME_SIZE )
    {
        errno = EINVAL;
        return -1;
    }

    HYPERCALL_BOUNCE_SET_SIZE(name, def_name.size);

    if ( xc_hypercall_bounce_pre(xch, name) )
        return -1;

    sysctl.cmd = XEN_SYSCTL_livepatch_op;
    sysctl.u.livepatch.cmd = XEN_SYSCTL_LIVEPATCH_GET;

    sysctl.u.livepatch.u.get.status.state = 0;
    sysctl.u.livepatch.u.get.status.rc = 0;

    sysctl.u.livepatch.u.get.name = def_name;
    set_xen_guest_handle(sysctl.u.livepatch.u.get.name.name, name);

    rc = do_sysctl(xch, &sysctl);

    xc_hypercall_bounce_post(xch, name);

    memcpy(status, &sysctl.u.livepatch.u.get.status, sizeof(*status));

    return rc;
}

/*
 * Get the number of available payloads and the actual total sizes of
 * the payloads' name and metadata arrays.
 *
 * This function is typically called before xc_livepatch_list() in order
 * to obtain the sizes and correctly allocate all necessary data resources.
 *
 * The return value is zero if the hypercall completed successfully.
 *
 * If there was an error performing the sysctl operation, the return value
 * will contain the hypercall error code value.
 */
int xc_livepatch_list_get_sizes(xc_interface *xch, unsigned int *nr,
                                uint32_t *name_total_size,
                                uint32_t *metadata_total_size)
{
    struct xen_sysctl sysctl = {};
    int rc;

    if ( !nr || !name_total_size || !metadata_total_size )
    {
        errno = EINVAL;
        return -1;
    }

    memset(&sysctl, 0, sizeof(sysctl));
    sysctl.cmd = XEN_SYSCTL_livepatch_op;
    sysctl.u.livepatch.cmd = XEN_SYSCTL_LIVEPATCH_LIST;

    rc = do_sysctl(xch, &sysctl);
    if ( rc )
        return rc;

    *nr = sysctl.u.livepatch.u.list.nr;
    *name_total_size = sysctl.u.livepatch.u.list.name_total_size;
    *metadata_total_size = sysctl.u.livepatch.u.list.metadata_total_size;

    return 0;
}
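
/*
 * A hypothetical caller sketch for xc_livepatch_list_get_sizes(); the
 * variable names and the calloc()-based allocation are illustrative
 * assumptions, not requirements of the interface:
 *
 *     unsigned int nr;
 *     uint32_t name_total, meta_total;
 *
 *     if ( xc_livepatch_list_get_sizes(xch, &nr, &name_total,
 *                                      &meta_total) == 0 && nr )
 *     {
 *         struct xen_livepatch_status *info = calloc(nr, sizeof(*info));
 *         char *names = calloc(name_total, 1);
 *         uint32_t *lens = calloc(nr, sizeof(*lens));
 *         char *meta = calloc(meta_total, 1);
 *         uint32_t *meta_lens = calloc(nr, sizeof(*meta_lens));
 *
 *         // ... pass these to xc_livepatch_list(), see the sketch below ...
 *     }
 */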

/*
 * The heart of this function is to get an array of the following objects:
 * - xen_livepatch_status_t: states and return codes of payloads
 * - name: names of payloads
 * - len: lengths of the corresponding payloads' names
 * - metadata: payloads' metadata
 * - metadata_len: lengths of the corresponding payloads' metadata
 *
 * However it is complex because it has to deal with the hypervisor
 * returning only some of the requested data, or the data being stale
 * (another hypercall might alter the list).
 *
 * The parameters that the function expects to contain data from
 * the hypervisor are: 'info', 'name', and 'len'. The 'done' and
 * 'left' parameters are also updated with the number of entries filled out
 * and, respectively, the number of entries left to get from the hypervisor.
 *
 * It is expected that the caller of this function will first issue
 * xc_livepatch_list_get_sizes() in order to obtain the total sizes of names
 * and all metadata as well as the current number of payload entries.
 * The total sizes are required and supplied via the 'name_total_size' and
 * 'metadata_total_size' parameters.
 *
 * The 'max' is to be provided by the caller with the maximum number of
 * entries that the 'info', 'name', 'len', 'metadata' and 'metadata_len'
 * arrays can be filled up with.
 *
 * Each entry in the 'info' array is expected to be of xen_livepatch_status_t
 * structure size.
 *
 * Each entry in the 'name' array may have an arbitrary size.
 *
 * Each entry in the 'len' array is expected to be of uint32_t size.
 *
 * Each entry in the 'metadata' array may have an arbitrary size.
 *
 * Each entry in the 'metadata_len' array is expected to be of uint32_t size.
 *
 * The return value is zero if the hypercall completed successfully.
 * Note that the return value is _not_ the number of entries filled
 * out - that is saved in 'done'.
 *
 * If there was an error performing the operation, the return value
 * will contain a negative -EXX type value. The 'done' and 'left'
 * parameters will contain the number of entries that had been successfully
 * retrieved (if any).
 */
int xc_livepatch_list(xc_interface *xch, const unsigned int max,
                      const unsigned int start,
                      struct xen_livepatch_status *info,
                      char *name, uint32_t *len,
                      const uint32_t name_total_size,
                      char *metadata, uint32_t *metadata_len,
                      const uint32_t metadata_total_size,
                      unsigned int *done, unsigned int *left)
{
    int rc;
    struct xen_sysctl sysctl = {};
    /* The sizes are adjusted later - hence zero. */
    DECLARE_HYPERCALL_BOUNCE(info, 0, XC_HYPERCALL_BUFFER_BOUNCE_OUT);
    DECLARE_HYPERCALL_BOUNCE(name, 0, XC_HYPERCALL_BUFFER_BOUNCE_OUT);
    DECLARE_HYPERCALL_BOUNCE(len, 0, XC_HYPERCALL_BUFFER_BOUNCE_OUT);
    DECLARE_HYPERCALL_BOUNCE(metadata, 0, XC_HYPERCALL_BUFFER_BOUNCE_OUT);
    DECLARE_HYPERCALL_BOUNCE(metadata_len, 0, XC_HYPERCALL_BUFFER_BOUNCE_OUT);
    uint32_t max_batch_sz, nr;
    uint32_t version = 0, retries = 0;
    uint32_t adjust = 0;
    uint32_t name_off = 0, metadata_off = 0;
    uint32_t name_sz, metadata_sz;

    if ( !max || !info || !name || !len ||
         !metadata || !metadata_len || !done || !left )
    {
        errno = EINVAL;
        return -1;
    }

    if ( name_total_size == 0 )
    {
        errno = ENOENT;
        return -1;
    }

    memset(&sysctl, 0, sizeof(sysctl));
    sysctl.cmd = XEN_SYSCTL_livepatch_op;
    sysctl.u.livepatch.cmd = XEN_SYSCTL_LIVEPATCH_LIST;
    sysctl.u.livepatch.u.list.idx = start;

    max_batch_sz = max;
    name_sz = name_total_size;
    metadata_sz = metadata_total_size;
    *done = 0;
    *left = 0;
    do {
        uint32_t _name_sz, _metadata_sz;

        /*
         * The first time we go around this loop our 'max' may be bigger
         * than what the hypervisor is comfortable with - hence the first
         * couple of iterations may adjust the number of entries we
         * want filled (tracked by 'nr').
         *
         * N.B. This is a do { } while loop and the right hand side of
         * the conditional when adjusting will evaluate to false (as
         * *left is set to zero before the loop). Hence we need this
         * 'adjust' - even if we reset it at the start of the loop.
         */
        if ( adjust )
            adjust = 0; /* Used when adjusting the 'max_batch_sz' or 'retries'. */

        nr = min(max - *done, max_batch_sz);

        sysctl.u.livepatch.u.list.nr = nr;
        /* Fix the size (may vary between hypercalls). */
        HYPERCALL_BOUNCE_SET_SIZE(info, nr * sizeof(*info));
        HYPERCALL_BOUNCE_SET_SIZE(name, name_sz);
        HYPERCALL_BOUNCE_SET_SIZE(len, nr * sizeof(*len));
        HYPERCALL_BOUNCE_SET_SIZE(metadata, metadata_sz);
        HYPERCALL_BOUNCE_SET_SIZE(metadata_len, nr * sizeof(*metadata_len));
        /* Move the pointer to the proper offset into 'info'. */
        (HYPERCALL_BUFFER(info))->ubuf = info + *done;
        (HYPERCALL_BUFFER(name))->ubuf = name + name_off;
        (HYPERCALL_BUFFER(len))->ubuf = len + *done;
        (HYPERCALL_BUFFER(metadata))->ubuf = metadata + metadata_off;
        (HYPERCALL_BUFFER(metadata_len))->ubuf = metadata_len + *done;
        /* Allocate memory. */
        rc = xc_hypercall_bounce_pre(xch, info);
        if ( rc )
            break;

        rc = xc_hypercall_bounce_pre(xch, name);
        if ( rc )
            break;

        rc = xc_hypercall_bounce_pre(xch, len);
        if ( rc )
            break;

        rc = xc_hypercall_bounce_pre(xch, metadata);
        if ( rc )
            break;

        rc = xc_hypercall_bounce_pre(xch, metadata_len);
        if ( rc )
            break;

        set_xen_guest_handle(sysctl.u.livepatch.u.list.status, info);
        set_xen_guest_handle(sysctl.u.livepatch.u.list.name, name);
        set_xen_guest_handle(sysctl.u.livepatch.u.list.len, len);
        set_xen_guest_handle(sysctl.u.livepatch.u.list.metadata, metadata);
        set_xen_guest_handle(sysctl.u.livepatch.u.list.metadata_len, metadata_len);

        rc = do_sysctl(xch, &sysctl);
        /*
         * From here on we MUST call xc_hypercall_bounce_post. If rc < 0 we
         * end up doing it (outside the loop), so using a break is OK.
         */
        if ( rc < 0 && errno == E2BIG )
        {
            if ( max_batch_sz <= 1 )
                break;
            max_batch_sz >>= 1;
            adjust = 1; /* For the loop conditional to let us loop again. */
            /* No memory leaks! */
            xc_hypercall_bounce_post(xch, info);
            xc_hypercall_bounce_post(xch, name);
            xc_hypercall_bounce_post(xch, len);
            xc_hypercall_bounce_post(xch, metadata);
            xc_hypercall_bounce_post(xch, metadata_len);
            continue;
        }

        if ( rc < 0 ) /* For all other errors we bail out. */
            break;

        if ( !version )
            version = sysctl.u.livepatch.u.list.version;

        if ( sysctl.u.livepatch.u.list.version != version )
        {
            /* We could make this configurable as a parameter. */
            if ( retries++ > 3 )
            {
                rc = -1;
                errno = EBUSY;
                break;
            }
            *done = 0; /* Retry from scratch. */
            version = sysctl.u.livepatch.u.list.version;
            adjust = 1; /* And make sure we continue in the loop. */
            /* No memory leaks. */
            xc_hypercall_bounce_post(xch, info);
            xc_hypercall_bounce_post(xch, name);
            xc_hypercall_bounce_post(xch, len);
            xc_hypercall_bounce_post(xch, metadata);
            xc_hypercall_bounce_post(xch, metadata_len);
            continue;
        }

        /* We should never hit this, but just in case. */
        if ( rc > nr )
        {
            errno = EOVERFLOW; /* Overflow! */
            rc = -1;
            break;
        }
        *left = sysctl.u.livepatch.u.list.nr; /* Total remaining count. */
        _name_sz = sysctl.u.livepatch.u.list.name_total_size; /* Total received name size. */
        _metadata_sz = sysctl.u.livepatch.u.list.metadata_total_size; /* Total received metadata size. */
        /* Copy only up to 'rc' entries of data - we could use min(rc, nr) if desired. */
        HYPERCALL_BOUNCE_SET_SIZE(info, (rc * sizeof(*info)));
        HYPERCALL_BOUNCE_SET_SIZE(name, _name_sz);
        HYPERCALL_BOUNCE_SET_SIZE(len, (rc * sizeof(*len)));
        HYPERCALL_BOUNCE_SET_SIZE(metadata, _metadata_sz);
        HYPERCALL_BOUNCE_SET_SIZE(metadata_len, (rc * sizeof(*metadata_len)));
        /* Bounce the data and free the bounce buffers. */
        xc_hypercall_bounce_post(xch, info);
        xc_hypercall_bounce_post(xch, name);
        xc_hypercall_bounce_post(xch, len);
        xc_hypercall_bounce_post(xch, metadata);
        xc_hypercall_bounce_post(xch, metadata_len);

        name_sz -= _name_sz;
        name_off += _name_sz;
        metadata_sz -= _metadata_sz;
        metadata_off += _metadata_sz;

        /* And update how many elements of 'info' we have copied into. */
        *done += rc;
        /* Update idx. */
        sysctl.u.livepatch.u.list.idx = *done;
    } while ( adjust || (*done < max && *left != 0) );

    if ( rc < 0 )
    {
        xc_hypercall_bounce_post(xch, len);
        xc_hypercall_bounce_post(xch, name);
        xc_hypercall_bounce_post(xch, info);
        xc_hypercall_bounce_post(xch, metadata);
        xc_hypercall_bounce_post(xch, metadata_len);
    }

    return rc > 0 ? 0 : rc;
}
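
/*
 * Continuing the hypothetical sketch started after
 * xc_livepatch_list_get_sizes() above, with 'nr', 'name_total', 'meta_total'
 * and the allocated arrays assumed to still be in scope:
 *
 *     unsigned int done = 0, left = 0;
 *
 *     if ( xc_livepatch_list(xch, nr, 0, info, names, lens, name_total,
 *                            meta, meta_lens, meta_total,
 *                            &done, &left) == 0 )
 *     {
 *         // 'done' entries of 'info'/'lens'/'meta_lens' are now valid;
 *         // 'names' and 'meta' hold the concatenated, length-delimited
 *         // strings. A non-zero 'left' means the listing was truncated.
 *     }
 */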

static int _xc_livepatch_action(xc_interface *xch,
                                char *name,
                                unsigned int action,
                                uint32_t timeout,
                                uint32_t flags)
{
    int rc;
    struct xen_sysctl sysctl = {};
    /* The size is figured out when we strlen(name). */
    DECLARE_HYPERCALL_BOUNCE(name, 0, XC_HYPERCALL_BUFFER_BOUNCE_IN);
    struct xen_livepatch_name def_name = { };

    def_name.size = strlen(name) + 1;

    if ( def_name.size > XEN_LIVEPATCH_NAME_SIZE )
    {
        errno = EINVAL;
        return -1;
    }

    HYPERCALL_BOUNCE_SET_SIZE(name, def_name.size);

    if ( xc_hypercall_bounce_pre(xch, name) )
        return -1;

    sysctl.cmd = XEN_SYSCTL_livepatch_op;
    sysctl.u.livepatch.cmd = XEN_SYSCTL_LIVEPATCH_ACTION;
    sysctl.u.livepatch.u.action.cmd = action;
    sysctl.u.livepatch.u.action.timeout = timeout;
    sysctl.u.livepatch.u.action.flags = flags;
    sysctl.u.livepatch.u.action.pad = 0;

    sysctl.u.livepatch.u.action.name = def_name;
    set_xen_guest_handle(sysctl.u.livepatch.u.action.name.name, name);

    rc = do_sysctl(xch, &sysctl);

    xc_hypercall_bounce_post(xch, name);

    return rc;
}

int xc_livepatch_apply(xc_interface *xch, char *name, uint32_t timeout, uint32_t flags)
{
    return _xc_livepatch_action(xch, name, LIVEPATCH_ACTION_APPLY, timeout, flags);
}

int xc_livepatch_revert(xc_interface *xch, char *name, uint32_t timeout, uint32_t flags)
{
    return _xc_livepatch_action(xch, name, LIVEPATCH_ACTION_REVERT, timeout, flags);
}

int xc_livepatch_unload(xc_interface *xch, char *name, uint32_t timeout, uint32_t flags)
{
    return _xc_livepatch_action(xch, name, LIVEPATCH_ACTION_UNLOAD, timeout, flags);
}

int xc_livepatch_replace(xc_interface *xch, char *name, uint32_t timeout, uint32_t flags)
{
    return _xc_livepatch_action(xch, name, LIVEPATCH_ACTION_REPLACE, timeout, flags);
}

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */