/******************************************************************************
 * xc_misc.c
 *
 * Miscellaneous control interface functions.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation;
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; If not, see <http://www.gnu.org/licenses/>.
 */

#include "xc_bitops.h"
#include "xc_private.h"
#include <xen/hvm/hvm_op.h>

int xc_get_max_cpus(xc_interface *xch)
{
    static int max_cpus = 0;
    xc_physinfo_t physinfo;

    if ( max_cpus )
        return max_cpus;

    if ( !xc_physinfo(xch, &physinfo) )
    {
        max_cpus = physinfo.max_cpu_id + 1;
        return max_cpus;
    }

    return -1;
}

int xc_get_online_cpus(xc_interface *xch)
{
    xc_physinfo_t physinfo;

    if ( !xc_physinfo(xch, &physinfo) )
        return physinfo.nr_cpus;

    return -1;
}

int xc_get_max_nodes(xc_interface *xch)
{
    static int max_nodes = 0;
    xc_physinfo_t physinfo;

    if ( max_nodes )
        return max_nodes;

    if ( !xc_physinfo(xch, &physinfo) )
    {
        max_nodes = physinfo.max_node_id + 1;
        return max_nodes;
    }

    return -1;
}

int xc_get_cpumap_size(xc_interface *xch)
{
    int max_cpus = xc_get_max_cpus(xch);

    if ( max_cpus < 0 )
        return -1;
    return (max_cpus + 7) / 8;
}

int xc_get_nodemap_size(xc_interface *xch)
{
    int max_nodes = xc_get_max_nodes(xch);

    if ( max_nodes < 0 )
        return -1;
    return (max_nodes + 7) / 8;
}

xc_cpumap_t xc_cpumap_alloc(xc_interface *xch)
{
    int sz;

    sz = xc_get_cpumap_size(xch);
    if (sz <= 0)
        return NULL;
    return calloc(1, sz);
}

/*
 * xc_bitops.h has macros that do this as well - however they assume that
 * the bitmask is word aligned but xc_cpumap_t is only guaranteed to be
 * byte aligned and so we need byte versions for architectures which do
 * not support misaligned accesses (which is basically everyone
 * but x86, although even on x86 it can be inefficient).
 *
 * NOTE: The xc_bitops macros now use byte alignment.
 * TODO: Clean up the users of this interface.
 */
#define BITS_PER_CPUMAP(map) (sizeof(*map) * 8)
#define CPUMAP_ENTRY(cpu, map) ((map))[(cpu) / BITS_PER_CPUMAP(map)]
#define CPUMAP_SHIFT(cpu, map) ((cpu) % BITS_PER_CPUMAP(map))
void xc_cpumap_clearcpu(int cpu, xc_cpumap_t map)
{
    CPUMAP_ENTRY(cpu, map) &= ~(1U << CPUMAP_SHIFT(cpu, map));
}

void xc_cpumap_setcpu(int cpu, xc_cpumap_t map)
{
    CPUMAP_ENTRY(cpu, map) |= (1U << CPUMAP_SHIFT(cpu, map));
}

int xc_cpumap_testcpu(int cpu, xc_cpumap_t map)
{
    return (CPUMAP_ENTRY(cpu, map) >> CPUMAP_SHIFT(cpu, map)) & 1;
}
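
/*
 * Minimal usage sketch for the byte-based cpumap accessors above, kept out
 * of the build with #if 0.  This is purely illustrative: it assumes an open
 * xc_interface *xch is in scope and picks CPU 2 arbitrarily.  The map from
 * xc_cpumap_alloc() is sized via xc_get_cpumap_size() and zeroed, so only
 * bits explicitly set read back as 1 from xc_cpumap_testcpu().
 */
#if 0
    xc_cpumap_t map = xc_cpumap_alloc(xch);

    if ( map )
    {
        xc_cpumap_setcpu(2, map);          /* Mark CPU 2 in the map. */
        if ( xc_cpumap_testcpu(2, map) )   /* Now reads back as 1. */
            xc_cpumap_clearcpu(2, map);    /* Clear it again. */
        free(map);                         /* calloc()'d, so plain free(). */
    }
#endif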

xc_nodemap_t xc_nodemap_alloc(xc_interface *xch)
{
    int sz;

    sz = xc_get_nodemap_size(xch);
    if (sz <= 0)
        return NULL;
    return calloc(1, sz);
}

int xc_readconsolering(xc_interface *xch,
                       char *buffer,
                       unsigned int *pnr_chars,
                       int clear, int incremental, uint32_t *pindex)
{
    int ret;
    unsigned int nr_chars = *pnr_chars;
    DECLARE_SYSCTL;
    DECLARE_HYPERCALL_BOUNCE(buffer, nr_chars, XC_HYPERCALL_BUFFER_BOUNCE_OUT);

    if ( xc_hypercall_bounce_pre(xch, buffer) )
        return -1;

    sysctl.cmd = XEN_SYSCTL_readconsole;
    set_xen_guest_handle(sysctl.u.readconsole.buffer, buffer);
    sysctl.u.readconsole.count = nr_chars;
    sysctl.u.readconsole.clear = clear;
    sysctl.u.readconsole.incremental = 0;
    if ( pindex )
    {
        sysctl.u.readconsole.index = *pindex;
        sysctl.u.readconsole.incremental = incremental;
    }

    if ( (ret = do_sysctl(xch, &sysctl)) == 0 )
    {
        *pnr_chars = sysctl.u.readconsole.count;
        if ( pindex )
            *pindex = sysctl.u.readconsole.index;
    }

    xc_hypercall_bounce_post(xch, buffer);

    return ret;
}

int xc_send_debug_keys(xc_interface *xch, char *keys)
{
    int ret, len = strlen(keys);
    DECLARE_SYSCTL;
    DECLARE_HYPERCALL_BOUNCE(keys, len, XC_HYPERCALL_BUFFER_BOUNCE_IN);

    if ( xc_hypercall_bounce_pre(xch, keys) )
        return -1;

    sysctl.cmd = XEN_SYSCTL_debug_keys;
    set_xen_guest_handle(sysctl.u.debug_keys.keys, keys);
    sysctl.u.debug_keys.nr_keys = len;

    ret = do_sysctl(xch, &sysctl);

    xc_hypercall_bounce_post(xch, keys);

    return ret;
}

int xc_set_parameters(xc_interface *xch, char *params)
{
    int ret, len = strlen(params);
    DECLARE_SYSCTL;
    DECLARE_HYPERCALL_BOUNCE(params, len, XC_HYPERCALL_BUFFER_BOUNCE_IN);

    if ( xc_hypercall_bounce_pre(xch, params) )
        return -1;

    sysctl.cmd = XEN_SYSCTL_set_parameter;
    set_xen_guest_handle(sysctl.u.set_parameter.params, params);
    sysctl.u.set_parameter.size = len;
    memset(sysctl.u.set_parameter.pad, 0, sizeof(sysctl.u.set_parameter.pad));

    ret = do_sysctl(xch, &sysctl);

    xc_hypercall_bounce_post(xch, params);

    return ret;
}

int xc_physinfo(xc_interface *xch,
                xc_physinfo_t *put_info)
{
    int ret;
    DECLARE_SYSCTL;

    sysctl.cmd = XEN_SYSCTL_physinfo;

    memcpy(&sysctl.u.physinfo, put_info, sizeof(*put_info));

    if ( (ret = do_sysctl(xch, &sysctl)) != 0 )
        return ret;

    memcpy(put_info, &sysctl.u.physinfo, sizeof(*put_info));

    return 0;
}

int xc_cputopoinfo(xc_interface *xch, unsigned *max_cpus,
                   xc_cputopo_t *cputopo)
{
    int ret;
    DECLARE_SYSCTL;
    DECLARE_HYPERCALL_BOUNCE(cputopo, *max_cpus * sizeof(*cputopo),
                             XC_HYPERCALL_BUFFER_BOUNCE_OUT);

    if ( (ret = xc_hypercall_bounce_pre(xch, cputopo)) )
        goto out;

    sysctl.u.cputopoinfo.num_cpus = *max_cpus;
    set_xen_guest_handle(sysctl.u.cputopoinfo.cputopo, cputopo);

    sysctl.cmd = XEN_SYSCTL_cputopoinfo;

    if ( (ret = do_sysctl(xch, &sysctl)) != 0 )
        goto out;

    *max_cpus = sysctl.u.cputopoinfo.num_cpus;

out:
    xc_hypercall_bounce_post(xch, cputopo);

    return ret;
}

int xc_numainfo(xc_interface *xch, unsigned *max_nodes,
                xc_meminfo_t *meminfo, uint32_t *distance)
{
    int ret;
    DECLARE_SYSCTL;
    DECLARE_HYPERCALL_BOUNCE(meminfo, *max_nodes * sizeof(*meminfo),
                             XC_HYPERCALL_BUFFER_BOUNCE_OUT);
    DECLARE_HYPERCALL_BOUNCE(distance,
                             *max_nodes * *max_nodes * sizeof(*distance),
                             XC_HYPERCALL_BUFFER_BOUNCE_OUT);

    if ( (ret = xc_hypercall_bounce_pre(xch, meminfo)) )
        goto out;
    if ( (ret = xc_hypercall_bounce_pre(xch, distance)) )
        goto out;

    sysctl.u.numainfo.num_nodes = *max_nodes;
    set_xen_guest_handle(sysctl.u.numainfo.meminfo, meminfo);
    set_xen_guest_handle(sysctl.u.numainfo.distance, distance);

    sysctl.cmd = XEN_SYSCTL_numainfo;

    if ( (ret = do_sysctl(xch, &sysctl)) != 0 )
        goto out;

    *max_nodes = sysctl.u.numainfo.num_nodes;

out:
    xc_hypercall_bounce_post(xch, meminfo);
    xc_hypercall_bounce_post(xch, distance);

    return ret;
}

int xc_pcitopoinfo(xc_interface *xch, unsigned num_devs,
                   physdev_pci_device_t *devs,
                   uint32_t *nodes)
{
    int ret = 0;
    unsigned processed = 0;
    DECLARE_SYSCTL;
    DECLARE_HYPERCALL_BOUNCE(devs, num_devs * sizeof(*devs),
                             XC_HYPERCALL_BUFFER_BOUNCE_IN);
    DECLARE_HYPERCALL_BOUNCE(nodes, num_devs * sizeof(*nodes),
                             XC_HYPERCALL_BUFFER_BOUNCE_BOTH);

    if ( (ret = xc_hypercall_bounce_pre(xch, devs)) )
        goto out;
    if ( (ret = xc_hypercall_bounce_pre(xch, nodes)) )
        goto out;

    sysctl.cmd = XEN_SYSCTL_pcitopoinfo;

    while ( processed < num_devs )
    {
        sysctl.u.pcitopoinfo.num_devs = num_devs - processed;
        set_xen_guest_handle_offset(sysctl.u.pcitopoinfo.devs, devs,
                                    processed);
        set_xen_guest_handle_offset(sysctl.u.pcitopoinfo.nodes, nodes,
                                    processed);

        if ( (ret = do_sysctl(xch, &sysctl)) != 0 )
            break;

        processed += sysctl.u.pcitopoinfo.num_devs;
    }

 out:
    xc_hypercall_bounce_post(xch, devs);
    xc_hypercall_bounce_post(xch, nodes);

    return ret;
}

int xc_sched_id(xc_interface *xch,
                int *sched_id)
{
    int ret;
    DECLARE_SYSCTL;

    sysctl.cmd = XEN_SYSCTL_sched_id;

    if ( (ret = do_sysctl(xch, &sysctl)) != 0 )
        return ret;

    *sched_id = sysctl.u.sched_id.sched_id;

    return 0;
}

#if defined(__i386__) || defined(__x86_64__)
int xc_mca_op(xc_interface *xch, struct xen_mc *mc)
{
    int ret = 0;
    DECLARE_HYPERCALL_BOUNCE(mc, sizeof(*mc), XC_HYPERCALL_BUFFER_BOUNCE_BOTH);

    if ( xc_hypercall_bounce_pre(xch, mc) )
    {
        PERROR("Could not bounce xen_mc memory buffer");
        return -1;
    }
    mc->interface_version = XEN_MCA_INTERFACE_VERSION;

    ret = xencall1(xch->xcall, __HYPERVISOR_mca,
                   HYPERCALL_BUFFER_AS_ARG(mc));

    xc_hypercall_bounce_post(xch, mc);
    return ret;
}

int xc_mca_op_inject_v2(xc_interface *xch, unsigned int flags,
                        xc_cpumap_t cpumap, unsigned int nr_bits)
{
    int ret = -1;
    struct xen_mc mc_buf, *mc = &mc_buf;
    struct xen_mc_inject_v2 *inject = &mc->u.mc_inject_v2;

    DECLARE_HYPERCALL_BOUNCE(cpumap, 0, XC_HYPERCALL_BUFFER_BOUNCE_IN);
    DECLARE_HYPERCALL_BOUNCE(mc, sizeof(*mc), XC_HYPERCALL_BUFFER_BOUNCE_BOTH);

    memset(mc, 0, sizeof(*mc));

    if ( cpumap )
    {
        if ( !nr_bits )
        {
            errno = EINVAL;
            goto out;
        }

        HYPERCALL_BOUNCE_SET_SIZE(cpumap, (nr_bits + 7) / 8);
        if ( xc_hypercall_bounce_pre(xch, cpumap) )
        {
            PERROR("Could not bounce cpumap memory buffer");
            goto out;
        }
        set_xen_guest_handle(inject->cpumap.bitmap, cpumap);
        inject->cpumap.nr_bits = nr_bits;
    }

    inject->flags = flags;
    mc->cmd = XEN_MC_inject_v2;
    mc->interface_version = XEN_MCA_INTERFACE_VERSION;

    if ( xc_hypercall_bounce_pre(xch, mc) )
    {
        PERROR("Could not bounce xen_mc memory buffer");
        goto out_free_cpumap;
    }

    ret = xencall1(xch->xcall, __HYPERVISOR_mca, HYPERCALL_BUFFER_AS_ARG(mc));

    xc_hypercall_bounce_post(xch, mc);
out_free_cpumap:
    if ( cpumap )
        xc_hypercall_bounce_post(xch, cpumap);
out:
    return ret;
}
#endif /* __i386__ || __x86_64__ */

int xc_perfc_reset(xc_interface *xch)
{
    DECLARE_SYSCTL;

    sysctl.cmd = XEN_SYSCTL_perfc_op;
    sysctl.u.perfc_op.cmd = XEN_SYSCTL_PERFCOP_reset;
    set_xen_guest_handle(sysctl.u.perfc_op.desc, HYPERCALL_BUFFER_NULL);
    set_xen_guest_handle(sysctl.u.perfc_op.val, HYPERCALL_BUFFER_NULL);

    return do_sysctl(xch, &sysctl);
}

int xc_perfc_query_number(xc_interface *xch,
                          int *nbr_desc,
                          int *nbr_val)
{
    int rc;
    DECLARE_SYSCTL;

    sysctl.cmd = XEN_SYSCTL_perfc_op;
    sysctl.u.perfc_op.cmd = XEN_SYSCTL_PERFCOP_query;
    set_xen_guest_handle(sysctl.u.perfc_op.desc, HYPERCALL_BUFFER_NULL);
    set_xen_guest_handle(sysctl.u.perfc_op.val, HYPERCALL_BUFFER_NULL);

    rc = do_sysctl(xch, &sysctl);

    if ( nbr_desc )
        *nbr_desc = sysctl.u.perfc_op.nr_counters;
    if ( nbr_val )
        *nbr_val = sysctl.u.perfc_op.nr_vals;

    return rc;
}

int xc_perfc_query(xc_interface *xch,
                   struct xc_hypercall_buffer *desc,
                   struct xc_hypercall_buffer *val)
{
    DECLARE_SYSCTL;
    DECLARE_HYPERCALL_BUFFER_ARGUMENT(desc);
    DECLARE_HYPERCALL_BUFFER_ARGUMENT(val);

    sysctl.cmd = XEN_SYSCTL_perfc_op;
    sysctl.u.perfc_op.cmd = XEN_SYSCTL_PERFCOP_query;
    set_xen_guest_handle(sysctl.u.perfc_op.desc, desc);
    set_xen_guest_handle(sysctl.u.perfc_op.val, val);

    return do_sysctl(xch, &sysctl);
}

int xc_lockprof_reset(xc_interface *xch)
{
    DECLARE_SYSCTL;

    sysctl.cmd = XEN_SYSCTL_lockprof_op;
    sysctl.u.lockprof_op.cmd = XEN_SYSCTL_LOCKPROF_reset;
    set_xen_guest_handle(sysctl.u.lockprof_op.data, HYPERCALL_BUFFER_NULL);

    return do_sysctl(xch, &sysctl);
}

int xc_lockprof_query_number(xc_interface *xch,
                             uint32_t *n_elems)
{
    int rc;
    DECLARE_SYSCTL;

    sysctl.cmd = XEN_SYSCTL_lockprof_op;
    sysctl.u.lockprof_op.max_elem = 0;
    sysctl.u.lockprof_op.cmd = XEN_SYSCTL_LOCKPROF_query;
    set_xen_guest_handle(sysctl.u.lockprof_op.data, HYPERCALL_BUFFER_NULL);

    rc = do_sysctl(xch, &sysctl);

    *n_elems = sysctl.u.lockprof_op.nr_elem;

    return rc;
}

int xc_lockprof_query(xc_interface *xch,
                      uint32_t *n_elems,
                      uint64_t *time,
                      struct xc_hypercall_buffer *data)
{
    int rc;
    DECLARE_SYSCTL;
    DECLARE_HYPERCALL_BUFFER_ARGUMENT(data);

    sysctl.cmd = XEN_SYSCTL_lockprof_op;
    sysctl.u.lockprof_op.cmd = XEN_SYSCTL_LOCKPROF_query;
    sysctl.u.lockprof_op.max_elem = *n_elems;
    set_xen_guest_handle(sysctl.u.lockprof_op.data, data);

    rc = do_sysctl(xch, &sysctl);

    *n_elems = sysctl.u.lockprof_op.nr_elem;

    return rc;
}

int xc_getcpuinfo(xc_interface *xch, int max_cpus,
                  xc_cpuinfo_t *info, int *nr_cpus)
{
    int rc;
    DECLARE_SYSCTL;
    DECLARE_HYPERCALL_BOUNCE(info, max_cpus*sizeof(*info), XC_HYPERCALL_BUFFER_BOUNCE_OUT);

    if ( xc_hypercall_bounce_pre(xch, info) )
        return -1;

    sysctl.cmd = XEN_SYSCTL_getcpuinfo;
    sysctl.u.getcpuinfo.max_cpus = max_cpus;
    set_xen_guest_handle(sysctl.u.getcpuinfo.info, info);

    rc = do_sysctl(xch, &sysctl);

    xc_hypercall_bounce_post(xch, info);

    if ( nr_cpus )
        *nr_cpus = sysctl.u.getcpuinfo.nr_cpus;

    return rc;
}

int xc_livepatch_upload(xc_interface *xch,
                        char *name,
                        unsigned char *payload,
                        uint32_t size)
{
    int rc;
    DECLARE_SYSCTL;
    DECLARE_HYPERCALL_BUFFER(char, local);
    DECLARE_HYPERCALL_BOUNCE(name, 0 /* later */, XC_HYPERCALL_BUFFER_BOUNCE_IN);
    struct xen_livepatch_name def_name = { };

    if ( !name || !payload )
    {
        errno = EINVAL;
        return -1;
    }

    def_name.size = strlen(name) + 1;
    if ( def_name.size > XEN_LIVEPATCH_NAME_SIZE )
    {
        errno = EINVAL;
        return -1;
    }

    HYPERCALL_BOUNCE_SET_SIZE(name, def_name.size);

    if ( xc_hypercall_bounce_pre(xch, name) )
        return -1;

    local = xc_hypercall_buffer_alloc(xch, local, size);
    if ( !local )
    {
        xc_hypercall_bounce_post(xch, name);
        return -1;
    }
    memcpy(local, payload, size);

    sysctl.cmd = XEN_SYSCTL_livepatch_op;
    sysctl.u.livepatch.cmd = XEN_SYSCTL_LIVEPATCH_UPLOAD;
    sysctl.u.livepatch.pad = 0;
    sysctl.u.livepatch.u.upload.size = size;
    set_xen_guest_handle(sysctl.u.livepatch.u.upload.payload, local);

    sysctl.u.livepatch.u.upload.name = def_name;
    set_xen_guest_handle(sysctl.u.livepatch.u.upload.name.name, name);

    rc = do_sysctl(xch, &sysctl);

    xc_hypercall_buffer_free(xch, local);
    xc_hypercall_bounce_post(xch, name);

    return rc;
}

int xc_livepatch_get(xc_interface *xch,
                     char *name,
                     struct xen_livepatch_status *status)
{
    int rc;
    DECLARE_SYSCTL;
    DECLARE_HYPERCALL_BOUNCE(name, 0 /* adjust later */, XC_HYPERCALL_BUFFER_BOUNCE_IN);
    struct xen_livepatch_name def_name = { };

    if ( !name )
    {
        errno = EINVAL;
        return -1;
    }

    def_name.size = strlen(name) + 1;
    if ( def_name.size > XEN_LIVEPATCH_NAME_SIZE )
    {
        errno = EINVAL;
        return -1;
    }

    HYPERCALL_BOUNCE_SET_SIZE(name, def_name.size);

    if ( xc_hypercall_bounce_pre(xch, name) )
        return -1;

    sysctl.cmd = XEN_SYSCTL_livepatch_op;
    sysctl.u.livepatch.cmd = XEN_SYSCTL_LIVEPATCH_GET;
    sysctl.u.livepatch.pad = 0;

    sysctl.u.livepatch.u.get.status.state = 0;
    sysctl.u.livepatch.u.get.status.rc = 0;

    sysctl.u.livepatch.u.get.name = def_name;
    set_xen_guest_handle(sysctl.u.livepatch.u.get.name.name, name);

    rc = do_sysctl(xch, &sysctl);

    xc_hypercall_bounce_post(xch, name);

    memcpy(status, &sysctl.u.livepatch.u.get.status, sizeof(*status));

    return rc;
}

/*
 * The heart of this function is to get an array of xen_livepatch_status_t.
 *
 * However it is complex because it has to deal with the hypervisor
 * returning only some of the requested data, or with the data being stale
 * (another hypercall might alter the list).
 *
 * The parameters that the function expects to contain data from
 * the hypervisor are: 'info', 'name', and 'len'. The 'done' and
 * 'left' parameters are also updated with the number of entries filled
 * out and, respectively, the number of entries left to get from the
 * hypervisor.
 *
 * It is expected that the caller of this function will take the
 * 'left' and use the value for 'start'. This way we have a
 * cursor in the array. Note that 'info', 'name', and 'len' will
 * be updated on subsequent calls.
 *
 * The 'max' is to be provided by the caller with the maximum
 * number of entries that the 'info', 'name', and 'len' arrays can
 * be filled up with.
 *
 * Each entry in the 'name' array is expected to be of XEN_LIVEPATCH_NAME_SIZE
 * length.
 *
 * Each entry in the 'info' array is expected to be of xen_livepatch_status_t
 * structure size.
 *
 * Each entry in the 'len' array is expected to be of uint32_t size.
 *
 * The return value is zero if the hypercall completed successfully.
 * Note that the return value is _not_ the number of entries filled
 * out - that is saved in 'done'.
 *
 * If there was an error performing the operation, the return value
 * will contain a negative -EXX type value. The 'done' and 'left'
 * will contain the number of entries that had been successfully
 * retrieved (if any).
 */
int xc_livepatch_list(xc_interface *xch, unsigned int max, unsigned int start,
                      struct xen_livepatch_status *info,
                      char *name, uint32_t *len,
                      unsigned int *done,
                      unsigned int *left)
{
    int rc;
    DECLARE_SYSCTL;
    /* The sizes are adjusted later - hence zero. */
    DECLARE_HYPERCALL_BOUNCE(info, 0, XC_HYPERCALL_BUFFER_BOUNCE_OUT);
    DECLARE_HYPERCALL_BOUNCE(name, 0, XC_HYPERCALL_BUFFER_BOUNCE_OUT);
    DECLARE_HYPERCALL_BOUNCE(len, 0, XC_HYPERCALL_BUFFER_BOUNCE_OUT);
    uint32_t max_batch_sz, nr;
    uint32_t version = 0, retries = 0;
    uint32_t adjust = 0;
    ssize_t sz;

    if ( !max || !info || !name || !len )
    {
        errno = EINVAL;
        return -1;
    }

    sysctl.cmd = XEN_SYSCTL_livepatch_op;
    sysctl.u.livepatch.cmd = XEN_SYSCTL_LIVEPATCH_LIST;
    sysctl.u.livepatch.pad = 0;
    sysctl.u.livepatch.u.list.version = 0;
    sysctl.u.livepatch.u.list.idx = start;
    sysctl.u.livepatch.u.list.pad = 0;

    max_batch_sz = max;
    /* Convenience value. */
    sz = sizeof(*name) * XEN_LIVEPATCH_NAME_SIZE;
    *done = 0;
    *left = 0;
    do {
        /*
         * The first time we go in this loop our 'max' may be bigger
         * than what the hypervisor is comfortable with - hence the first
         * couple of loops may adjust the number of entries we will
         * want filled (tracked by 'nr').
         *
         * N.B. This is a do { } while loop and the right hand side of
         * the conditional when adjusting will evaluate to false (as
         * *left is set to zero before the loop). Hence we need this
         * adjust - even if we reset it at the start of the loop.
         */
        if ( adjust )
            adjust = 0; /* Used when adjusting the 'max_batch_sz' or 'retries'. */

        nr = min(max - *done, max_batch_sz);

        sysctl.u.livepatch.u.list.nr = nr;
        /* Fix the size (may vary between hypercalls). */
        HYPERCALL_BOUNCE_SET_SIZE(info, nr * sizeof(*info));
        HYPERCALL_BOUNCE_SET_SIZE(name, nr * sz);
        HYPERCALL_BOUNCE_SET_SIZE(len, nr * sizeof(*len));
        /* Move the pointer to proper offset into 'info'. */
        (HYPERCALL_BUFFER(info))->ubuf = info + *done;
        (HYPERCALL_BUFFER(name))->ubuf = name + (sz * *done);
        (HYPERCALL_BUFFER(len))->ubuf = len + *done;
        /* Allocate memory. */
        rc = xc_hypercall_bounce_pre(xch, info);
        if ( rc )
            break;

        rc = xc_hypercall_bounce_pre(xch, name);
        if ( rc )
            break;

        rc = xc_hypercall_bounce_pre(xch, len);
        if ( rc )
            break;

        set_xen_guest_handle(sysctl.u.livepatch.u.list.status, info);
        set_xen_guest_handle(sysctl.u.livepatch.u.list.name, name);
        set_xen_guest_handle(sysctl.u.livepatch.u.list.len, len);

        rc = do_sysctl(xch, &sysctl);
        /*
         * From here on we MUST call xc_hypercall_bounce_post. If rc < 0 we
         * end up doing it (outside the loop), so using a break is OK.
         */
        if ( rc < 0 && errno == E2BIG )
        {
            if ( max_batch_sz <= 1 )
                break;
            max_batch_sz >>= 1;
            adjust = 1; /* For the loop conditional to let us loop again. */
            /* No memory leaks! */
            xc_hypercall_bounce_post(xch, info);
            xc_hypercall_bounce_post(xch, name);
            xc_hypercall_bounce_post(xch, len);
            continue;
        }
        else if ( rc < 0 ) /* For all other errors we bail out. */
            break;

        if ( !version )
            version = sysctl.u.livepatch.u.list.version;

        if ( sysctl.u.livepatch.u.list.version != version )
        {
            /* We could make this configurable as a parameter. */
            if ( retries++ > 3 )
            {
                rc = -1;
                errno = EBUSY;
                break;
            }
            *done = 0; /* Retry from scratch. */
            version = sysctl.u.livepatch.u.list.version;
            adjust = 1; /* And make sure we continue in the loop. */
            /* No memory leaks. */
            xc_hypercall_bounce_post(xch, info);
            xc_hypercall_bounce_post(xch, name);
            xc_hypercall_bounce_post(xch, len);
            continue;
        }

        /* We should never hit this, but just in case. */
        if ( rc > nr )
        {
            errno = EOVERFLOW; /* Overflow! */
            rc = -1;
            break;
        }
        *left = sysctl.u.livepatch.u.list.nr; /* Total remaining count. */
        /* Copy only up to 'rc' entries of data - we could use min(rc, nr) if desired. */
        HYPERCALL_BOUNCE_SET_SIZE(info, (rc * sizeof(*info)));
        HYPERCALL_BOUNCE_SET_SIZE(name, (rc * sz));
        HYPERCALL_BOUNCE_SET_SIZE(len, (rc * sizeof(*len)));
        /* Bounce the data and free the bounce buffer. */
        xc_hypercall_bounce_post(xch, info);
        xc_hypercall_bounce_post(xch, name);
        xc_hypercall_bounce_post(xch, len);
        /* And update how many elements of info we have copied into. */
        *done += rc;
        /* Update idx. */
        sysctl.u.livepatch.u.list.idx = *done;
    } while ( adjust || (*done < max && *left != 0) );

    if ( rc < 0 )
    {
        xc_hypercall_bounce_post(xch, len);
        xc_hypercall_bounce_post(xch, name);
        xc_hypercall_bounce_post(xch, info);
    }

    return rc > 0 ? 0 : rc;
}
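
/*
 * Minimal calling sketch for xc_livepatch_list(), kept out of the build
 * with #if 0 and purely illustrative.  It assumes an open xc_interface
 * *xch in scope; MAX_PATCHES and the printf() output are assumptions, not
 * part of this interface.  A single call asks for up to MAX_PATCHES
 * entries starting at index 0; 'done' reports how many entries were filled
 * in and 'left' how many remain on the hypervisor side.
 */
#if 0
    struct xen_livepatch_status info[MAX_PATCHES];
    char name[MAX_PATCHES * XEN_LIVEPATCH_NAME_SIZE];
    uint32_t len[MAX_PATCHES];
    unsigned int i, done = 0, left = 0;

    if ( xc_livepatch_list(xch, MAX_PATCHES, 0, info, name, len,
                           &done, &left) == 0 )
    {
        for ( i = 0; i < done; i++ )
            printf("%s: state %u\n",
                   name + i * XEN_LIVEPATCH_NAME_SIZE, info[i].state);
        /* A non-zero 'left' means more entries remain to be fetched. */
    }
#endif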

static int _xc_livepatch_action(xc_interface *xch,
                                char *name,
                                unsigned int action,
                                uint32_t timeout)
{
    int rc;
    DECLARE_SYSCTL;
    /* The size is figured out when we strlen(name). */
    DECLARE_HYPERCALL_BOUNCE(name, 0, XC_HYPERCALL_BUFFER_BOUNCE_IN);
    struct xen_livepatch_name def_name = { };

    def_name.size = strlen(name) + 1;

    if ( def_name.size > XEN_LIVEPATCH_NAME_SIZE )
    {
        errno = EINVAL;
        return -1;
    }

    HYPERCALL_BOUNCE_SET_SIZE(name, def_name.size);

    if ( xc_hypercall_bounce_pre(xch, name) )
        return -1;

    sysctl.cmd = XEN_SYSCTL_livepatch_op;
    sysctl.u.livepatch.cmd = XEN_SYSCTL_LIVEPATCH_ACTION;
    sysctl.u.livepatch.pad = 0;
    sysctl.u.livepatch.u.action.cmd = action;
    sysctl.u.livepatch.u.action.timeout = timeout;

    sysctl.u.livepatch.u.action.name = def_name;
    set_xen_guest_handle(sysctl.u.livepatch.u.action.name.name, name);

    rc = do_sysctl(xch, &sysctl);

    xc_hypercall_bounce_post(xch, name);

    return rc;
}

int xc_livepatch_apply(xc_interface *xch, char *name, uint32_t timeout)
{
    return _xc_livepatch_action(xch, name, LIVEPATCH_ACTION_APPLY, timeout);
}

int xc_livepatch_revert(xc_interface *xch, char *name, uint32_t timeout)
{
    return _xc_livepatch_action(xch, name, LIVEPATCH_ACTION_REVERT, timeout);
}

int xc_livepatch_unload(xc_interface *xch, char *name, uint32_t timeout)
{
    return _xc_livepatch_action(xch, name, LIVEPATCH_ACTION_UNLOAD, timeout);
}

int xc_livepatch_replace(xc_interface *xch, char *name, uint32_t timeout)
{
    return _xc_livepatch_action(xch, name, LIVEPATCH_ACTION_REPLACE, timeout);
}

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */