1 /******************************************************************************
2 * xc_private.c
3 *
4 * Helper functions for the rest of the library.
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation;
9 * version 2.1 of the License.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; If not, see <http://www.gnu.org/licenses/>.
18 */
19
20 #include "xc_private.h"
21 #include "xg_private.h"
22 #include "xc_dom.h"
23 #include <stdarg.h>
24 #include <stdlib.h>
25 #include <unistd.h>
26 #include <pthread.h>
27 #include <assert.h>
28
/*
 * Allocate and initialise a new xc interface handle.
 *
 * The handle is first assembled in a stack buffer (xch_buf) so that the
 * logging macros have a usable handle even before -- or instead of -- a
 * successful heap allocation.  If no @logger is supplied, a default
 * stderr logger is created; ownership is recorded in
 * error_handler_tofree so xc_interface_close() can destroy it.  With
 * XC_OPENFLAG_DUMMY the lower-level xencall / foreign-memory /
 * device-model handles are not opened at all.
 *
 * Returns the new handle, or NULL on failure.
 */
struct xc_interface_core *xc_interface_open(xentoollog_logger *logger,
                                            xentoollog_logger *dombuild_logger,
                                            unsigned open_flags)
{
    struct xc_interface_core xch_buf = { 0 }, *xch = &xch_buf;

    xch->flags = open_flags;
    xch->dombuild_logger_file = 0;
    xc_clear_last_error(xch);

    xch->error_handler = logger; xch->error_handler_tofree = 0;
    xch->dombuild_logger = dombuild_logger; xch->dombuild_logger_tofree = 0;

    /* No caller-supplied logger: fall back to a stderr logger we own. */
    if (!xch->error_handler) {
        xch->error_handler = xch->error_handler_tofree =
            (xentoollog_logger*)
            xtl_createlogger_stdiostream(stderr, XTL_PROGRESS, 0);
        if (!xch->error_handler)
            goto err;
    }

    /* Promote the stack copy to the heap. */
    xch = malloc(sizeof(*xch));
    if (!xch) {
        xch = &xch_buf; /* PERROR needs a valid handle */
        PERROR("Could not allocate new xc_interface struct");
        goto err;
    }
    *xch = xch_buf;

    if (open_flags & XC_OPENFLAG_DUMMY)
        return xch; /* We are done */

    xch->xcall = xencall_open(xch->error_handler,
                              open_flags & XC_OPENFLAG_NON_REENTRANT ? XENCALL_OPENFLAG_NON_REENTRANT : 0U);
    if ( xch->xcall == NULL )
        goto err;

    xch->fmem = xenforeignmemory_open(xch->error_handler, 0);
    if ( xch->fmem == NULL )
        goto err;

    xch->dmod = xendevicemodel_open(xch->error_handler, 0);
    if ( xch->dmod == NULL )
        goto err;

    return xch;

 err:
    /* Sub-handles that were never opened are still the zero values from
     * xch_buf; this relies on the close/destroy functions tolerating
     * those zero/NULL arguments. */
    xenforeignmemory_close(xch->fmem);
    xencall_close(xch->xcall);
    xtl_logger_destroy(xch->error_handler_tofree);
    if (xch != &xch_buf) free(xch);
    return NULL;
}
83
/*
 * Tear down an interface handle: close the xencall, foreign-memory and
 * device-model sub-handles, destroy any loggers the handle owns, and
 * free the handle itself.  A NULL handle is a no-op.
 *
 * Returns 0 if every sub-handle closed cleanly, otherwise the status of
 * the first close that failed.  (Previously each close's status simply
 * overwrote the previous one, so an early failure was lost whenever a
 * later close succeeded.)
 */
int xc_interface_close(xc_interface *xch)
{
    int rc = 0, ret;

    if (!xch)
        return 0;

    ret = xencall_close(xch->xcall);
    if (ret) {
        PERROR("Could not close xencall interface");
        rc = ret;
    }

    ret = xenforeignmemory_close(xch->fmem);
    if (ret) {
        PERROR("Could not close foreign memory interface");
        if (!rc) rc = ret;
    }

    ret = xendevicemodel_close(xch->dmod);
    if (ret) {
        PERROR("Could not close device model interface");
        if (!rc) rc = ret;
    }

    xtl_logger_destroy(xch->dombuild_logger_tofree);
    xtl_logger_destroy(xch->error_handler_tofree);

    free(xch);
    return rc;
}
106
/* Per-thread buffer for xc_strerror(); the key is created on first use
 * via pthread_once (see _xc_init_errbuf below). */
static pthread_key_t errbuf_pkey;
static pthread_once_t errbuf_pkey_once = PTHREAD_ONCE_INIT;
109
/*
 * Return the handle's stored last-error record.  The pointer refers to
 * storage inside @xch and its contents are overwritten by the next
 * error reported on the same handle (see xc_reportv).
 */
const xc_error *xc_get_last_error(xc_interface *xch)
{
    return &xch->last_error;
}
114
/* Reset the handle's last-error record to XC_ERROR_NONE / empty message. */
void xc_clear_last_error(xc_interface *xch)
{
    xch->last_error.code = XC_ERROR_NONE;
    xch->last_error.message[0] = '\0';
}
120
xc_error_code_to_desc(int code)121 const char *xc_error_code_to_desc(int code)
122 {
123 /* Sync to members of xc_error_code enumeration in xenctrl.h */
124 switch ( code )
125 {
126 case XC_ERROR_NONE:
127 return "No error details";
128 case XC_INTERNAL_ERROR:
129 return "Internal error";
130 case XC_INVALID_KERNEL:
131 return "Invalid kernel";
132 case XC_INVALID_PARAM:
133 return "Invalid configuration";
134 case XC_OUT_OF_MEMORY:
135 return "Out of memory";
136 }
137
138 return "Unknown error code";
139 }
140
/*
 * Core logging helper: format @fmt/@args and emit the result through
 * xtl_log() on @lg.  For XTL_ERROR and above, the formatted message and
 * @code are also captured in xch->last_error for later retrieval via
 * xc_get_last_error().  errno is preserved across the call.
 */
void xc_reportv(xc_interface *xch, xentoollog_logger *lg,
                xentoollog_level level, int code,
                const char *fmt, va_list args) {
    int saved_errno = errno; /* formatting/logging below may clobber errno */
    char msgbuf[XC_MAX_ERROR_MSG_LEN];
    char *msg;

    /* Strip newlines from messages.
     * XXX really the messages themselves should have the newlines removed.
     */
    char fmt_nonewline[512];
    int fmt_l;

    fmt_l = strlen(fmt);
    if (fmt_l && fmt[fmt_l-1]=='\n' && fmt_l < sizeof(fmt_nonewline)) {
        memcpy(fmt_nonewline, fmt, fmt_l-1);
        fmt_nonewline[fmt_l-1] = 0;
        fmt = fmt_nonewline;
    }

    /* Errors are formatted straight into the handle's last_error record;
     * lower severities only use a transient stack buffer. */
    if ( level >= XTL_ERROR ) {
        msg = xch->last_error.message;
        xch->last_error.code = code;
    } else {
        msg = msgbuf;
    }
    vsnprintf(msg, XC_MAX_ERROR_MSG_LEN-1, fmt, args);
    msg[XC_MAX_ERROR_MSG_LEN-1] = '\0';

    /* Append ": <description>" only when a specific error code was given. */
    xtl_log(lg, level, -1, "xc",
            "%s" "%s%s", msg,
            code?": ":"", code ? xc_error_code_to_desc(code) : "");

    errno = saved_errno;
}
176
/* Varargs front-end to xc_reportv(). */
void xc_report(xc_interface *xch, xentoollog_logger *lg,
               xentoollog_level level, int code, const char *fmt, ...) {
    va_list args;
    va_start(args,fmt);
    xc_reportv(xch,lg,level,code,fmt,args);
    va_end(args);
}
184
/*
 * Report an error on the handle's own logger at XTL_ERROR severity;
 * this also records the message/@code in xch->last_error.
 */
void xc_report_error(xc_interface *xch, int code, const char *fmt, ...)
{
    va_list args;
    va_start(args, fmt);
    xc_reportv(xch, xch->error_handler, XTL_ERROR, code, fmt, args);
    va_end(args);
}
192
/*
 * Install @doing as the prefix used by xc_report_progress_step() and
 * return the previously installed prefix (so callers can restore it).
 */
const char *xc_set_progress_prefix(xc_interface *xch, const char *doing)
{
    const char *previous = xch->currently_progress_reporting;

    xch->currently_progress_reporting = doing;
    return previous;
}
200
/* Emit a one-off progress message @doing (no done/total counts). */
void xc_report_progress_single(xc_interface *xch, const char *doing)
{
    assert(doing);
    xtl_progress(xch->error_handler, "xc", doing, 0, 0);
}
206
/*
 * Report progress (@done of @total) under the prefix previously
 * installed with xc_set_progress_prefix(), which must be non-NULL.
 */
void xc_report_progress_step(xc_interface *xch,
                             unsigned long done, unsigned long total)
{
    assert(xch->currently_progress_reporting);
    xtl_progress(xch->error_handler, "xc",
                 xch->currently_progress_reporting, done, total);
}
214
/*
 * Look up the types of @num page frames of domain @dom.  @arr carries
 * the frame numbers on entry and is overwritten with the corresponding
 * type information by XEN_DOMCTL_getpageframeinfo3, hence the
 * bidirectional bounce buffer.  Returns the domctl status, or -1 if
 * the bounce buffer could not be set up.
 */
int xc_get_pfn_type_batch(xc_interface *xch, uint32_t dom,
                          unsigned int num, xen_pfn_t *arr)
{
    int rc;
    DECLARE_DOMCTL;
    DECLARE_HYPERCALL_BOUNCE(arr, sizeof(*arr) * num, XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
    if ( xc_hypercall_bounce_pre(xch, arr) )
        return -1;
    domctl.cmd = XEN_DOMCTL_getpageframeinfo3;
    domctl.domain = dom;
    domctl.u.getpageframeinfo3.num = num;
    set_xen_guest_handle(domctl.u.getpageframeinfo3.array, arr);
    rc = do_domctl(xch, &domctl);
    xc_hypercall_bounce_post(xch, arr);
    return rc;
}
231
/*
 * Submit @nr_ops mmuext_op structures to Xen on behalf of domain @dom
 * via __HYPERVISOR_mmuext_op.  @op is bounced in both directions.
 * Returns the hypercall status (narrowed to int), or -1 if the bounce
 * buffer could not be set up.
 */
int xc_mmuext_op(
    xc_interface *xch,
    struct mmuext_op *op,
    unsigned int nr_ops,
    uint32_t dom)
{
    DECLARE_HYPERCALL_BOUNCE(op, nr_ops*sizeof(*op), XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
    long ret = -1;

    if ( xc_hypercall_bounce_pre(xch, op) )
    {
        PERROR("Could not bounce memory for mmuext op hypercall");
        goto out1;
    }

    ret = xencall4(xch->xcall, __HYPERVISOR_mmuext_op,
                   HYPERCALL_BUFFER_AS_ARG(op),
                   nr_ops, 0, dom);

    xc_hypercall_bounce_post(xch, op);

 out1:
    return ret;
}
256
/*
 * Push any queued mmu_update entries to Xen and reset the queue.
 * Returns 0 on success (including the trivially-empty queue) and 1 on
 * failure.  Note the queue index is reset even when the hypercall
 * fails, so failed updates are dropped rather than retried.
 */
static int flush_mmu_updates(xc_interface *xch, struct xc_mmu *mmu)
{
    int rc, err = 0;
    DECLARE_NAMED_HYPERCALL_BOUNCE(updates, mmu->updates, mmu->idx*sizeof(*mmu->updates), XC_HYPERCALL_BUFFER_BOUNCE_BOTH);

    if ( mmu->idx == 0 )
        return 0;

    if ( xc_hypercall_bounce_pre(xch, updates) )
    {
        PERROR("flush_mmu_updates: bounce buffer failed");
        err = 1;
        goto out;
    }

    rc = xencall4(xch->xcall, __HYPERVISOR_mmu_update,
                  HYPERCALL_BUFFER_AS_ARG(updates),
                  mmu->idx, 0, mmu->subject);
    if ( rc < 0 )
    {
        ERROR("Failure when submitting mmu updates");
        err = 1;
    }

    mmu->idx = 0;

    xc_hypercall_bounce_post(xch, updates);

 out:
    return err;
}
288
/*
 * Allocate an empty mmu-update queue targeting @subject.
 * Returns NULL if the allocation fails.
 */
struct xc_mmu *xc_alloc_mmu_updates(xc_interface *xch, unsigned int subject)
{
    struct xc_mmu *mmu = malloc(sizeof(*mmu));

    if ( mmu != NULL )
    {
        mmu->idx = 0;
        mmu->subject = subject;
    }
    return mmu;
}
298
/*
 * Append one (ptr, val) entry to the mmu-update queue, automatically
 * flushing once MAX_MMU_UPDATES entries have accumulated.  Returns 0,
 * or the result of flush_mmu_updates() when a flush was triggered.
 */
int xc_add_mmu_update(xc_interface *xch, struct xc_mmu *mmu,
                      unsigned long long ptr, unsigned long long val)
{
    mmu->updates[mmu->idx].ptr = ptr;
    mmu->updates[mmu->idx].val = val;

    if ( ++mmu->idx == MAX_MMU_UPDATES )
        return flush_mmu_updates(xch, mmu);

    return 0;
}
310
/* Public entry point: force out any queued mmu updates immediately. */
int xc_flush_mmu_updates(xc_interface *xch, struct xc_mmu *mmu)
{
    return flush_mmu_updates(xch, mmu);
}
315
/*
 * Issue a __HYPERVISOR_memory_op hypercall with sub-command @cmd.
 * @arg (@len bytes) is bounced in both directions, since memory ops
 * may both read and write their argument block.  Returns the hypercall
 * result, or -1 if the bounce buffer could not be set up.
 */
long do_memory_op(xc_interface *xch, int cmd, void *arg, size_t len)
{
    DECLARE_HYPERCALL_BOUNCE(arg, len, XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
    long ret = -1;

    if ( xc_hypercall_bounce_pre(xch, arg) )
    {
        PERROR("Could not bounce memory for XENMEM hypercall");
        goto out1;
    }

    ret = xencall2(xch->xcall, __HYPERVISOR_memory_op,
                   cmd, HYPERCALL_BUFFER_AS_ARG(arg));

    xc_hypercall_bounce_post(xch, arg);
 out1:
    return ret;
}
334
/*
 * Retrieve the highest machine frame number known to Xen.  On success
 * *max_mfn is filled in and 0 returned; on failure the (negative)
 * hypercall status is returned and *max_mfn is left untouched.
 */
int xc_maximum_ram_page(xc_interface *xch, unsigned long *max_mfn)
{
    long rc = do_memory_op(xch, XENMEM_maximum_ram_page, NULL, 0);

    if ( rc < 0 )
        return rc;

    *max_mfn = rc;
    return 0;
}
346
/*
 * Return the accumulated cpu_time of @vcpu in domain @domid as reported
 * by XEN_DOMCTL_getvcpuinfo, or -1 on failure.  (Units are presumably
 * nanoseconds -- confirm against the Xen public domctl headers.)
 */
long long xc_domain_get_cpu_usage(xc_interface *xch, uint32_t domid, int vcpu)
{
    DECLARE_DOMCTL;

    domctl.cmd = XEN_DOMCTL_getvcpuinfo;
    domctl.domain = domid;
    domctl.u.getvcpuinfo.vcpu = (uint16_t)vcpu;
    if ( (do_domctl(xch, &domctl) < 0) )
    {
        PERROR("Could not get info on domain");
        return -1;
    }
    return domctl.u.getvcpuinfo.cpu_time;
}
361
/*
 * Fetch the MFNs making up the machine-to-phys table into
 * @extent_start (room for @max_extents entries).  Returns 0 on
 * success; -1 if the bounce setup fails, the memory op fails, or Xen
 * returned a different number of extents than requested.
 */
int xc_machphys_mfn_list(xc_interface *xch,
                         unsigned long max_extents,
                         xen_pfn_t *extent_start)
{
    int rc;
    DECLARE_HYPERCALL_BOUNCE(extent_start, max_extents * sizeof(xen_pfn_t), XC_HYPERCALL_BUFFER_BOUNCE_OUT);
    struct xen_machphys_mfn_list xmml = {
        .max_extents = max_extents,
    };

    if ( xc_hypercall_bounce_pre(xch, extent_start) )
    {
        PERROR("Could not bounce memory for XENMEM_machphys_mfn_list hypercall");
        return -1;
    }

    set_xen_guest_handle(xmml.extent_start, extent_start);
    rc = do_memory_op(xch, XENMEM_machphys_mfn_list, &xmml, sizeof(xmml));
    if (rc || xmml.nr_extents != max_extents)
        rc = -1;
    else
        rc = 0;

    xc_hypercall_bounce_post(xch, extent_start);

    return rc;
}
389
/*
 * Retrieve up to @max_pfns entries of domain @domid's page list into
 * @pfn_buf via XEN_DOMCTL_getmemlist.  Returns the number of PFNs
 * actually written (which may be less than @max_pfns), or -1 on error.
 */
int xc_get_pfn_list(xc_interface *xch,
                    uint32_t domid,
                    uint64_t *pfn_buf,
                    unsigned long max_pfns)
{
    DECLARE_DOMCTL;
    DECLARE_HYPERCALL_BOUNCE(pfn_buf, max_pfns * sizeof(*pfn_buf), XC_HYPERCALL_BUFFER_BOUNCE_OUT);
    int ret;

    if ( xc_hypercall_bounce_pre(xch, pfn_buf) )
    {
        PERROR("xc_get_pfn_list: pfn_buf bounce failed");
        return -1;
    }

    domctl.cmd = XEN_DOMCTL_getmemlist;
    domctl.domain = domid;
    domctl.u.getmemlist.max_pfns = max_pfns;
    set_xen_guest_handle(domctl.u.getmemlist.buffer, pfn_buf);

    ret = do_domctl(xch, &domctl);

    xc_hypercall_bounce_post(xch, pfn_buf);

    return (ret < 0) ? -1 : domctl.u.getmemlist.num_pfns;
}
416
/*
 * Return the total page count of domain @domid, or -1 on error.  The
 * returned domid is re-checked because xc_domain_getinfo() can return
 * info for a different domain than the one requested.
 */
long xc_get_tot_pages(xc_interface *xch, uint32_t domid)
{
    xc_dominfo_t info;
    if ( (xc_domain_getinfo(xch, domid, 1, &info) != 1) ||
         (info.domid != domid) )
        return -1;
    return info.nr_pages;
}
425
/*
 * Copy one page of data from @src_page into guest frame @dst_pfn of
 * @domid by mapping the frame writable into our address space, then
 * invoke xc_domain_cacheflush() on that frame.  Returns 0 on success,
 * -1 if the frame could not be mapped.
 */
int xc_copy_to_domain_page(xc_interface *xch,
                           uint32_t domid,
                           unsigned long dst_pfn,
                           const char *src_page)
{
    void *vaddr = xc_map_foreign_range(
        xch, domid, PAGE_SIZE, PROT_WRITE, dst_pfn);
    if ( vaddr == NULL )
        return -1;
    memcpy(vaddr, src_page, PAGE_SIZE);
    munmap(vaddr, PAGE_SIZE);
    xc_domain_cacheflush(xch, domid, dst_pfn, 1);
    return 0;
}
440
/*
 * Zero @num contiguous guest frames starting at @dst_pfn of @domid by
 * mapping them writable into our address space, then invoke
 * xc_domain_cacheflush() on the range.  Returns 0 on success, -1 if
 * the range could not be mapped.
 */
int xc_clear_domain_pages(xc_interface *xch,
                          uint32_t domid,
                          unsigned long dst_pfn,
                          int num)
{
    /* Widen before multiplying: num * PAGE_SIZE would be computed in
     * (signed) int and overflow for large @num before reaching size_t. */
    size_t size = (size_t)num * PAGE_SIZE;
    void *vaddr = xc_map_foreign_range(
        xch, domid, size, PROT_WRITE, dst_pfn);
    if ( vaddr == NULL )
        return -1;
    memset(vaddr, 0, size);
    munmap(vaddr, size);
    xc_domain_cacheflush(xch, domid, dst_pfn, num);
    return 0;
}
456
/* Public entry point for issuing an arbitrary, caller-built domctl. */
int xc_domctl(xc_interface *xch, struct xen_domctl *domctl)
{
    return do_domctl(xch, domctl);
}
461
/* Public entry point for issuing an arbitrary, caller-built sysctl. */
int xc_sysctl(xc_interface *xch, struct xen_sysctl *sysctl)
{
    return do_sysctl(xch, sysctl);
}
466
/*
 * Issue a XENVER_* version hypercall.  The size of the caller-provided
 * @arg buffer depends on @cmd, so the command is decoded first and the
 * bounce buffer sized accordingly.  Returns the hypercall result,
 * -EINVAL for an unknown command, or -ENOMEM if the bounce buffer
 * could not be set up.
 */
int xc_version(xc_interface *xch, int cmd, void *arg)
{
    DECLARE_HYPERCALL_BOUNCE(arg, 0, XC_HYPERCALL_BUFFER_BOUNCE_OUT); /* Size unknown until cmd decoded */
    size_t sz;
    int rc;

    switch ( cmd )
    {
    case XENVER_version:
        sz = 0;
        break;
    case XENVER_extraversion:
        sz = sizeof(xen_extraversion_t);
        break;
    case XENVER_compile_info:
        sz = sizeof(xen_compile_info_t);
        break;
    case XENVER_capabilities:
        sz = sizeof(xen_capabilities_info_t);
        break;
    case XENVER_changeset:
        sz = sizeof(xen_changeset_info_t);
        break;
    case XENVER_platform_parameters:
        sz = sizeof(xen_platform_parameters_t);
        break;
    case XENVER_get_features:
        sz = sizeof(xen_feature_info_t);
        break;
    case XENVER_pagesize:
        sz = 0;
        break;
    case XENVER_guest_handle:
        sz = sizeof(xen_domain_handle_t);
        break;
    case XENVER_commandline:
        sz = sizeof(xen_commandline_t);
        break;
    case XENVER_build_id:
    {
        /* The caller supplies the available space in build_id->len, so
         * the buffer must be copied in as well as out. */
        xen_build_id_t *build_id = (xen_build_id_t *)arg;
        sz = sizeof(*build_id) + build_id->len;
        HYPERCALL_BOUNCE_SET_DIR(arg, XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
        break;
    }
    default:
        ERROR("xc_version: unknown command %d\n", cmd);
        return -EINVAL;
    }

    HYPERCALL_BOUNCE_SET_SIZE(arg, sz);

    /* Commands with no payload (sz == 0) need no bounce buffer. */
    if ( (sz != 0) && xc_hypercall_bounce_pre(xch, arg) )
    {
        PERROR("Could not bounce buffer for version hypercall");
        return -ENOMEM;
    }

    rc = do_xen_version(xch, cmd, HYPERCALL_BUFFER(arg));

    if ( sz != 0 )
        xc_hypercall_bounce_post(xch, arg);

    return rc;
}
532
/*
 * Exchange machine frame @mfn of @domid for a frame below 4GiB: the
 * original frame is released and a replacement allocated with
 * XENMEMF_address_bits(32).  Returns the new MFN, or 0 on failure --
 * note that if the second step fails the original frame has already
 * been given back to Xen.
 */
unsigned long xc_make_page_below_4G(
    xc_interface *xch, uint32_t domid, unsigned long mfn)
{
    xen_pfn_t old_mfn = mfn;
    xen_pfn_t new_mfn;

    if ( xc_domain_decrease_reservation_exact(
        xch, domid, 1, 0, &old_mfn) != 0 )
    {
        DPRINTF("xc_make_page_below_4G decrease failed. mfn=%lx\n",mfn);
        return 0;
    }

    if ( xc_domain_increase_reservation_exact(
        xch, domid, 1, 0, XENMEMF_address_bits(32), &new_mfn) != 0 )
    {
        DPRINTF("xc_make_page_below_4G increase failed. mfn=%lx\n",mfn);
        return 0;
    }

    return new_mfn;
}
555
556 static void
_xc_clean_errbuf(void * m)557 _xc_clean_errbuf(void * m)
558 {
559 free(m);
560 pthread_setspecific(errbuf_pkey, NULL);
561 }
562
563 static void
_xc_init_errbuf(void)564 _xc_init_errbuf(void)
565 {
566 pthread_key_create(&errbuf_pkey, _xc_clean_errbuf);
567 }
568
/*
 * Thread-safe strerror().  With XC_OPENFLAG_NON_REENTRANT the plain
 * strerror() result (possibly a shared static buffer) is returned
 * directly; otherwise the string is copied into a lazily-allocated
 * per-thread buffer, with a mutex serialising the non-reentrant
 * strerror() call itself.
 */
const char *xc_strerror(xc_interface *xch, int errcode)
{
    if ( xch->flags & XC_OPENFLAG_NON_REENTRANT )
    {
        return strerror(errcode);
    }
    else
    {
#define XS_BUFSIZE 32
        char *errbuf;
        static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
        char *strerror_str;

        pthread_once(&errbuf_pkey_once, _xc_init_errbuf);

        /* First use on this thread: allocate its private buffer. */
        errbuf = pthread_getspecific(errbuf_pkey);
        if (errbuf == NULL) {
            errbuf = malloc(XS_BUFSIZE);
            if ( errbuf == NULL )
                return "(failed to allocate errbuf)";
            pthread_setspecific(errbuf_pkey, errbuf);
        }

        /*
         * Thread-unsafe strerror() is protected by a local mutex. We copy the
         * string to a thread-private buffer before releasing the mutex.
         */
        pthread_mutex_lock(&mutex);
        strerror_str = strerror(errcode);
        strncpy(errbuf, strerror_str, XS_BUFSIZE);
        errbuf[XS_BUFSIZE-1] = '\0';
        pthread_mutex_unlock(&mutex);

        return errbuf;
    }
}
605
/*
 * Serialise a bitmap held as 64-bit words (@lp) into a byte array
 * (@bp), least-significant byte first, covering @nbits bits rounded
 * up to whole bytes.
 */
void bitmap_64_to_byte(uint8_t *bp, const uint64_t *lp, int nbits)
{
    int word, byte, out;

    for ( word = 0, out = 0; nbits > 0; word++, out += sizeof(uint64_t) )
    {
        uint64_t chunk = lp[word];

        for ( byte = 0; byte < (int)sizeof(chunk) && nbits > 0; byte++ )
        {
            bp[out + byte] = (uint8_t)(chunk & 0xff);
            chunk >>= 8;
            nbits -= 8;
        }
    }
}
620
/*
 * Deserialise a byte array (@bp, least-significant byte first) into a
 * bitmap of 64-bit words (@lp), covering @nbits bits.  A trailing
 * partial word is zero-filled in its unused high bytes.
 */
void bitmap_byte_to_64(uint64_t *lp, const uint8_t *bp, int nbits)
{
    int word, byte, in;

    for ( word = 0, in = 0; nbits > 0; word++, in += sizeof(uint64_t) )
    {
        uint64_t acc = 0;

        for ( byte = 0; byte < (int)sizeof(acc) && nbits > 0; byte++ )
        {
            acc |= (uint64_t)bp[in + byte] << (byte * 8);
            nbits -= 8;
        }
        lp[word] = acc;
    }
}
635
/*
 * Read exactly @size bytes from @fd into @data, retrying on EINTR.
 * Returns 0 on success; -1 on error or premature EOF (in the EOF case
 * errno is set to 0 to distinguish it from a real read error).
 */
int read_exact(int fd, void *data, size_t size)
{
    unsigned char *buf = data;
    size_t done = 0;

    while ( done < size )
    {
        ssize_t n = read(fd, buf + done, size - done);

        if ( n == -1 && errno == EINTR )
            continue;               /* interrupted: retry */
        if ( n == 0 )
            errno = 0;              /* EOF, not an I/O error */
        if ( n <= 0 )
            return -1;
        done += n;
    }

    return 0;
}
655
/*
 * Write exactly @size bytes from @data to @fd, retrying on EINTR and
 * after short writes.  Returns 0 on success, -1 on error.
 */
int write_exact(int fd, const void *data, size_t size)
{
    const unsigned char *buf = data;
    size_t done = 0;

    while ( done < size )
    {
        ssize_t n = write(fd, buf + done, size - done);

        if ( n == -1 && errno == EINTR )
            continue;               /* interrupted: retry */
        if ( n <= 0 )
            return -1;
        done += n;
    }

    return 0;
}
673
674 #if defined(__MINIOS__)
675 /*
676 * MiniOS's libc doesn't know about writev(). Implement it as multiple write()s.
677 */
/*
 * MiniOS fallback: its libc has no writev(), so emit each vector in
 * turn with write_exact().  Returns 0 on success, or the first
 * failing write_exact() status.
 */
int writev_exact(int fd, const struct iovec *iov, int iovcnt)
{
    int i;

    for ( i = 0; i < iovcnt; i++ )
    {
        int rc = write_exact(fd, iov[i].iov_base, iov[i].iov_len);

        if ( rc )
            return rc;
    }

    return 0;
}
691 #else
/*
 * Write the entire contents of @iov (@iovcnt elements) to @fd,
 * retrying on EINTR and resubmitting after partial writes.  Returns 0
 * on success, -1 with errno set on failure.  On the first partial
 * write the iov array is copied (once) so the in-progress element can
 * be adjusted; the caller's array is never modified.
 */
int writev_exact(int fd, const struct iovec *iov, int iovcnt)
{
    struct iovec *local_iov = NULL;
    int rc = 0, iov_idx = 0, saved_errno = 0;
    ssize_t len;

    while ( iov_idx < iovcnt )
    {
        /*
         * Skip over iov[] entries with 0 length.
         *
         * This is needed to cover the case where we took a partial write and
         * all remaining vectors are of 0 length. In such a case, the results
         * from writev() are indistinguishable from EOF.
         */
        while ( iov[iov_idx].iov_len == 0 )
            if ( ++iov_idx == iovcnt )
                goto out;

        len = writev(fd, &iov[iov_idx], min(iovcnt - iov_idx, IOV_MAX));
        saved_errno = errno;   /* preserved for restoration at "out" */

        if ( (len == -1) && (errno == EINTR) )
            continue;
        if ( len <= 0 )
        {
            rc = -1;
            goto out;
        }

        /* Check iov[] to see whether we had a partial or complete write. */
        while ( (len > 0) && (iov_idx < iovcnt) )
        {
            if ( len >= iov[iov_idx].iov_len )
                len -= iov[iov_idx++].iov_len;
            else
            {
                /* Partial write of iov[iov_idx]. Copy iov so we can adjust
                 * element iov_idx and resubmit the rest. */
                if ( !local_iov )
                {
                    local_iov = malloc(iovcnt * sizeof(*iov));
                    if ( !local_iov )
                    {
                        saved_errno = ENOMEM;
                        goto out;
                    }

                    iov = memcpy(local_iov, iov, iovcnt * sizeof(*iov));
                }

                local_iov[iov_idx].iov_base += len;
                local_iov[iov_idx].iov_len  -= len;
                break;
            }
        }
    }

    saved_errno = 0;

 out:
    free(local_iov);
    errno = saved_errno;
    return rc;
}
757 #endif
758
/* Find-first-set for 8 bits: 1-based index of the lowest set bit, 0 if none. */
int xc_ffs8(uint8_t x)
{
    int bit;

    for ( bit = 1; bit <= 8; bit++, x >>= 1 )
        if ( x & 1 )
            return bit;
    return 0;
}
767
/* Find-first-set for 16 bits: 1-based index of the lowest set bit, 0 if none. */
int xc_ffs16(uint16_t x)
{
    int bit;

    for ( bit = 1; bit <= 16; bit++, x >>= 1 )
        if ( x & 1 )
            return bit;
    return 0;
}
773
/* Find-first-set for 32 bits: 1-based index of the lowest set bit, 0 if none. */
int xc_ffs32(uint32_t x)
{
    int bit;

    for ( bit = 1; bit <= 32; bit++, x >>= 1 )
        if ( x & 1 )
            return bit;
    return 0;
}
779
/* Find-first-set for 64 bits: 1-based index of the lowest set bit, 0 if none. */
int xc_ffs64(uint64_t x)
{
    int bit;

    for ( bit = 1; bit <= 64; bit++, x >>= 1 )
        if ( x & 1 )
            return bit;
    return 0;
}
785
786 /*
787 * Local variables:
788 * mode: C
789 * c-file-style: "BSD"
790 * c-basic-offset: 4
791 * tab-width: 4
792 * indent-tabs-mode: nil
793 * End:
794 */
795