1 /******************************************************************************
2 * xc_private.c
3 *
4 * Helper functions for the rest of the library.
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation;
9 * version 2.1 of the License.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; If not, see <http://www.gnu.org/licenses/>.
18 */
19
20 #include "xc_private.h"
21 #include <stdarg.h>
22 #include <stdlib.h>
23 #include <unistd.h>
24 #include <pthread.h>
25 #include <assert.h>
26
/*
 * Open a new libxc interface handle.
 *
 * @logger / @dombuild_logger may be NULL; if @logger is NULL a default
 * stderr logger is created and owned (and later destroyed) by the handle.
 * @open_flags is a mask of XC_OPENFLAG_* values.
 *
 * Returns the new handle, or NULL on failure.
 */
struct xc_interface_core *xc_interface_open(xentoollog_logger *logger,
                                            xentoollog_logger *dombuild_logger,
                                            unsigned open_flags)
{
    /*
     * Build the handle in a zero-initialised stack buffer first, so the
     * shared error path below can run even before (or after a failed)
     * heap allocation.
     */
    struct xc_interface_core xch_buf = { 0 }, *xch = &xch_buf;

    xch->flags = open_flags;
    xch->dombuild_logger_file = 0;
    xc_clear_last_error(xch);

    /* The *_tofree fields track loggers the handle owns and must destroy. */
    xch->error_handler = logger; xch->error_handler_tofree = 0;
    xch->dombuild_logger = dombuild_logger; xch->dombuild_logger_tofree = 0;

    if (!xch->error_handler) {
        /* No logger supplied: create a default stderr logger that we own. */
        xch->error_handler = xch->error_handler_tofree =
            (xentoollog_logger*)
            xtl_createlogger_stdiostream(stderr, XTL_PROGRESS, 0);
        if (!xch->error_handler)
            goto err;
    }

    /* Move the handle to the heap now that logging is available. */
    xch = malloc(sizeof(*xch));
    if (!xch) {
        /* Point back at the stack copy so PERROR and the error path work. */
        xch = &xch_buf;
        PERROR("Could not allocate new xc_interface struct");
        goto err;
    }
    *xch = xch_buf;

    if (open_flags & XC_OPENFLAG_DUMMY)
        return xch; /* We are done */

    /* Open the lower-level library handles used by the rest of libxc. */
    xch->xcall = xencall_open(xch->error_handler,
                              open_flags & XC_OPENFLAG_NON_REENTRANT ? XENCALL_OPENFLAG_NON_REENTRANT : 0U);
    if ( xch->xcall == NULL )
        goto err;

    xch->fmem = xenforeignmemory_open(xch->error_handler, 0);
    if ( xch->fmem == NULL )
        goto err;

    xch->dmod = xendevicemodel_open(xch->error_handler, 0);
    if ( xch->dmod == NULL )
        goto err;

    return xch;

 err:
    /*
     * NOTE(review): members never opened are still NULL from xch_buf's
     * zero-init; this presumably relies on the close/destroy helpers
     * tolerating NULL handles — confirm against their contracts.
     */
    xenforeignmemory_close(xch->fmem);
    xencall_close(xch->xcall);
    xtl_logger_destroy(xch->error_handler_tofree);
    if (xch != &xch_buf) free(xch);
    return NULL;
}
81
/*
 * Close a libxc interface handle and release all associated resources.
 *
 * Returns 0 on success (including for a NULL handle), otherwise the
 * first non-zero error returned by one of the underlying close calls.
 *
 * Fix: the original overwrote rc with each successive close, so an
 * early failure could be masked by a later success and the function
 * would wrongly report 0.  We now remember the first failure while
 * still attempting every close.
 */
int xc_interface_close(xc_interface *xch)
{
    int rc = 0, err;

    if (!xch)
        return 0;

    err = xencall_close(xch->xcall);
    if (err) {
        PERROR("Could not close xencall interface");
        if (!rc) rc = err;
    }

    err = xenforeignmemory_close(xch->fmem);
    if (err) {
        PERROR("Could not close foreign memory interface");
        if (!rc) rc = err;
    }

    err = xendevicemodel_close(xch->dmod);
    if (err) {
        PERROR("Could not close device model interface");
        if (!rc) rc = err;
    }

    /* Destroy only loggers we own; xtl_logger_destroy(NULL) is a no-op. */
    xtl_logger_destroy(xch->dombuild_logger_tofree);
    xtl_logger_destroy(xch->error_handler_tofree);

    free(xch);
    return rc;
}
104
/* Accessor: the underlying xencall handle used for raw hypercalls. */
xencall_handle *xc_interface_xcall_handle(xc_interface *xch)
{
    return xch->xcall;
}
109
/* Accessor: the underlying foreign-memory handle. */
struct xenforeignmemory_handle *xc_interface_fmem_handle(xc_interface *xch)
{
    return xch->fmem;
}
114
/* Accessor: the underlying device-model handle. */
struct xendevicemodel_handle *xc_interface_dmod_handle(xc_interface *xch)
{
    return xch->dmod;
}
119
/* Per-thread buffer key for xc_strerror(); created once via pthread_once. */
static pthread_key_t errbuf_pkey;
static pthread_once_t errbuf_pkey_once = PTHREAD_ONCE_INIT;
122
/*
 * Return the last error recorded on this handle (by xc_reportv() at
 * XTL_ERROR level or above).  The pointer refers to storage inside the
 * handle; it is valid until the next error or xc_clear_last_error().
 */
const xc_error *xc_get_last_error(xc_interface *xch)
{
    return &xch->last_error;
}
127
/* Reset the handle's recorded error to "no error" with an empty message. */
void xc_clear_last_error(xc_interface *xch)
{
    xch->last_error.message[0] = '\0';
    xch->last_error.code = XC_ERROR_NONE;
}
133
xc_error_code_to_desc(int code)134 const char *xc_error_code_to_desc(int code)
135 {
136 /* Sync to members of xc_error_code enumeration in xenctrl.h */
137 switch ( code )
138 {
139 case XC_ERROR_NONE:
140 return "No error details";
141 case XC_INTERNAL_ERROR:
142 return "Internal error";
143 case XC_INVALID_KERNEL:
144 return "Invalid kernel";
145 case XC_INVALID_PARAM:
146 return "Invalid configuration";
147 case XC_OUT_OF_MEMORY:
148 return "Out of memory";
149 }
150
151 return "Unknown error code";
152 }
153
/*
 * Format and emit a log message (va_list variant).
 *
 * At XTL_ERROR level or above the formatted message and @code are also
 * recorded as @xch's last error, retrievable via xc_get_last_error().
 * Lower levels only log.  errno is preserved across the call so
 * PERROR-style wrappers can still report it afterwards.
 */
void xc_reportv(xc_interface *xch, xentoollog_logger *lg,
                xentoollog_level level, int code,
                const char *fmt, va_list args) {
    int saved_errno = errno;
    char msgbuf[XC_MAX_ERROR_MSG_LEN];
    char *msg;

    /* Strip newlines from messages.
     * XXX really the messages themselves should have the newlines removed.
     */
    char fmt_nonewline[512];
    int fmt_l;

    fmt_l = strlen(fmt);
    /* Only strip when the copy fits; oversized formats are logged as-is. */
    if (fmt_l && fmt[fmt_l-1]=='\n' && fmt_l < sizeof(fmt_nonewline)) {
        memcpy(fmt_nonewline, fmt, fmt_l-1);
        fmt_nonewline[fmt_l-1] = 0;
        fmt = fmt_nonewline;
    }

    if ( level >= XTL_ERROR ) {
        /* Errors are formatted straight into the persistent last_error. */
        msg = xch->last_error.message;
        xch->last_error.code = code;
    } else {
        msg = msgbuf;
    }
    vsnprintf(msg, XC_MAX_ERROR_MSG_LEN-1, fmt, args);
    msg[XC_MAX_ERROR_MSG_LEN-1] = '\0';

    /* Append the symbolic description of @code, if any. */
    xtl_log(lg, level, -1, "xc",
            "%s" "%s%s", msg,
            code?": ":"", code ? xc_error_code_to_desc(code) : "");

    errno = saved_errno;
}
189
/* Varargs front-end for xc_reportv(). */
void xc_report(xc_interface *xch, xentoollog_logger *lg,
               xentoollog_level level, int code, const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    xc_reportv(xch, lg, level, code, fmt, ap);
    va_end(ap);
}
197
/* Report an error on @xch's own logger at XTL_ERROR (records last_error). */
void xc_report_error(xc_interface *xch, int code, const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    xc_reportv(xch, xch->error_handler, XTL_ERROR, code, fmt, ap);
    va_end(ap);
}
205
/*
 * Install a new progress-report prefix on the handle and hand back the
 * previous one so callers can restore it afterwards.
 */
const char *xc_set_progress_prefix(xc_interface *xch, const char *doing)
{
    const char *previous = xch->currently_progress_reporting;

    xch->currently_progress_reporting = doing;

    return previous;
}
213
/* Emit a one-shot progress message (no done/total step counts). */
void xc_report_progress_single(xc_interface *xch, const char *doing)
{
    assert(doing);
    xtl_progress(xch->error_handler, "xc", doing, 0, 0);
}
219
/*
 * Emit a @done/@total progress step under the prefix previously set via
 * xc_set_progress_prefix() (which must have been called, hence the assert).
 */
void xc_report_progress_step(xc_interface *xch,
                             unsigned long done, unsigned long total)
{
    assert(xch->currently_progress_reporting);
    xtl_progress(xch->error_handler, "xc",
                 xch->currently_progress_reporting, done, total);
}
227
/*
 * Query the page types of @num frames for domain @dom.  @arr holds the
 * frame numbers on entry and is overwritten with the type information on
 * return (hence the BOUNCE_BOTH).  Returns the domctl result, or -1 if
 * the bounce buffer could not be set up.
 */
int xc_get_pfn_type_batch(xc_interface *xch, uint32_t dom,
                          unsigned int num, xen_pfn_t *arr)
{
    int rc;
    struct xen_domctl domctl = {};
    DECLARE_HYPERCALL_BOUNCE(arr, sizeof(*arr) * num, XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
    if ( xc_hypercall_bounce_pre(xch, arr) )
        return -1;
    domctl.cmd = XEN_DOMCTL_getpageframeinfo3;
    domctl.domain = dom;
    domctl.u.getpageframeinfo3.num = num;
    set_xen_guest_handle(domctl.u.getpageframeinfo3.array, arr);
    /* Retried on EFAULT; see do_domctl_retry_efault() for the rationale. */
    rc = do_domctl_retry_efault(xch, &domctl);
    xc_hypercall_bounce_post(xch, arr);
    return rc;
}
244
/*
 * Issue @nr_ops MMU extended operations on behalf of domain @dom.
 * @op is bounced both ways since some sub-ops write back results.
 * Returns the hypercall result, or -1 if bouncing failed.
 */
int xc_mmuext_op(
    xc_interface *xch,
    struct mmuext_op *op,
    unsigned int nr_ops,
    uint32_t dom)
{
    DECLARE_HYPERCALL_BOUNCE(op, nr_ops*sizeof(*op), XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
    long ret = -1;

    if ( xc_hypercall_bounce_pre(xch, op) )
    {
        PERROR("Could not bounce memory for mmuext op hypercall");
        goto out1;
    }

    ret = xencall4(xch->xcall, __HYPERVISOR_mmuext_op,
                   HYPERCALL_BUFFER_AS_ARG(op),
                   nr_ops, 0, dom);

    xc_hypercall_bounce_post(xch, op);

out1:
    return ret;
}
269
/*
 * Submit the batched mmu_update requests accumulated in @mmu to the
 * hypervisor and reset the batch.  A no-op for an empty batch.
 * Returns 0 on success, 1 on error (note: 1, not -1).
 */
static int flush_mmu_updates(xc_interface *xch, struct xc_mmu *mmu)
{
    int rc, err = 0;
    DECLARE_NAMED_HYPERCALL_BOUNCE(updates, mmu->updates, mmu->idx*sizeof(*mmu->updates), XC_HYPERCALL_BUFFER_BOUNCE_BOTH);

    if ( mmu->idx == 0 )
        return 0;

    if ( xc_hypercall_bounce_pre(xch, updates) )
    {
        PERROR("flush_mmu_updates: bounce buffer failed");
        err = 1;
        goto out;
    }

    rc = xencall4(xch->xcall, __HYPERVISOR_mmu_update,
                  HYPERCALL_BUFFER_AS_ARG(updates),
                  mmu->idx, 0, mmu->subject);
    if ( rc < 0 )
    {
        ERROR("Failure when submitting mmu updates");
        err = 1;
    }

    /* The batch is consumed even on failure; callers start a fresh one. */
    mmu->idx = 0;

    xc_hypercall_bounce_post(xch, updates);

out:
    return err;
}
301
/*
 * Allocate an empty mmu-update batch targeting @subject.
 * Returns NULL on allocation failure.  Caller frees with free().
 */
struct xc_mmu *xc_alloc_mmu_updates(xc_interface *xch, unsigned int subject)
{
    struct xc_mmu *mmu = malloc(sizeof(*mmu));

    if ( mmu != NULL )
    {
        mmu->idx = 0;
        mmu->subject = subject;
    }

    return mmu;
}
311
/*
 * Append one (ptr, val) mmu_update to the batch; automatically flushes
 * to the hypervisor when the batch reaches MAX_MMU_UPDATES entries.
 * Returns 0 on success, or flush_mmu_updates()'s error value.
 */
int xc_add_mmu_update(xc_interface *xch, struct xc_mmu *mmu,
                      unsigned long long ptr, unsigned long long val)
{
    unsigned int slot = mmu->idx;

    mmu->updates[slot].ptr = ptr;
    mmu->updates[slot].val = val;
    mmu->idx = slot + 1;

    if ( mmu->idx != MAX_MMU_UPDATES )
        return 0;

    return flush_mmu_updates(xch, mmu);
}
323
/* Public wrapper: flush any batched mmu updates immediately. */
int xc_flush_mmu_updates(xc_interface *xch, struct xc_mmu *mmu)
{
    return flush_mmu_updates(xch, mmu);
}
328
/*
 * Issue a XENMEM_* hypercall.  @arg (@len bytes) is bounced in both
 * directions; pass NULL/0 for sub-ops with no argument structure.
 * Returns the hypercall result (which may exceed int range), or -1
 * with errno set on failure.
 *
 * Fix: the inner bounce failure previously jumped straight to out1,
 * skipping xc_hypercall_bounce_post(xch, arg) and leaking the already
 * established bounce mapping for @arg.  It now unwinds via "out".
 */
long xc_memory_op(xc_interface *xch, unsigned int cmd, void *arg, size_t len)
{
    DECLARE_HYPERCALL_BOUNCE(arg, len, XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
    long ret = -1;

    if ( xc_hypercall_bounce_pre(xch, arg) )
    {
        PERROR("Could not bounce memory for XENMEM hypercall");
        goto out1;
    }

#if defined(__linux__) || defined(__sun__)
    /*
     * Some sub-ops return values which don't fit in "int". On platforms
     * without a specific hypercall return value field in the privcmd
     * interface structure, issue the request as a single-element multicall,
     * to be able to capture the full return value.
     */
    if ( sizeof(long) > sizeof(int) )
    {
        multicall_entry_t multicall = {
            .op = __HYPERVISOR_memory_op,
            .args[0] = cmd,
            .args[1] = HYPERCALL_BUFFER_AS_ARG(arg),
        }, *call = &multicall;
        DECLARE_HYPERCALL_BOUNCE(call, sizeof(*call),
                                 XC_HYPERCALL_BUFFER_BOUNCE_BOTH);

        if ( xc_hypercall_bounce_pre(xch, call) )
        {
            PERROR("Could not bounce buffer for memory_op hypercall");
            goto out; /* still unbounce @arg */
        }

        ret = do_multicall_op(xch, HYPERCALL_BUFFER(call), 1);

        xc_hypercall_bounce_post(xch, call);

        if ( !ret )
        {
            ret = multicall.result;
            /* Results in the top 4095 values of the range encode -errno. */
            if ( multicall.result > ~0xfffUL )
            {
                errno = -ret;
                ret = -1;
            }
        }
    }
    else
#endif
        ret = xencall2L(xch->xcall, __HYPERVISOR_memory_op,
                        cmd, HYPERCALL_BUFFER_AS_ARG(arg));

 out:
    xc_hypercall_bounce_post(xch, arg);
 out1:
    return ret;
}
386
/*
 * Fetch the highest machine frame number into *max_mfn.
 * Returns 0 on success, negative on failure (*max_mfn untouched).
 */
int xc_maximum_ram_page(xc_interface *xch, unsigned long *max_mfn)
{
    long rc = xc_memory_op(xch, XENMEM_maximum_ram_page, NULL, 0);

    if ( rc < 0 )
        return rc;

    *max_mfn = rc;

    return 0;
}
398
/*
 * Return the accumulated CPU time of @vcpu in domain @domid, or -1 on
 * failure (with the error logged via PERROR).
 */
long long xc_domain_get_cpu_usage(xc_interface *xch, uint32_t domid, int vcpu)
{
    struct xen_domctl domctl = {
        .cmd = XEN_DOMCTL_getvcpuinfo,
        .domain = domid,
        .u.getvcpuinfo.vcpu = (uint16_t)vcpu,
    };

    if ( do_domctl(xch, &domctl) < 0 )
    {
        PERROR("Could not get info on domain");
        return -1;
    }

    return domctl.u.getvcpuinfo.cpu_time;
}
413
/*
 * Read up to @max_extents entries of the machine-to-physical frame list
 * into @extent_start.  Returns 0 only when the hypervisor supplied
 * exactly @max_extents entries; -1 otherwise.
 */
int xc_machphys_mfn_list(xc_interface *xch,
                         unsigned long max_extents,
                         xen_pfn_t *extent_start)
{
    int rc;
    DECLARE_HYPERCALL_BOUNCE(extent_start, max_extents * sizeof(xen_pfn_t), XC_HYPERCALL_BUFFER_BOUNCE_OUT);
    struct xen_machphys_mfn_list xmml = {
        .max_extents = max_extents,
    };

    if ( xc_hypercall_bounce_pre(xch, extent_start) )
    {
        PERROR("Could not bounce memory for XENMEM_machphys_mfn_list hypercall");
        return -1;
    }

    set_xen_guest_handle(xmml.extent_start, extent_start);
    /* xmml itself is bounced internally by xc_memory_op(). */
    rc = xc_memory_op(xch, XENMEM_machphys_mfn_list, &xmml, sizeof(xmml));
    /* A short reply counts as failure: callers expect a full table. */
    if (rc || xmml.nr_extents != max_extents)
        rc = -1;
    else
        rc = 0;

    xc_hypercall_bounce_post(xch, extent_start);

    return rc;
}
441
/*
 * Return the total page count of domain @domid, or -1 if the domain
 * info could not be fetched.
 */
long xc_get_tot_pages(xc_interface *xch, uint32_t domid)
{
    xc_domaininfo_t info;

    return xc_domain_getinfo_single(xch, domid, &info) < 0
           ? -1
           : info.tot_pages;
}
451
/*
 * Copy one page of data (@src_page, PAGE_SIZE bytes) into guest frame
 * @dst_pfn of domain @domid by mapping the frame writable.
 * Returns 0 on success, -1 if the frame could not be mapped.
 */
int xc_copy_to_domain_page(xc_interface *xch,
                           uint32_t domid,
                           unsigned long dst_pfn,
                           const char *src_page)
{
    void *vaddr = xc_map_foreign_range(
        xch, domid, PAGE_SIZE, PROT_WRITE, dst_pfn);
    if ( vaddr == NULL )
        return -1;
    memcpy(vaddr, src_page, PAGE_SIZE);
    munmap(vaddr, PAGE_SIZE);
    /* Cache maintenance so the data is visible to the guest where the
     * architecture requires it. */
    xc_domain_cacheflush(xch, domid, dst_pfn, 1);
    return 0;
}
466
/*
 * Zero @num consecutive guest frames starting at @dst_pfn by mapping
 * them writable and clearing them.  Returns 0 on success, -1 on failure
 * (negative @num, or the frames could not be mapped).
 *
 * Fix: "num * PAGE_SIZE" was evaluated in int before widening to
 * size_t, so a large @num could overflow; the multiplication is now
 * done in size_t, and negative counts are rejected up front.
 */
int xc_clear_domain_pages(xc_interface *xch,
                          uint32_t domid,
                          unsigned long dst_pfn,
                          int num)
{
    size_t size;
    void *vaddr;

    if ( num < 0 )
        return -1;

    size = (size_t)num * PAGE_SIZE;
    vaddr = xc_map_foreign_range(
        xch, domid, size, PROT_WRITE, dst_pfn);
    if ( vaddr == NULL )
        return -1;
    memset(vaddr, 0, size);
    munmap(vaddr, size);
    /* Cache maintenance so the zeroes are visible to the guest where the
     * architecture requires it. */
    xc_domain_cacheflush(xch, domid, dst_pfn, num);
    return 0;
}
482
/* Public pass-through to the internal domctl helper. */
int xc_domctl(xc_interface *xch, struct xen_domctl *domctl)
{
    return do_domctl(xch, domctl);
}
487
/* Public pass-through to the internal sysctl helper. */
int xc_sysctl(xc_interface *xch, struct xen_sysctl *sysctl)
{
    return do_sysctl(xch, sysctl);
}
492
/*
 * Exchange @mfn for a fresh frame below 4GiB: the original frame is
 * returned to Xen, then a replacement restricted to 32 address bits is
 * allocated.  Returns the new mfn, or 0 on failure.
 *
 * NOTE(review): if the increase step fails after the decrease succeeded,
 * the domain is left one page short — presumably acceptable for the
 * callers of this helper; confirm.
 */
unsigned long xc_make_page_below_4G(
    xc_interface *xch, uint32_t domid, unsigned long mfn)
{
    xen_pfn_t old_mfn = mfn;
    xen_pfn_t new_mfn;

    if ( xc_domain_decrease_reservation_exact(
        xch, domid, 1, 0, &old_mfn) != 0 )
    {
        DPRINTF("xc_make_page_below_4G decrease failed. mfn=%lx\n",mfn);
        return 0;
    }

    if ( xc_domain_increase_reservation_exact(
        xch, domid, 1, 0, XENMEMF_address_bits(32), &new_mfn) != 0 )
    {
        DPRINTF("xc_make_page_below_4G increase failed. mfn=%lx\n",mfn);
        return 0;
    }

    return new_mfn;
}
515
516 static void
_xc_clean_errbuf(void * m)517 _xc_clean_errbuf(void * m)
518 {
519 free(m);
520 pthread_setspecific(errbuf_pkey, NULL);
521 }
522
523 static void
_xc_init_errbuf(void)524 _xc_init_errbuf(void)
525 {
526 pthread_key_create(&errbuf_pkey, _xc_clean_errbuf);
527 }
528
/*
 * Thread-safe strerror() replacement.
 *
 * With XC_OPENFLAG_NON_REENTRANT the caller has promised single-threaded
 * use, so plain strerror() suffices.  Otherwise the result is copied
 * under a mutex into a per-thread buffer (lazily allocated and freed by
 * the TLS destructor), so the returned pointer stays valid per thread.
 */
const char *xc_strerror(xc_interface *xch, int errcode)
{
    if ( xch->flags & XC_OPENFLAG_NON_REENTRANT )
    {
        return strerror(errcode);
    }
    else
    {
#define XS_BUFSIZE 32
        char *errbuf;
        static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
        char *strerror_str;

        pthread_once(&errbuf_pkey_once, _xc_init_errbuf);

        errbuf = pthread_getspecific(errbuf_pkey);
        if (errbuf == NULL) {
            errbuf = malloc(XS_BUFSIZE);
            if ( errbuf == NULL )
                return "(failed to allocate errbuf)";
            pthread_setspecific(errbuf_pkey, errbuf);
        }

        /*
         * Thread-unsafe strerror() is protected by a local mutex. We copy the
         * string to a thread-private buffer before releasing the mutex.
         * (strncpy may not terminate; the explicit NUL below handles that.)
         */
        pthread_mutex_lock(&mutex);
        strerror_str = strerror(errcode);
        strncpy(errbuf, strerror_str, XS_BUFSIZE);
        errbuf[XS_BUFSIZE-1] = '\0';
        pthread_mutex_unlock(&mutex);

        return errbuf;
    }
}
565
/*
 * Serialise a bitmap of @nbits bits from 64-bit words (@lp) into bytes
 * (@bp), least-significant byte first.  @bp must hold at least
 * (nbits + 7) / 8 bytes.
 */
void bitmap_64_to_byte(uint8_t *bp, const uint64_t *lp, int nbits)
{
    int word, base;

    for ( word = 0, base = 0; nbits > 0; word++, base += sizeof(uint64_t) )
    {
        uint64_t chunk = lp[word];
        int k;

        for ( k = 0; k < (int)sizeof(chunk) && nbits > 0; k++ )
        {
            bp[base + k] = (uint8_t)chunk;
            chunk >>= 8;
            nbits -= 8;
        }
    }
}
580
/*
 * Deserialise a bitmap of @nbits bits from bytes (@bp, LSB first) into
 * 64-bit words (@lp).  Unused high bytes of the final word are zeroed.
 */
void bitmap_byte_to_64(uint64_t *lp, const uint8_t *bp, int nbits)
{
    int word, base;

    for ( word = 0, base = 0; nbits > 0; word++, base += sizeof(uint64_t) )
    {
        uint64_t acc = 0;
        int k;

        for ( k = 0; k < (int)sizeof(acc) && nbits > 0; k++ )
        {
            acc |= (uint64_t)bp[base + k] << (k * 8);
            nbits -= 8;
        }

        lp[word] = acc;
    }
}
595
/*
 * Read exactly @size bytes from @fd into @data, retrying on EINTR.
 * Returns 0 on success, -1 on error; a premature EOF returns -1 with
 * errno set to 0 so callers can distinguish it from a real error.
 */
int read_exact(int fd, void *data, size_t size)
{
    unsigned char *p = data;
    size_t remaining = size;

    while ( remaining > 0 )
    {
        ssize_t n = read(fd, p, remaining);

        if ( n == -1 && errno == EINTR )
            continue;
        if ( n == 0 )
            errno = 0;          /* EOF marker */
        if ( n <= 0 )
            return -1;

        p += n;
        remaining -= n;
    }

    return 0;
}
615
/*
 * Write exactly @size bytes from @data to @fd, retrying on EINTR and
 * continuing after short writes.  Returns 0 on success, -1 on error.
 */
int write_exact(int fd, const void *data, size_t size)
{
    const unsigned char *p = data;
    size_t remaining = size;

    while ( remaining > 0 )
    {
        ssize_t n = write(fd, p, remaining);

        if ( n == -1 && errno == EINTR )
            continue;
        if ( n <= 0 )
            return -1;

        p += n;
        remaining -= n;
    }

    return 0;
}
633
634 #if defined(__MINIOS__)
635 /*
636 * MiniOS's libc doesn't know about writev(). Implement it as multiple write()s.
637 */
/*
 * MiniOS fallback: no writev() in its libc, so emit each iovec element
 * with write_exact().  Returns 0 on success, or the first failure.
 */
int writev_exact(int fd, const struct iovec *iov, int iovcnt)
{
    int i;

    for ( i = 0; i < iovcnt; i++ )
    {
        int rc = write_exact(fd, iov[i].iov_base, iov[i].iov_len);

        if ( rc )
            return rc;
    }

    return 0;
}
651 #else
/*
 * Write the entire iovec array to @fd, retrying on EINTR and resubmitting
 * after partial writes.  Returns 0 on success, -1 on error with errno set.
 *
 * On the first partial write the (const) caller array is copied into a
 * heap-allocated working copy so the partially-consumed element can be
 * adjusted before resubmission.
 */
int writev_exact(int fd, const struct iovec *iov, int iovcnt)
{
    struct iovec *local_iov = NULL;
    int rc = 0, iov_idx = 0, saved_errno = 0;
    ssize_t len;

    while ( iov_idx < iovcnt )
    {
        /*
         * Skip over iov[] entries with 0 length.
         *
         * This is needed to cover the case where we took a partial write and
         * all remaining vectors are of 0 length. In such a case, the results
         * from writev() are indistinguishable from EOF.
         */
        while ( iov[iov_idx].iov_len == 0 )
            if ( ++iov_idx == iovcnt )
                goto out;

        /* Never pass more than IOV_MAX entries in a single call. */
        len = writev(fd, &iov[iov_idx], min(iovcnt - iov_idx, IOV_MAX));
        saved_errno = errno;

        if ( (len == -1) && (errno == EINTR) )
            continue;
        if ( len <= 0 )
        {
            rc = -1;
            goto out;
        }

        /* Check iov[] to see whether we had a partial or complete write. */
        while ( (len > 0) && (iov_idx < iovcnt) )
        {
            if ( len >= iov[iov_idx].iov_len )
                len -= iov[iov_idx++].iov_len;
            else
            {
                /* Partial write of iov[iov_idx]. Copy iov so we can adjust
                 * element iov_idx and resubmit the rest. */
                if ( !local_iov )
                {
                    local_iov = malloc(iovcnt * sizeof(*iov));
                    if ( !local_iov )
                    {
                        saved_errno = ENOMEM;
                        rc = -1;
                        goto out;
                    }

                    /* From here on, iov aliases the mutable copy. */
                    iov = memcpy(local_iov, iov, iovcnt * sizeof(*iov));
                }

                local_iov[iov_idx].iov_base += len;
                local_iov[iov_idx].iov_len -= len;
                break;
            }
        }
    }

    saved_errno = 0;

 out:
    free(local_iov);
    /* Restore errno from the failing call (or 0 on success). */
    errno = saved_errno;
    return rc;
}
718 #endif
719
/* Find-first-set for a byte: 1-based bit position, or 0 when x == 0. */
int xc_ffs8(uint8_t x)
{
    int pos;

    for ( pos = 1; x; pos++, x >>= 1 )
        if ( x & 1 )
            return pos;

    return 0;
}
728
/* Find-first-set for 16 bits, built on xc_ffs8(). */
int xc_ffs16(uint16_t x)
{
    uint8_t lo = x, hi = x >> 8;

    if ( lo )
        return xc_ffs8(lo);

    return hi ? xc_ffs8(hi) + 8 : 0;
}
734
/* Find-first-set for 32 bits, built on xc_ffs16(). */
int xc_ffs32(uint32_t x)
{
    uint16_t lo = x, hi = x >> 16;

    if ( lo )
        return xc_ffs16(lo);

    return hi ? xc_ffs16(hi) + 16 : 0;
}
740
/* Find-first-set for 64 bits, built on xc_ffs32(). */
int xc_ffs64(uint64_t x)
{
    uint32_t lo = x, hi = x >> 32;

    if ( lo )
        return xc_ffs32(lo);

    return hi ? xc_ffs32(hi) + 32 : 0;
}
746
747 /*
748 * Local variables:
749 * mode: C
750 * c-file-style: "BSD"
751 * c-basic-offset: 4
752 * tab-width: 4
753 * indent-tabs-mode: nil
754 * End:
755 */
756