1 /*
2 Xen Store Daemon interface providing simple tree-like database.
3 Copyright (C) 2005 Rusty Russell IBM Corporation
4
5 This library is free software; you can redistribute it and/or
6 modify it under the terms of the GNU Lesser General Public
7 License as published by the Free Software Foundation; either
8 version 2.1 of the License, or (at your option) any later version.
9
10 This library is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 Lesser General Public License for more details.
14
15 You should have received a copy of the GNU Lesser General Public
16 License along with this library; If not, see <http://www.gnu.org/licenses/>.
17 */
18
19 #define _GNU_SOURCE
20
21 #include <sys/types.h>
22 #include <sys/stat.h>
23 #include <fcntl.h>
24 #include <sys/uio.h>
25 #include <sys/socket.h>
26 #include <sys/un.h>
27 #include <string.h>
28 #include <unistd.h>
29 #include <stdbool.h>
30 #include <stdlib.h>
31 #include <assert.h>
32 #include <stdio.h>
33 #include <signal.h>
34 #include <stdint.h>
35 #include <errno.h>
36 #include <xen-tools/common-macros.h>
37 #include <xen-tools/xenstore-common.h>
38 #include "xenstore.h"
39
40 #include <xentoolcore_internal.h>
41 #include <xen_list.h>
42
43 #ifndef O_CLOEXEC
44 #define O_CLOEXEC 0
45 #endif
46
47 #ifndef SOCK_CLOEXEC
48 #define SOCK_CLOEXEC 0
49 #endif
50
/*
 * One message received from xenstored: the wire header plus a separately
 * malloc'ed payload.  Queued on either the reply list or the watch list.
 */
struct xs_stored_msg {
	XEN_TAILQ_ENTRY(struct xs_stored_msg) list;	/* queue linkage */
	struct xsd_sockmsg hdr;		/* wire header (type, tx_id, len) */
	char *body;			/* malloc'ed payload, length in hdr.len */
};
56
57 #ifdef USE_PTHREAD
58
59 #include <pthread.h>
60
61 #ifdef USE_DLSYM
62 #include <dlfcn.h>
63 #endif
64
/* Threaded (USE_PTHREAD) variant of the connection handle. */
struct xs_handle {
	/* Communications channel to xenstore daemon. */
	int fd;
	Xentoolcore__Active_Handle tc_ah; /* for restrict */

	/*
	 * A read thread which pulls messages off the comms channel and
	 * signals waiters.
	 */
	pthread_t read_thr;
	int read_thr_exists;	/* guarded by request_mutex (see below) */

	/*
	 * A list of fired watch messages, protected by a mutex. Users can
	 * wait on the conditional variable until a watch is pending.
	 */
	XEN_TAILQ_HEAD(, struct xs_stored_msg) watch_list;
	pthread_mutex_t watch_mutex;
	pthread_cond_t watch_condvar;

	/* Clients can select() on this pipe to wait for a watch to fire. */
	int watch_pipe[2];	/* both -1 until created lazily by xs_fileno() */
	/* Filtering watch event in unwatch function? */
	bool unwatch_filter;

	/*
	 * A list of replies. Currently only one will ever be outstanding
	 * because we serialise requests. The requester can wait on the
	 * conditional variable for its response.
	 */
	XEN_TAILQ_HEAD(, struct xs_stored_msg) reply_list;
	pthread_mutex_t reply_mutex;
	pthread_cond_t reply_condvar;

	/* One request at a time. */
	pthread_mutex_t request_mutex;

	/* Lock discipline:
	 *  Only holder of the request lock may write to h->fd.
	 *  Only holder of the request lock may access read_thr_exists.
	 *  If read_thr_exists==0, only holder of request lock may read h->fd;
	 *  If read_thr_exists==1, only the read thread may read h->fd.
	 *  Only holder of the reply lock may access reply_list.
	 *  Only holder of the watch lock may access watch_list.
	 * Lock hierarchy:
	 *  The order in which to acquire locks is
	 *     request_mutex
	 *     reply_mutex
	 *     watch_mutex
	 */
};
116
/* pthread build: map the locking helpers onto the real pthread calls. */
#define mutex_lock(m)		pthread_mutex_lock(m)
#define mutex_unlock(m)		pthread_mutex_unlock(m)
#define condvar_signal(c)	pthread_cond_signal(c)
#define condvar_wait(c,m)	pthread_cond_wait(c,m)
/* Register a cancellation cleanup handler; must pair with cleanup_pop(). */
#define cleanup_push(f, a) \
    pthread_cleanup_push((void (*)(void *))(f), (void *)(a))
/*
 * Some definitions of pthread_cleanup_pop() are a macro starting with an
 * end-brace. GCC then complains if we immediately precede that with a label.
 * Hence we insert a dummy statement to appease the compiler in this situation.
 */
#define cleanup_pop(run) ((void)0); pthread_cleanup_pop(run)

/* Per the lock discipline, callers access this under the request lock. */
#define read_thread_exists(h)	(h->read_thr_exists)

/* Because pthread_cleanup_p* are not available when USE_PTHREAD is
 * disabled, use these macros which convert appropriately. */
#define cleanup_push_heap(p) cleanup_push(free, p)
#define cleanup_pop_heap(run, p) cleanup_pop((run))
136
137 static void *read_thread(void *arg);
138
139 #else /* !defined(USE_PTHREAD) */
140
/* Single-threaded variant of the handle: no reader thread, no locks. */
struct xs_handle {
	int fd;				/* communications channel to xenstored */
	Xentoolcore__Active_Handle tc_ah; /* for restrict */
	XEN_TAILQ_HEAD(, struct xs_stored_msg) reply_list;
	XEN_TAILQ_HEAD(, struct xs_stored_msg) watch_list;
	/* Clients can select() on this pipe to wait for a watch to fire. */
	int watch_pipe[2];		/* both -1 until created by xs_fileno() */
	/* Filtering watch event in unwatch function? */
	bool unwatch_filter;
};
151
/* Single-threaded build: locking and cleanup helpers become no-ops. */
#define mutex_lock(m)		((void)0)
#define mutex_unlock(m)		((void)0)
#define condvar_signal(c)	((void)0)
#define condvar_wait(c,m)	((void)0)
#define cleanup_push(f, a)	((void)0)
#define cleanup_pop(run)	((void)0)
/* There is never a reader thread without pthreads. */
#define read_thread_exists(h)	(0)

#define cleanup_push_heap(p) ((void)0)
#define cleanup_pop_heap(run, p) do { if ((run)) free(p); } while(0)
162
163 #endif
164
165 static int read_message(struct xs_handle *h, int nonblocking);
166
/* Set or clear O_NONBLOCK on fd.  Returns false if fcntl() fails. */
static bool setnonblock(int fd, int nonblock) {
	int fl;

	fl = fcntl(fd, F_GETFL);
	if (fl < 0)
		return false;

	fl = nonblock ? (fl | O_NONBLOCK) : (fl & ~O_NONBLOCK);

	return fcntl(fd, F_SETFL, fl) != -1;
}
182
/* Mark fd close-on-exec.  Returns false if either fcntl() fails. */
static bool set_cloexec(int fd)
{
	int fdflags = fcntl(fd, F_GETFD);

	return fdflags >= 0 && fcntl(fd, F_SETFD, fdflags | FD_CLOEXEC) >= 0;
}
192
/* Create a pipe with both ends close-on-exec.  Returns -1 with errno set. */
static int pipe_cloexec(int fds[2])
{
#if HAVE_PIPE2
	/* pipe2() sets CLOEXEC atomically with creation. */
	return pipe2(fds, O_CLOEXEC);
#else
	if (pipe(fds) < 0)
		return -1;
	/* Best effort to set CLOEXEC. Racy. */
	set_cloexec(fds[0]);
	set_cloexec(fds[1]);
	return 0;
#endif
}
206
/*
 * Return an fd the client can select()/poll() on; it becomes readable
 * when a watch event is pending.  The pipe is created lazily on first
 * call; if events are already queued, prime it with one byte so the
 * caller does not miss them.
 */
int xs_fileno(struct xs_handle *h)
{
	char c = 0;

	mutex_lock(&h->watch_mutex);

	if ((h->watch_pipe[0] == -1) && (pipe_cloexec(h->watch_pipe) != -1)) {
		/* Kick things off if the watch list is already non-empty. */
		if (!XEN_TAILQ_EMPTY(&h->watch_list))
			while (write(h->watch_pipe[1], &c, 1) != 1)
				continue;
	}

	mutex_unlock(&h->watch_mutex);

	/* -1 if pipe creation failed. */
	return h->watch_pipe[0];
}
224
/*
 * Connect to xenstored via the unix socket at @connect_to.
 * Returns the connected fd, or -1 with errno set.
 */
static int get_socket(const char *connect_to)
{
	struct sockaddr_un addr;
	int fd, errsv;

	fd = socket(PF_UNIX, SOCK_STREAM | SOCK_CLOEXEC, 0);
	if (fd < 0)
		return -1;

	/* Compat for non-SOCK_CLOEXEC environments. Racy. */
	if (!SOCK_CLOEXEC && !set_cloexec(fd))
		goto fail;

	if (strlen(connect_to) >= sizeof(addr.sun_path)) {
		errno = EINVAL;
		goto fail;
	}
	addr.sun_family = AF_UNIX;
	strcpy(addr.sun_path, connect_to);

	if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) != 0)
		goto fail;

	return fd;

fail:
	errsv = errno;
	close(fd);
	errno = errsv;
	return -1;
}
256
/*
 * Open the xenbus kernel device at @connect_to.
 * Returns the open fd, or -1 with errno set.
 */
static int get_dev(const char *connect_to)
{
	int fd, errsv;

	fd = open(connect_to, O_RDWR | O_CLOEXEC);
	if (fd < 0)
		return -1;

	/* Compat for non-O_CLOEXEC environments. Racy. */
	if (O_CLOEXEC || set_cloexec(fd))
		return fd;

	errsv = errno;
	close(fd);
	errno = errsv;

	return -1;
}
278
/*
 * xentoolcore "restrict" callback: severs the connection by replacing the
 * store fd (via xentoolcore__restrict_by_dup2_null).
 */
static int all_restrict_cb(Xentoolcore__Active_Handle *ah, domid_t domid) {
	struct xs_handle *h = CONTAINER_OF(ah, *h, tc_ah);
	return xentoolcore__restrict_by_dup2_null(h->fd);
}
283
/*
 * Allocate and connect a handle to @connect_to, which may be either a
 * unix socket or a device node (decided via stat()).  Returns NULL with
 * errno set on failure.
 */
static struct xs_handle *get_handle(const char *connect_to)
{
	struct stat buf;
	struct xs_handle *h = NULL;
	int saved_errno;

	h = malloc(sizeof(*h));
	if (h == NULL)
		goto err;

	memset(h, 0, sizeof(*h));
	h->fd = -1;

	/* Register the restrict callback before anything can fail below;
	 * the err path unconditionally deregisters. */
	h->tc_ah.restrict_callback = all_restrict_cb;
	xentoolcore__register_active_handle(&h->tc_ah);

	if (stat(connect_to, &buf) != 0)
		goto err;

	if (S_ISSOCK(buf.st_mode))
		h->fd = get_socket(connect_to);
	else
		h->fd = get_dev(connect_to);

	if (h->fd == -1)
		goto err;

	XEN_TAILQ_INIT(&h->reply_list);
	XEN_TAILQ_INIT(&h->watch_list);

	/* Watch pipe is allocated on demand in xs_fileno(). */
	h->watch_pipe[0] = h->watch_pipe[1] = -1;

	h->unwatch_filter = false;

#ifdef USE_PTHREAD
	pthread_mutex_init(&h->watch_mutex, NULL);
	pthread_cond_init(&h->watch_condvar, NULL);

	pthread_mutex_init(&h->reply_mutex, NULL);
	pthread_cond_init(&h->reply_condvar, NULL);

	pthread_mutex_init(&h->request_mutex, NULL);
#endif

	return h;

err:
	saved_errno = errno;

	if (h) {
		xentoolcore__deregister_active_handle(&h->tc_ah);
		if (h->fd >= 0)
			close(h->fd);
	}
	free(h);

	errno = saved_errno;
	return NULL;
}
344
/* Compatibility wrapper: identical to xs_open(0). */
struct xs_handle *xs_daemon_open(void)
{
	return xs_open(0);
}
349
/* Compatibility wrapper: identical to xs_open(0) (no read-only mode). */
struct xs_handle *xs_daemon_open_readonly(void)
{
	return xs_open(0);
}
354
/* Compatibility wrapper: identical to xs_open(0). */
struct xs_handle *xs_domain_open(void)
{
	return xs_open(0);
}
359
/*
 * Path of the xenbus kernel device: $XENSTORED_PATH if set, otherwise a
 * platform-specific default (probing which node exists on Linux).
 */
static const char *xs_domain_dev(void)
{
	const char *env = getenv("XENSTORED_PATH");

	if (env)
		return env;

#if defined(__RUMPUSER_XEN__) || defined(__RUMPRUN__)
	return "/dev/xen/xenbus";
#elif defined(__linux__)
	return access("/dev/xen/xenbus", F_OK) ? "/proc/xen/xenbus"
					       : "/dev/xen/xenbus";
#elif defined(__NetBSD__)
	return "/kern/xen/xenbus";
#elif defined(__FreeBSD__)
	return "/dev/xen/xenstore";
#else
	return "/dev/xen/xenbus";
#endif
}
379
/*
 * Open a connection to xenstored: try the daemon's unix socket first,
 * then fall back to the kernel device.  @flags may contain
 * XS_UNWATCH_FILTER.  Returns NULL with errno set on failure.
 */
struct xs_handle *xs_open(unsigned long flags)
{
	struct xs_handle *xsh = NULL;

	xsh = get_handle(xs_daemon_socket());

	if (!xsh)
		xsh = get_handle(xs_domain_dev());

	if (xsh && (flags & XS_UNWATCH_FILTER))
		xsh->unwatch_filter = true;

	return xsh;
}
394
/* Free all queued reply and watch messages (fds are left untouched). */
static void close_free_msgs(struct xs_handle *h) {
	struct xs_stored_msg *msg, *tmsg;

	XEN_TAILQ_FOREACH_SAFE(msg, &h->reply_list, list, tmsg) {
		free(msg->body);
		free(msg);
	}

	XEN_TAILQ_FOREACH_SAFE(msg, &h->watch_list, list, tmsg) {
		free(msg->body);
		free(msg);
	}
}
408
/* Close the watch pipe (if created) and the store fd, then free the handle. */
static void close_fds_free(struct xs_handle *h) {
	if (h->watch_pipe[0] != -1) {
		close(h->watch_pipe[0]);
		close(h->watch_pipe[1]);
	}

	xentoolcore__deregister_active_handle(&h->tc_ah);
	close(h->fd);

	free(h);
}
420
/*
 * Tear down a handle inherited across fork(): free everything without
 * touching mutexes or the (parent's) reader thread.
 */
void xs_daemon_destroy_postfork(struct xs_handle *h)
{
	close_free_msgs(h);
	close_fds_free(h);
}
426
/* Orderly shutdown: stop the reader thread, drain queues, free the handle. */
void xs_daemon_close(struct xs_handle *h)
{
#ifdef USE_PTHREAD
	if (h->read_thr_exists) {
		pthread_cancel(h->read_thr);
		pthread_join(h->read_thr, NULL);
	}
#endif

	/* Take all locks (hierarchy order) so no user is mid-operation. */
	mutex_lock(&h->request_mutex);
	mutex_lock(&h->reply_mutex);
	mutex_lock(&h->watch_mutex);

	close_free_msgs(h);

	mutex_unlock(&h->request_mutex);
	mutex_unlock(&h->reply_mutex);
	mutex_unlock(&h->watch_mutex);

	close_fds_free(h);
}
448
/* NULL-tolerant close: like xs_daemon_close() but ignores a NULL handle. */
void xs_close(struct xs_handle* xsh)
{
	if (!xsh)
		return;

	xs_daemon_close(xsh);
}
454
/*
 * Read exactly @len bytes from @fd into @data.
 * With @nonblocking set, either reads everything requested or nothing:
 * the fd is put into O_NONBLOCK for the first chunk only, then switched
 * back to blocking for the remainder.  Returns false with errno set on
 * error; EOF (peer closed the fd) is reported as EBADF.
 */
static bool read_all(int fd, void *data, unsigned int len, int nonblocking)
{
	if (!len)
		return true;

	if (nonblocking && !setnonblock(fd, 1))
		return false;

	while (len) {
		int done;

		done = read(fd, data, len);
		if (done < 0) {
			if (errno == EINTR)
				continue;
			goto out_false;
		}
		if (done == 0) {
			/* It closed fd on us? EBADF is appropriate. */
			errno = EBADF;
			goto out_false;
		}
		data += done;
		len -= done;

		/* First chunk arrived: revert to blocking for the rest. */
		if (nonblocking) {
			nonblocking = 0;
			if (!setnonblock(fd, 0))
				goto out_false;
		}
	}

	return true;

out_false:
	/* Best effort to restore blocking mode before failing. */
	if (nonblocking)
		setnonblock(fd, 0);
	return false;
}
496
497 #ifdef XSTEST
498 #define read_all read_all_choice
499 #define xs_write_all write_all_choice
500 #else
501 /* Simple routine for writing to sockets, etc. */
/* Write all @len bytes of @data to @fd, retrying on EINTR and short
 * writes.  Returns false on any other write failure. */
bool xs_write_all(int fd, const void *data, unsigned int len)
{
	const char *p = data;

	while (len) {
		int n = write(fd, p, len);

		if (n < 0 && errno == EINTR)
			continue;
		if (n <= 0)
			return false;
		p += n;
		len -= n;
	}

	return true;
}
518 #endif
519
get_error(const char * errorstring)520 static int get_error(const char *errorstring)
521 {
522 unsigned int i;
523
524 for (i = 0; strcmp(errorstring, xsd_errors[i].errstring); i++)
525 if (i == ARRAY_SIZE(xsd_errors) - 1)
526 return EINVAL;
527 return xsd_errors[i].errnum;
528 }
529
530 /* Adds extra nul terminator, because we generally (always?) hold strings. */
/*
 * Wait for and dequeue the single outstanding reply message.  If no
 * reader thread exists, pull the message off the fd ourselves.
 * Returns the malloc'ed body (caller frees) or NULL with errno set.
 */
static void *read_reply(
	struct xs_handle *h, enum xsd_sockmsg_type *type, unsigned int *len)
{
	struct xs_stored_msg *msg;
	char *body;
	int read_from_thread;

	read_from_thread = read_thread_exists(h);

	/* Read from comms channel ourselves if there is no reader thread. */
	if (!read_from_thread && (read_message(h, 0) == -1))
		return NULL;

	mutex_lock(&h->reply_mutex);
#ifdef USE_PTHREAD
	/* h->fd becomes -1 when the channel breaks; don't wait forever. */
	while (XEN_TAILQ_EMPTY(&h->reply_list) && read_from_thread && h->fd != -1)
		condvar_wait(&h->reply_condvar, &h->reply_mutex);
#endif
	if (XEN_TAILQ_EMPTY(&h->reply_list)) {
		mutex_unlock(&h->reply_mutex);
		errno = EINVAL;
		return NULL;
	}
	msg = XEN_TAILQ_FIRST(&h->reply_list);
	XEN_TAILQ_REMOVE(&h->reply_list, msg, list);
	/* Requests are serialised, so at most one reply can be queued. */
	assert(XEN_TAILQ_EMPTY(&h->reply_list));
	mutex_unlock(&h->reply_mutex);

	*type = msg->hdr.type;
	if (len)
		*len = msg->hdr.len;
	body = msg->body;

	free(msg);

	return body;
}
568
569 /* Send message to xs, get malloc'ed reply. NULL and set errno on error. */
static void *xs_talkv(struct xs_handle *h, xs_transaction_t t,
		      enum xsd_sockmsg_type type,
		      const struct iovec *iovec,
		      unsigned int num_vecs,
		      unsigned int *len)
{
	struct xsd_sockmsg msg;
	void *ret = NULL;
	int saved_errno;
	unsigned int i;
	struct sigaction ignorepipe, oldact;

	/* Build the wire header; payload length is the sum of all iovecs. */
	msg.tx_id = t;
	msg.req_id = 0;
	msg.type = type;
	msg.len = 0;
	for (i = 0; i < num_vecs; i++)
		msg.len += iovec[i].iov_len;

	if (msg.len > XENSTORE_PAYLOAD_MAX) {
		errno = E2BIG;
		return 0;
	}

	/* Ignore SIGPIPE while writing: a dead daemon must not kill us. */
	ignorepipe.sa_handler = SIG_IGN;
	sigemptyset(&ignorepipe.sa_mask);
	ignorepipe.sa_flags = 0;
	sigaction(SIGPIPE, &ignorepipe, &oldact);

	/* Serialise requests; only the holder may write to h->fd. */
	mutex_lock(&h->request_mutex);

	if (!xs_write_all(h->fd, &msg, sizeof(msg)))
		goto fail;

	for (i = 0; i < num_vecs; i++)
		if (!xs_write_all(h->fd, iovec[i].iov_base, iovec[i].iov_len))
			goto fail;

	ret = read_reply(h, &msg.type, len);
	if (!ret)
		goto fail;

	mutex_unlock(&h->request_mutex);

	sigaction(SIGPIPE, &oldact, NULL);
	if (msg.type == XS_ERROR) {
		/* Error replies carry the errno name as a string. */
		saved_errno = get_error(ret);
		free(ret);
		errno = saved_errno;
		return NULL;
	}

	/* Reply type mismatch means we lost sync with the daemon. */
	if (msg.type != type) {
		free(ret);
		saved_errno = EBADF;
		goto close_fd;
	}
	return ret;

fail:
	/* We're in a bad state, so close fd. */
	saved_errno = errno;
	mutex_unlock(&h->request_mutex);
	sigaction(SIGPIPE, &oldact, NULL);
close_fd:
	close(h->fd);
	h->fd = -1;
	errno = saved_errno;
	return NULL;
}
640
641 /* free(), but don't change errno. */
static void free_no_errno(void *p)
{
	int errsv = errno;

	free(p);
	errno = errsv;
}
648
649 /* Simplified version of xs_talkv: single message. */
static void *xs_single(struct xs_handle *h, xs_transaction_t t,
		       enum xsd_sockmsg_type type,
		       const char *string,
		       unsigned int *len)
{
	/* One iovec carrying the string including its NUL terminator. */
	struct iovec iov = {
		.iov_base = (void *)string,
		.iov_len = strlen(string) + 1,
	};

	return xs_talkv(h, t, type, &iov, 1, len);
}
661
/* Collapse a reply into success/failure, releasing the reply buffer. */
static bool xs_bool(char *reply)
{
	bool ok = (reply != NULL);

	free(reply);	/* free(NULL) is a no-op */
	return ok;
}
669
/*
 * Convert a NUL-separated string block into a malloc'ed vector of
 * pointers followed by a copy of the strings (single allocation, so the
 * caller frees everything with one free()).  Consumes @strings.
 */
static char **xs_directory_common(char *strings, unsigned int len,
				  unsigned int *num)
{
	char *p, **ret;

	/* Count the strings. */
	*num = xenstore_count_strings(strings, len);

	/* Transfer to one big alloc for easy freeing. */
	ret = malloc(*num * sizeof(char *) + len);
	if (!ret) {
		free_no_errno(strings);
		return NULL;
	}
	memcpy(&ret[*num], strings, len);
	free_no_errno(strings);

	/* Point each vector slot at its string within the copied block. */
	strings = (char *)&ret[*num];
	for (p = strings, *num = 0; p < strings + len; p += strlen(p) + 1)
		ret[(*num)++] = p;
	return ret;
}
692
/*
 * Read a directory via repeated XS_DIRECTORY_PART requests, for listings
 * too large for a single reply.  Each part is "<generation>\0<names...>";
 * if the generation string changes mid-walk the node was modified, so we
 * restart from offset 0.  Returns the combined listing via
 * xs_directory_common(), or NULL with errno set (E2BIG if the daemon
 * does not support XS_DIRECTORY_PART at all).
 */
static char **xs_directory_part(struct xs_handle *h, xs_transaction_t t,
				const char *path, unsigned int *num)
{
	unsigned int off, result_len;
	char gen[24], offstr[8];
	struct iovec iovec[2];
	char *result = NULL, *strings = NULL;
	int saved_errno;

	memset(gen, 0, sizeof(gen));
	iovec[0].iov_base = (void *)path;
	iovec[0].iov_len = strlen(path) + 1;

	for (off = 0;;) {
		snprintf(offstr, sizeof(offstr), "%u", off);
		iovec[1].iov_base = (void *)offstr;
		iovec[1].iov_len = strlen(offstr) + 1;
		result = xs_talkv(h, t, XS_DIRECTORY_PART, iovec, 2,
				  &result_len);

		/* If XS_DIRECTORY_PART isn't supported return E2BIG. */
		if (!result) {
			saved_errno = (errno == ENOSYS) ? E2BIG : errno;
			free(strings);	/* fix: don't leak partial listing */
			errno = saved_errno;
			return NULL;
		}

		if (off) {
			/* Generation changed: node was modified, restart. */
			if (strcmp(gen, result)) {
				free(result);
				free(strings);
				strings = NULL;
				off = 0;
				continue;
			}
		} else
			strncpy(gen, result, sizeof(gen) - 1);

		/* Strip the generation string, append the name data. */
		result_len -= strlen(result) + 1;
		if (result_len) {
			char *tmp = realloc(strings, off + result_len);

			if (!tmp) {
				/* fix: don't memcpy() through NULL on OOM,
				 * and don't leak the old block. */
				saved_errno = errno;
				free(result);
				free(strings);
				errno = saved_errno;
				return NULL;
			}
			strings = tmp;
			memcpy(strings + off, result + strlen(result) + 1,
			       result_len);
			off += result_len;
		}
		free(result);

		/* A part ending in a double-NUL terminates the listing. */
		if (off <= 1 || strings[off - 2] == 0)
			break;
	}

	/* Drop the trailing NUL of the final double-NUL. */
	if (off > 1)
		off--;

	return xs_directory_common(strings, off, num);
}
745
/*
 * List the children of @path.  Returns a single-allocation vector of
 * name pointers (free with one free()); falls back to chunked reads if
 * the listing is too big for one reply.
 */
char **xs_directory(struct xs_handle *h, xs_transaction_t t,
		    const char *path, unsigned int *num)
{
	unsigned int len;
	char *strings = xs_single(h, t, XS_DIRECTORY, path, &len);

	if (strings)
		return xs_directory_common(strings, len, num);

	/* Only retry piecewise when the failure was "reply too large". */
	return (errno == E2BIG) ? xs_directory_part(h, t, path, num) : NULL;
}
761
762 /* Get the value of a single file, nul terminated.
763 * Returns a malloced value: call free() on it after use.
764 * len indicates length in bytes, not including the nul.
765 * Returns NULL on failure.
766 */
void *xs_read(struct xs_handle *h, xs_transaction_t t,
	      const char *path, unsigned int *len)
{
	/* Thin wrapper: XS_READ carries just the NUL-terminated path. */
	return xs_single(h, t, XS_READ, path, len);
}
772
773 /* Write the value of a single file.
774 * Returns false on failure.
775 */
bool xs_write(struct xs_handle *h, xs_transaction_t t,
	      const char *path, const void *data, unsigned int len)
{
	/* Payload layout: "<path>\0<data>" (data is not NUL-terminated). */
	struct iovec iovec[2] = {
		{ .iov_base = (void *)path, .iov_len = strlen(path) + 1 },
		{ .iov_base = (void *)data, .iov_len = len },
	};

	return xs_bool(xs_talkv(h, t, XS_WRITE, iovec,
				ARRAY_SIZE(iovec), NULL));
}
789
790 /* Create a new directory.
791 * Returns false on failure, or success if it already exists.
792 */
bool xs_mkdir(struct xs_handle *h, xs_transaction_t t,
	      const char *path)
{
	/* Thin wrapper: XS_MKDIR takes just the path, reply is ignored. */
	return xs_bool(xs_single(h, t, XS_MKDIR, path, NULL));
}
798
799 /* Destroy a file or directory (directories must be empty).
800 * Returns false on failure, or success if it doesn't exist.
801 */
bool xs_rm(struct xs_handle *h, xs_transaction_t t,
	   const char *path)
{
	/* Thin wrapper: XS_RM takes just the path, reply is ignored. */
	return xs_bool(xs_single(h, t, XS_RM, path, NULL));
}
807
808 /* Get permissions of node (first element is owner).
809 * Returns malloced array, or NULL: call free() after use.
810 */
struct xs_permissions *xs_get_permissions(struct xs_handle *h,
					  xs_transaction_t t,
					  const char *path, unsigned int *num)
{
	char *strings;
	unsigned int len;
	struct xs_permissions *ret;

	strings = xs_single(h, t, XS_GET_PERMS, path, &len);
	if (!strings)
		return NULL;

	/* Count the strings: each one perms then domid. */
	*num = xenstore_count_strings(strings, len);

	/* Transfer to one big alloc for easy freeing. */
	ret = malloc(*num * sizeof(struct xs_permissions));
	if (!ret) {
		free_no_errno(strings);
		return NULL;
	}

	/* Parse the string forms into the struct array; NULL on bad data. */
	if (!xenstore_strings_to_perms(ret, *num, strings)) {
		free_no_errno(ret);
		ret = NULL;
	}

	free(strings);
	return ret;
}
841
842 /* Set permissions of node (must be owner).
843 * Returns false on failure.
844 */
bool xs_set_permissions(struct xs_handle *h,
			xs_transaction_t t,
			const char *path,
			struct xs_permissions *perms,
			unsigned int num_perms)
{
	unsigned int i;
	struct iovec iov[1+num_perms];	/* VLA: path plus one slot per perm */

	iov[0].iov_base = (void *)path;
	iov[0].iov_len = strlen(path) + 1;

	/* Each permission becomes its own strdup'ed string. */
	for (i = 0; i < num_perms; i++) {
		char buffer[MAX_STRLEN(unsigned int)+1];

		if (!xenstore_perm_to_string(&perms[i], buffer, sizeof(buffer)))
			goto unwind;

		iov[i+1].iov_base = strdup(buffer);
		iov[i+1].iov_len = strlen(buffer) + 1;
		if (!iov[i+1].iov_base)
			goto unwind;
	}

	if (!xs_bool(xs_talkv(h, t, XS_SET_PERMS, iov, 1+num_perms, NULL)))
		goto unwind;
	for (i = 0; i < num_perms; i++)
		free(iov[i+1].iov_base);
	return true;

unwind:
	/* Free only the strings allocated so far, preserving errno. */
	num_perms = i;
	for (i = 0; i < num_perms; i++)
		free_no_errno(iov[i+1].iov_base);
	return false;
}
881
882 /* Always return false a functionality has been removed in Xen 4.9 */
bool xs_restrict(struct xs_handle *h, unsigned domid)
{
	(void)h;
	(void)domid;

	return false;
}
887
888 /* Watch a node for changes (poll on fd to detect, or call read_watch()).
889 * When the node (or any child) changes, fd will become readable.
890 * Token is returned when watch is read, to allow matching.
891 * Returns false on failure.
892 */
bool xs_watch(struct xs_handle *h, const char *path, const char *token)
{
	struct iovec iov[2];

#ifdef USE_PTHREAD
#define DEFAULT_THREAD_STACKSIZE (16 * 1024)
/* NetBSD doesn't have PTHREAD_STACK_MIN. */
#ifndef PTHREAD_STACK_MIN
# define PTHREAD_STACK_MIN 0
#endif

#define READ_THREAD_STACKSIZE					\
	((DEFAULT_THREAD_STACKSIZE < PTHREAD_STACK_MIN) ?	\
	 PTHREAD_STACK_MIN : DEFAULT_THREAD_STACKSIZE)

	/* We dynamically create a reader thread on demand. */
	mutex_lock(&h->request_mutex);
	if (!h->read_thr_exists) {
		sigset_t set, old_set;
		pthread_attr_t attr;
		static size_t stack_size;	/* computed once, then cached */
#ifdef USE_DLSYM
		size_t (*getsz)(pthread_attr_t *attr);
#endif

		if (pthread_attr_init(&attr) != 0) {
			mutex_unlock(&h->request_mutex);
			return false;
		}
		if (!stack_size) {
#ifdef USE_DLSYM
			/* Glibc-internal helper giving the minimum usable
			 * stack size — presumably accounts for TLS; TODO
			 * confirm against the glibc version in use. */
			getsz = dlsym(RTLD_DEFAULT, "__pthread_get_minstack");
			if (getsz)
				stack_size = getsz(&attr);
#endif
			if (stack_size < READ_THREAD_STACKSIZE)
				stack_size = READ_THREAD_STACKSIZE;
		}
		if (pthread_attr_setstacksize(&attr, stack_size) != 0) {
			pthread_attr_destroy(&attr);
			mutex_unlock(&h->request_mutex);
			return false;
		}

		/* Block all signals around pthread_create() so the reader
		 * thread starts with them masked. */
		sigfillset(&set);
		pthread_sigmask(SIG_SETMASK, &set, &old_set);

		if (pthread_create(&h->read_thr, &attr, read_thread, h) != 0) {
			pthread_sigmask(SIG_SETMASK, &old_set, NULL);
			pthread_attr_destroy(&attr);
			mutex_unlock(&h->request_mutex);
			return false;
		}
		h->read_thr_exists = 1;
		pthread_sigmask(SIG_SETMASK, &old_set, NULL);
		pthread_attr_destroy(&attr);
	}
	mutex_unlock(&h->request_mutex);
#endif

	/* Payload: "<path>\0<token>\0". */
	iov[0].iov_base = (void *)path;
	iov[0].iov_len = strlen(path) + 1;
	iov[1].iov_base = (void *)token;
	iov[1].iov_len = strlen(token) + 1;

	return xs_bool(xs_talkv(h, XBT_NULL, XS_WATCH, iov,
				ARRAY_SIZE(iov), NULL));
}
961
962
963 /* Clear the pipe token if there are no more pending watchs.
964 * We suppose the watch_mutex is already taken.
965 */
/* Drain the notification byte from the watch pipe once the watch list is
 * empty.  Caller must hold the watch mutex. */
static void xs_maybe_clear_watch_pipe(struct xs_handle *h)
{
	char c;

	if (XEN_TAILQ_EMPTY(&h->watch_list) && (h->watch_pipe[0] != -1))
		while (read(h->watch_pipe[0], &c, 1) != 1)
			continue;
}
974
975 /* Find out what node change was on (will block if nothing pending).
976 * Returns array of two pointers: path and token, or NULL.
977 * Call free() after use.
978 */
static char **read_watch_internal(struct xs_handle *h, unsigned int *num,
				  int nonblocking)
{
	struct xs_stored_msg *msg;
	char **ret, *strings;
	unsigned int num_strings, i;

	mutex_lock(&h->watch_mutex);

#ifdef USE_PTHREAD
	/* Wait on the condition variable for a watch to fire.
	 * If the reader thread doesn't exist yet, then that's because
	 * we haven't called xs_watch.  Presumably the application
	 * will do so later; in the meantime we just block.
	 */
	while (XEN_TAILQ_EMPTY(&h->watch_list) && h->fd != -1) {
		if (nonblocking) {
			mutex_unlock(&h->watch_mutex);
			errno = EAGAIN;
			return 0;
		}
		condvar_wait(&h->watch_condvar, &h->watch_mutex);
	}
#else /* !defined(USE_PTHREAD) */
	/* Read from comms channel ourselves if there are no threads
	 * and therefore no reader thread. */

	assert(!read_thread_exists(h)); /* not threadsafe but worth a check */
	if ((read_message(h, nonblocking) == -1))
		return NULL;

#endif /* !defined(USE_PTHREAD) */

	if (XEN_TAILQ_EMPTY(&h->watch_list)) {
		mutex_unlock(&h->watch_mutex);
		errno = EINVAL;
		return NULL;
	}
	msg = XEN_TAILQ_FIRST(&h->watch_list);
	XEN_TAILQ_REMOVE(&h->watch_list, msg, list);

	/* Drain the wakeup byte if no further events are queued. */
	xs_maybe_clear_watch_pipe(h);
	mutex_unlock(&h->watch_mutex);

	assert(msg->hdr.type == XS_WATCH_EVENT);

	strings = msg->body;
	num_strings = xenstore_count_strings(strings, msg->hdr.len);

	/* Single allocation: pointer vector followed by the string data. */
	ret = malloc(sizeof(char*) * num_strings + msg->hdr.len);
	if (!ret) {
		free_no_errno(strings);
		free_no_errno(msg);
		return NULL;
	}

	ret[0] = (char *)(ret + num_strings);
	memcpy(ret[0], strings, msg->hdr.len);

	free(strings);
	free(msg);

	/* Fix up the remaining pointers to their strings within the copy. */
	for (i = 1; i < num_strings; i++)
		ret[i] = ret[i - 1] + strlen(ret[i - 1]) + 1;

	*num = num_strings;

	return ret;
}
1048
/* Non-blocking variant of xs_read_watch(): NULL/EAGAIN if nothing queued. */
char **xs_check_watch(struct xs_handle *h)
{
	unsigned int num;
	char **event = read_watch_internal(h, &num, 1);

	if (event)
		assert(num >= 2);	/* at least path and token */
	return event;
}
1057
1058 /* Find out what node change was on (will block if nothing pending).
1059 * Returns array of two pointers: path and token, or NULL.
1060 * Call free() after use.
1061 */
char **xs_read_watch(struct xs_handle *h, unsigned int *num)
{
	/* Blocking variant: waits until an event is available. */
	return read_watch_internal(h, num, 0);
}
1066
1067 /* Remove a watch on a node.
1068 * Returns false on failure (no watch on that node).
1069 */
xs_unwatch(struct xs_handle * h,const char * path,const char * token)1070 bool xs_unwatch(struct xs_handle *h, const char *path, const char *token)
1071 {
1072 struct iovec iov[2];
1073 struct xs_stored_msg *msg, *tmsg;
1074 bool res;
1075 char *s, *p;
1076 unsigned int i;
1077 char *l_token, *l_path;
1078
1079 iov[0].iov_base = (char *)path;
1080 iov[0].iov_len = strlen(path) + 1;
1081 iov[1].iov_base = (char *)token;
1082 iov[1].iov_len = strlen(token) + 1;
1083
1084 res = xs_bool(xs_talkv(h, XBT_NULL, XS_UNWATCH, iov,
1085 ARRAY_SIZE(iov), NULL));
1086
1087 if (!h->unwatch_filter) /* Don't filter the watch list */
1088 return res;
1089
1090
1091 /* Filter the watch list to remove potential message */
1092 mutex_lock(&h->watch_mutex);
1093
1094 if (XEN_TAILQ_EMPTY(&h->watch_list)) {
1095 mutex_unlock(&h->watch_mutex);
1096 return res;
1097 }
1098
1099 XEN_TAILQ_FOREACH_SAFE(msg, &h->watch_list, list, tmsg) {
1100 assert(msg->hdr.type == XS_WATCH_EVENT);
1101
1102 s = msg->body;
1103
1104 l_token = NULL;
1105 l_path = NULL;
1106
1107 for (p = s, i = 0; p < msg->body + msg->hdr.len; p++) {
1108 if (*p == '\0')
1109 {
1110 if (i == XS_WATCH_TOKEN)
1111 l_token = s;
1112 else if (i == XS_WATCH_PATH)
1113 l_path = s;
1114 i++;
1115 s = p + 1;
1116 }
1117 }
1118
1119 if (l_token && !strcmp(token, l_token) &&
1120 l_path && xs_path_is_subpath(path, l_path)) {
1121 XEN_TAILQ_REMOVE(&h->watch_list, msg, list);
1122 free(msg);
1123 }
1124 }
1125
1126 xs_maybe_clear_watch_pipe(h);
1127
1128 mutex_unlock(&h->watch_mutex);
1129
1130 return res;
1131 }
1132
1133 /* Start a transaction: changes by others will not be seen during this
1134 * transaction, and changes will not be visible to others until end.
1135 * Returns XBT_NULL on failure.
1136 */
xs_transaction_start(struct xs_handle * h)1137 xs_transaction_t xs_transaction_start(struct xs_handle *h)
1138 {
1139 char *id_str;
1140 xs_transaction_t id;
1141
1142 id_str = xs_single(h, XBT_NULL, XS_TRANSACTION_START, "", NULL);
1143 if (id_str == NULL)
1144 return XBT_NULL;
1145
1146 id = strtoul(id_str, NULL, 0);
1147 free(id_str);
1148
1149 return id;
1150 }
1151
1152 /* End a transaction.
1153 * If abandon is true, transaction is discarded instead of committed.
1154 * Returns false on failure, which indicates an error: transactions will
1155 * not fail spuriously.
1156 */
xs_transaction_end(struct xs_handle * h,xs_transaction_t t,bool abort)1157 bool xs_transaction_end(struct xs_handle *h, xs_transaction_t t,
1158 bool abort)
1159 {
1160 char abortstr[2];
1161
1162 if (abort)
1163 strcpy(abortstr, "F");
1164 else
1165 strcpy(abortstr, "T");
1166
1167 return xs_bool(xs_single(h, t, XS_TRANSACTION_END, abortstr, NULL));
1168 }
1169
1170 /* Introduce a new domain.
1171 * This tells the store daemon about a shared memory page and event channel
1172 * associated with a domain: the domain uses these to communicate.
1173 */
xs_introduce_domain(struct xs_handle * h,unsigned int domid,unsigned long mfn,unsigned int eventchn)1174 bool xs_introduce_domain(struct xs_handle *h,
1175 unsigned int domid, unsigned long mfn,
1176 unsigned int eventchn)
1177 {
1178 char domid_str[MAX_STRLEN(domid)];
1179 char mfn_str[MAX_STRLEN(mfn)];
1180 char eventchn_str[MAX_STRLEN(eventchn)];
1181 struct iovec iov[3];
1182
1183 snprintf(domid_str, sizeof(domid_str), "%u", domid);
1184 snprintf(mfn_str, sizeof(mfn_str), "%lu", mfn);
1185 snprintf(eventchn_str, sizeof(eventchn_str), "%u", eventchn);
1186
1187 iov[0].iov_base = domid_str;
1188 iov[0].iov_len = strlen(domid_str) + 1;
1189 iov[1].iov_base = mfn_str;
1190 iov[1].iov_len = strlen(mfn_str) + 1;
1191 iov[2].iov_base = eventchn_str;
1192 iov[2].iov_len = strlen(eventchn_str) + 1;
1193
1194 return xs_bool(xs_talkv(h, XBT_NULL, XS_INTRODUCE, iov,
1195 ARRAY_SIZE(iov), NULL));
1196 }
1197
xs_set_target(struct xs_handle * h,unsigned int domid,unsigned int target)1198 bool xs_set_target(struct xs_handle *h,
1199 unsigned int domid, unsigned int target)
1200 {
1201 char domid_str[MAX_STRLEN(domid)];
1202 char target_str[MAX_STRLEN(target)];
1203 struct iovec iov[2];
1204
1205 snprintf(domid_str, sizeof(domid_str), "%u", domid);
1206 snprintf(target_str, sizeof(target_str), "%u", target);
1207
1208 iov[0].iov_base = domid_str;
1209 iov[0].iov_len = strlen(domid_str) + 1;
1210 iov[1].iov_base = target_str;
1211 iov[1].iov_len = strlen(target_str) + 1;
1212
1213 return xs_bool(xs_talkv(h, XBT_NULL, XS_SET_TARGET, iov,
1214 ARRAY_SIZE(iov), NULL));
1215 }
1216
single_with_domid(struct xs_handle * h,enum xsd_sockmsg_type type,unsigned int domid)1217 static void * single_with_domid(struct xs_handle *h,
1218 enum xsd_sockmsg_type type,
1219 unsigned int domid)
1220 {
1221 char domid_str[MAX_STRLEN(domid)];
1222
1223 snprintf(domid_str, sizeof(domid_str), "%u", domid);
1224
1225 return xs_single(h, XBT_NULL, type, domid_str, NULL);
1226 }
1227
xs_release_domain(struct xs_handle * h,unsigned int domid)1228 bool xs_release_domain(struct xs_handle *h, unsigned int domid)
1229 {
1230 return xs_bool(single_with_domid(h, XS_RELEASE, domid));
1231 }
1232
1233 /* clear the shutdown bit for the given domain */
xs_resume_domain(struct xs_handle * h,unsigned int domid)1234 bool xs_resume_domain(struct xs_handle *h, unsigned int domid)
1235 {
1236 return xs_bool(single_with_domid(h, XS_RESUME, domid));
1237 }
1238
xs_get_domain_path(struct xs_handle * h,unsigned int domid)1239 char *xs_get_domain_path(struct xs_handle *h, unsigned int domid)
1240 {
1241 char domid_str[MAX_STRLEN(domid)];
1242
1243 snprintf(domid_str, sizeof(domid_str), "%u", domid);
1244
1245 return xs_single(h, XBT_NULL, XS_GET_DOMAIN_PATH, domid_str, NULL);
1246 }
1247
/* Is child equal to parent, or a path below it?
 * "/a" is a subpath of "/a" and "/a/b", but not of "/ab".
 */
bool xs_path_is_subpath(const char *parent, const char *child)
{
	size_t plen = strlen(parent);

	/* child must start with parent (strncmp stops at either NUL). */
	if (strncmp(child, parent, plen) != 0)
		return false;

	/* The match must end exactly at a path-component boundary. */
	return child[plen] == '\0' || child[plen] == '/';
}
1264
xs_is_domain_introduced(struct xs_handle * h,unsigned int domid)1265 bool xs_is_domain_introduced(struct xs_handle *h, unsigned int domid)
1266 {
1267 char *domain = single_with_domid(h, XS_IS_DOMAIN_INTRODUCED, domid);
1268 bool rc = false;
1269
1270 if (!domain)
1271 return rc;
1272
1273 rc = strcmp("F", domain) != 0;
1274
1275 free(domain);
1276 return rc;
1277 }
1278
xs_suspend_evtchn_port(int domid)1279 int xs_suspend_evtchn_port(int domid)
1280 {
1281 char path[128];
1282 char *portstr;
1283 int port;
1284 unsigned int plen;
1285 struct xs_handle *xs;
1286
1287 xs = xs_daemon_open();
1288 if (!xs)
1289 return -1;
1290
1291 sprintf(path, "/local/domain/%d/device/suspend/event-channel", domid);
1292 portstr = xs_read(xs, XBT_NULL, path, &plen);
1293 xs_daemon_close(xs);
1294
1295 if (!portstr || !plen) {
1296 port = -1;
1297 goto out;
1298 }
1299
1300 port = atoi(portstr);
1301
1302 out:
1303 free(portstr);
1304 return port;
1305 }
1306
xs_control_command(struct xs_handle * h,const char * cmd,void * data,unsigned int len)1307 char *xs_control_command(struct xs_handle *h, const char *cmd,
1308 void *data, unsigned int len)
1309 {
1310 struct iovec iov[2];
1311
1312 iov[0].iov_base = (void *)cmd;
1313 iov[0].iov_len = strlen(cmd) + 1;
1314 iov[1].iov_base = data;
1315 iov[1].iov_len = len;
1316
1317 return xs_talkv(h, XBT_NULL, XS_CONTROL, iov,
1318 ARRAY_SIZE(iov), NULL);
1319 }
1320
/* Historical name for xs_control_command(); kept for API compatibility. */
char *xs_debug_command(struct xs_handle *h, const char *cmd,
		       void *data, unsigned int len)
{
	return xs_control_command(h, cmd, data, len);
}
1326
/* Read one complete message from the daemon and queue it on the
 * appropriate list (watch events on watch_list, everything else on
 * reply_list). Returns 0 on success, -1 on failure with errno set.
 */
static int read_message(struct xs_handle *h, int nonblocking)
{
	/* IMPORTANT: It is forbidden to call this function without
	 * acquiring the request lock and checking that h->read_thr_exists
	 * is false. See "Lock discipline" in struct xs_handle, above. */

	/* If nonblocking==1, this function will always read either
	 * nothing, returning -1 and setting errno==EAGAIN, or we read
	 * whole amount requested. Ie as soon as we have the start of
	 * the message we block until we get all of it.
	 */

	struct xs_stored_msg *msg = NULL;
	char *body = NULL;
	int saved_errno = 0;
	int ret = -1;

	/* Allocate message structure and read the message header.
	 * cleanup_push_heap arranges for msg to be freed if the thread is
	 * cancelled inside a cancellation point below; it MUST be balanced
	 * by the matching cleanup_pop_heap at the end of this function. */
	msg = malloc(sizeof(*msg));
	if (msg == NULL)
		goto error;
	cleanup_push_heap(msg);
	if (!read_all(h->fd, &msg->hdr, sizeof(msg->hdr), nonblocking)) { /* Cancellation point */
		saved_errno = errno;
		goto error_freemsg;
	}

	/* Sanity check message body length: the header's len field comes
	 * off the wire, so cap it before using it as an allocation size. */
	if (msg->hdr.len > XENSTORE_PAYLOAD_MAX) {
		saved_errno = E2BIG;
		goto error_freemsg;
	}

	/* Allocate and read the message body (+1 for the NUL terminator
	 * appended below, so the body can be treated as a C string). */
	body = msg->body = malloc(msg->hdr.len + 1);
	if (body == NULL)
		goto error_freemsg;
	cleanup_push_heap(body);
	if (!read_all(h->fd, body, msg->hdr.len, 0)) { /* Cancellation point */
		saved_errno = errno;
		goto error_freebody;
	}

	body[msg->hdr.len] = '\0';

	if (msg->hdr.type == XS_WATCH_EVENT) {
		mutex_lock(&h->watch_mutex);
		cleanup_push(pthread_mutex_unlock, &h->watch_mutex);

		/* Kick users out of their select() loop.  A single byte is
		 * written to the watch pipe only on the empty->non-empty
		 * transition; retry until the write succeeds. */
		if (XEN_TAILQ_EMPTY(&h->watch_list) &&
		    (h->watch_pipe[1] != -1))
			while (write(h->watch_pipe[1], body, 1) != 1) /* Cancellation point */
				continue;

		XEN_TAILQ_INSERT_TAIL(&h->watch_list, msg, list);

		condvar_signal(&h->watch_condvar);

		cleanup_pop(1);
	} else {
		mutex_lock(&h->reply_mutex);

		/* There should only ever be one response pending! */
		if (!XEN_TAILQ_EMPTY(&h->reply_list)) {
			mutex_unlock(&h->reply_mutex);
			saved_errno = EEXIST;
			goto error_freebody;
		}

		XEN_TAILQ_INSERT_TAIL(&h->reply_list, msg, list);
		condvar_signal(&h->reply_condvar);

		mutex_unlock(&h->reply_mutex);
	}

	ret = 0;

	/* On success (ret == 0) the queued msg/body must survive, so the
	 * cleanup handlers are popped without executing; on error they free. */
 error_freebody:
	cleanup_pop_heap(ret == -1, body);
 error_freemsg:
	cleanup_pop_heap(ret == -1, msg);
 error:
	errno = saved_errno;

	return ret;
}
1414
/* Path of the xenstore daemon's socket (from xenstore_daemon_path()). */
const char *xs_daemon_socket(void)
{
	return xenstore_daemon_path();
}
1419
/* Read-only socket path; nowadays identical to xs_daemon_socket(). */
const char *xs_daemon_socket_ro(void)
{
	return xs_daemon_socket();
}
1424
/* Runtime directory of the xenstore daemon (see xenstore_daemon_rundir()). */
const char *xs_daemon_rundir(void)
{
	return xenstore_daemon_rundir();
}
1429
/* Parse NUL-separated permission strings into perms[0..num-1];
 * thin wrapper around the shared xenstore_strings_to_perms() helper.
 */
bool xs_strings_to_perms(struct xs_permissions *perms, unsigned int num,
			 const char *strings)
{
	return xenstore_strings_to_perms(perms, num, strings);
}
1435
1436 #ifdef USE_PTHREAD
read_thread(void * arg)1437 static void *read_thread(void *arg)
1438 {
1439 struct xs_handle *h = arg;
1440 int fd;
1441
1442 while (read_message(h, 0) != -1)
1443 continue;
1444
1445 /* An error return from read_message leaves the socket in an undefined
1446 * state; we might have read only the header and not the message after
1447 * it, or (more commonly) the other end has closed the connection.
1448 * Since further communication is unsafe, close the socket.
1449 */
1450 fd = h->fd;
1451 h->fd = -1;
1452 close(fd);
1453
1454 /* wake up all waiters */
1455 pthread_mutex_lock(&h->reply_mutex);
1456 pthread_cond_broadcast(&h->reply_condvar);
1457 pthread_mutex_unlock(&h->reply_mutex);
1458
1459 pthread_mutex_lock(&h->watch_mutex);
1460 pthread_cond_broadcast(&h->watch_condvar);
1461 pthread_mutex_unlock(&h->watch_mutex);
1462
1463 return NULL;
1464 }
1465 #endif
1466
1467 /*
1468 * Local variables:
1469 * mode: C
1470 * c-file-style: "linux"
1471 * indent-tabs-mode: t
1472 * c-basic-offset: 8
1473 * tab-width: 8
1474 * End:
1475 */
1476