1 /*
2 Xen Store Daemon interface providing simple tree-like database.
3 Copyright (C) 2005 Rusty Russell IBM Corporation
4
5 This library is free software; you can redistribute it and/or
6 modify it under the terms of the GNU Lesser General Public
7 License as published by the Free Software Foundation; either
8 version 2.1 of the License, or (at your option) any later version.
9
10 This library is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 Lesser General Public License for more details.
14
15 You should have received a copy of the GNU Lesser General Public
16 License along with this library; If not, see <http://www.gnu.org/licenses/>.
17 */
18
19 #include <sys/types.h>
20 #include <sys/stat.h>
21 #include <fcntl.h>
22 #include <sys/uio.h>
23 #include <sys/socket.h>
24 #include <sys/un.h>
25 #include <string.h>
26 #include <unistd.h>
27 #include <stdbool.h>
28 #include <stdlib.h>
29 #include <assert.h>
30 #include <stdio.h>
31 #include <signal.h>
32 #include <stdint.h>
33 #include <errno.h>
34 #include "xenstore.h"
35 #include "list.h"
36 #include "utils.h"
37
38 #include <xentoolcore_internal.h>
39
40 struct xs_stored_msg {
41 struct list_head list;
42 struct xsd_sockmsg hdr;
43 char *body;
44 };
45
46 #ifdef USE_PTHREAD
47
48 #include <pthread.h>
49
50 struct xs_handle {
51 /* Communications channel to xenstore daemon. */
52 int fd;
53 Xentoolcore__Active_Handle tc_ah; /* for restrict */
54
55 /*
56 * A read thread which pulls messages off the comms channel and
57 * signals waiters.
58 */
59 pthread_t read_thr;
60 int read_thr_exists;
61
62 /*
63 * A list of fired watch messages, protected by a mutex. Users can
64 * wait on the condition variable until a watch is pending.
65 */
66 struct list_head watch_list;
67 pthread_mutex_t watch_mutex;
68 pthread_cond_t watch_condvar;
69
70 /* Clients can select() on this pipe to wait for a watch to fire. */
71 int watch_pipe[2];
72 /* Should xs_unwatch() filter out pending events for the removed watch? */
73 bool unwatch_filter;
74
75 /*
76 * A list of replies. Currently only one will ever be outstanding
77 * because we serialise requests. The requester can wait on the
78 * condition variable for its response.
79 */
80 struct list_head reply_list;
81 pthread_mutex_t reply_mutex;
82 pthread_cond_t reply_condvar;
83
84 /* One request at a time. */
85 pthread_mutex_t request_mutex;
86
87 /* Lock discipline:
88 * Only holder of the request lock may write to h->fd.
89 * Only holder of the request lock may access read_thr_exists.
90 * If read_thr_exists==0, only holder of request lock may read h->fd;
91 * If read_thr_exists==1, only the read thread may read h->fd.
92 * Only holder of the reply lock may access reply_list.
93 * Only holder of the watch lock may access watch_list.
94 * Lock hierarchy:
95 * The order in which to acquire locks is
96 * request_mutex
97 * reply_mutex
98 * watch_mutex
99 */
100 };
101
102 #define mutex_lock(m) pthread_mutex_lock(m)
103 #define mutex_unlock(m) pthread_mutex_unlock(m)
104 #define condvar_signal(c) pthread_cond_signal(c)
105 #define condvar_wait(c,m) pthread_cond_wait(c,m)
106 #define cleanup_push(f, a) \
107 pthread_cleanup_push((void (*)(void *))(f), (void *)(a))
108 /*
109 * Some definitions of pthread_cleanup_pop() are a macro starting with an
110 * end-brace. GCC then complains if we immediately precede that with a label.
111 * Hence we insert a dummy statement to appease the compiler in this situation.
112 */
113 #define cleanup_pop(run) ((void)0); pthread_cleanup_pop(run)
114
115 #define read_thread_exists(h) (h->read_thr_exists)
116
117 /* Because pthread_cleanup_p* are not available when USE_PTHREAD is
118 * disabled, use these macros which convert appropriately. */
119 #define cleanup_push_heap(p) cleanup_push(free, p)
120 #define cleanup_pop_heap(run, p) cleanup_pop((run))
121
122 static void *read_thread(void *arg);
123
124 #else /* !defined(USE_PTHREAD) */
125
126 struct xs_handle {
127 int fd;
128 Xentoolcore__Active_Handle tc_ah; /* for restrict */
129 struct list_head reply_list;
130 struct list_head watch_list;
131 /* Clients can select() on this pipe to wait for a watch to fire. */
132 int watch_pipe[2];
133 /* Should xs_unwatch() filter out pending events for the removed watch? */
134 bool unwatch_filter;
135 };
136
137 #define mutex_lock(m) ((void)0)
138 #define mutex_unlock(m) ((void)0)
139 #define condvar_signal(c) ((void)0)
140 #define condvar_wait(c,m) ((void)0)
141 #define cleanup_push(f, a) ((void)0)
142 #define cleanup_pop(run) ((void)0)
143 #define read_thread_exists(h) (0)
144
145 #define cleanup_push_heap(p) ((void)0)
146 #define cleanup_pop_heap(run, p) do { if ((run)) free(p); } while(0)
147
148 #endif
149
150 static int read_message(struct xs_handle *h, int nonblocking);
151
152 static bool setnonblock(int fd, int nonblock) {
153 int flags = fcntl(fd, F_GETFL);
154 if (flags == -1)
155 return false;
156
157 if (nonblock)
158 flags |= O_NONBLOCK;
159 else
160 flags &= ~O_NONBLOCK;
161
162 if (fcntl(fd, F_SETFL, flags) == -1)
163 return false;
164
165 return true;
166 }
167
168 int xs_fileno(struct xs_handle *h)
169 {
170 char c = 0;
171
172 mutex_lock(&h->watch_mutex);
173
174 if ((h->watch_pipe[0] == -1) && (pipe(h->watch_pipe) != -1)) {
175 /* Kick things off if the watch list is already non-empty. */
176 if (!list_empty(&h->watch_list))
177 while (write(h->watch_pipe[1], &c, 1) != 1)
178 continue;
179 }
180
181 mutex_unlock(&h->watch_mutex);
182
183 return h->watch_pipe[0];
184 }
185
186 static int get_socket(const char *connect_to)
187 {
188 struct sockaddr_un addr;
189 int sock, saved_errno, flags;
190
191 sock = socket(PF_UNIX, SOCK_STREAM, 0);
192 if (sock < 0)
193 return -1;
194
195 if ((flags = fcntl(sock, F_GETFD)) < 0)
196 goto error;
197 flags |= FD_CLOEXEC;
198 if (fcntl(sock, F_SETFD, flags) < 0)
199 goto error;
200
201 addr.sun_family = AF_UNIX;
202 if(strlen(connect_to) >= sizeof(addr.sun_path)) {
203 errno = EINVAL;
204 goto error;
205 }
206 strcpy(addr.sun_path, connect_to);
207
208 if (connect(sock, (struct sockaddr *)&addr, sizeof(addr)) != 0)
209 goto error;
210
211 return sock;
212
213 error:
214 saved_errno = errno;
215 close(sock);
216 errno = saved_errno;
217 return -1;
218 }
219
220 static int get_dev(const char *connect_to)
221 {
222 /* We cannot open read-only because requests are writes */
223 return open(connect_to, O_RDWR);
224 }
225
226 static int all_restrict_cb(Xentoolcore__Active_Handle *ah, domid_t domid) {
227 struct xs_handle *h = CONTAINER_OF(ah, *h, tc_ah);
228 return xentoolcore__restrict_by_dup2_null(h->fd);
229 }
230
231 static struct xs_handle *get_handle(const char *connect_to)
232 {
233 struct stat buf;
234 struct xs_handle *h = NULL;
235 int saved_errno;
236
237 h = malloc(sizeof(*h));
238 if (h == NULL)
239 goto err;
240
241 memset(h, 0, sizeof(*h));
242 h->fd = -1;
243
244 h->tc_ah.restrict_callback = all_restrict_cb;
245 xentoolcore__register_active_handle(&h->tc_ah);
246
247 if (stat(connect_to, &buf) != 0)
248 goto err;
249
250 if (S_ISSOCK(buf.st_mode))
251 h->fd = get_socket(connect_to);
252 else
253 h->fd = get_dev(connect_to);
254
255 if (h->fd == -1)
256 goto err;
257
258 INIT_LIST_HEAD(&h->reply_list);
259 INIT_LIST_HEAD(&h->watch_list);
260
261 /* Watch pipe is allocated on demand in xs_fileno(). */
262 h->watch_pipe[0] = h->watch_pipe[1] = -1;
263
264 h->unwatch_filter = false;
265
266 #ifdef USE_PTHREAD
267 pthread_mutex_init(&h->watch_mutex, NULL);
268 pthread_cond_init(&h->watch_condvar, NULL);
269
270 pthread_mutex_init(&h->reply_mutex, NULL);
271 pthread_cond_init(&h->reply_condvar, NULL);
272
273 pthread_mutex_init(&h->request_mutex, NULL);
274 #endif
275
276 return h;
277
278 err:
279 saved_errno = errno;
280
281 if (h) {
282 xentoolcore__deregister_active_handle(&h->tc_ah);
283 if (h->fd >= 0)
284 close(h->fd);
285 }
286 free(h);
287
288 errno = saved_errno;
289 return NULL;
290 }
291
292 struct xs_handle *xs_daemon_open(void)
293 {
294 return xs_open(0);
295 }
296
297 struct xs_handle *xs_daemon_open_readonly(void)
298 {
299 return xs_open(XS_OPEN_READONLY);
300 }
301
302 struct xs_handle *xs_domain_open(void)
303 {
304 return xs_open(0);
305 }
306
307 struct xs_handle *xs_open(unsigned long flags)
308 {
309 struct xs_handle *xsh = NULL;
310
311 if (flags & XS_OPEN_READONLY)
312 xsh = get_handle(xs_daemon_socket_ro());
313 else
314 xsh = get_handle(xs_daemon_socket());
315
316 if (!xsh && !(flags & XS_OPEN_SOCKETONLY))
317 xsh = get_handle(xs_domain_dev());
318
319 if (xsh && (flags & XS_UNWATCH_FILTER))
320 xsh->unwatch_filter = true;
321
322 return xsh;
323 }
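/*
 * Illustrative usage sketch (not part of the library): open a handle and
 * close it again.  xs_open() prefers the daemon socket and, unless
 * XS_OPEN_SOCKETONLY is given, falls back to the kernel device.  Error
 * handling is abbreviated; the caller's return value is an assumption.
 *
 *   struct xs_handle *xsh = xs_open(0);
 *   if (!xsh)
 *           return -1;          // errno describes the failure
 *   ... use xsh ...
 *   xs_close(xsh);
 */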
324
325 static void close_free_msgs(struct xs_handle *h) {
326 struct xs_stored_msg *msg, *tmsg;
327
328 list_for_each_entry_safe(msg, tmsg, &h->reply_list, list) {
329 free(msg->body);
330 free(msg);
331 }
332
333 list_for_each_entry_safe(msg, tmsg, &h->watch_list, list) {
334 free(msg->body);
335 free(msg);
336 }
337 }
338
339 static void close_fds_free(struct xs_handle *h) {
340 if (h->watch_pipe[0] != -1) {
341 close(h->watch_pipe[0]);
342 close(h->watch_pipe[1]);
343 }
344
345 xentoolcore__deregister_active_handle(&h->tc_ah);
346 close(h->fd);
347
348 free(h);
349 }
350
351 void xs_daemon_destroy_postfork(struct xs_handle *h)
352 {
353 close_free_msgs(h);
354 close_fds_free(h);
355 }
356
357 void xs_daemon_close(struct xs_handle *h)
358 {
359 #ifdef USE_PTHREAD
360 if (h->read_thr_exists) {
361 pthread_cancel(h->read_thr);
362 pthread_join(h->read_thr, NULL);
363 }
364 #endif
365
366 mutex_lock(&h->request_mutex);
367 mutex_lock(&h->reply_mutex);
368 mutex_lock(&h->watch_mutex);
369
370 close_free_msgs(h);
371
372 mutex_unlock(&h->request_mutex);
373 mutex_unlock(&h->reply_mutex);
374 mutex_unlock(&h->watch_mutex);
375
376 close_fds_free(h);
377 }
378
379 void xs_close(struct xs_handle* xsh)
380 {
381 if (xsh)
382 xs_daemon_close(xsh);
383 }
384
385 static bool read_all(int fd, void *data, unsigned int len, int nonblocking)
386 /* With nonblocking, reads either everything requested,
387 * or nothing. */
388 {
389 if (!len)
390 return true;
391
392 if (nonblocking && !setnonblock(fd, 1))
393 return false;
394
395 while (len) {
396 int done;
397
398 done = read(fd, data, len);
399 if (done < 0) {
400 if (errno == EINTR)
401 continue;
402 goto out_false;
403 }
404 if (done == 0) {
405 /* It closed fd on us? EBADF is appropriate. */
406 errno = EBADF;
407 goto out_false;
408 }
409 data += done;
410 len -= done;
411
412 if (nonblocking) {
413 nonblocking = 0;
414 if (!setnonblock(fd, 0))
415 goto out_false;
416 }
417 }
418
419 return true;
420
421 out_false:
422 if (nonblocking)
423 setnonblock(fd, 0);
424 return false;
425 }
426
427 #ifdef XSTEST
428 #define read_all read_all_choice
429 #define xs_write_all write_all_choice
430 #endif
431
432 static int get_error(const char *errorstring)
433 {
434 unsigned int i;
435
436 for (i = 0; !streq(errorstring, xsd_errors[i].errstring); i++)
437 if (i == ARRAY_SIZE(xsd_errors) - 1)
438 return EINVAL;
439 return xsd_errors[i].errnum;
440 }
441
442 /* Adds extra nul terminator, because we generally (always?) hold strings. */
443 static void *read_reply(
444 struct xs_handle *h, enum xsd_sockmsg_type *type, unsigned int *len)
445 {
446 struct xs_stored_msg *msg;
447 char *body;
448 int read_from_thread;
449
450 read_from_thread = read_thread_exists(h);
451
452 /* Read from comms channel ourselves if there is no reader thread. */
453 if (!read_from_thread && (read_message(h, 0) == -1))
454 return NULL;
455
456 mutex_lock(&h->reply_mutex);
457 #ifdef USE_PTHREAD
458 while (list_empty(&h->reply_list) && read_from_thread && h->fd != -1)
459 condvar_wait(&h->reply_condvar, &h->reply_mutex);
460 #endif
461 if (list_empty(&h->reply_list)) {
462 mutex_unlock(&h->reply_mutex);
463 errno = EINVAL;
464 return NULL;
465 }
466 msg = list_top(&h->reply_list, struct xs_stored_msg, list);
467 list_del(&msg->list);
468 assert(list_empty(&h->reply_list));
469 mutex_unlock(&h->reply_mutex);
470
471 *type = msg->hdr.type;
472 if (len)
473 *len = msg->hdr.len;
474 body = msg->body;
475
476 free(msg);
477
478 return body;
479 }
480
481 /* Send message to xs, get malloc'ed reply. NULL and set errno on error. */
482 static void *xs_talkv(struct xs_handle *h, xs_transaction_t t,
483 enum xsd_sockmsg_type type,
484 const struct iovec *iovec,
485 unsigned int num_vecs,
486 unsigned int *len)
487 {
488 struct xsd_sockmsg msg;
489 void *ret = NULL;
490 int saved_errno;
491 unsigned int i;
492 struct sigaction ignorepipe, oldact;
493
494 msg.tx_id = t;
495 msg.req_id = 0;
496 msg.type = type;
497 msg.len = 0;
498 for (i = 0; i < num_vecs; i++)
499 msg.len += iovec[i].iov_len;
500
501 if (msg.len > XENSTORE_PAYLOAD_MAX) {
502 errno = E2BIG;
503 return 0;
504 }
505
506 ignorepipe.sa_handler = SIG_IGN;
507 sigemptyset(&ignorepipe.sa_mask);
508 ignorepipe.sa_flags = 0;
509 sigaction(SIGPIPE, &ignorepipe, &oldact);
510
511 mutex_lock(&h->request_mutex);
512
513 if (!xs_write_all(h->fd, &msg, sizeof(msg)))
514 goto fail;
515
516 for (i = 0; i < num_vecs; i++)
517 if (!xs_write_all(h->fd, iovec[i].iov_base, iovec[i].iov_len))
518 goto fail;
519
520 ret = read_reply(h, &msg.type, len);
521 if (!ret)
522 goto fail;
523
524 mutex_unlock(&h->request_mutex);
525
526 sigaction(SIGPIPE, &oldact, NULL);
527 if (msg.type == XS_ERROR) {
528 saved_errno = get_error(ret);
529 free(ret);
530 errno = saved_errno;
531 return NULL;
532 }
533
534 if (msg.type != type) {
535 free(ret);
536 saved_errno = EBADF;
537 goto close_fd;
538 }
539 return ret;
540
541 fail:
542 /* We're in a bad state, so close fd. */
543 saved_errno = errno;
544 mutex_unlock(&h->request_mutex);
545 sigaction(SIGPIPE, &oldact, NULL);
546 close_fd:
547 close(h->fd);
548 h->fd = -1;
549 errno = saved_errno;
550 return NULL;
551 }
552
553 /* free(), but don't change errno. */
554 static void free_no_errno(void *p)
555 {
556 int saved_errno = errno;
557 free(p);
558 errno = saved_errno;
559 }
560
561 /* Simplified version of xs_talkv: single message. */
562 static void *xs_single(struct xs_handle *h, xs_transaction_t t,
563 enum xsd_sockmsg_type type,
564 const char *string,
565 unsigned int *len)
566 {
567 struct iovec iovec;
568
569 iovec.iov_base = (void *)string;
570 iovec.iov_len = strlen(string) + 1;
571 return xs_talkv(h, t, type, &iovec, 1, len);
572 }
573
574 static bool xs_bool(char *reply)
575 {
576 if (!reply)
577 return false;
578 free(reply);
579 return true;
580 }
581
582 static char **xs_directory_common(char *strings, unsigned int len,
583 unsigned int *num)
584 {
585 char *p, **ret;
586
587 /* Count the strings. */
588 *num = xs_count_strings(strings, len);
589
590 /* Transfer to one big alloc for easy freeing. */
591 ret = malloc(*num * sizeof(char *) + len);
592 if (!ret) {
593 free_no_errno(strings);
594 return NULL;
595 }
596 memcpy(&ret[*num], strings, len);
597 free_no_errno(strings);
598
599 strings = (char *)&ret[*num];
600 for (p = strings, *num = 0; p < strings + len; p += strlen(p) + 1)
601 ret[(*num)++] = p;
602 return ret;
603 }
604
605 static char **xs_directory_part(struct xs_handle *h, xs_transaction_t t,
606 const char *path, unsigned int *num)
607 {
608 unsigned int off, result_len;
609 char gen[24], offstr[8];
610 struct iovec iovec[2];
611 char *result = NULL, *strings = NULL;
612
613 memset(gen, 0, sizeof(gen));
614 iovec[0].iov_base = (void *)path;
615 iovec[0].iov_len = strlen(path) + 1;
616
617 for (off = 0;;) {
618 snprintf(offstr, sizeof(offstr), "%u", off);
619 iovec[1].iov_base = (void *)offstr;
620 iovec[1].iov_len = strlen(offstr) + 1;
621 result = xs_talkv(h, t, XS_DIRECTORY_PART, iovec, 2,
622 &result_len);
623
624 /* If XS_DIRECTORY_PART isn't supported, report E2BIG instead. */
625 if (!result) {
626 if (errno == ENOSYS)
627 errno = E2BIG;
628 return NULL;
629 }
630
631 if (off) {
632 if (strcmp(gen, result)) {
633 free(result);
634 free(strings);
635 strings = NULL;
636 off = 0;
637 continue;
638 }
639 } else
640 strncpy(gen, result, sizeof(gen) - 1);
641
642 result_len -= strlen(result) + 1;
643 strings = realloc(strings, off + result_len);
644 memcpy(strings + off, result + strlen(result) + 1, result_len);
645 free(result);
646 off += result_len;
647
648 if (off <= 1 || strings[off - 2] == 0)
649 break;
650 }
651
652 if (off > 1)
653 off--;
654
655 return xs_directory_common(strings, off, num);
656 }
657
658 char **xs_directory(struct xs_handle *h, xs_transaction_t t,
659 const char *path, unsigned int *num)
660 {
661 char *strings;
662 unsigned int len;
663
664 strings = xs_single(h, t, XS_DIRECTORY, path, &len);
665 if (!strings) {
666 if (errno != E2BIG)
667 return NULL;
668 return xs_directory_part(h, t, path, num);
669 }
670
671 return xs_directory_common(strings, len, num);
672 }
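/*
 * Illustrative sketch: list the children of a node ("/local/domain/0" is
 * just an example path).  xs_directory() returns a single allocation that
 * holds both the pointer array and the strings, so one free() releases it.
 *
 *   unsigned int i, num;
 *   char **entries = xs_directory(xsh, XBT_NULL, "/local/domain/0", &num);
 *   if (entries) {
 *           for (i = 0; i < num; i++)
 *                   printf("%s\n", entries[i]);
 *           free(entries);
 *   }
 */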
673
674 /* Get the value of a single file, nul terminated.
675 * Returns a malloced value: call free() on it after use.
676 * len indicates length in bytes, not including the nul.
677 */
678 void *xs_read(struct xs_handle *h, xs_transaction_t t,
679 const char *path, unsigned int *len)
680 {
681 return xs_single(h, t, XS_READ, path, len);
682 }
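/*
 * Illustrative sketch: read a single value outside a transaction.  The
 * returned buffer is malloc'ed and nul-terminated; len (optional) receives
 * the length excluding the nul.  The path below is just an example.
 *
 *   unsigned int len;
 *   char *val = xs_read(xsh, XBT_NULL, "/local/domain/0/name", &len);
 *   if (val) {
 *           printf("name = %s (%u bytes)\n", val, len);
 *           free(val);
 *   }
 */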
683
684 /* Write the value of a single file.
685 * Returns false on failure.
686 */
687 bool xs_write(struct xs_handle *h, xs_transaction_t t,
688 const char *path, const void *data, unsigned int len)
689 {
690 struct iovec iovec[2];
691
692 iovec[0].iov_base = (void *)path;
693 iovec[0].iov_len = strlen(path) + 1;
694 iovec[1].iov_base = (void *)data;
695 iovec[1].iov_len = len;
696
697 return xs_bool(xs_talkv(h, t, XS_WRITE, iovec,
698 ARRAY_SIZE(iovec), NULL));
699 }
700
701 /* Create a new directory.
702 * Returns false on failure, or success if it already exists.
703 */
704 bool xs_mkdir(struct xs_handle *h, xs_transaction_t t,
705 const char *path)
706 {
707 return xs_bool(xs_single(h, t, XS_MKDIR, path, NULL));
708 }
709
710 /* Destroy a file or directory (directories must be empty).
711 * Returns false on failure, or success if it doesn't exist.
712 */
713 bool xs_rm(struct xs_handle *h, xs_transaction_t t,
714 const char *path)
715 {
716 return xs_bool(xs_single(h, t, XS_RM, path, NULL));
717 }
718
719 /* Get permissions of node (first element is owner).
720 * Returns malloced array, or NULL: call free() after use.
721 */
722 struct xs_permissions *xs_get_permissions(struct xs_handle *h,
723 xs_transaction_t t,
724 const char *path, unsigned int *num)
725 {
726 char *strings;
727 unsigned int len;
728 struct xs_permissions *ret;
729
730 strings = xs_single(h, t, XS_GET_PERMS, path, &len);
731 if (!strings)
732 return NULL;
733
734 /* Count the strings: each one perms then domid. */
735 *num = xs_count_strings(strings, len);
736
737 /* Transfer to one big alloc for easy freeing. */
738 ret = malloc(*num * sizeof(struct xs_permissions));
739 if (!ret) {
740 free_no_errno(strings);
741 return NULL;
742 }
743
744 if (!xs_strings_to_perms(ret, *num, strings)) {
745 free_no_errno(ret);
746 ret = NULL;
747 }
748
749 free(strings);
750 return ret;
751 }
752
753 /* Set permissions of node (must be owner).
754 * Returns false on failure.
755 */
756 bool xs_set_permissions(struct xs_handle *h,
757 xs_transaction_t t,
758 const char *path,
759 struct xs_permissions *perms,
760 unsigned int num_perms)
761 {
762 unsigned int i;
763 struct iovec iov[1+num_perms];
764
765 iov[0].iov_base = (void *)path;
766 iov[0].iov_len = strlen(path) + 1;
767
768 for (i = 0; i < num_perms; i++) {
769 char buffer[MAX_STRLEN(unsigned int)+1];
770
771 if (!xs_perm_to_string(&perms[i], buffer, sizeof(buffer)))
772 goto unwind;
773
774 iov[i+1].iov_base = strdup(buffer);
775 iov[i+1].iov_len = strlen(buffer) + 1;
776 if (!iov[i+1].iov_base)
777 goto unwind;
778 }
779
780 if (!xs_bool(xs_talkv(h, t, XS_SET_PERMS, iov, 1+num_perms, NULL)))
781 goto unwind;
782 for (i = 0; i < num_perms; i++)
783 free(iov[i+1].iov_base);
784 return true;
785
786 unwind:
787 num_perms = i;
788 for (i = 0; i < num_perms; i++)
789 free_no_errno(iov[i+1].iov_base);
790 return false;
791 }
792
793 /* Watch a node for changes (poll on fd to detect, or call read_watch()).
794 * When the node (or any child) changes, fd will become readable.
795 * Token is returned when watch is read, to allow matching.
796 * Returns false on failure.
797 */
798 bool xs_watch(struct xs_handle *h, const char *path, const char *token)
799 {
800 struct iovec iov[2];
801
802 #ifdef USE_PTHREAD
803 #define DEFAULT_THREAD_STACKSIZE (16 * 1024)
804 #define READ_THREAD_STACKSIZE \
805 ((DEFAULT_THREAD_STACKSIZE < PTHREAD_STACK_MIN) ? \
806 PTHREAD_STACK_MIN : DEFAULT_THREAD_STACKSIZE)
807
808 /* We dynamically create a reader thread on demand. */
809 mutex_lock(&h->request_mutex);
810 if (!h->read_thr_exists) {
811 sigset_t set, old_set;
812 pthread_attr_t attr;
813
814 if (pthread_attr_init(&attr) != 0) {
815 mutex_unlock(&h->request_mutex);
816 return false;
817 }
818 if (pthread_attr_setstacksize(&attr, READ_THREAD_STACKSIZE) != 0) {
819 pthread_attr_destroy(&attr);
820 mutex_unlock(&h->request_mutex);
821 return false;
822 }
823
824 sigfillset(&set);
825 pthread_sigmask(SIG_SETMASK, &set, &old_set);
826
827 if (pthread_create(&h->read_thr, &attr, read_thread, h) != 0) {
828 pthread_sigmask(SIG_SETMASK, &old_set, NULL);
829 pthread_attr_destroy(&attr);
830 mutex_unlock(&h->request_mutex);
831 return false;
832 }
833 h->read_thr_exists = 1;
834 pthread_sigmask(SIG_SETMASK, &old_set, NULL);
835 pthread_attr_destroy(&attr);
836 }
837 mutex_unlock(&h->request_mutex);
838 #endif
839
840 iov[0].iov_base = (void *)path;
841 iov[0].iov_len = strlen(path) + 1;
842 iov[1].iov_base = (void *)token;
843 iov[1].iov_len = strlen(token) + 1;
844
845 return xs_bool(xs_talkv(h, XBT_NULL, XS_WATCH, iov,
846 ARRAY_SIZE(iov), NULL));
847 }
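/*
 * Illustrative watch loop (a sketch, not part of the library): register a
 * watch, wait for it to fire by select()ing on xs_fileno(), then read the
 * event.  xs_read_watch() returns an array whose XS_WATCH_PATH and
 * XS_WATCH_TOKEN entries identify the firing watch; a single free()
 * releases it.  The path and token below are examples.
 *
 *   if (xs_watch(xsh, "/local/domain/0/name", "mytoken")) {
 *           int fd = xs_fileno(xsh);
 *           fd_set set;
 *           unsigned int num;
 *           char **ev;
 *
 *           FD_ZERO(&set);
 *           FD_SET(fd, &set);
 *           if (select(fd + 1, &set, NULL, NULL, NULL) > 0 &&
 *               FD_ISSET(fd, &set)) {
 *                   ev = xs_read_watch(xsh, &num);
 *                   if (ev) {
 *                           printf("fired: %s (token %s)\n",
 *                                  ev[XS_WATCH_PATH], ev[XS_WATCH_TOKEN]);
 *                           free(ev);
 *                   }
 *           }
 *           xs_unwatch(xsh, "/local/domain/0/name", "mytoken");
 *   }
 */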
848
849
850 /* Clear the pipe token if there are no more pending watches.
851 * The watch_mutex must already be held by the caller.
852 */
853 static void xs_maybe_clear_watch_pipe(struct xs_handle *h)
854 {
855 char c;
856
857 if (list_empty(&h->watch_list) && (h->watch_pipe[0] != -1))
858 while (read(h->watch_pipe[0], &c, 1) != 1)
859 continue;
860 }
861
862 /* Find out what node change was on (will block if nothing pending).
863 * Returns array of two pointers: path and token, or NULL.
864 * Call free() after use.
865 */
866 static char **read_watch_internal(struct xs_handle *h, unsigned int *num,
867 int nonblocking)
868 {
869 struct xs_stored_msg *msg;
870 char **ret, *strings;
871 unsigned int num_strings, i;
872
873 mutex_lock(&h->watch_mutex);
874
875 #ifdef USE_PTHREAD
876 /* Wait on the condition variable for a watch to fire.
877 * If the reader thread doesn't exist yet, then that's because
878 * we haven't called xs_watch. Presumably the application
879 * will do so later; in the meantime we just block.
880 */
881 while (list_empty(&h->watch_list) && h->fd != -1) {
882 if (nonblocking) {
883 mutex_unlock(&h->watch_mutex);
884 errno = EAGAIN;
885 return 0;
886 }
887 condvar_wait(&h->watch_condvar, &h->watch_mutex);
888 }
889 #else /* !defined(USE_PTHREAD) */
890 /* Read from comms channel ourselves if there are no threads
891 * and therefore no reader thread. */
892
893 assert(!read_thread_exists(h)); /* not threadsafe but worth a check */
894 if ((read_message(h, nonblocking) == -1))
895 return NULL;
896
897 #endif /* !defined(USE_PTHREAD) */
898
899 if (list_empty(&h->watch_list)) {
900 mutex_unlock(&h->watch_mutex);
901 errno = EINVAL;
902 return NULL;
903 }
904 msg = list_top(&h->watch_list, struct xs_stored_msg, list);
905 list_del(&msg->list);
906
907 xs_maybe_clear_watch_pipe(h);
908 mutex_unlock(&h->watch_mutex);
909
910 assert(msg->hdr.type == XS_WATCH_EVENT);
911
912 strings = msg->body;
913 num_strings = xs_count_strings(strings, msg->hdr.len);
914
915 ret = malloc(sizeof(char*) * num_strings + msg->hdr.len);
916 if (!ret) {
917 free_no_errno(strings);
918 free_no_errno(msg);
919 return NULL;
920 }
921
922 ret[0] = (char *)(ret + num_strings);
923 memcpy(ret[0], strings, msg->hdr.len);
924
925 free(strings);
926 free(msg);
927
928 for (i = 1; i < num_strings; i++)
929 ret[i] = ret[i - 1] + strlen(ret[i - 1]) + 1;
930
931 *num = num_strings;
932
933 return ret;
934 }
935
936 char **xs_check_watch(struct xs_handle *h)
937 {
938 unsigned int num;
939 char **ret;
940 ret = read_watch_internal(h, &num, 1);
941 if (ret) assert(num >= 2);
942 return ret;
943 }
944
945 /* Find out what node change was on (will block if nothing pending).
946 * Returns array of two pointers: path and token, or NULL.
947 * Call free() after use.
948 */
949 char **xs_read_watch(struct xs_handle *h, unsigned int *num)
950 {
951 return read_watch_internal(h, num, 0);
952 }
953
954 /* Remove a watch on a node.
955 * Returns false on failure (no watch on that node).
956 */
957 bool xs_unwatch(struct xs_handle *h, const char *path, const char *token)
958 {
959 struct iovec iov[2];
960 struct xs_stored_msg *msg, *tmsg;
961 bool res;
962 char *s, *p;
963 unsigned int i;
964 char *l_token, *l_path;
965
966 iov[0].iov_base = (char *)path;
967 iov[0].iov_len = strlen(path) + 1;
968 iov[1].iov_base = (char *)token;
969 iov[1].iov_len = strlen(token) + 1;
970
971 res = xs_bool(xs_talkv(h, XBT_NULL, XS_UNWATCH, iov,
972 ARRAY_SIZE(iov), NULL));
973
974 if (!h->unwatch_filter) /* Don't filter the watch list */
975 return res;
976
977
978 /* Filter the watch list to drop pending events for the removed watch. */
979 mutex_lock(&h->watch_mutex);
980
981 if (list_empty(&h->watch_list)) {
982 mutex_unlock(&h->watch_mutex);
983 return res;
984 }
985
986 list_for_each_entry_safe(msg, tmsg, &h->watch_list, list) {
987 assert(msg->hdr.type == XS_WATCH_EVENT);
988
989 s = msg->body;
990
991 l_token = NULL;
992 l_path = NULL;
993
994 for (p = s, i = 0; p < msg->body + msg->hdr.len; p++) {
995 if (*p == '\0')
996 {
997 if (i == XS_WATCH_TOKEN)
998 l_token = s;
999 else if (i == XS_WATCH_PATH)
1000 l_path = s;
1001 i++;
1002 s = p + 1;
1003 }
1004 }
1005
1006 if (l_token && !strcmp(token, l_token) &&
1007 l_path && xs_path_is_subpath(path, l_path)) {
1008 list_del(&msg->list);
1009 free(msg);
1010 }
1011 }
1012
1013 xs_maybe_clear_watch_pipe(h);
1014
1015 mutex_unlock(&h->watch_mutex);
1016
1017 return res;
1018 }
1019
1020 /* Start a transaction: changes by others will not be seen during this
1021 * transaction, and changes will not be visible to others until end.
1022 * Returns XBT_NULL on failure.
1023 */
1024 xs_transaction_t xs_transaction_start(struct xs_handle *h)
1025 {
1026 char *id_str;
1027 xs_transaction_t id;
1028
1029 id_str = xs_single(h, XBT_NULL, XS_TRANSACTION_START, "", NULL);
1030 if (id_str == NULL)
1031 return XBT_NULL;
1032
1033 id = strtoul(id_str, NULL, 0);
1034 free(id_str);
1035
1036 return id;
1037 }
1038
1039 /* End a transaction.
1040 * If abort is true, the transaction is discarded instead of committed.
1041 * Returns false on failure, which indicates an error: transactions will
1042 * not fail spuriously.
1043 */
1044 bool xs_transaction_end(struct xs_handle *h, xs_transaction_t t,
1045 bool abort)
1046 {
1047 char abortstr[2];
1048
1049 if (abort)
1050 strcpy(abortstr, "F");
1051 else
1052 strcpy(abortstr, "T");
1053
1054 return xs_bool(xs_single(h, t, XS_TRANSACTION_END, abortstr, NULL));
1055 }
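/*
 * Illustrative transaction sketch: group a read-modify-write so other
 * clients see it atomically.  By convention, a commit that conflicted with
 * another transaction fails with errno set to EAGAIN and is simply retried;
 * treat that as an assumption about daemon behaviour rather than something
 * enforced in this file.  "/example/key" is a hypothetical path.
 *
 *   bool ok = false;
 *   do {
 *           xs_transaction_t t = xs_transaction_start(xsh);
 *           if (t == XBT_NULL)
 *                   break;
 *           xs_write(xsh, t, "/example/key", "value", strlen("value"));
 *           ok = xs_transaction_end(xsh, t, false);
 *   } while (!ok && errno == EAGAIN);
 */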
1056
1057 /* Introduce a new domain.
1058 * This tells the store daemon about a shared memory page and event channel
1059 * associated with a domain: the domain uses these to communicate.
1060 */
1061 bool xs_introduce_domain(struct xs_handle *h,
1062 unsigned int domid, unsigned long mfn,
1063 unsigned int eventchn)
1064 {
1065 char domid_str[MAX_STRLEN(domid)];
1066 char mfn_str[MAX_STRLEN(mfn)];
1067 char eventchn_str[MAX_STRLEN(eventchn)];
1068 struct iovec iov[3];
1069
1070 snprintf(domid_str, sizeof(domid_str), "%u", domid);
1071 snprintf(mfn_str, sizeof(mfn_str), "%lu", mfn);
1072 snprintf(eventchn_str, sizeof(eventchn_str), "%u", eventchn);
1073
1074 iov[0].iov_base = domid_str;
1075 iov[0].iov_len = strlen(domid_str) + 1;
1076 iov[1].iov_base = mfn_str;
1077 iov[1].iov_len = strlen(mfn_str) + 1;
1078 iov[2].iov_base = eventchn_str;
1079 iov[2].iov_len = strlen(eventchn_str) + 1;
1080
1081 return xs_bool(xs_talkv(h, XBT_NULL, XS_INTRODUCE, iov,
1082 ARRAY_SIZE(iov), NULL));
1083 }
1084
1085 bool xs_set_target(struct xs_handle *h,
1086 unsigned int domid, unsigned int target)
1087 {
1088 char domid_str[MAX_STRLEN(domid)];
1089 char target_str[MAX_STRLEN(target)];
1090 struct iovec iov[2];
1091
1092 snprintf(domid_str, sizeof(domid_str), "%u", domid);
1093 snprintf(target_str, sizeof(target_str), "%u", target);
1094
1095 iov[0].iov_base = domid_str;
1096 iov[0].iov_len = strlen(domid_str) + 1;
1097 iov[1].iov_base = target_str;
1098 iov[1].iov_len = strlen(target_str) + 1;
1099
1100 return xs_bool(xs_talkv(h, XBT_NULL, XS_SET_TARGET, iov,
1101 ARRAY_SIZE(iov), NULL));
1102 }
1103
1104 static void * single_with_domid(struct xs_handle *h,
1105 enum xsd_sockmsg_type type,
1106 unsigned int domid)
1107 {
1108 char domid_str[MAX_STRLEN(domid)];
1109
1110 snprintf(domid_str, sizeof(domid_str), "%u", domid);
1111
1112 return xs_single(h, XBT_NULL, type, domid_str, NULL);
1113 }
1114
1115 bool xs_release_domain(struct xs_handle *h, unsigned int domid)
1116 {
1117 return xs_bool(single_with_domid(h, XS_RELEASE, domid));
1118 }
1119
1120 /* clear the shutdown bit for the given domain */
1121 bool xs_resume_domain(struct xs_handle *h, unsigned int domid)
1122 {
1123 return xs_bool(single_with_domid(h, XS_RESUME, domid));
1124 }
1125
1126 char *xs_get_domain_path(struct xs_handle *h, unsigned int domid)
1127 {
1128 char domid_str[MAX_STRLEN(domid)];
1129
1130 snprintf(domid_str, sizeof(domid_str), "%u", domid);
1131
1132 return xs_single(h, XBT_NULL, XS_GET_DOMAIN_PATH, domid_str, NULL);
1133 }
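/*
 * Illustrative sketch: build a per-domain path from xs_get_domain_path()
 * and read a key beneath it.  Both the returned path and the value are
 * malloc'ed.  The "name" key and the domid variable are examples.
 *
 *   char *dompath = xs_get_domain_path(xsh, domid);
 *   if (dompath) {
 *           char key[256];
 *           snprintf(key, sizeof(key), "%s/name", dompath);
 *           char *name = xs_read(xsh, XBT_NULL, key, NULL);
 *           free(name);
 *           free(dompath);
 *   }
 */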
1134
1135 bool xs_path_is_subpath(const char *parent, const char *child)
1136 {
1137 size_t childlen = strlen(child);
1138 size_t parentlen = strlen(parent);
1139
1140 if (childlen < parentlen)
1141 return false;
1142
1143 if (memcmp(child, parent, parentlen))
1144 return false;
1145
1146 if (childlen > parentlen && child[parentlen] != '/')
1147 return false;
1148
1149 return true;
1150 }
1151
1152 bool xs_is_domain_introduced(struct xs_handle *h, unsigned int domid)
1153 {
1154 char *domain = single_with_domid(h, XS_IS_DOMAIN_INTRODUCED, domid);
1155 int rc = strcmp("F", domain);
1156
1157 free(domain);
1158 return rc;
1159 }
1160
1161 int xs_suspend_evtchn_port(int domid)
1162 {
1163 char path[128];
1164 char *portstr;
1165 int port;
1166 unsigned int plen;
1167 struct xs_handle *xs;
1168
1169 xs = xs_daemon_open();
1170 if (!xs)
1171 return -1;
1172
1173 sprintf(path, "/local/domain/%d/device/suspend/event-channel", domid);
1174 portstr = xs_read(xs, XBT_NULL, path, &plen);
1175 xs_daemon_close(xs);
1176
1177 if (!portstr || !plen) {
1178 port = -1;
1179 goto out;
1180 }
1181
1182 port = atoi(portstr);
1183
1184 out:
1185 free(portstr);
1186 return port;
1187 }
1188
1189 char *xs_control_command(struct xs_handle *h, const char *cmd,
1190 void *data, unsigned int len)
1191 {
1192 struct iovec iov[2];
1193
1194 iov[0].iov_base = (void *)cmd;
1195 iov[0].iov_len = strlen(cmd) + 1;
1196 iov[1].iov_base = data;
1197 iov[1].iov_len = len;
1198
1199 return xs_talkv(h, XBT_NULL, XS_CONTROL, iov,
1200 ARRAY_SIZE(iov), NULL);
1201 }
1202
1203 char *xs_debug_command(struct xs_handle *h, const char *cmd,
1204 void *data, unsigned int len)
1205 {
1206 return xs_control_command(h, cmd, data, len);
1207 }
1208
1209 static int read_message(struct xs_handle *h, int nonblocking)
1210 {
1211 /* IMPORTANT: It is forbidden to call this function without
1212 * acquiring the request lock and checking that h->read_thr_exists
1213 * is false. See "Lock discipline" in struct xs_handle, above. */
1214
1215 /* If nonblocking==1, this function either reads nothing, returning
1216 * -1 and setting errno==EAGAIN, or it reads the whole amount requested.
1217 * I.e. as soon as we have the start of the message we block until we
1218 * get all of it.
1219 */
1220
1221 struct xs_stored_msg *msg = NULL;
1222 char *body = NULL;
1223 int saved_errno = 0;
1224 int ret = -1;
1225
1226 /* Allocate message structure and read the message header. */
1227 msg = malloc(sizeof(*msg));
1228 if (msg == NULL)
1229 goto error;
1230 cleanup_push_heap(msg);
1231 if (!read_all(h->fd, &msg->hdr, sizeof(msg->hdr), nonblocking)) { /* Cancellation point */
1232 saved_errno = errno;
1233 goto error_freemsg;
1234 }
1235
1236 /* Sanity check message body length. */
1237 if (msg->hdr.len > XENSTORE_PAYLOAD_MAX) {
1238 saved_errno = E2BIG;
1239 goto error_freemsg;
1240 }
1241
1242 /* Allocate and read the message body. */
1243 body = msg->body = malloc(msg->hdr.len + 1);
1244 if (body == NULL)
1245 goto error_freemsg;
1246 cleanup_push_heap(body);
1247 if (!read_all(h->fd, body, msg->hdr.len, 0)) { /* Cancellation point */
1248 saved_errno = errno;
1249 goto error_freebody;
1250 }
1251
1252 body[msg->hdr.len] = '\0';
1253
1254 if (msg->hdr.type == XS_WATCH_EVENT) {
1255 mutex_lock(&h->watch_mutex);
1256 cleanup_push(pthread_mutex_unlock, &h->watch_mutex);
1257
1258 /* Kick users out of their select() loop. */
1259 if (list_empty(&h->watch_list) &&
1260 (h->watch_pipe[1] != -1))
1261 while (write(h->watch_pipe[1], body, 1) != 1) /* Cancellation point */
1262 continue;
1263
1264 list_add_tail(&msg->list, &h->watch_list);
1265
1266 condvar_signal(&h->watch_condvar);
1267
1268 cleanup_pop(1);
1269 } else {
1270 mutex_lock(&h->reply_mutex);
1271
1272 /* There should only ever be one response pending! */
1273 if (!list_empty(&h->reply_list)) {
1274 mutex_unlock(&h->reply_mutex);
1275 saved_errno = EEXIST;
1276 goto error_freebody;
1277 }
1278
1279 list_add_tail(&msg->list, &h->reply_list);
1280 condvar_signal(&h->reply_condvar);
1281
1282 mutex_unlock(&h->reply_mutex);
1283 }
1284
1285 ret = 0;
1286
1287 error_freebody:
1288 cleanup_pop_heap(ret == -1, body);
1289 error_freemsg:
1290 cleanup_pop_heap(ret == -1, msg);
1291 error:
1292 errno = saved_errno;
1293
1294 return ret;
1295 }
1296
1297 #ifdef USE_PTHREAD
1298 static void *read_thread(void *arg)
1299 {
1300 struct xs_handle *h = arg;
1301 int fd;
1302
1303 while (read_message(h, 0) != -1)
1304 continue;
1305
1306 /* An error return from read_message leaves the socket in an undefined
1307 * state; we might have read only the header and not the message after
1308 * it, or (more commonly) the other end has closed the connection.
1309 * Since further communication is unsafe, close the socket.
1310 */
1311 fd = h->fd;
1312 h->fd = -1;
1313 close(fd);
1314
1315 /* wake up all waiters */
1316 pthread_mutex_lock(&h->reply_mutex);
1317 pthread_cond_broadcast(&h->reply_condvar);
1318 pthread_mutex_unlock(&h->reply_mutex);
1319
1320 pthread_mutex_lock(&h->watch_mutex);
1321 pthread_cond_broadcast(&h->watch_condvar);
1322 pthread_mutex_unlock(&h->watch_mutex);
1323
1324 return NULL;
1325 }
1326 #endif
1327
1328 char *expanding_buffer_ensure(struct expanding_buffer *ebuf, int min_avail)
1329 {
1330 int want;
1331 char *got;
1332
1333 if (ebuf->avail >= min_avail)
1334 return ebuf->buf;
1335
1336 if (min_avail >= INT_MAX/3)
1337 return 0;
1338
1339 want = ebuf->avail + min_avail + 10;
1340 got = realloc(ebuf->buf, want);
1341 if (!got)
1342 return 0;
1343
1344 ebuf->buf = got;
1345 ebuf->avail = want;
1346 return ebuf->buf;
1347 }
1348
1349 char *sanitise_value(struct expanding_buffer *ebuf,
1350 const char *val, unsigned len)
1351 {
1352 int used, remain, c;
1353 unsigned char *ip;
1354
1355 #define ADD(c) (ebuf->buf[used++] = (c))
1356 #define ADDF(f,c) (used += sprintf(ebuf->buf+used, (f), (c)))
1357
1358 assert(len < INT_MAX/5);
1359
1360 ip = (unsigned char *)val;
1361 used = 0;
1362 remain = len;
1363
1364 if (!expanding_buffer_ensure(ebuf, remain + 1))
1365 return NULL;
1366
1367 while (remain-- > 0) {
1368 c= *ip++;
1369
1370 if (c >= ' ' && c <= '~' && c != '\\') {
1371 ADD(c);
1372 continue;
1373 }
1374
1375 if (!expanding_buffer_ensure(ebuf, used + remain + 5))
1376 /* for "<used>\\nnn<remain>\0" */
1377 return 0;
1378
1379 ADD('\\');
1380 switch (c) {
1381 case '\t': ADD('t'); break;
1382 case '\n': ADD('n'); break;
1383 case '\r': ADD('r'); break;
1384 case '\\': ADD('\\'); break;
1385 default:
1386 if (c < 010) ADDF("%03o", c);
1387 else ADDF("x%02x", c);
1388 }
1389 }
1390
1391 ADD(0);
1392 assert(used <= ebuf->avail);
1393 return ebuf->buf;
1394
1395 #undef ADD
1396 #undef ADDF
1397 }
1398
1399 void unsanitise_value(char *out, unsigned *out_len_r, const char *in)
1400 {
1401 const char *ip;
1402 char *op;
1403 unsigned c;
1404 int n;
1405
1406 for (ip = in, op = out; (c = *ip++); *op++ = c) {
1407 if (c == '\\') {
1408 c = *ip++;
1409
1410 #define GETF(f) do { \
1411 n = 0; \
1412 sscanf(ip, f "%n", &c, &n); \
1413 ip += n; \
1414 } while (0)
1415
1416 switch (c) {
1417 case 't': c= '\t'; break;
1418 case 'n': c= '\n'; break;
1419 case 'r': c= '\r'; break;
1420 case '\\': c= '\\'; break;
1421 case 'x': GETF("%2x"); break;
1422 case '0': case '4':
1423 case '1': case '5':
1424 case '2': case '6':
1425 case '3': case '7': --ip; GETF("%3o"); break;
1426 case 0: --ip; break;
1427 default:;
1428 }
1429 #undef GETF
1430 }
1431 }
1432
1433 *op = 0;
1434
1435 if (out_len_r)
1436 *out_len_r = op - out;
1437 }
1438
1439 /*
1440 * Local variables:
1441 * c-file-style: "linux"
1442 * indent-tabs-mode: t
1443 * c-indent-level: 8
1444 * c-basic-offset: 8
1445 * tab-width: 8
1446 * End:
1447 */
1448