/*
 * Project Acrn
 * Acrn-dm: pm-vuart
 *
 * Copyright (C) 2019-2022 Intel Corporation.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 *
 */

/* The vuart can be used for communication between the Service VM and a User VM; here it is used for power-management control. */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdbool.h>
#include <stdint.h>	/* uint8_t */
#include <unistd.h>
#include <fcntl.h>
#include <assert.h>
#include <signal.h>	/* raise() */
#include <pthread.h>
#include <sys/types.h>
#include <sys/select.h>	/* select(), fd_set */
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <termios.h>
#include <errno.h>

#include "vmmapi.h"
#include "monitor.h"
#include "pty_vuart.h"
#include "log.h"

#define SHUTDOWN_CMD  "shutdown"
#define CMD_LEN 16
#define MAX_NODE_PATH  128
#define SERVICE_VM_SOCKET_PORT 0x2000

static const char * const node_name[] = {
	"pty",
	"tty",
};

enum node_type_t {
	PTY_NODE,
	TTY_NODE,
	MAX_NODE_CNT,
};

static bool allow_trigger_s5;
static uint8_t node_index = MAX_NODE_CNT;
static char node_path[MAX_NODE_PATH];
static int node_fd = -1;
static int socket_fd = -1;
static pthread_t pm_monitor_thread;
static pthread_mutex_t pm_vuart_lock = PTHREAD_MUTEX_INITIALIZER;

static int vm_stop_handler(void *arg);
static struct monitor_vm_ops vm_ops = {
	.stop = vm_stop_handler,
};

/*
 * Read from the vuart; return true when the accumulated data ends with
 * '\0' or '\n', or when *count reaches buf_len.
 */
static bool read_bytes(int fd, uint8_t *buffer, int buf_len, int *count, bool *eof)
{
	bool ready = false;
	int rc = -1;

	if (buf_len <= (*count)) {
		*count = buf_len;
		ready = true;
		goto out;
	}

	do {
		rc = read(fd, buffer + (*count), buf_len - (*count));
		if (rc > 0) {
			*count += rc;
			if ((buffer[*count - 1] == '\0') || (buffer[*count - 1] == '\n') ||
					(*count == buf_len))
				ready = true;
		}
	} while (rc > 0 && !ready);

out:
	/* rc == 0 means the peer closed the connection (end-of-file) */
	*eof = (rc == 0);
	return ready;
}

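/*
 * Create a TCP socket and connect to the life-cycle manager listening on
 * 127.0.0.1:SERVICE_VM_SOCKET_PORT; the connected descriptor is kept in
 * socket_fd.
 */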
static int pm_setup_socket(void)
{
	struct sockaddr_in socket_addr;

	socket_fd = socket(AF_INET, SOCK_STREAM, 0);
	if (socket_fd == -1) {
		pr_err("create a socket endpoint error\n");
		return -1;
	}

	memset(&socket_addr, 0, sizeof(struct sockaddr_in));
	socket_addr.sin_family = AF_INET;
	socket_addr.sin_port = htons(SERVICE_VM_SOCKET_PORT);
	socket_addr.sin_addr.s_addr = inet_addr("127.0.0.1");

	if (connect(socket_fd, (struct sockaddr *)&socket_addr, sizeof(socket_addr)) == -1) {
		pr_err("initiate a connection on a socket error\n");
		close(socket_fd);
		socket_fd = -1;
		return -1;
	}

	return 0;
}

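/*
 * Monitor thread: select() on the vuart node and the life-cycle manager
 * socket, forwarding each complete message to the other side. Leaving
 * the loop (on a write failure or a closed socket) powers off this VM
 * via SIGHUP.
 */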
static void *pm_monitor_loop(void *arg)
{
	int rc;
	bool eof;
	char buf_node[CMD_LEN+1], buf_socket[CMD_LEN+1];
	int max_fd, count_node = 0, count_socket = 0;
	fd_set read_fd;

	buf_node[CMD_LEN] = buf_socket[CMD_LEN] = '\0';
	max_fd = (socket_fd > node_fd) ? (socket_fd + 1) : (node_fd + 1);

	while (1) {
		FD_ZERO(&read_fd);
		FD_SET(socket_fd, &read_fd);
		FD_SET(node_fd, &read_fd);

		rc = select(max_fd, &read_fd, NULL, NULL, NULL);
		if (rc > 0) {
			/* forward User VM messages to the life-cycle manager */
			if (FD_ISSET(node_fd, &read_fd)) {
				if (read_bytes(node_fd, (uint8_t *)buf_node, CMD_LEN,
						&count_node, &eof)) {
					pr_info("Received msg[%s] from User VM, count=%d\r\n",
						buf_node, count_node);
					rc = write(socket_fd, buf_node, count_node);

					if (rc != count_node) {
						pr_err("%s:%u: write error ret_val = %d\r\n",
							__func__, __LINE__, rc);
						break;
					}
					count_node = 0;
				}
			}
			/* forward life-cycle manager messages to the User VM */
			if (FD_ISSET(socket_fd, &read_fd)) {
				if (read_bytes(socket_fd, (uint8_t *)buf_socket, CMD_LEN,
						&count_socket, &eof)) {
					pr_info("Received msg[%s] from life_mngr on Service VM, count=%d\r\n",
						buf_socket, count_socket);
					pthread_mutex_lock(&pm_vuart_lock);
					rc = write(node_fd, buf_socket, count_socket);
					pthread_mutex_unlock(&pm_vuart_lock);

					if (rc != count_socket) {
						pr_err("%s:%u: write error ret_val = %d\r\n",
							__func__, __LINE__, rc);
						break;
					}
					count_socket = 0;
				} else if (eof) {
					pr_err("socket connection to life-cycle manager closed\n");
					break;
				}
			}
		}
	}

	/* power off this VM if we get here */
	raise(SIGHUP);
	/* cleanup will be done in pm_by_vuart_deinit() */
	return NULL;
}

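/*
 * Connect to the life-cycle manager and start the pm_monitor thread
 * that relays messages between it and the User VM.
 */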
static int start_pm_monitor_thread(void)
{
	int ret;

	if (pm_setup_socket()) {
		pr_err("create socket to connect life-cycle manager failed\n");
		return -1;
	}

	if ((ret = pthread_create(&pm_monitor_thread, NULL, pm_monitor_loop, NULL))) {
		pr_err("%s: pthread_create error: %s\n", __func__, strerror(ret));
		close(socket_fd);
		socket_fd = -1;
		return -1;
	}

	pthread_setname_np(pm_monitor_thread, "pm_monitor");
	return 0;
}

/*
 * The --pm_vuart configuration takes one of the following two forms:
 * A: pty-link, e.g. pty,/run/acrn/vuart-vm1 (also set it in -l com2,/run/acrn/vuart-vm1);
 *    the Service VM and User VM communicate via:
 *    (Service VM):pty-link-node <--> (Service VM):com2 <--> (User VM):/dev/ttyS1
 * B: tty-node, e.g. tty,/dev/ttyS1; the Service VM and User VM communicate via:
 *    (Service VM):ttyS1 <--> HV <--> (User VM):ttySn
 */
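/*
 * Example invocations (device paths are illustrative):
 *   form A: --pm_vuart pty,/run/acrn/vuart-vm1 -l com2,/run/acrn/vuart-vm1
 *   form B: --pm_vuart tty,/dev/ttyS1
 */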
int parse_pm_by_vuart(const char *opts)
{
	int i, error = -1;
	char *str, *cpy, *type;

	str = cpy = strdup(opts);
	if (!str) {
		pr_err("strdup failed (errno %d) in %s line %d!\n", errno, __func__, __LINE__);
		return error;
	}
	type = strsep(&str, ",");

	if (type != NULL) {
		for (i = 0; i < MAX_NODE_CNT; i++) {
			if (strcasecmp(type, node_name[i]) == 0) {
				node_index = i;
				error = 0;
				break;
			}
		}
	}

	/* str is NULL when opts contains no ',' and thus no node path */
	if (str == NULL) {
		pr_err("pm-vuart: missing node path in option \"%s\"\n", opts);
		free(cpy);
		return -1;
	}

	pr_dbg("pm by vuart node-index = %d\n", node_index);
	strncpy(node_path, str, MAX_NODE_PATH - 1);

	free(cpy);
	return error;
}

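/*
 * Put the tty into raw mode at the given speed: 8 data bits, no parity,
 * one stop bit, no flow control, no echo, no canonical processing.
 */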
static int set_tty_attr(int fd, int speed)
{
	struct termios tty;

	if (tcgetattr(fd, &tty) < 0) {
		pr_err("error from tcgetattr\n");
		return -1;
	}
	cfsetospeed(&tty, (speed_t)speed);
	cfsetispeed(&tty, (speed_t)speed);

	/* set input-mode */
	tty.c_iflag &= ~(IGNBRK | BRKINT | PARMRK |
			ISTRIP | INLCR | IGNCR | ICRNL | IXON);
	/* set output-mode */
	tty.c_oflag &= ~OPOST;

	/* set control-mode: clear CSIZE before setting CS8, otherwise the size bits are wiped */
	tty.c_cflag &= ~(CSIZE | PARENB | CSTOPB | CRTSCTS);
	tty.c_cflag |= (CLOCAL | CREAD | CS8);

	/* set local-mode */
	tty.c_lflag &= ~(ECHO | ECHONL | ICANON | ISIG | IEXTEN);

	/* block until one char read, set next char's timeout */
	tty.c_cc[VMIN] = 1;
	tty.c_cc[VTIME] = 1;

	tcflush(fd, TCIOFLUSH);

	if (tcsetattr(fd, TCSANOW, &tty) != 0) {
		pr_err("error from tcsetattr\n");
		return -1;
	}

	return 0;
}

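/*
 * Open the vuart node (a pty link or a tty device), register the stop
 * handler with the VM monitor and, if S5 triggering is allowed, start
 * the pm_monitor thread.
 */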
int pm_by_vuart_init(struct vmctx *ctx, bool trigger_s5)
{
	assert(node_index < MAX_NODE_CNT);

	allow_trigger_s5 = trigger_s5;
	pr_info("%s: allow_trigger_s5: %u, idx: %u, path: %s\r\n",
			__func__, trigger_s5, node_index, node_path);

	if (node_index == PTY_NODE)
		node_fd = pty_open_virtual_uart(node_path);
	else if (node_index == TTY_NODE)
		node_fd = open(node_path, O_RDWR | O_NOCTTY | O_NONBLOCK);

	if (node_fd >= 0) {
		if (node_index == TTY_NODE)
			set_tty_attr(node_fd, B115200);
		if (monitor_register_vm_ops(&vm_ops, ctx, "pm-vuart") < 0) {
			pr_err("%s: pm-vuart register to VM monitor failed\n", node_path);
			close(node_fd);
			node_fd = -1;
			return -1;
		}
	} else {
		pr_err("%s open failed, fd=%d\n", node_path, node_fd);
		return -1;
	}

	if (trigger_s5 && start_pm_monitor_thread()) {
		close(node_fd);
		node_fd = -1;
		return -1;
	}

	return 0;
}

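/* Stop the pm_monitor thread (if it was started) and close the vuart node. */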
void pm_by_vuart_deinit(struct vmctx *ctx)
{
	if (allow_trigger_s5) {
		pthread_cancel(pm_monitor_thread);
		pthread_join(pm_monitor_thread, NULL);
		close(socket_fd);
		socket_fd = -1;
	}
	close(node_fd);
	node_fd = -1;
}

/* called when acrn-dm receives the stop command */
static int vm_stop_handler(void *arg)
{
	int ret;

	pr_info("pm-vuart stop handler called: node-index=%d\n", node_index);
	assert(node_index < MAX_NODE_CNT);

	if (node_fd < 0) {
		pr_err("no vuart node opened!\n");
		return -1;
	}

	pthread_mutex_lock(&pm_vuart_lock);
	/* sizeof(SHUTDOWN_CMD) includes the trailing '\0', so the receiver sees a terminated message */
	ret = write(node_fd, SHUTDOWN_CMD, sizeof(SHUTDOWN_CMD));
	pthread_mutex_unlock(&pm_vuart_lock);
	if (ret != sizeof(SHUTDOWN_CMD)) {
		/* no need to resend the shutdown command here; the pm_monitor thread will resend it */
		pr_err("send shutdown command to User VM failed\r\n");
	}

	return 0;
}