/*
 * Copyright (C) 2018-2023 Intel Corporation.
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <errno.h>
#include <stdlib.h>
#include <stdio.h>
#include <stdbool.h>
#include <fcntl.h>
#include <unistd.h>
#include <signal.h>
#include <time.h>
#include <sys/epoll.h>
#include <sys/queue.h>
#include <pthread.h>
#include <sys/ioctl.h>
#include <sys/eventfd.h>
#include <acrn_common.h>

#include "vm_event.h"
#include "hsm_ioctl_defs.h"
#include "sbuf.h"
#include "log.h"
#include <cjson/cJSON.h>
#include "monitor.h"
#include "timer.h"

#define VM_EVENT_ELE_SIZE (sizeof(struct vm_event))

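/* Two event tunnels: one for events generated by the hypervisor (HV),
 * the other for events generated by the device model (DM) itself.
 */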
#define HV_VM_EVENT_TUNNEL 0
#define DM_VM_EVENT_TUNNEL 1
#define MAX_VM_EVENT_TUNNELS 2
#define MAX_EPOLL_EVENTS MAX_VM_EVENT_TUNNELS

#define THROTTLE_WINDOW	1U /* time window for the throttle counter, in seconds */

#define BROKEN_TIME ((time_t)-1)

typedef void (*vm_event_handler)(struct vmctx *ctx, struct vm_event *event);
typedef void (*vm_event_generate_jdata)(cJSON *event_obj, struct vm_event *event);

static int epoll_fd;
static bool started = false;
static char hv_vm_event_page[4096] __aligned(4096);
static char dm_vm_event_page[4096] __aligned(4096);
static pthread_t vm_event_tid;

static void general_event_handler(struct vmctx *ctx, struct vm_event *event);
static void rtc_chg_event_handler(struct vmctx *ctx, struct vm_event *event);

static void gen_rtc_chg_jdata(cJSON *event_obj, struct vm_event *event);

enum event_source_type {
	EVENT_SOURCE_TYPE_HV,
	EVENT_SOURCE_TYPE_DM,
};

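/* An event tunnel carries vm_events from one source (HV or DM) through a
 * shared ring buffer; kick_fd is an eventfd used to wake the receiving thread.
 */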
struct vm_event_tunnel {
	enum event_source_type type;
	struct shared_buf *sbuf;
	uint32_t sbuf_size;
	int kick_fd;
	pthread_mutex_t mtx;
	bool enabled;
};

struct event_throttle_ctl {
	struct acrn_timer timer;
	pthread_mutex_t mtx;
	uint32_t event_counter;
	uint32_t throttle_count;	/* how many events have been throttled (dropped) */
	bool	is_up;
};

struct vm_event_proc {
	vm_event_handler ve_handler;
	uint32_t	throttle_rate; /* how many events are allowed per second */
	struct event_throttle_ctl throttle_ctl;
	vm_event_generate_jdata gen_jdata_handler; /* how to transfer vm_event data to JSON text */
};

static struct vm_event_proc ve_proc[VM_EVENT_COUNT] = {
	[VM_EVENT_RTC_CHG] = {
		.ve_handler = rtc_chg_event_handler,
		.gen_jdata_handler = gen_rtc_chg_jdata,
		.throttle_rate = 1,
	},
	[VM_EVENT_POWEROFF] = {
		.ve_handler = general_event_handler,
		.gen_jdata_handler = NULL,
		.throttle_rate = 1,
	},
	[VM_EVENT_TRIPLE_FAULT] = {
		.ve_handler = general_event_handler,
		.gen_jdata_handler = NULL,
		.throttle_rate = 1,
	},
};

static inline struct vm_event_proc *get_vm_event_proc(struct vm_event *event)
{
	struct vm_event_proc *proc = NULL;
	if (event->type < VM_EVENT_COUNT) {
		proc = &ve_proc[event->type];
	}
	return proc;
}

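/* Return true if the event should be dropped because its type has already
 * reached throttle_rate events within the current THROTTLE_WINDOW.
 */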
static bool event_throttle(struct vm_event *event)
{
	struct vm_event_proc *proc;
	struct event_throttle_ctl *ctl;
	uint32_t current_rate;
	bool ret = false;

	proc = get_vm_event_proc(event);
	if (proc) {
		ctl = &proc->throttle_ctl;
		if (ctl->is_up) {
			pthread_mutex_lock(&ctl->mtx);
			current_rate = ctl->event_counter / THROTTLE_WINDOW;
			if (current_rate < proc->throttle_rate) {
				ctl->event_counter++;
				ret = false;
			} else {
				ret = true;
				ctl->throttle_count++;
				pr_notice("event %d throttle: %u dropped\n",
					event->type, ctl->throttle_count);
			}
			pthread_mutex_unlock(&ctl->mtx);
		}
	}
	return ret;
}

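/* Periodic timer callback: start a new throttle window by resetting the
 * per-type event counter.
 */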
void throttle_timer_cb(void *arg, uint64_t nexp)
{
	struct event_throttle_ctl *ctl = (struct event_throttle_ctl *)arg;
	pthread_mutex_lock(&ctl->mtx);
	ctl->event_counter = 0;
	pthread_mutex_unlock(&ctl->mtx);
}

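/* Arm one periodic timer per event type; each expiry opens a fresh throttle
 * window. Throttling stays disabled for a type whose timer cannot be created
 * or armed.
 */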
static void vm_event_throttle_init(struct vmctx *ctx)
{
	int i;
	struct event_throttle_ctl *ctl;
	int ret = 0;
	struct itimerspec timer_spec;

	for (i = 0; i < ARRAY_SIZE(ve_proc); i++) {
		ctl = &ve_proc[i].throttle_ctl;
		ctl->event_counter = 0U;
		ctl->throttle_count = 0U;
		ctl->is_up = false;
		pthread_mutex_init(&ctl->mtx, NULL);
		ctl->timer.clockid = CLOCK_MONOTONIC;
		ret = acrn_timer_init(&ctl->timer, throttle_timer_cb, ctl);
		if (ret < 0) {
			pr_warn("failed to create timer for vm_event %d, throttle disabled\n", i);
			continue;
		}
		timer_spec.it_value.tv_sec = THROTTLE_WINDOW;
		timer_spec.it_value.tv_nsec = 0;
		timer_spec.it_interval.tv_sec = THROTTLE_WINDOW;
		timer_spec.it_interval.tv_nsec = 0;
		ret = acrn_timer_settime(&ctl->timer, &timer_spec);
		if (ret < 0) {
			pr_warn("failed to set timer for vm_event %d, throttle disabled\n", i);
			continue;
		}
		ctl->is_up = true;
	}
}

static void vm_event_throttle_deinit(void)
{
	int i;
	struct event_throttle_ctl *ctl;

	for (i = 0; i < ARRAY_SIZE(ve_proc); i++) {
		ctl = &ve_proc[i].throttle_ctl;
		if (ctl->timer.fd != -1) {
			acrn_timer_deinit(&ctl->timer);
		}
	}
}

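/* Serialize a vm_event into a JSON message; the per-type gen_jdata_handler,
 * if any, attaches extra payload fields. The caller must free() the result.
 */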
static char *generate_vm_event_message(struct vm_event *event)
{
	char *event_msg = NULL;
	cJSON *val;
	cJSON *event_obj = cJSON_CreateObject();
	struct vm_event_proc *proc;

	if (event_obj == NULL)
		return NULL;
	val = cJSON_CreateNumber(event->type);
	if (val == NULL) {
		/* avoid leaking event_obj on the failure path */
		cJSON_Delete(event_obj);
		return NULL;
	}
	cJSON_AddItemToObject(event_obj, "vm_event", val);

	proc = get_vm_event_proc(event);
	if (proc && proc->gen_jdata_handler) {
		(proc->gen_jdata_handler)(event_obj, event);
	}

	event_msg = cJSON_Print(event_obj);
	if (event_msg == NULL)
		fprintf(stderr, "Failed to generate vm_event message.\n");

	cJSON_Delete(event_obj);

	return event_msg;
}

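/* Throttle, serialize, and forward one event to monitor clients via
 * vm_monitor_send_vm_event().
 */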
static void emit_vm_event(struct vmctx *ctx, struct vm_event *event)
{
	if (!event_throttle(event)) {
		char *msg = generate_vm_event_message(event);
		if (msg != NULL) {
			vm_monitor_send_vm_event(msg);
			free(msg);
		}
	}
}

static void general_event_handler(struct vmctx *ctx, struct vm_event *event)
{
	emit_vm_event(ctx, event);
}

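/* Attach the rtc_chg payload (delta_time and last_time) to the JSON object. */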
static void gen_rtc_chg_jdata(cJSON *event_obj, struct vm_event *event)
{
	struct rtc_change_event_data *data = (struct rtc_change_event_data *)event->event_data;
	cJSON *val;

	val = cJSON_CreateNumber(data->delta_time);
	if (val != NULL) {
		cJSON_AddItemToObject(event_obj, "delta_time", val);
	}
	val = cJSON_CreateNumber(data->last_time);
	if (val != NULL) {
		cJSON_AddItemToObject(event_obj, "last_time", val);
	}
}

/* Assume there is only one unique RTC source. */

static struct acrn_timer rtc_chg_event_timer = {
	.clockid = CLOCK_MONOTONIC,
};
static pthread_mutex_t rtc_chg_mutex = PTHREAD_MUTEX_INITIALIZER;
static struct timespec time_window_start;
static time_t last_time_cached = BROKEN_TIME;
static time_t delta_time_sum = 0;
#define RTC_CHG_WAIT_TIME 1 /* 1 second */
static void rtc_chg_event_handler(struct vmctx *ctx, struct vm_event *event)
{
	struct itimerspec timer_spec;
	struct rtc_change_event_data *data = (struct rtc_change_event_data *)event->event_data;

	/*
	 * RTC time is not reliable until the guest finishes updating all RTC date/time regs.
	 * So wait for a while; if no more changes happen, we can conclude that the RTC
	 * change is complete.
	 */
	timer_spec.it_value.tv_sec = RTC_CHG_WAIT_TIME;
	timer_spec.it_value.tv_nsec = 0;
	timer_spec.it_interval.tv_sec = 0;
	timer_spec.it_interval.tv_nsec = 0;
	pthread_mutex_lock(&rtc_chg_mutex);
	if (last_time_cached == BROKEN_TIME) {
		last_time_cached = data->last_time;
	}
	delta_time_sum += data->delta_time;
	/* The previous timer will be overwritten if it has not been triggered yet. */
	acrn_timer_settime(&rtc_chg_event_timer, &timer_spec);
	clock_gettime(CLOCK_MONOTONIC, &time_window_start);
	pthread_mutex_unlock(&rtc_chg_mutex);
}

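/* Fires RTC_CHG_WAIT_TIME after the last RTC change. If the quiet window has
 * fully elapsed, emit a single aggregated rtc_chg event and reset the cache.
 */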
static void rtc_chg_timer_cb(void *arg, uint64_t nexp)
{
	struct timespec now, delta;
	struct timespec time_window_size = {RTC_CHG_WAIT_TIME, 0};
	struct vmctx *ctx = arg;
	/* The event type must be set before the event is emitted. */
	struct vm_event send_event = {
		.type = VM_EVENT_RTC_CHG,
	};
	struct rtc_change_event_data *data = (struct rtc_change_event_data *)send_event.event_data;

	pthread_mutex_lock(&rtc_chg_mutex);
	clock_gettime(CLOCK_MONOTONIC, &now);
	delta = now;
	timespecsub(&delta, &time_window_start);
	/* A race is possible here; make sure this is the right timer callback for this vm_event. */
	if (timespeccmp(&delta, &time_window_size, >=)) {
		data->delta_time = delta_time_sum;
		data->last_time = last_time_cached;
		emit_vm_event(ctx, &send_event);
		last_time_cached = BROKEN_TIME;
		delta_time_sum = 0;
	}
	pthread_mutex_unlock(&rtc_chg_mutex);
}

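/* Receiver thread: block on the tunnel eventfds, then drain each kicked
 * tunnel's ring buffer and dispatch every event to its registered handler.
 */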
static void *vm_event_thread(void *param)
{
	int n, i;
	struct vm_event ve;
	eventfd_t val;
	struct vm_event_tunnel *tunnel;
	struct vmctx *ctx = param;

	struct epoll_event eventlist[MAX_EPOLL_EVENTS];

	while (started) {
		n = epoll_wait(epoll_fd, eventlist, MAX_EPOLL_EVENTS, -1);
		if (n < 0) {
			if (errno != EINTR) {
				pr_err("%s: epoll failed %d\n", __func__, errno);
			}
			continue;
		}
		for (i = 0; i < n; i++) {
			if (i < MAX_EPOLL_EVENTS) {
				tunnel = eventlist[i].data.ptr;
				/* check the tunnel before dereferencing it */
				if (tunnel && tunnel->enabled) {
					eventfd_read(tunnel->kick_fd, &val);
					while (!sbuf_is_empty(tunnel->sbuf)) {
						struct vm_event_proc *proc;
						sbuf_get(tunnel->sbuf, (uint8_t *)&ve);
						pr_dbg("%lu vm event(s) from tunnel %d, type %d\n", val, tunnel->type, ve.type);
						proc = get_vm_event_proc(&ve);
						if (proc && proc->ve_handler) {
							(proc->ve_handler)(ctx, &ve);
						} else {
							pr_warn("%s: unhandled vm event type %d\n", __func__, ve.type);
						}
					}
				}
			}
		}
	}
	return NULL;
}

static struct vm_event_tunnel ve_tunnel[MAX_VM_EVENT_TUNNELS] = {
	{
		.type = EVENT_SOURCE_TYPE_HV,
		.sbuf = (struct shared_buf *)hv_vm_event_page,
		.sbuf_size = 4096,
		.enabled = false,
	},
	{
		.type = EVENT_SOURCE_TYPE_DM,
		.sbuf = (struct shared_buf *)dm_vm_event_page,
		.sbuf_size = 4096,
		.enabled = false,
	},
};

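/* Initialize a tunnel's ring buffer, register it with the kernel HSM driver
 * (HV tunnels only), and hook its kick eventfd into the epoll loop.
 */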
static int create_event_tunnel(struct vmctx *ctx, struct vm_event_tunnel *tunnel, int epoll_fd)
{
	struct epoll_event ev;
	enum event_source_type type = tunnel->type;
	struct shared_buf *sbuf = tunnel->sbuf;
	int kick_fd = -1;
	int error;

	sbuf_init(sbuf, tunnel->sbuf_size, VM_EVENT_ELE_SIZE);

	if (type == EVENT_SOURCE_TYPE_HV) {
		error = ioctl(ctx->fd, ACRN_IOCTL_SETUP_VM_EVENT_RING, sbuf);
		if (error) {
			pr_err("%s: Setting vm_event ring failed %d\n", __func__, error);
			goto out;
		}
	}

	kick_fd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
	if (kick_fd < 0) {
		pr_err("%s: eventfd failed %d\n", __func__, errno);
		goto out;
	}

	if (type == EVENT_SOURCE_TYPE_HV) {
		error = ioctl(ctx->fd, ACRN_IOCTL_SETUP_VM_EVENT_FD, kick_fd);
		if (error) {
			pr_err("%s: Setting vm_event fd failed %d\n", __func__, error);
			goto out;
		}
	}

	ev.events = EPOLLIN;
	ev.data.ptr = tunnel;
	error = epoll_ctl(epoll_fd, EPOLL_CTL_ADD, kick_fd, &ev);
	if (error < 0) {
		pr_err("%s: failed to add fd, error is %d\n", __func__, errno);
		goto out;
	}

	tunnel->kick_fd = kick_fd;
	pthread_mutex_init(&tunnel->mtx, NULL);
	tunnel->enabled = true;

	return 0;

out:
	if (kick_fd >= 0) {
		close(kick_fd);
	}
	return -1;
}

void destory_event_tunnel(struct vm_event_tunnel *tunnel)
{
	if (tunnel->enabled) {
		close(tunnel->kick_fd);
		tunnel->enabled = false;
		pthread_mutex_destroy(&tunnel->mtx);
	}
}

int vm_event_init(struct vmctx *ctx)
{
	int error;

	epoll_fd = epoll_create1(0);
	if (epoll_fd < 0) {
		pr_err("%s: failed to create epoll %d\n", __func__, errno);
		goto out;
	}

	error = create_event_tunnel(ctx, &ve_tunnel[HV_VM_EVENT_TUNNEL], epoll_fd);
	if (error) {
		goto out;
	}

	error = create_event_tunnel(ctx, &ve_tunnel[DM_VM_EVENT_TUNNEL], epoll_fd);
	if (error) {
		goto out;
	}

	vm_event_throttle_init(ctx);

	/* Set 'started' before launching the thread, so that the thread does
	 * not observe it as false and exit immediately.
	 */
	started = true;
	error = pthread_create(&vm_event_tid, NULL, vm_event_thread, ctx);
	if (error) {
		/* pthread_create() returns an error number and does not set errno */
		pr_err("%s: failed to create vm_event thread, error %d\n", __func__, error);
		started = false;
		goto out;
	}

	acrn_timer_init(&rtc_chg_event_timer, rtc_chg_timer_cb, ctx);

	return 0;

out:
	if (epoll_fd >= 0) {
		close(epoll_fd);
	}
	destory_event_tunnel(&ve_tunnel[HV_VM_EVENT_TUNNEL]);
	destory_event_tunnel(&ve_tunnel[DM_VM_EVENT_TUNNEL]);
	return -1;
}

int vm_event_deinit(void)
{
	void *jval;

	if (started) {
		started = false;
		vm_event_throttle_deinit();
		pthread_kill(vm_event_tid, SIGCONT);
		pthread_join(vm_event_tid, &jval);
		close(epoll_fd);
		destory_event_tunnel(&ve_tunnel[HV_VM_EVENT_TUNNEL]);
		destory_event_tunnel(&ve_tunnel[DM_VM_EVENT_TUNNEL]);
	}
	return 0;
}

/* Send a DM-generated vm_event by putting it into the sbuf.
 * A dedicated thread receives and processes those events.
 * Events are dropped if the sbuf is full.
 * They might also be dropped due to event throttle control in the receiving thread.
 */
int dm_send_vm_event(struct vm_event *event)
{
	struct vm_event_tunnel *tunnel = &ve_tunnel[DM_VM_EVENT_TUNNEL];
	struct shared_buf *sbuf;
	int32_t ret = -1;
	uint32_t size_sent;

	if (!tunnel->enabled) {
		return -1;
	}
	sbuf = tunnel->sbuf;

	if (sbuf != NULL) {
		pthread_mutex_lock(&tunnel->mtx);
		size_sent = sbuf_put(sbuf, (uint8_t *)event, sizeof(*event));
		pthread_mutex_unlock(&tunnel->mtx);
		if (size_sent == VM_EVENT_ELE_SIZE) {
			eventfd_write(tunnel->kick_fd, 1UL);
			ret = 0;
		}
	}
	return ret;
}
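
/*
 * Usage sketch (illustrative only, not part of this file): a DM component
 * reporting a guest power-off could queue an event like this. The empty
 * payload is an assumption for the example; VM_EVENT_POWEROFF is the event
 * type handled in ve_proc above.
 *
 *	struct vm_event event = { .type = VM_EVENT_POWEROFF };
 *
 *	if (dm_send_vm_event(&event) != 0)
 *		pr_warn("vm_event dropped: tunnel down or sbuf full\n");
 */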