/*
 * Copyright (C) 2018-2022 Intel Corporation.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <types.h>
#include <errno.h>
#include <asm/io.h>
#include <asm/msr.h>
#include <asm/apicreg.h>
#include <asm/cpuid.h>
#include <asm/cpu_caps.h>
#include <softirq.h>
#include <trace.h>
#include <asm/irq.h>
#include <ticks.h>
#include <hw/hw_timer.h>

#define MAX_TIMER_ACTIONS	32U
#define MIN_TIMER_PERIOD_US	500U

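/*
 * Check whether a timer has expired at time 'now'.
 * Returns true if the timer has expired or is stopped (timeout == 0);
 * if 'delta' is not NULL, it receives the remaining ticks (0 when expired).
 */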
bool timer_expired(const struct hv_timer *timer, uint64_t now, uint64_t *delta)
{
	bool ret = true;
	uint64_t delt = 0UL;

	if ((timer->timeout != 0UL) && (now < timer->timeout)) {
		ret = false;
		delt = timer->timeout - now;
	}

	if (delta != NULL) {
		*delta = delt;
	}

	return ret;
}

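/*
 * A timer is considered started once it is linked on a per-cpu timer_list.
 */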
bool timer_is_started(const struct hv_timer *timer)
{
	return (!list_empty(&timer->node));
}

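/*
 * Invoke the callback of an expired timer.
 */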
static void run_timer(const struct hv_timer *timer)
{
	/* a timeout of 0 means the timer was stopped, so skip it */
	if ((timer->func != NULL) && (timer->timeout != 0UL)) {
		timer->func(timer->priv_data);
	}

	TRACE_2L(TRACE_TIMER_ACTION_PCKUP, timer->timeout, 0UL);
}

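/*
 * Program the TSC-deadline timer with the earliest timeout on this
 * pcpu's timer_list. The list is kept sorted by timeout (see
 * local_add_timer()), so the head entry is always the next event.
 */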
static inline void update_physical_timer(struct per_cpu_timers *cpu_timer)
{
	struct hv_timer *timer = NULL;

	/* find the next event timer */
	if (!list_empty(&cpu_timer->timer_list)) {
		timer = container_of((&cpu_timer->timer_list)->next,
			struct hv_timer, node);

		/* it is okay to program an already-expired deadline */
		msr_write(MSR_IA32_TSC_DEADLINE, timer->timeout);
	}
}

/*
 * Insert the timer into the per-cpu timer_list, keeping the list
 * sorted by timeout; return true if it was added at the list head.
 */
static bool local_add_timer(struct per_cpu_timers *cpu_timer,
			struct hv_timer *timer)
{
	struct list_head *pos, *prev;
	struct hv_timer *tmp;
	uint64_t tsc = timer->timeout;

	prev = &cpu_timer->timer_list;
	list_for_each(pos, &cpu_timer->timer_list) {
		tmp = container_of(pos, struct hv_timer, node);
		if (tmp->timeout < tsc) {
			prev = &tmp->node;
		} else {
			break;
		}
	}

	list_add(&timer->node, prev);

	return (prev == &cpu_timer->timer_list);
}

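/*
 * Arm 'timer' on the current pcpu. Returns 0 on success, or -EINVAL if
 * 'timer', its callback, or its timeout is invalid. For periodic timers
 * the period is clamped to at least MIN_TIMER_PERIOD_US.
 */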
int32_t add_timer(struct hv_timer *timer)
{
	struct per_cpu_timers *cpu_timer;
	uint16_t pcpu_id;
	int32_t ret = 0;
	uint64_t rflags;

	if ((timer == NULL) || (timer->func == NULL) || (timer->timeout == 0UL)) {
		ret = -EINVAL;
	} else {
		ASSERT(list_empty(&timer->node), "add timer again!\n");

		/* clamp the period of a periodic timer to the minimum allowed */
		if (timer->mode == TICK_MODE_PERIODIC) {
			timer->period_in_cycle = max(timer->period_in_cycle, us_to_ticks(MIN_TIMER_PERIOD_US));
		}

		pcpu_id = get_pcpu_id();
		cpu_timer = &per_cpu(cpu_timers, pcpu_id);

		CPU_INT_ALL_DISABLE(&rflags);
		/* update the physical timer if we're on the timer_list head */
		if (local_add_timer(cpu_timer, timer)) {
			update_physical_timer(cpu_timer);
		}
		CPU_INT_ALL_RESTORE(rflags);

		TRACE_2L(TRACE_TIMER_ACTION_ADDED, timer->timeout, 0UL);
	}

	return ret;
}

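/*
 * Fill in a timer before it is armed with add_timer(). A non-zero
 * 'period_in_cycle' selects periodic mode; 0 selects one-shot mode.
 */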
void initialize_timer(struct hv_timer *timer,
			timer_handle_t func, void *priv_data,
			uint64_t timeout, uint64_t period_in_cycle)
{
	if (timer != NULL) {
		timer->func = func;
		timer->priv_data = priv_data;
		timer->timeout = timeout;
		if (period_in_cycle > 0UL) {
			timer->mode = TICK_MODE_PERIODIC;
			timer->period_in_cycle = period_in_cycle;
		} else {
			timer->mode = TICK_MODE_ONESHOT;
			timer->period_in_cycle = 0UL;
		}
		INIT_LIST_HEAD(&timer->node);
	}
}

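/*
 * Example usage (a minimal sketch: my_timer, my_callback, my_data and
 * the 1 ms period are illustrative, not part of this file). A periodic
 * timer fires every 'period' ticks starting one period from now:
 *
 *	uint64_t period = us_to_ticks(1000U);
 *
 *	initialize_timer(&my_timer, my_callback, &my_data,
 *			cpu_ticks() + period, period);
 *	if (add_timer(&my_timer) != 0) {
 *		... handle -EINVAL ...
 *	}
 */

/*
 * Update the timeout and period of a timer; typically called before
 * (re-)arming it with add_timer().
 */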
void update_timer(struct hv_timer *timer, uint64_t timeout, uint64_t period)
{
	if (timer != NULL) {
		timer->timeout = timeout;
		if (period > 0UL) {
			timer->mode = TICK_MODE_PERIODIC;
			timer->period_in_cycle = period;
		} else {
			timer->mode = TICK_MODE_ONESHOT;
			timer->period_in_cycle = 0UL;
		}
	}
}

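/*
 * Remove a timer from its per-cpu timer_list, with interrupts disabled
 * to avoid racing with timer_softirq(). Safe to call on a timer that
 * is not currently started.
 */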
void del_timer(struct hv_timer *timer)
{
	uint64_t rflags;

	CPU_INT_ALL_DISABLE(&rflags);
	if ((timer != NULL) && !list_empty(&timer->node)) {
		list_del_init(&timer->node);
	}
	CPU_INT_ALL_RESTORE(rflags);
}

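/*
 * Initialize the per-cpu timer list for 'pcpu_id'.
 */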
static void init_percpu_timer(uint16_t pcpu_id)
{
	struct per_cpu_timers *cpu_timer;

	cpu_timer = &per_cpu(cpu_timers, pcpu_id);
	INIT_LIST_HEAD(&cpu_timer->timer_list);
}

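/*
 * SOFTIRQ_TIMER handler: run the callbacks of all expired timers on
 * this pcpu, re-arm periodic ones, then reprogram the hardware timer
 * for the next pending event.
 */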
static void timer_softirq(uint16_t pcpu_id)
{
	struct per_cpu_timers *cpu_timer;
	struct hv_timer *timer;
	const struct list_head *pos, *n;
	uint32_t tries = MAX_TIMER_ACTIONS;
	uint64_t current_tsc = cpu_ticks();

	/* handle expired timers */
	cpu_timer = &per_cpu(cpu_timers, pcpu_id);

	/* Bound the work done here so that a delay inside func() cannot
	 * block us: exit the handler after servicing at most
	 * MAX_TIMER_ACTIONS - 1 timers. A periodic timer is re-armed via
	 * local_add_timer(), so if func() is slow enough the re-armed
	 * timer has already expired by the time it is re-inserted, and
	 * without this bound we would loop here indefinitely.
	 */
	list_for_each_safe(pos, n, &cpu_timer->timer_list) {
		timer = container_of(pos, struct hv_timer, node);
		/* timer expired */
		tries--;
		if ((timer->timeout <= current_tsc) && (tries != 0U)) {
			del_timer(timer);

			run_timer(timer);

			if (timer->mode == TICK_MODE_PERIODIC) {
				/* update the periodic timer's next fire tsc */
				timer->timeout += timer->period_in_cycle;
				(void)local_add_timer(cpu_timer, timer);
			} else {
				timer->timeout = 0UL;
			}
		} else {
			break;
		}
	}

	/* reprogram the hardware timer for the nearest pending timeout */
	update_physical_timer(cpu_timer);
}

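/*
 * Per-cpu timer initialization; the BSP additionally registers the
 * timer softirq handler.
 */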
void timer_init(void)
{
	uint16_t pcpu_id = get_pcpu_id();

	init_percpu_timer(pcpu_id);

	if (pcpu_id == BSP_CPU_ID) {
		register_softirq(SOFTIRQ_TIMER, timer_softirq);
	}

	init_hw_timer();
}