/*
 * Copyright (C) 2018-2022 Intel Corporation.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <rtl.h>
#include <list.h>
#include <asm/lib/bits.h>
#include <asm/cpu.h>
#include <asm/per_cpu.h>
#include <asm/lapic.h>
#include <schedule.h>
#include <sprintf.h>
#include <asm/irq.h>
#include <trace.h>

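/**
 * @pre obj != NULL
 */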
bool is_idle_thread(const struct thread_object *obj)
{
	uint16_t pcpu_id = obj->pcpu_id;
	return (obj == &per_cpu(idle, pcpu_id));
}

static inline bool is_blocked(const struct thread_object *obj)
{
	return obj->status == THREAD_STS_BLOCKED;
}

static inline bool is_running(const struct thread_object *obj)
{
	return obj->status == THREAD_STS_RUNNING;
}

static inline void set_thread_status(struct thread_object *obj, enum thread_object_state status)
{
	obj->status = status;
}

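/*
 * The scheduler lock protects the per-pCPU scheduling state (the sched_ctl
 * flags and current thread object). It is taken with interrupts disabled;
 * the saved RFLAGS value is returned through @rflag and must be passed back
 * to release_schedule_lock().
 */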
void obtain_schedule_lock(uint16_t pcpu_id, uint64_t *rflag)
{
	struct sched_control *ctl = &per_cpu(sched_ctl, pcpu_id);
	spinlock_irqsave_obtain(&ctl->scheduler_lock, rflag);
}

void release_schedule_lock(uint16_t pcpu_id, uint64_t rflag)
{
	struct sched_control *ctl = &per_cpu(sched_ctl, pcpu_id);
	spinlock_irqrestore_release(&ctl->scheduler_lock, rflag);
}

static struct acrn_scheduler *get_scheduler(uint16_t pcpu_id)
{
	struct sched_control *ctl = &per_cpu(sched_ctl, pcpu_id);
	return ctl->scheduler;
}

/**
 * @pre obj != NULL
 */
uint16_t sched_get_pcpuid(const struct thread_object *obj)
{
	return obj->pcpu_id;
}

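/*
 * Initialize the per-pCPU scheduler control block. The active scheduler
 * is chosen at build time via the CONFIG_SCHED_* options.
 */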
void init_sched(uint16_t pcpu_id)
{
	struct sched_control *ctl = &per_cpu(sched_ctl, pcpu_id);

	per_cpu(mode_to_idle, pcpu_id) = IDLE_MODE_HLT;
	per_cpu(mode_to_kick_pcpu, pcpu_id) = DEL_MODE_IPI;

	spinlock_init(&ctl->scheduler_lock);
	ctl->flags = 0UL;
	ctl->curr_obj = NULL;
	ctl->pcpu_id = pcpu_id;
#ifdef CONFIG_SCHED_NOOP
	ctl->scheduler = &sched_noop;
#endif
#ifdef CONFIG_SCHED_IORR
	ctl->scheduler = &sched_iorr;
#endif
#ifdef CONFIG_SCHED_BVT
	ctl->scheduler = &sched_bvt;
#endif
#ifdef CONFIG_SCHED_PRIO
	ctl->scheduler = &sched_prio;
#endif
	if (ctl->scheduler->init != NULL) {
		ctl->scheduler->init(ctl);
	}
}

void deinit_sched(uint16_t pcpu_id)
{
	struct sched_control *ctl = &per_cpu(sched_ctl, pcpu_id);

	if (ctl->scheduler->deinit != NULL) {
		ctl->scheduler->deinit(ctl);
	}
}

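/* Suspend/resume hook the scheduler of the BSP only. */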
void suspend_sched(void)
{
	struct sched_control *ctl = &per_cpu(sched_ctl, BSP_CPU_ID);

	if (ctl->scheduler->suspend != NULL) {
		ctl->scheduler->suspend(ctl);
	}
}

void resume_sched(void)
{
	struct sched_control *ctl = &per_cpu(sched_ctl, BSP_CPU_ID);

	if (ctl->scheduler->resume != NULL) {
		ctl->scheduler->resume(ctl);
	}
}

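/*
 * Attach a thread object to its pCPU's scheduler. The thread starts out
 * BLOCKED so that a later wake_thread() can make it runnable.
 */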
void init_thread_data(struct thread_object *obj, struct sched_params *params)
{
	struct acrn_scheduler *scheduler = get_scheduler(obj->pcpu_id);
	uint64_t rflag;

	obtain_schedule_lock(obj->pcpu_id, &rflag);
	if (scheduler->init_data != NULL) {
		scheduler->init_data(obj, params);
	}
	/* start in BLOCKED status so the thread can be woken up to run */
	set_thread_status(obj, THREAD_STS_BLOCKED);
	release_schedule_lock(obj->pcpu_id, rflag);
}

void deinit_thread_data(struct thread_object *obj)
{
	struct acrn_scheduler *scheduler = get_scheduler(obj->pcpu_id);

	if (scheduler->deinit_data != NULL) {
		scheduler->deinit_data(obj);
	}
}

struct thread_object *sched_get_current(uint16_t pcpu_id)
{
	struct sched_control *ctl = &per_cpu(sched_ctl, pcpu_id);
	return ctl->curr_obj;
}

/*
 * Set NEED_RESCHEDULE on the target pCPU and, if it is a remote pCPU,
 * kick it so the flag is noticed promptly.
 */
void make_reschedule_request(uint16_t pcpu_id)
{
	struct sched_control *ctl = &per_cpu(sched_ctl, pcpu_id);

	bitmap_set_lock(NEED_RESCHEDULE, &ctl->flags);
	if (get_pcpu_id() != pcpu_id) {
		kick_pcpu(pcpu_id);
	}
}

bool need_reschedule(uint16_t pcpu_id)
{
	struct sched_control *ctl = &per_cpu(sched_ctl, pcpu_id);

	return bitmap_test(NEED_RESCHEDULE, &ctl->flags);
}

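/*
 * Core reschedule path for the local pCPU: ask the scheduler to pick the
 * next thread (defaulting to the idle thread), clear NEED_RESCHEDULE and,
 * if the pick differs from the current thread, run the switch_out/switch_in
 * hooks and switch context.
 */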
void schedule(void)
{
	uint16_t pcpu_id = get_pcpu_id();
	struct sched_control *ctl = &per_cpu(sched_ctl, pcpu_id);
	struct thread_object *next = &per_cpu(idle, pcpu_id);
	struct thread_object *prev = ctl->curr_obj;
	uint64_t rflag;
	char name[16];

	obtain_schedule_lock(pcpu_id, &rflag);
	if (ctl->scheduler->pick_next != NULL) {
		next = ctl->scheduler->pick_next(ctl);
	}
	bitmap_clear_lock(NEED_RESCHEDULE, &ctl->flags);

	/* If we picked a different sched object, switch context */
	if (prev != next) {
		if (prev != NULL) {
			/* trace the switch as the first 4 chars of prev's and next's names */
			memcpy_erms(name, prev->name, 4);
			memcpy_erms(name + 4, next->name, 4);
			memset(name + 8, 0, sizeof(name) - 8);
			TRACE_16STR(TRACE_SCHED_NEXT, name);
			if (prev->switch_out != NULL) {
				prev->switch_out(prev);
			}
			set_thread_status(prev, prev->be_blocking ? THREAD_STS_BLOCKED : THREAD_STS_RUNNABLE);
			prev->be_blocking = false;
		}

		if (next->switch_in != NULL) {
			next->switch_in(next);
		}
		set_thread_status(next, THREAD_STS_RUNNING);

		ctl->curr_obj = next;
		release_schedule_lock(pcpu_id, rflag);
		arch_switch_to(&prev->host_sp, &next->host_sp);
	} else {
		release_schedule_lock(pcpu_id, rflag);
	}
}

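/*
 * Block a thread. A thread that is currently running cannot be marked
 * BLOCKED right away; instead be_blocking is set and a reschedule is
 * requested, and schedule() completes the transition when it switches
 * the thread out.
 */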
void sleep_thread(struct thread_object *obj)
{
	uint16_t pcpu_id = obj->pcpu_id;
	struct acrn_scheduler *scheduler = get_scheduler(pcpu_id);
	uint64_t rflag;

	obtain_schedule_lock(pcpu_id, &rflag);
	if (scheduler->sleep != NULL) {
		scheduler->sleep(obj);
	}
	if (is_running(obj)) {
		make_reschedule_request(pcpu_id);
		obj->be_blocking = true;
	} else {
		set_thread_status(obj, THREAD_STS_BLOCKED);
	}
	release_schedule_lock(pcpu_id, rflag);
}

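/*
 * Synchronous variant of sleep_thread(): spin (with PAUSE) until the
 * target thread has actually reached BLOCKED state.
 */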
void sleep_thread_sync(struct thread_object *obj)
{
	sleep_thread(obj);
	while (!is_blocked(obj)) {
		asm_pause();
	}
}

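/*
 * Unblock a thread that is blocked, or still running with be_blocking set.
 * A BLOCKED thread becomes RUNNABLE and a reschedule is requested on its
 * pCPU.
 */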
void wake_thread(struct thread_object *obj)
{
	uint16_t pcpu_id = obj->pcpu_id;
	struct acrn_scheduler *scheduler;
	uint64_t rflag;

	obtain_schedule_lock(pcpu_id, &rflag);
	if (is_blocked(obj) || obj->be_blocking) {
		scheduler = get_scheduler(pcpu_id);
		if (scheduler->wake != NULL) {
			scheduler->wake(obj);
		}
		if (is_blocked(obj)) {
			set_thread_status(obj, THREAD_STS_RUNNABLE);
			make_reschedule_request(pcpu_id);
		}
		obj->be_blocking = false;
	}
	release_schedule_lock(pcpu_id, rflag);
}

void yield_current(void)
{
	make_reschedule_request(get_pcpu_id());
}

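/*
 * Launch a thread directly on the current pCPU: mark it RUNNING and enter
 * its thread_entry without going through schedule().
 */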
void run_thread(struct thread_object *obj)
{
	uint64_t rflag;

	obtain_schedule_lock(obj->pcpu_id, &rflag);
	get_cpu_var(sched_ctl).curr_obj = obj;
	set_thread_status(obj, THREAD_STS_RUNNING);
	release_schedule_lock(obj->pcpu_id, rflag);

	if (obj->thread_entry != NULL) {
		obj->thread_entry(obj);
	}
}