/*
 * Copyright (C) 2019-2022 Intel Corporation.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <list.h>
#include <asm/per_cpu.h>
#include <schedule.h>
#include <ticks.h>

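/*
 * sched_iorr: ACRN's IORR (I/O-sensitive round-robin) scheduler. Each
 * thread object gets a time-slice budget of CONFIG_SLICE_MS milliseconds
 * (kept in CPU ticks); a periodic per-CPU tick timer charges the running
 * thread and requests a reschedule once the budget runs out.
 */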
#define CONFIG_SLICE_MS 10UL
struct sched_iorr_data {
	/* keep list as the first item */
	struct list_head list;

	uint64_t slice_cycles;	/* full time slice, in CPU ticks */
	uint64_t last_cycles;	/* timestamp of the last budget update */
	int64_t  left_cycles;	/* remaining budget; signed, may go negative before replenishment */
};

/*
 * Check whether the thread object is already linked on a runqueue.
 *
 * @pre obj != NULL
 * @pre obj->data != NULL
 */
bool is_inqueue(struct thread_object *obj)
{
	struct sched_iorr_data *data = (struct sched_iorr_data *)obj->data;
	return !list_empty(&data->list);
}

/*
 * Add the thread object to the head of the runqueue if it is not queued yet.
 *
 * @pre obj != NULL
 * @pre obj->data != NULL
 * @pre obj->sched_ctl != NULL
 * @pre obj->sched_ctl->priv != NULL
 */
void runqueue_add_head(struct thread_object *obj)
{
	struct sched_iorr_control *iorr_ctl = (struct sched_iorr_control *)obj->sched_ctl->priv;
	struct sched_iorr_data *data = (struct sched_iorr_data *)obj->data;

	if (!is_inqueue(obj)) {
		list_add(&data->list, &iorr_ctl->runqueue);
	}
}
47 
48 /*
49  * @pre obj != NULL
50  * @pre obj->data != NULL
51  * @pre obj->sched_ctl != NULL
52  * @pre obj->sched_ctl->priv != NULL
53  */
runqueue_add_tail(struct thread_object * obj)54 void runqueue_add_tail(struct thread_object *obj)
55 {
56 	struct sched_iorr_control *iorr_ctl = (struct sched_iorr_control *)obj->sched_ctl->priv;
57 	struct sched_iorr_data *data = (struct sched_iorr_data *)obj->data;
58 
59 	if (!is_inqueue(obj)) {
60 		list_add_tail(&data->list, &iorr_ctl->runqueue);
61 	}
62 }

/*
 * Detach the thread object from its runqueue; list_del_init() re-initializes
 * the node, so is_inqueue() stays correct afterwards.
 *
 * @pre obj != NULL
 * @pre obj->data != NULL
 */
void runqueue_remove(struct thread_object *obj)
{
	struct sched_iorr_data *data = (struct sched_iorr_data *)obj->data;
	list_del_init(&data->list);
}

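/*
 * Periodic tick callback. Under the per-CPU schedule lock, charge the
 * elapsed ticks to the running thread and request a reschedule when it is
 * the idle thread (with work queued) or its slice budget is exhausted.
 */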
static void sched_tick_handler(void *param)
{
	struct sched_control *ctl = (struct sched_control *)param;
	struct sched_iorr_control *iorr_ctl = (struct sched_iorr_control *)ctl->priv;
	struct sched_iorr_data *data;
	struct thread_object *current;
	uint16_t pcpu_id = get_pcpu_id();
	uint64_t now = cpu_ticks();
	uint64_t rflags;

	obtain_schedule_lock(pcpu_id, &rflags);
	current = ctl->curr_obj;
	/* If no vCPU has started scheduling yet, ignore this tick */
	if (current != NULL) {
		if (!(is_idle_thread(current) && list_empty(&iorr_ctl->runqueue))) {
			data = (struct sched_iorr_data *)current->data;
			/* consume the left_cycles of the current thread_object if it is not idle */
			if (!is_idle_thread(current)) {
				data->left_cycles -= now - data->last_cycles;
				data->last_cycles = now;
			}
			/* make a reschedule request if the current thread ran out of its cycles */
			if (is_idle_thread(current) || data->left_cycles <= 0) {
				make_reschedule_request(pcpu_id);
			}
		}
	}
	release_schedule_lock(pcpu_id, rflags);
}

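/*
 * Arm the periodic tick timer that drives slice accounting; it fires once
 * per millisecond (TICKS_PER_MS).
 */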
int sched_iorr_add_timer(struct sched_control *ctl)
{
	struct sched_iorr_control *iorr_ctl = &per_cpu(sched_iorr_ctl, ctl->pcpu_id);
	uint64_t tick_period = TICKS_PER_MS;
	int ret = 0;

	/* The tick_timer fires periodically */
	initialize_timer(&iorr_ctl->tick_timer, sched_tick_handler, ctl,
			cpu_ticks() + tick_period, tick_period);

	if (add_timer(&iorr_ctl->tick_timer) < 0) {
		pr_err("Failed to add schedule tick timer!");
		ret = -1;
	}
	return ret;
}

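/* Stop the periodic tick timer for this pCPU. */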
static void sched_iorr_del_timer(struct sched_control *ctl)
{
	struct sched_iorr_control *iorr_ctl = (struct sched_iorr_control *)ctl->priv;
	del_timer(&iorr_ctl->tick_timer);
}

/*
 * Initialize the per-CPU IORR scheduler control and start its tick timer.
 *
 * @pre ctl->pcpu_id == get_pcpu_id()
 */
int sched_iorr_init(struct sched_control *ctl)
{
	struct sched_iorr_control *iorr_ctl = &per_cpu(sched_iorr_ctl, ctl->pcpu_id);

	ASSERT(get_pcpu_id() == ctl->pcpu_id, "Init scheduler on wrong CPU!");

	ctl->priv = iorr_ctl;
	INIT_LIST_HEAD(&iorr_ctl->runqueue);
	return sched_iorr_add_timer(ctl);
}

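/* Tear down the scheduler by stopping its tick timer. */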
void sched_iorr_deinit(struct sched_control *ctl)
{
	sched_iorr_del_timer(ctl);
}

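/* On suspend, stop the tick timer; sched_iorr_resume() re-arms it. */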
static void sched_iorr_suspend(struct sched_control *ctl)
{
	sched_iorr_del_timer(ctl);
}

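/* Re-arm the tick timer when the pCPU resumes. */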
static void sched_iorr_resume(struct sched_control *ctl)
{
	sched_iorr_add_timer(ctl);
}

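/* Give a new thread object an unqueued list node and a full slice budget. */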
void sched_iorr_init_data(struct thread_object *obj, __unused struct sched_params *params)
{
	struct sched_iorr_data *data;

	data = (struct sched_iorr_data *)obj->data;
	INIT_LIST_HEAD(&data->list);
	data->left_cycles = data->slice_cycles = CONFIG_SLICE_MS * TICKS_PER_MS;
}

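/*
 * Pick the next thread to run: charge the current thread and rotate it to
 * the runqueue tail, then take the runqueue head (replenishing its budget
 * if needed), or fall back to the idle thread when the runqueue is empty.
 */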
static struct thread_object *sched_iorr_pick_next(struct sched_control *ctl)
{
	struct sched_iorr_control *iorr_ctl = (struct sched_iorr_control *)ctl->priv;
	struct thread_object *next = NULL;
	struct thread_object *current = NULL;
	struct sched_iorr_data *data;
	uint64_t now = cpu_ticks();

	current = ctl->curr_obj;
	data = (struct sched_iorr_data *)current->data;
	/* Ignore the idle object and inactive objects */
	if (!is_idle_thread(current) && is_inqueue(current)) {
		data->left_cycles -= now - data->last_cycles;
		if (data->left_cycles <= 0) {
			/* replenish the thread_object with slice_cycles */
			data->left_cycles += data->slice_cycles;
		}
		/* move the thread_object to the tail */
		runqueue_remove(current);
		runqueue_add_tail(current);
	}

	/*
	 * Pick the next runnable sched object:
	 * 1) take the first item in the runqueue
	 * 2) if the object picked has no cycles left, replenish it and pick this one
	 * 3) fall back to the idle sched object if there is no runnable one after steps 1) and 2)
	 */
	if (!list_empty(&iorr_ctl->runqueue)) {
		next = get_first_item(&iorr_ctl->runqueue, struct thread_object, data);
		data = (struct sched_iorr_data *)next->data;
		data->last_cycles = now;
		while (data->left_cycles <= 0) {
			data->left_cycles += data->slice_cycles;
		}
	} else {
		next = &get_cpu_var(idle);
	}

	return next;
}

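/* A sleeping thread is simply removed from the runqueue. */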
static void sched_iorr_sleep(struct thread_object *obj)
{
	runqueue_remove(obj);
}

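/* A woken thread is queued at the head so it is picked before its round-robin peers. */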
static void sched_iorr_wake(struct thread_object *obj)
{
	runqueue_add_head(obj);
}

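/* Callback table exported to the core scheduling framework. */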
struct acrn_scheduler sched_iorr = {
	.name		= "sched_iorr",
	.init		= sched_iorr_init,
	.init_data	= sched_iorr_init_data,
	.pick_next	= sched_iorr_pick_next,
	.sleep		= sched_iorr_sleep,
	.wake		= sched_iorr_wake,
	.deinit		= sched_iorr_deinit,
	.suspend	= sched_iorr_suspend,
	.resume		= sched_iorr_resume,
};