/*
 * Copyright 2024 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/timer_mgmt.h"

#include "hf/arch/timer.h"

#include "hf/api.h"
#include "hf/check.h"
#include "hf/cpu.h"
#include "hf/std.h"
#include "hf/vcpu.h"

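/**
 * Track a vCPU's timer by adding its entry to the list of pending timer
 * vCPUs maintained by the partition manager for the given CPU.
 */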
static void timer_list_add_vcpu(struct cpu *cpu, struct vcpu *vcpu)
{
	struct timer_pending_vcpu_list *timer_list;

	assert(vcpu != NULL && cpu != NULL);

	timer_list = &cpu->pending_timer_vcpus_list;
	sl_lock(&cpu->lock);

	/* Add the vCPU's timer entry if not already part of any list. */
	if (list_empty(&vcpu->timer_node)) {
		/* `root_entry` is also the tail of the timer list. */
		list_prepend(&timer_list->root_entry, &vcpu->timer_node);
	}

	sl_unlock(&cpu->lock);
}

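/**
 * Stop tracking a vCPU's timer by removing its entry from the list of
 * pending timer vCPUs maintained for the given CPU.
 */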
static void timer_list_remove_vcpu(struct cpu *cpu, struct vcpu *vcpu)
{
	assert(vcpu != NULL && cpu != NULL);

	sl_lock(&cpu->lock);
	list_remove(&vcpu->timer_node);
	sl_unlock(&cpu->lock);
}

/**
 * Depending on the state of the vCPU's arch timer, either track or untrack it
 * through the timer list on the current CPU.
 */
void timer_vcpu_manage(struct vcpu *vcpu)
{
	assert(vcpu != NULL);

	if (arch_timer_enabled(&vcpu->regs)) {
		/*
		 * Add it to the list maintained by the partition manager for
		 * this CPU.
		 */
		timer_list_add_vcpu(vcpu->cpu, vcpu);
	} else {
		timer_list_remove_vcpu(vcpu->cpu, vcpu);
	}
}

/**
 * A vCPU's timer entry is the last entry in the list if its `next` field
 * points to the `root_entry` of the list.
 */
static inline bool timer_is_list_end(struct vcpu *vcpu,
				     struct timer_pending_vcpu_list *timer_list)
{
	return (vcpu->timer_node.next == &timer_list->root_entry);
}

/**
 * Find the vCPU with the nearest timer deadline among those tracked by the
 * partition manager on the current CPU.
 */
struct vcpu *timer_find_vcpu_nearest_deadline(struct cpu *cpu)
{
	struct vcpu *vcpu_with_deadline = NULL;
	struct vcpu *it_vcpu = NULL;
	struct timer_pending_vcpu_list *timer_list;
	uint64_t near_deadline = UINT64_MAX;
	struct list_entry *next_timer_entry;

	assert(cpu != NULL);

	timer_list = &cpu->pending_timer_vcpus_list;
	sl_lock(&cpu->lock);

	if (list_empty(&timer_list->root_entry)) {
		goto out;
	}

	next_timer_entry = timer_list->root_entry.next;

	/* Iterate to find the vCPU with the nearest deadline. */
	do {
		uint64_t expiry_ns;

		/* vCPU iterator. */
		it_vcpu =
			CONTAINER_OF(next_timer_entry, struct vcpu, timer_node);
		assert(arch_timer_enabled(&it_vcpu->regs));

		expiry_ns = arch_timer_remaining_ns(&it_vcpu->regs);

		if (expiry_ns < near_deadline) {
			near_deadline = expiry_ns;
			vcpu_with_deadline = it_vcpu;
		}

		/* Look at the next entry in the list. */
		next_timer_entry = it_vcpu->timer_node.next;
	} while (!timer_is_list_end(it_vcpu, timer_list));

out:
	sl_unlock(&cpu->lock);
	return vcpu_with_deadline;
}

/**
 * Find the vCPU whose timer deadline has expired and which therefore needs
 * to be resumed at the earliest opportunity.
 */
struct vcpu *timer_find_target_vcpu(struct vcpu *current)
{
	struct vcpu *target_vcpu;

	/*
	 * There are three possible scenarios when execution is brought back
	 * from the NWd to the SPMC upon expiry of the host timer:
	 * 1. The vCPU that was being tracked by the SPMC on this CPU (say
	 *    CPUx) has an expired timer. This will be the target vCPU.
	 *
	 *    However, this vCPU could have been migrated by the NWd driver
	 *    to another CPU (say CPUy). In that case, the S-EL2 host timer
	 *    on CPUy will eventually take care of signaling the virtual
	 *    timer interrupt.
	 *
	 * 2. If another vCPU in the list maintained by the SPMC on CPUx has
	 *    an expired timer, the SPMC will pick that vCPU to be the
	 *    target_vcpu.
	 * 3. If none of the vCPUs has an expired timer, simply resume the
	 *    normal world, i.e., target_vcpu will be NULL.
	 */
	if (current->vm->id == HF_OTHER_WORLD_ID) {
		target_vcpu = timer_find_vcpu_nearest_deadline(current->cpu);
		if (target_vcpu != NULL) {
			if (arch_timer_remaining_ns(&target_vcpu->regs) == 0) {
				/*
				 * The SPMC will either signal or queue the
				 * virtual timer interrupt to the target vCPU.
				 * No need to track this vCPU anymore.
				 */
				timer_list_remove_vcpu(current->cpu,
						       target_vcpu);
			} else {
				target_vcpu = NULL;
			}
		}
	} else {
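		/*
		 * Execution was already with a secure partition vCPU, so it
		 * is itself the target.
		 */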
		target_vcpu = current;
	}

	return target_vcpu;
}

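/**
 * Migrate a vCPU's timer entry from the timer list of the CPU it previously
 * ran on to the timer list of the CPU it is migrating to.
 */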
void timer_migrate_to_other_cpu(struct cpu *to_cpu,
				struct vcpu_locked migrate_vcpu_locked)
{
	struct cpu *from_cpu;
	struct vcpu *migrate_vcpu;

	assert(to_cpu != NULL);

	migrate_vcpu = migrate_vcpu_locked.vcpu;
	from_cpu = migrate_vcpu->cpu;

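	/* Nothing to do unless the vCPU is moving to a different CPU. */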
	if (from_cpu != NULL && (to_cpu != from_cpu)) {
		if (!list_empty(&migrate_vcpu->timer_node)) {
			assert(arch_timer_enabled(&migrate_vcpu->regs));

			/*
			 * Remove the vCPU from the timer list maintained by
			 * the SPMC for the old CPU.
			 */
			timer_list_remove_vcpu(from_cpu, migrate_vcpu);

			/*
			 * Add the vCPU to the timer list maintained by the
			 * SPMC for the new CPU.
			 */
			timer_list_add_vcpu(to_cpu, migrate_vcpu);
		}
	}
}