1 /*
2  * Copyright (C) 2018-2022 Intel Corporation.
3  *
4  * SPDX-License-Identifier: BSD-3-Clause
5  */
6 
7 #include <hash.h>
8 #include <asm/per_cpu.h>
9 #include <asm/guest/vm.h>
10 #include <softirq.h>
11 #include <ptdev.h>
12 #include <irq.h>
13 #include <logmsg.h>
14 #include <asm/vtd.h>
15 #include <ticks.h>
16 
17 #define PTIRQ_ENTRY_HASHBITS	9U
18 #define PTIRQ_ENTRY_HASHSIZE	(1U << PTIRQ_ENTRY_HASHBITS)
19 
20 #define PTIRQ_BITMAP_ARRAY_SIZE	INT_DIV_ROUNDUP(CONFIG_MAX_PT_IRQ_ENTRIES, 64U)
21 struct ptirq_remapping_info ptirq_entries[CONFIG_MAX_PT_IRQ_ENTRIES];
22 static uint64_t ptirq_entry_bitmaps[PTIRQ_BITMAP_ARRAY_SIZE];
23 spinlock_t ptdev_lock = { .head = 0U, .tail = 0U, };
24 
/* lookup mapping info from physical sid, hashing from sid + acrn_vm structure address (NULL) */
26 static struct hlist_head phys_sid_htable[PTIRQ_ENTRY_HASHSIZE];
27 /* lookup mapping info from virtual sid within a vm, hashing from sid + acrn_vm structure address */
28 static struct hlist_head virt_sid_htable[PTIRQ_ENTRY_HASHSIZE];
29 
ptirq_alloc_entry_id(void)30 static inline uint16_t ptirq_alloc_entry_id(void)
31 {
32 	uint16_t id = (uint16_t)ffz64_ex(ptirq_entry_bitmaps, CONFIG_MAX_PT_IRQ_ENTRIES);
33 
34 	while (id < CONFIG_MAX_PT_IRQ_ENTRIES) {
35 		if (!bitmap_test_and_set_lock((id & 0x3FU), &ptirq_entry_bitmaps[id >> 6U])) {
36 			break;
37 		}
38 		id = (uint16_t)ffz64_ex(ptirq_entry_bitmaps, CONFIG_MAX_PT_IRQ_ENTRIES);
39 	}
40 
41 	return (id < CONFIG_MAX_PT_IRQ_ENTRIES) ? id: INVALID_PTDEV_ENTRY_ID;
42 }
43 
44 /*
45  * get the hash key when looking up ptirq_remapping_info from virtual
46  * source id in a VM, or just physical source id (vm == NULL).
47  * Hashing from source id value and acrn_vm structure address can decrease
48  * the probability of hash collisions as different VMs may have equal
49  * virtual source ids.
50  */
ptirq_hash_key(const struct acrn_vm * vm,const union source_id * sid)51 static inline uint64_t ptirq_hash_key(const struct acrn_vm *vm,
52 		const union source_id *sid)
53 {
54 	return hash64(sid->value + (uint64_t)vm, PTIRQ_ENTRY_HASHBITS);
55 }
56 
/*
 * to find ptirq_remapping_info from physical source id (vm == NULL) or
 * virtual source id in a vm.
 */
struct ptirq_remapping_info *find_ptirq_entry(uint32_t intr_type,
		const union source_id *sid, const struct acrn_vm *vm)
{
	struct ptirq_remapping_info *candidate;
	struct ptirq_remapping_info *found = NULL;
	struct hlist_node *node;
	uint64_t key = ptirq_hash_key(vm, sid);

	if (vm != NULL) {
		/* virtual-sid lookup: match interrupt type, virtual sid and owning VM */
		hlist_for_each(node, &(virt_sid_htable[key])) {
			candidate = hlist_entry(node, struct ptirq_remapping_info, virt_link);
			if (is_entry_active(candidate) && (intr_type == candidate->intr_type)
					&& (sid->value == candidate->virt_sid.value) && (vm == candidate->vm)) {
				found = candidate;
				break;
			}
		}
	} else {
		/* physical-sid lookup: match interrupt type and physical sid */
		hlist_for_each(node, &(phys_sid_htable[key])) {
			candidate = hlist_entry(node, struct ptirq_remapping_info, phys_link);
			if (is_entry_active(candidate) && (intr_type == candidate->intr_type)
					&& (sid->value == candidate->phys_sid.value)) {
				found = candidate;
				break;
			}
		}
	}

	return found;
}
96 
/*
 * Queue @entry on the current pcpu's softirq device list and raise
 * SOFTIRQ_PTDEV so the softirq handler picks it up.
 *
 * Safe against double-queuing: the entry is unlinked first, so it can
 * never appear on the list twice. Runs with local interrupts disabled
 * to serialize against the interrupt handler on this CPU.
 */
static void ptirq_enqueue_softirq(struct ptirq_remapping_info *entry)
{
	uint64_t rflags;

	/* enqueue request in order, SOFTIRQ_PTDEV will pickup */
	CPU_INT_ALL_DISABLE(&rflags);

	/* avoid adding recursively */
	list_del(&entry->softirq_node);
	/* TODO: assert if entry already in list */
	list_add_tail(&entry->softirq_node, &get_cpu_var(softirq_dev_entry_list));
	CPU_INT_ALL_RESTORE(rflags);
	fire_softirq(SOFTIRQ_PTDEV);
}
111 
/*
 * Delay-timer callback: the injection delay for this entry has elapsed,
 * so hand it back to the softirq path for delivery.
 */
static void ptirq_intr_delay_callback(void *data)
{
	ptirq_enqueue_softirq((struct ptirq_remapping_info *)data);
}
118 
/*
 * Dequeue the next deliverable pass-through irq entry queued for
 * @pcpu_id.
 *
 * Service VM entries are returned immediately. User VM entries are
 * returned only if their interrupt-storm delay timer has expired;
 * otherwise the timer is armed (ptirq_intr_delay_callback re-enqueues
 * the entry when it fires) and the next queued entry is examined.
 *
 * Returns NULL when nothing deliverable is pending.
 */
struct ptirq_remapping_info *ptirq_dequeue_softirq(uint16_t pcpu_id)
{
	uint64_t rflags;
	struct ptirq_remapping_info *entry = NULL;

	CPU_INT_ALL_DISABLE(&rflags);

	/*
	 * Operate consistently on pcpu_id's list: the loop condition used
	 * to test the *current* CPU's list (get_cpu_var) while the dequeue
	 * read per_cpu(..., pcpu_id); the two diverge whenever pcpu_id is
	 * not the running CPU.
	 */
	while (!list_empty(&per_cpu(softirq_dev_entry_list, pcpu_id))) {
		entry = get_first_item(&per_cpu(softirq_dev_entry_list, pcpu_id), struct ptirq_remapping_info, softirq_node);

		list_del_init(&entry->softirq_node);

		/* if Service VM, just dequeue, if User VM, check delay timer */
		if (is_service_vm(entry->vm) || timer_expired(&entry->intr_delay_timer, cpu_ticks(), NULL)) {
			break;
		} else {
			/* add it into timer list; dequeue next one */
			(void)add_timer(&entry->intr_delay_timer);
			entry = NULL;
		}
	}

	CPU_INT_ALL_RESTORE(rflags);
	return entry;
}
144 
/*
 * Allocate and zero-initialize a pass-through irq remapping entry for
 * @vm with interrupt type @intr_type.
 *
 * Returns the entry (inactive, delay timer initialized) or NULL when
 * the entry pool is exhausted.
 */
struct ptirq_remapping_info *ptirq_alloc_entry(struct acrn_vm *vm, uint32_t intr_type)
{
	struct ptirq_remapping_info *entry = NULL;
	uint16_t id = ptirq_alloc_entry_id();

	if (id >= CONFIG_MAX_PT_IRQ_ENTRIES) {
		pr_err("Alloc ptdev irq entry failed");
	} else {
		entry = &ptirq_entries[id];
		(void)memset((void *)entry, 0U, sizeof(struct ptirq_remapping_info));

		entry->ptdev_entry_id = id;
		entry->intr_type = intr_type;
		entry->vm = vm;
		entry->intr_count = 0UL;
		entry->irte_idx = INVALID_IRTE_ID;
		entry->active = false;

		INIT_LIST_HEAD(&entry->softirq_node);

		/* delay timer is armed later by the interrupt handler when
		 * interrupt-storm throttling kicks in */
		initialize_timer(&entry->intr_delay_timer, ptirq_intr_delay_callback, entry, 0UL, 0UL);
	}

	return entry;
}
170 
/*
 * Return a pass-through irq remapping entry to the free pool.
 *
 * With local interrupts disabled (so the interrupt/softirq paths on
 * this CPU cannot race), the entry is unlinked from the softirq list
 * and its delay timer is stopped; its slot in ptirq_entry_bitmaps is
 * then freed and the entry contents wiped.
 */
void ptirq_release_entry(struct ptirq_remapping_info *entry)
{
	uint64_t rflags;

	CPU_INT_ALL_DISABLE(&rflags);
	list_del_init(&entry->softirq_node);
	del_timer(&entry->intr_delay_timer);
	CPU_INT_ALL_RESTORE(rflags);

	/* free the id: low 6 bits select the bit, high bits the 64-bit word */
	bitmap_clear_lock((entry->ptdev_entry_id) & 0x3FU, &ptirq_entry_bitmaps[entry->ptdev_entry_id >> 6U]);

	(void)memset((void *)entry, 0U, sizeof(struct ptirq_remapping_info));
}
184 
/*
 * Physical interrupt handler for a pass-through device irq.
 * Runs in interrupt context; @data is the owning ptirq_remapping_info.
 *
 * For User VMs it counts interrupts and, when the VM has a configured
 * injection delay (intr_inject_delay_delta), defers delivery through
 * the entry's delay timer instead of enqueuing immediately.
 */
static void ptirq_interrupt_handler(__unused uint32_t irq, void *data)
{
	struct ptirq_remapping_info *entry = (struct ptirq_remapping_info *) data;
	bool to_enqueue = true;

	/*
	 * "interrupt storm" detection & delay intr injection just for User VM
	 * pass-thru devices, collect its data and delay injection if needed
	 */
	if (!is_service_vm(entry->vm)) {
		entry->intr_count++;

		/* if delta > 0, set the delay TSC, dequeue to handle */
		if (entry->vm->intr_inject_delay_delta > 0UL) {

			/* if the timer started (entry is in timer-list), not need enqueue again */
			if (timer_is_started(&entry->intr_delay_timer)) {
				to_enqueue = false;
			} else {
				update_timer(&entry->intr_delay_timer,
					     cpu_ticks() + entry->vm->intr_inject_delay_delta, 0UL);
			}
		} else {
			/* no delay configured: reset the timer so the softirq
			 * path sees it as expired and delivers immediately */
			update_timer(&entry->intr_delay_timer, 0UL, 0UL);
		}
	}

	/* when skipped here, ptirq_intr_delay_callback() enqueues the entry
	 * once the pending delay timer fires */
	if (to_enqueue) {
		ptirq_enqueue_softirq(entry);
	}
}
217 
218 /* active intr with irq registering */
ptirq_activate_entry(struct ptirq_remapping_info * entry,uint32_t phys_irq)219 int32_t ptirq_activate_entry(struct ptirq_remapping_info *entry, uint32_t phys_irq)
220 {
221 	int32_t ret = 0;
222 	uint32_t irq = IRQ_INVALID;
223 	uint64_t key;
224 
225 	if ((entry->intr_type == PTDEV_INTR_INTX) || !is_pi_capable(entry->vm)) {
226 		/* register and allocate host vector/irq */
227 		ret = request_irq(phys_irq, ptirq_interrupt_handler, (void *)entry, IRQF_PT);
228 		if (ret >=0) {
229 			irq = (uint32_t)ret;
230 		} else {
231 			pr_err("request irq failed, please check!, phys-irq=%d", phys_irq);
232 		}
233 	}
234 
235 	if (ret >=0) {
236 		entry->allocated_pirq = irq;
237 		entry->active = true;
238 
239 		key = ptirq_hash_key(NULL, &(entry->phys_sid));
240 		hlist_add_head(&entry->phys_link, &(phys_sid_htable[key]));
241 		key = ptirq_hash_key(entry->vm, &(entry->virt_sid));
242 		hlist_add_head(&entry->virt_link, &(virt_sid_htable[key]));
243 	}
244 
245 	return ret;
246 }
247 
/*
 * Deactivate a pass-through irq entry: unlink it from both lookup hash
 * tables, mark it inactive, and free the host irq if one was allocated
 * by ptirq_activate_entry().
 */
void ptirq_deactivate_entry(struct ptirq_remapping_info *entry)
{
	hlist_del(&entry->phys_link);
	hlist_del(&entry->virt_link);
	entry->active = false;
	/* allocated_pirq stays IRQ_INVALID when no host handler was
	 * registered (posted-interrupt MSI case) */
	if (entry->allocated_pirq != IRQ_INVALID) {
		free_irq(entry->allocated_pirq);
	}
}
257 
ptdev_init(void)258 void ptdev_init(void)
259 {
260 	if (get_pcpu_id() == BSP_CPU_ID) {
261 		register_softirq(SOFTIRQ_PTDEV, ptirq_softirq);
262 	}
263 	INIT_LIST_HEAD(&get_cpu_var(softirq_dev_entry_list));
264 }
265 
/*
 * Release every active pass-through irq entry owned by @vm.
 *
 * For each matching entry, the per-entry release callback (if set) runs
 * under ptdev_lock, then the entry is deactivated and returned to the
 * free pool.
 *
 * NOTE(review): the vm/active check happens before ptdev_lock is taken —
 * presumably safe because the VM is already down (per the comment below)
 * so no path can concurrently activate entries for it; confirm.
 */
void ptdev_release_all_entries(const struct acrn_vm *vm)
{
	struct ptirq_remapping_info *entry;
	uint16_t idx;

	/* VM already down */
	for (idx = 0U; idx < CONFIG_MAX_PT_IRQ_ENTRIES; idx++) {
		entry = &ptirq_entries[idx];
		if ((entry->vm == vm) && is_entry_active(entry)) {
			spinlock_obtain(&ptdev_lock);
			if (entry->release_cb != NULL) {
				entry->release_cb(entry);
			}
			ptirq_deactivate_entry(entry);
			ptirq_release_entry(entry);
			spinlock_release(&ptdev_lock);
		}
	}

}
286 
/*
 * Collect (allocated_pirq, intr_count) pairs for every active entry
 * owned by @target_vm into @buffer.
 *
 * @buffer_cnt is the capacity of @buffer in uint64_t elements; at most
 * buffer_cnt (rounded down to an even count) elements are written.
 *
 * Returns the number of uint64_t elements written.
 */
uint32_t ptirq_get_intr_data(const struct acrn_vm *target_vm, uint64_t *buffer, uint32_t buffer_cnt)
{
	uint32_t index = 0U;
	uint16_t i;
	struct ptirq_remapping_info *entry;

	for (i = 0U; i < CONFIG_MAX_PT_IRQ_ENTRIES; i++) {
		/*
		 * Stop before writing a pair that would not fit. The previous
		 * code bounds-checked only after the write, overrunning the
		 * buffer by one element for odd buffer_cnt, and disabling the
		 * check entirely for buffer_cnt < 2 (unsigned wrap-around in
		 * buffer_cnt - 2U).
		 */
		if ((index + 2U) > buffer_cnt) {
			break;
		}

		entry = &ptirq_entries[i];
		if (!is_entry_active(entry) || (entry->allocated_pirq == IRQ_INVALID)) {
			continue;
		}
		if (entry->vm == target_vm) {
			buffer[index] = entry->allocated_pirq;
			buffer[index + 1U] = entry->intr_count;
			index += 2U;
		}
	}

	return index;
}
311