// SPDX-License-Identifier: GPL-2.0
/*
 * vgic_irq.c - Test userspace injection of IRQs
 *
 * This test validates the injection of IRQs from userspace using various
 * methods (e.g., KVM_IRQ_LINE) and modes (e.g., EOI). The guest "asks" the
 * host to inject a specific intid via a GUEST_SYNC call, and then checks that
 * it received it.
 */
#include <asm/kvm.h>
#include <asm/kvm_para.h>
#include <sys/eventfd.h>
#include <linux/sizes.h>

#include "processor.h"
#include "test_util.h"
#include "kvm_util.h"
#include "gic.h"
#include "gic_v3.h"
#include "vgic.h"

/*
 * Stores the user specified args; it's passed to the guest and to every test
 * function.
 */
struct test_args {
	uint32_t nr_irqs;	/* number of KVM supported IRQs. */
	bool eoi_split;		/* 1 is eoir+dir, 0 is eoir only */
	bool level_sensitive;	/* 1 is level, 0 is edge */
	int kvm_max_routes;	/* output of KVM_CAP_IRQ_ROUTING */
	bool kvm_supports_irqfd;	/* output of KVM_CAP_IRQFD */
};

/*
 * KVM implements 32 priority levels:
 * 0x00 (highest priority) - 0xF8 (lowest priority), in steps of 8
 *
 * Note that these macros will still be correct in the case that KVM implements
 * more priority levels. Also note that 32 is the minimum for GICv3 and GICv2.
 */
#define KVM_NUM_PRIOS		32
#define KVM_PRIO_SHIFT		3 /* steps of 8 = 1 << 3 */
#define KVM_PRIO_STEPS		(1 << KVM_PRIO_SHIFT) /* 8 */
#define LOWEST_PRIO		(KVM_NUM_PRIOS - 1)
#define CPU_PRIO_MASK		(LOWEST_PRIO << KVM_PRIO_SHIFT)	/* 0xf8 */
#define IRQ_DEFAULT_PRIO	(LOWEST_PRIO - 1)
#define IRQ_DEFAULT_PRIO_REG	(IRQ_DEFAULT_PRIO << KVM_PRIO_SHIFT) /* 0xf0 */
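
/*
 * Worked example: with KVM_NUM_PRIOS == 32, LOWEST_PRIO is 31, so
 * CPU_PRIO_MASK is 31 << 3 == 0xf8, and IRQ_DEFAULT_PRIO is 30, giving
 * IRQ_DEFAULT_PRIO_REG == 0xf0: one step more urgent than the priority
 * mask, so default-priority IRQs are not filtered out.
 */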

/*
 * The kvm_inject_* utilities are used by the guest to ask the host to inject
 * interrupts (e.g., using the KVM_IRQ_LINE ioctl).
 */

typedef enum {
	KVM_INJECT_EDGE_IRQ_LINE = 1,
	KVM_SET_IRQ_LINE,
	KVM_SET_IRQ_LINE_HIGH,
	KVM_SET_LEVEL_INFO_HIGH,
	KVM_INJECT_IRQFD,
	KVM_WRITE_ISPENDR,
	KVM_WRITE_ISACTIVER,
} kvm_inject_cmd;

struct kvm_inject_args {
	kvm_inject_cmd cmd;
	uint32_t first_intid;
	uint32_t num;
	int level;
	bool expect_failure;
};

/* Used on the guest side to perform the hypercall. */
static void kvm_inject_call(kvm_inject_cmd cmd, uint32_t first_intid,
			    uint32_t num, int level, bool expect_failure);

/* Used on the host side to get the hypercall info. */
static void kvm_inject_get_call(struct kvm_vm *vm, struct ucall *uc,
				struct kvm_inject_args *args);

#define _KVM_INJECT_MULTI(cmd, intid, num, expect_failure)		\
	kvm_inject_call(cmd, intid, num, -1 /* not used */, expect_failure)

#define KVM_INJECT_MULTI(cmd, intid, num)				\
	_KVM_INJECT_MULTI(cmd, intid, num, false)

#define _KVM_INJECT(cmd, intid, expect_failure)				\
	_KVM_INJECT_MULTI(cmd, intid, 1, expect_failure)

#define KVM_INJECT(cmd, intid)						\
	_KVM_INJECT_MULTI(cmd, intid, 1, false)
#define KVM_ACTIVATE(cmd, intid)					\
	kvm_inject_call(cmd, intid, 1, 1, false)
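
/*
 * For example, KVM_INJECT(KVM_INJECT_EDGE_IRQ_LINE, MIN_SPI) expands to
 * kvm_inject_call(KVM_INJECT_EDGE_IRQ_LINE, MIN_SPI, 1, -1, false): a
 * single intid, with the level argument unused and no failure expected.
 */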

struct kvm_inject_desc {
	kvm_inject_cmd cmd;
	/* can inject SGIs, PPIs, and/or SPIs. */
	bool sgi, ppi, spi;
};

static struct kvm_inject_desc inject_edge_fns[] = {
	/*				      sgi    ppi    spi */
	{ KVM_INJECT_EDGE_IRQ_LINE,	      false, false, true },
	{ KVM_INJECT_IRQFD,		      false, false, true },
	{ KVM_WRITE_ISPENDR,		      true,  false, true },
	{ 0, },
};

static struct kvm_inject_desc inject_level_fns[] = {
	/*				      sgi    ppi    spi */
	{ KVM_SET_IRQ_LINE_HIGH,	      false, true,  true },
	{ KVM_SET_LEVEL_INFO_HIGH,	      false, true,  true },
	{ KVM_INJECT_IRQFD,		      false, false, true },
	{ KVM_WRITE_ISPENDR,		      false, true,  true },
	{ 0, },
};

static struct kvm_inject_desc set_active_fns[] = {
	/*				      sgi    ppi    spi */
	{ KVM_WRITE_ISACTIVER,		      true,  true,  true },
	{ 0, },
};

#define for_each_inject_fn(t, f)					\
	for ((f) = (t); (f)->cmd; (f)++)

#define for_each_supported_inject_fn(args, t, f)			\
	for_each_inject_fn(t, f)					\
		if ((args)->kvm_supports_irqfd || (f)->cmd != KVM_INJECT_IRQFD)

#define for_each_supported_activate_fn(args, t, f)			\
	for_each_supported_inject_fn((args), (t), (f))
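
/*
 * Typical usage (see guest_code() below):
 *
 *	struct kvm_inject_desc *f;
 *
 *	for_each_supported_inject_fn(args, inject_edge_fns, f)
 *		test_injection(args, f);
 */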

/* Shared between the guest main thread and the IRQ handlers. */
volatile uint64_t irq_handled;
volatile uint32_t irqnr_received[MAX_SPI + 1];

static void reset_stats(void)
{
	int i;

	irq_handled = 0;
	for (i = 0; i <= MAX_SPI; i++)
		irqnr_received[i] = 0;
}

static uint64_t gic_read_ap1r0(void)
{
	uint64_t reg = read_sysreg_s(SYS_ICC_AP1R0_EL1);

	dsb(sy);
	return reg;
}

static void gic_write_ap1r0(uint64_t val)
{
	write_sysreg_s(val, SYS_ICC_AP1R0_EL1);
	isb();
}

static void guest_set_irq_line(uint32_t intid, uint32_t level);

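/*
 * Generic handler shared by all four eoi_split/level_sensitive combinations.
 * With eoi_split, the EOIR write only drops the running priority and a
 * separate DIR write deactivates the IRQ; without it, EOIR does both.
 */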
static void guest_irq_generic_handler(bool eoi_split, bool level_sensitive)
{
	uint32_t intid = gic_get_and_ack_irq();

	if (intid == IAR_SPURIOUS)
		return;

	GUEST_ASSERT(gic_irq_get_active(intid));

	if (!level_sensitive)
		GUEST_ASSERT(!gic_irq_get_pending(intid));

	if (level_sensitive)
		guest_set_irq_line(intid, 0);

	GUEST_ASSERT(intid < MAX_SPI);
	irqnr_received[intid] += 1;
	irq_handled += 1;

	gic_set_eoi(intid);
	GUEST_ASSERT_EQ(gic_read_ap1r0(), 0);
	if (eoi_split)
		gic_set_dir(intid);

	GUEST_ASSERT(!gic_irq_get_active(intid));
	GUEST_ASSERT(!gic_irq_get_pending(intid));
}

static void kvm_inject_call(kvm_inject_cmd cmd, uint32_t first_intid,
			    uint32_t num, int level, bool expect_failure)
{
	struct kvm_inject_args args = {
		.cmd = cmd,
		.first_intid = first_intid,
		.num = num,
		.level = level,
		.expect_failure = expect_failure,
	};
	GUEST_SYNC(&args);
}

#define GUEST_ASSERT_IAR_EMPTY()					\
do {									\
	uint32_t _intid;						\
	_intid = gic_get_and_ack_irq();					\
	GUEST_ASSERT(_intid == 0 || _intid == IAR_SPURIOUS);		\
} while (0)

#define CAT_HELPER(a, b) a ## b
#define CAT(a, b) CAT_HELPER(a, b)
#define PREFIX guest_irq_handler_
#define GUEST_IRQ_HANDLER_NAME(split, lev) CAT(PREFIX, CAT(split, lev))
#define GENERATE_GUEST_IRQ_HANDLER(split, lev)				\
static void CAT(PREFIX, CAT(split, lev))(struct ex_regs *regs)		\
{									\
	guest_irq_generic_handler(split, lev);				\
}

GENERATE_GUEST_IRQ_HANDLER(0, 0);
GENERATE_GUEST_IRQ_HANDLER(0, 1);
GENERATE_GUEST_IRQ_HANDLER(1, 0);
GENERATE_GUEST_IRQ_HANDLER(1, 1);

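/* Indexed as guest_irq_handlers[eoi_split][level_sensitive]. */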
static void (*guest_irq_handlers[2][2])(struct ex_regs *) = {
	{GUEST_IRQ_HANDLER_NAME(0, 0), GUEST_IRQ_HANDLER_NAME(0, 1),},
	{GUEST_IRQ_HANDLER_NAME(1, 0), GUEST_IRQ_HANDLER_NAME(1, 1),},
};

static void reset_priorities(struct test_args *args)
{
	int i;

	for (i = 0; i < args->nr_irqs; i++)
		gic_set_priority(i, IRQ_DEFAULT_PRIO_REG);
}

static void guest_set_irq_line(uint32_t intid, uint32_t level)
{
	kvm_inject_call(KVM_SET_IRQ_LINE, intid, 1, level, false);
}

static void test_inject_fail(struct test_args *args,
			     uint32_t intid, kvm_inject_cmd cmd)
{
	reset_stats();

	_KVM_INJECT(cmd, intid, true);
	/* no IRQ to handle on entry */

	GUEST_ASSERT_EQ(irq_handled, 0);
	GUEST_ASSERT_IAR_EMPTY();
}

static void guest_inject(struct test_args *args,
			 uint32_t first_intid, uint32_t num,
			 kvm_inject_cmd cmd)
{
	uint32_t i;

	reset_stats();

	/* Cycle over all priorities to make things more interesting. */
	for (i = first_intid; i < num + first_intid; i++)
		gic_set_priority(i, (i % (KVM_NUM_PRIOS - 1)) << 3);

	asm volatile("msr daifset, #2" : : : "memory");
	KVM_INJECT_MULTI(cmd, first_intid, num);

	while (irq_handled < num) {
		wfi();
		local_irq_enable();
		isb(); /* handle IRQ */
		local_irq_disable();
	}
	local_irq_enable();

	GUEST_ASSERT_EQ(irq_handled, num);
	for (i = first_intid; i < num + first_intid; i++)
		GUEST_ASSERT_EQ(irqnr_received[i], 1);
	GUEST_ASSERT_IAR_EMPTY();

	reset_priorities(args);
}

/*
 * Restore the active state of multiple concurrent IRQs (given by the num
 * argument). This does what a live-migration would do on the destination
 * side, assuming there are some active IRQs that were not deactivated yet.
 */
static void guest_restore_active(struct test_args *args,
				 uint32_t first_intid, uint32_t num,
				 kvm_inject_cmd cmd)
{
	uint32_t prio, intid, ap1r;
	int i;

	/*
	 * Set the priorities of the num IRQs in descending order, so that
	 * intid+1 can preempt intid.
	 */
	for (i = 0, prio = (num - 1) * 8; i < num; i++, prio -= 8) {
		GUEST_ASSERT(prio >= 0);
		intid = i + first_intid;
		gic_set_priority(intid, prio);
	}

	/*
	 * In a real migration, KVM would restore all GIC state before running
	 * guest code.
	 */
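	/*
	 * With the 32 priority levels KVM implements (5 preemption bits),
	 * bit p of ICC_AP1R0_EL1 tracks active priority group p * 8. The
	 * loop below therefore sets bits 0..(num - 1), matching the
	 * priorities 0, 8, ..., (num - 1) * 8 programmed above, as if each
	 * IRQ had been acknowledged at its priority.
	 */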
	for (i = 0; i < num; i++) {
		intid = i + first_intid;
		KVM_ACTIVATE(cmd, intid);
		ap1r = gic_read_ap1r0();
		ap1r |= 1U << i;
		gic_write_ap1r0(ap1r);
	}

	/* This is where the "migration" would occur. */

	/* finish handling the IRQs starting with the highest priority one. */
	for (i = 0; i < num; i++) {
		intid = num - i - 1 + first_intid;
		gic_set_eoi(intid);
		if (args->eoi_split)
			gic_set_dir(intid);
	}

	for (i = 0; i < num; i++)
		GUEST_ASSERT(!gic_irq_get_active(i + first_intid));
	GUEST_ASSERT_EQ(gic_read_ap1r0(), 0);
	GUEST_ASSERT_IAR_EMPTY();
}

/*
 * Polls the IAR until it's not a spurious interrupt.
 *
 * This function should only be used in test_inject_preemption (with IRQs
 * masked).
 */
static uint32_t wait_for_and_activate_irq(void)
{
	uint32_t intid;

	do {
		asm volatile("wfi" : : : "memory");
		intid = gic_get_and_ack_irq();
	} while (intid == IAR_SPURIOUS);

	return intid;
}

/*
 * Inject multiple concurrent IRQs (num IRQs starting at first_intid) and
 * handle them without handling the actual exceptions. This is done by masking
 * interrupts for the whole test.
 */
static void test_inject_preemption(struct test_args *args,
				   uint32_t first_intid, int num,
				   kvm_inject_cmd cmd)
{
	uint32_t intid, prio, step = KVM_PRIO_STEPS;
	int i;

	/*
	 * Set the priorities of the num IRQs in descending order, so that
	 * intid+1 can preempt intid.
	 */
	for (i = 0, prio = (num - 1) * step; i < num; i++, prio -= step) {
		GUEST_ASSERT(prio >= 0);
		intid = i + first_intid;
		gic_set_priority(intid, prio);
	}

	local_irq_disable();

	for (i = 0; i < num; i++) {
		uint32_t tmp;
		intid = i + first_intid;
		KVM_INJECT(cmd, intid);
		/* Each successive IRQ will preempt the previous one. */
		tmp = wait_for_and_activate_irq();
		GUEST_ASSERT_EQ(tmp, intid);
		if (args->level_sensitive)
			guest_set_irq_line(intid, 0);
	}

	/* finish handling the IRQs starting with the highest priority one. */
	for (i = 0; i < num; i++) {
		intid = num - i - 1 + first_intid;
		gic_set_eoi(intid);
		if (args->eoi_split)
			gic_set_dir(intid);
	}

	local_irq_enable();

	for (i = 0; i < num; i++)
		GUEST_ASSERT(!gic_irq_get_active(i + first_intid));
	GUEST_ASSERT_EQ(gic_read_ap1r0(), 0);
	GUEST_ASSERT_IAR_EMPTY();

	reset_priorities(args);
}

static void test_injection(struct test_args *args, struct kvm_inject_desc *f)
{
	uint32_t nr_irqs = args->nr_irqs;

	if (f->sgi) {
		guest_inject(args, MIN_SGI, 1, f->cmd);
		guest_inject(args, 0, 16, f->cmd);
	}

	if (f->ppi)
		guest_inject(args, MIN_PPI, 1, f->cmd);

	if (f->spi) {
		guest_inject(args, MIN_SPI, 1, f->cmd);
		guest_inject(args, nr_irqs - 1, 1, f->cmd);
		guest_inject(args, MIN_SPI, nr_irqs - MIN_SPI, f->cmd);
	}
}

static void test_injection_failure(struct test_args *args,
				   struct kvm_inject_desc *f)
{
	uint32_t bad_intid[] = { args->nr_irqs, 1020, 1024, 1120, 5120, ~0U, };
	int i;

	for (i = 0; i < ARRAY_SIZE(bad_intid); i++)
		test_inject_fail(args, bad_intid[i], f->cmd);
}

static void test_preemption(struct test_args *args, struct kvm_inject_desc *f)
{
	/*
	 * Test up to 4 levels of preemption. The reason is that KVM doesn't
	 * currently implement the ability to have more than the number-of-LRs
	 * number of concurrently active IRQs. The number of LRs implemented is
	 * IMPLEMENTATION DEFINED, however, it seems that most implement 4.
	 */
	if (f->sgi)
		test_inject_preemption(args, MIN_SGI, 4, f->cmd);

	if (f->ppi)
		test_inject_preemption(args, MIN_PPI, 4, f->cmd);

	if (f->spi)
		test_inject_preemption(args, MIN_SPI, 4, f->cmd);
}

static void test_restore_active(struct test_args *args, struct kvm_inject_desc *f)
{
	/* Test up to 4 active IRQs. Same reason as in test_preemption. */
	if (f->sgi)
		guest_restore_active(args, MIN_SGI, 4, f->cmd);

	if (f->ppi)
		guest_restore_active(args, MIN_PPI, 4, f->cmd);

	if (f->spi)
		guest_restore_active(args, MIN_SPI, 4, f->cmd);
}

static void guest_code(struct test_args *args)
{
	uint32_t i, nr_irqs = args->nr_irqs;
	bool level_sensitive = args->level_sensitive;
	struct kvm_inject_desc *f, *inject_fns;

	gic_init(GIC_V3, 1);

	for (i = 0; i < nr_irqs; i++)
		gic_irq_enable(i);

	for (i = MIN_SPI; i < nr_irqs; i++)
		gic_irq_set_config(i, !level_sensitive);

	gic_set_eoi_split(args->eoi_split);

	reset_priorities(args);
	gic_set_priority_mask(CPU_PRIO_MASK);

	inject_fns = level_sensitive ? inject_level_fns
				     : inject_edge_fns;

	local_irq_enable();

	/* Start the tests. */
	for_each_supported_inject_fn(args, inject_fns, f) {
		test_injection(args, f);
		test_preemption(args, f);
		test_injection_failure(args, f);
	}

	/*
	 * Restore the active state of IRQs. This would happen when live
	 * migrating IRQs in the middle of being handled.
	 */
	for_each_supported_activate_fn(args, set_active_fns, f)
		test_restore_active(args, f);

	GUEST_DONE();
}

static void kvm_irq_line_check(struct kvm_vm *vm, uint32_t intid, int level,
			       struct test_args *test_args, bool expect_failure)
{
	int ret;

	if (!expect_failure) {
		kvm_arm_irq_line(vm, intid, level);
	} else {
		/* The interface doesn't allow larger intids. */
		if (intid > KVM_ARM_IRQ_NUM_MASK)
			return;

		ret = _kvm_arm_irq_line(vm, intid, level);
		TEST_ASSERT(ret != 0 && errno == EINVAL,
			    "Bad intid %i did not cause KVM_IRQ_LINE "
			    "error: rc: %i errno: %i", intid, ret, errno);
	}
}

void kvm_irq_set_level_info_check(int gic_fd, uint32_t intid, int level,
				  bool expect_failure)
{
	if (!expect_failure) {
		kvm_irq_set_level_info(gic_fd, intid, level);
	} else {
		int ret = _kvm_irq_set_level_info(gic_fd, intid, level);
		/*
		 * The kernel silently fails for invalid SPIs and SGIs (which
		 * are not level-sensitive). It only checks that the intid
		 * doesn't spill over 1U << 10 (the max reserved SPI). Also,
		 * callers are supposed to mask the intid with 0x3ff (1023).
		 */
		if (intid > VGIC_MAX_RESERVED)
			TEST_ASSERT(ret != 0 && errno == EINVAL,
				"Bad intid %i did not cause VGIC_GRP_LEVEL_INFO "
				"error: rc: %i errno: %i", intid, ret, errno);
		else
			TEST_ASSERT(!ret, "KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO "
				"for intid %i failed, rc: %i errno: %i",
				intid, ret, errno);
	}
}

static void kvm_set_gsi_routing_irqchip_check(struct kvm_vm *vm,
		uint32_t intid, uint32_t num, uint32_t kvm_max_routes,
		bool expect_failure)
{
	struct kvm_irq_routing *routing;
	int ret;
	uint64_t i;

	assert(num <= kvm_max_routes && kvm_max_routes <= KVM_MAX_IRQ_ROUTES);

	routing = kvm_gsi_routing_create();
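	/* GSI routing pins count from the first SPI, hence the "- MIN_SPI". */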
	for (i = intid; i < (uint64_t)intid + num; i++)
		kvm_gsi_routing_irqchip_add(routing, i - MIN_SPI, i - MIN_SPI);

	if (!expect_failure) {
		kvm_gsi_routing_write(vm, routing);
	} else {
		ret = _kvm_gsi_routing_write(vm, routing);
		/* The kernel only checks e->irqchip.pin >= KVM_IRQCHIP_NUM_PINS */
		if (((uint64_t)intid + num - 1 - MIN_SPI) >= KVM_IRQCHIP_NUM_PINS)
			TEST_ASSERT(ret != 0 && errno == EINVAL,
				"Bad intid %u did not cause KVM_SET_GSI_ROUTING "
				"error: rc: %i errno: %i", intid, ret, errno);
		else
			TEST_ASSERT(ret == 0, "KVM_SET_GSI_ROUTING "
				"for intid %i failed, rc: %i errno: %i",
				intid, ret, errno);
	}
}

static void kvm_irq_write_ispendr_check(int gic_fd, uint32_t intid,
					struct kvm_vcpu *vcpu,
					bool expect_failure)
{
	/*
	 * Ignore this when expecting failure as invalid intids will lead to
	 * either trying to inject SGIs when we configured the test to be
	 * level_sensitive (or the reverse), or inject large intids which
	 * will lead to writing above the ISPENDR register space (and we
	 * don't want to do that either).
	 */
	if (!expect_failure)
		kvm_irq_write_ispendr(gic_fd, intid, vcpu);
}

static void kvm_routing_and_irqfd_check(struct kvm_vm *vm,
		uint32_t intid, uint32_t num, uint32_t kvm_max_routes,
		bool expect_failure)
{
	int fd[MAX_SPI];
	uint64_t val;
	int ret, f;
	uint64_t i;

	/*
	 * There is no way to try injecting an SGI or PPI as the interface
	 * starts counting from the first SPI (above the private ones), so just
	 * exit.
	 */
	if (INTID_IS_SGI(intid) || INTID_IS_PPI(intid))
		return;

	kvm_set_gsi_routing_irqchip_check(vm, intid, num,
			kvm_max_routes, expect_failure);

	/*
	 * If expect_failure, then just inject anyway. These writes will
	 * silently fail, and in any case the guest will check that no actual
	 * interrupt was injected for those cases.
	 */

	for (f = 0, i = intid; i < (uint64_t)intid + num; i++, f++)
		fd[f] = kvm_new_eventfd();

	for (f = 0, i = intid; i < (uint64_t)intid + num; i++, f++) {
		assert(i <= (uint64_t)UINT_MAX);
		kvm_assign_irqfd(vm, i - MIN_SPI, fd[f]);
	}

	for (f = 0, i = intid; i < (uint64_t)intid + num; i++, f++) {
		val = 1;
		ret = write(fd[f], &val, sizeof(uint64_t));
		TEST_ASSERT(ret == sizeof(uint64_t),
			    __KVM_SYSCALL_ERROR("write()", ret));
	}

	for (f = 0, i = intid; i < (uint64_t)intid + num; i++, f++)
		close(fd[f]);
}

/* handles the valid case: intid=0xffffffff num=1 */
#define for_each_intid(first, num, tmp, i)				\
	for ((tmp) = (i) = (first);					\
		(tmp) < (uint64_t)(first) + (uint64_t)(num);		\
		(tmp)++, (i)++)
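
/*
 * Note: (tmp) is expected to be a uint64_t lvalue (see run_guest_cmd()); the
 * 64-bit loop bound is what keeps (first) + (num) from wrapping when first
 * is close to UINT_MAX.
 */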

static void run_guest_cmd(struct kvm_vcpu *vcpu, int gic_fd,
			  struct kvm_inject_args *inject_args,
			  struct test_args *test_args)
{
	kvm_inject_cmd cmd = inject_args->cmd;
	uint32_t intid = inject_args->first_intid;
	uint32_t num = inject_args->num;
	int level = inject_args->level;
	bool expect_failure = inject_args->expect_failure;
	struct kvm_vm *vm = vcpu->vm;
	uint64_t tmp;
	uint32_t i;

	/* handles the valid case: intid=0xffffffff num=1 */
	assert(intid < UINT_MAX - num || num == 1);

	switch (cmd) {
	case KVM_INJECT_EDGE_IRQ_LINE:
		for_each_intid(intid, num, tmp, i)
			kvm_irq_line_check(vm, i, 1, test_args,
					expect_failure);
		for_each_intid(intid, num, tmp, i)
			kvm_irq_line_check(vm, i, 0, test_args,
					expect_failure);
		break;
	case KVM_SET_IRQ_LINE:
		for_each_intid(intid, num, tmp, i)
			kvm_irq_line_check(vm, i, level, test_args,
					expect_failure);
		break;
	case KVM_SET_IRQ_LINE_HIGH:
		for_each_intid(intid, num, tmp, i)
			kvm_irq_line_check(vm, i, 1, test_args,
					expect_failure);
		break;
	case KVM_SET_LEVEL_INFO_HIGH:
		for_each_intid(intid, num, tmp, i)
			kvm_irq_set_level_info_check(gic_fd, i, 1,
					expect_failure);
		break;
	case KVM_INJECT_IRQFD:
		kvm_routing_and_irqfd_check(vm, intid, num,
					test_args->kvm_max_routes,
					expect_failure);
		break;
	case KVM_WRITE_ISPENDR:
		for (i = intid; i < intid + num; i++)
			kvm_irq_write_ispendr_check(gic_fd, i, vcpu,
					expect_failure);
		break;
	case KVM_WRITE_ISACTIVER:
		for (i = intid; i < intid + num; i++)
			kvm_irq_write_isactiver(gic_fd, i, vcpu);
		break;
	default:
		break;
	}
}

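/*
 * A GUEST_SYNC(&args) from the guest lands here: uc->args[1] holds the guest
 * virtual address of the kvm_inject_args struct, which is translated to a
 * host address and copied out.
 */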
static void kvm_inject_get_call(struct kvm_vm *vm, struct ucall *uc,
				struct kvm_inject_args *args)
{
	struct kvm_inject_args *kvm_args_hva;
	vm_vaddr_t kvm_args_gva;

	kvm_args_gva = uc->args[1];
	kvm_args_hva = (struct kvm_inject_args *)addr_gva2hva(vm, kvm_args_gva);
	memcpy(args, kvm_args_hva, sizeof(struct kvm_inject_args));
}

static void print_args(struct test_args *args)
{
	printf("nr-irqs=%d level-sensitive=%d eoi-split=%d\n",
			args->nr_irqs, args->level_sensitive,
			args->eoi_split);
}

static void test_vgic(uint32_t nr_irqs, bool level_sensitive, bool eoi_split)
{
	struct ucall uc;
	int gic_fd;
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;
	struct kvm_inject_args inject_args;
	vm_vaddr_t args_gva;

	struct test_args args = {
		.nr_irqs = nr_irqs,
		.level_sensitive = level_sensitive,
		.eoi_split = eoi_split,
		.kvm_max_routes = kvm_check_cap(KVM_CAP_IRQ_ROUTING),
		.kvm_supports_irqfd = kvm_check_cap(KVM_CAP_IRQFD),
	};

	print_args(&args);

	vm = vm_create_with_one_vcpu(&vcpu, guest_code);

	vm_init_descriptor_tables(vm);
	vcpu_init_descriptor_tables(vcpu);

	/* Set up the guest args page (so it gets the args). */
	args_gva = vm_vaddr_alloc_page(vm);
	memcpy(addr_gva2hva(vm, args_gva), &args, sizeof(args));
	vcpu_args_set(vcpu, 1, args_gva);

	gic_fd = vgic_v3_setup(vm, 1, nr_irqs);
	__TEST_REQUIRE(gic_fd >= 0, "Failed to create vgic-v3, skipping");

	vm_install_exception_handler(vm, VECTOR_IRQ_CURRENT,
		guest_irq_handlers[args.eoi_split][args.level_sensitive]);

	while (1) {
		vcpu_run(vcpu);

		switch (get_ucall(vcpu, &uc)) {
		case UCALL_SYNC:
			kvm_inject_get_call(vm, &uc, &inject_args);
			run_guest_cmd(vcpu, gic_fd, &inject_args, &args);
			break;
		case UCALL_ABORT:
			REPORT_GUEST_ASSERT(uc);
			break;
		case UCALL_DONE:
			goto done;
		default:
			TEST_FAIL("Unknown ucall %lu", uc.cmd);
		}
	}

done:
	close(gic_fd);
	kvm_vm_free(vm);
}

static void help(const char *name)
{
	printf(
	"\n"
	"usage: %s [-n num_irqs] [-e eoi_split] [-l level_sensitive]\n", name);
	printf(" -n: specify number of IRQs to setup the vgic with. "
		"It has to be a multiple of 32 and between 64 and 1024.\n");
	printf(" -e: if 1 then EOI is split into a write to DIR on top "
		"of writing EOI.\n");
	printf(" -l: specify whether the IRQs are level-sensitive (1) or not (0).");
	puts("");
	exit(1);
}

int main(int argc, char **argv)
{
	uint32_t nr_irqs = 64;
	bool default_args = true;
	bool level_sensitive = false;
	int opt;
	bool eoi_split = false;

	while ((opt = getopt(argc, argv, "hn:e:l:")) != -1) {
		switch (opt) {
		case 'n':
			nr_irqs = atoi_non_negative("Number of IRQs", optarg);
			if (nr_irqs > 1024 || nr_irqs % 32)
				help(argv[0]);
			break;
		case 'e':
			eoi_split = (bool)atoi_paranoid(optarg);
			default_args = false;
			break;
		case 'l':
			level_sensitive = (bool)atoi_paranoid(optarg);
			default_args = false;
			break;
		case 'h':
		default:
			help(argv[0]);
			break;
		}
	}

	/*
	 * If the user only specified nr_irqs (or nothing at all), then run
	 * all eoi_split/level_sensitive combinations.
	 */
	if (default_args) {
		test_vgic(nr_irqs, false /* level */, false /* eoi_split */);
		test_vgic(nr_irqs, false /* level */, true /* eoi_split */);
		test_vgic(nr_irqs, true /* level */, false /* eoi_split */);
		test_vgic(nr_irqs, true /* level */, true /* eoi_split */);
	} else {
		test_vgic(nr_irqs, level_sensitive, eoi_split);
	}

	return 0;
}