// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Test for s390x KVM_S390_MEM_OP
 *
 * Copyright (C) 2019, Red Hat, Inc.
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <pthread.h>

#include <linux/bits.h>

#include "test_util.h"
#include "kvm_util.h"
#include "kselftest.h"

enum mop_target {
	LOGICAL,
	SIDA,
	ABSOLUTE,
	INVALID,
};

enum mop_access_mode {
	READ,
	WRITE,
	CMPXCHG,
};

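/*
 * Describes a single memop for the MEMOP() macro below.  The f_* bits
 * request optional ioctl flags, while the underscore-prefixed bits record
 * whether the corresponding optional field was explicitly provided.
 */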
struct mop_desc {
	uintptr_t gaddr;
	uintptr_t gaddr_v;
	uint64_t set_flags;
	unsigned int f_check : 1;
	unsigned int f_inject : 1;
	unsigned int f_key : 1;
	unsigned int _gaddr_v : 1;
	unsigned int _set_flags : 1;
	unsigned int _sida_offset : 1;
	unsigned int _ar : 1;
	uint32_t size;
	enum mop_target target;
	enum mop_access_mode mode;
	void *buf;
	uint32_t sida_offset;
	void *old;
	uint8_t old_value[16];
	bool *cmpxchg_success;
	uint8_t ar;
	uint8_t key;
};

const uint8_t NO_KEY = 0xff;

static struct kvm_s390_mem_op ksmo_from_desc(struct mop_desc *desc)
{
	struct kvm_s390_mem_op ksmo = {
		.gaddr = (uintptr_t)desc->gaddr,
		.size = desc->size,
		.buf = ((uintptr_t)desc->buf),
		.reserved = "ignored_ignored_ignored_ignored"
	};

	switch (desc->target) {
	case LOGICAL:
		if (desc->mode == READ)
			ksmo.op = KVM_S390_MEMOP_LOGICAL_READ;
		if (desc->mode == WRITE)
			ksmo.op = KVM_S390_MEMOP_LOGICAL_WRITE;
		break;
	case SIDA:
		if (desc->mode == READ)
			ksmo.op = KVM_S390_MEMOP_SIDA_READ;
		if (desc->mode == WRITE)
			ksmo.op = KVM_S390_MEMOP_SIDA_WRITE;
		break;
	case ABSOLUTE:
		if (desc->mode == READ)
			ksmo.op = KVM_S390_MEMOP_ABSOLUTE_READ;
		if (desc->mode == WRITE)
			ksmo.op = KVM_S390_MEMOP_ABSOLUTE_WRITE;
		if (desc->mode == CMPXCHG) {
			ksmo.op = KVM_S390_MEMOP_ABSOLUTE_CMPXCHG;
			ksmo.old_addr = (uint64_t)desc->old;
			memcpy(desc->old_value, desc->old, desc->size);
		}
		break;
	case INVALID:
		ksmo.op = -1;
	}
	if (desc->f_check)
		ksmo.flags |= KVM_S390_MEMOP_F_CHECK_ONLY;
	if (desc->f_inject)
		ksmo.flags |= KVM_S390_MEMOP_F_INJECT_EXCEPTION;
	if (desc->_set_flags)
		ksmo.flags = desc->set_flags;
	if (desc->f_key && desc->key != NO_KEY) {
		ksmo.flags |= KVM_S390_MEMOP_F_SKEY_PROTECTION;
		ksmo.key = desc->key;
	}
	if (desc->_ar)
		ksmo.ar = desc->ar;
	else
		ksmo.ar = 0;
	if (desc->_sida_offset)
		ksmo.sida_offset = desc->sida_offset;

	return ksmo;
}

struct test_info {
	struct kvm_vm *vm;
	struct kvm_vcpu *vcpu;
};

#define PRINT_MEMOP false
static void print_memop(struct kvm_vcpu *vcpu, const struct kvm_s390_mem_op *ksmo)
{
	if (!PRINT_MEMOP)
		return;

	if (!vcpu)
		printf("vm memop(");
	else
		printf("vcpu memop(");
	switch (ksmo->op) {
	case KVM_S390_MEMOP_LOGICAL_READ:
		printf("LOGICAL, READ, ");
		break;
	case KVM_S390_MEMOP_LOGICAL_WRITE:
		printf("LOGICAL, WRITE, ");
		break;
	case KVM_S390_MEMOP_SIDA_READ:
		printf("SIDA, READ, ");
		break;
	case KVM_S390_MEMOP_SIDA_WRITE:
		printf("SIDA, WRITE, ");
		break;
	case KVM_S390_MEMOP_ABSOLUTE_READ:
		printf("ABSOLUTE, READ, ");
		break;
	case KVM_S390_MEMOP_ABSOLUTE_WRITE:
		printf("ABSOLUTE, WRITE, ");
		break;
	case KVM_S390_MEMOP_ABSOLUTE_CMPXCHG:
		printf("ABSOLUTE, CMPXCHG, ");
		break;
	}
	printf("gaddr=%llu, size=%u, buf=%llu, ar=%u, key=%u, old_addr=%llx",
	       ksmo->gaddr, ksmo->size, ksmo->buf, ksmo->ar, ksmo->key,
	       ksmo->old_addr);
	if (ksmo->flags & KVM_S390_MEMOP_F_CHECK_ONLY)
		printf(", CHECK_ONLY");
	if (ksmo->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION)
		printf(", INJECT_EXCEPTION");
	if (ksmo->flags & KVM_S390_MEMOP_F_SKEY_PROTECTION)
		printf(", SKEY_PROTECTION");
	puts(")");
}

static int err_memop_ioctl(struct test_info info, struct kvm_s390_mem_op *ksmo,
			   struct mop_desc *desc)
{
	struct kvm_vcpu *vcpu = info.vcpu;

	if (!vcpu)
		return __vm_ioctl(info.vm, KVM_S390_MEM_OP, ksmo);
	else
		return __vcpu_ioctl(vcpu, KVM_S390_MEM_OP, ksmo);
}

static void memop_ioctl(struct test_info info, struct kvm_s390_mem_op *ksmo,
			struct mop_desc *desc)
{
	int r;

	r = err_memop_ioctl(info, ksmo, desc);
	if (ksmo->op == KVM_S390_MEMOP_ABSOLUTE_CMPXCHG) {
		if (desc->cmpxchg_success) {
			int diff = memcmp(desc->old_value, desc->old, desc->size);
			*desc->cmpxchg_success = !diff;
		}
	}
	TEST_ASSERT(!r, __KVM_IOCTL_ERROR("KVM_S390_MEM_OP", r));
}

#define MEMOP(err, info_p, mop_target_p, access_mode_p, buf_p, size_p, ...)	\
({										\
	struct test_info __info = (info_p);					\
	struct mop_desc __desc = {						\
		.target = (mop_target_p),					\
		.mode = (access_mode_p),					\
		.buf = (buf_p),							\
		.size = (size_p),						\
		__VA_ARGS__							\
	};									\
	struct kvm_s390_mem_op __ksmo;						\
										\
	if (__desc._gaddr_v) {							\
		if (__desc.target == ABSOLUTE)					\
			__desc.gaddr = addr_gva2gpa(__info.vm, __desc.gaddr_v);	\
		else								\
			__desc.gaddr = __desc.gaddr_v;				\
	}									\
	__ksmo = ksmo_from_desc(&__desc);					\
	print_memop(__info.vcpu, &__ksmo);					\
	err##memop_ioctl(__info, &__ksmo, &__desc);				\
})

#define MOP(...) MEMOP(, __VA_ARGS__)
#define ERR_MOP(...) MEMOP(err_, __VA_ARGS__)

#define GADDR(a) .gaddr = ((uintptr_t)a)
#define GADDR_V(v) ._gaddr_v = 1, .gaddr_v = ((uintptr_t)v)
#define CHECK_ONLY .f_check = 1
#define SET_FLAGS(f) ._set_flags = 1, .set_flags = (f)
#define SIDA_OFFSET(o) ._sida_offset = 1, .sida_offset = (o)
#define AR(a) ._ar = 1, .ar = (a)
#define KEY(a) .f_key = 1, .key = (a)
#define INJECT .f_inject = 1
#define CMPXCHG_OLD(o) .old = (o)
#define CMPXCHG_SUCCESS(s) .cmpxchg_success = (s)

#define CHECK_N_DO(f, ...) ({ f(__VA_ARGS__, CHECK_ONLY); f(__VA_ARGS__); })
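
/*
 * MEMOP() builds a struct kvm_s390_mem_op from the modifier macros above and
 * issues the ioctl against either the VM or the vCPU fd.  A typical
 * invocation, as used throughout the tests below, looks like
 *
 *	MOP(t.vcpu, LOGICAL, WRITE, mem1, size, GADDR_V(mem1), KEY(key));
 *
 * CHECK_N_DO() issues the same memop twice, first with
 * KVM_S390_MEMOP_F_CHECK_ONLY set and then for real.
 */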

#define PAGE_SHIFT 12
#define PAGE_SIZE (1ULL << PAGE_SHIFT)
#define PAGE_MASK (~(PAGE_SIZE - 1))
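/* Control register bits are numbered with the MSB as bit 0, hence 63 - n. */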
#define CR0_FETCH_PROTECTION_OVERRIDE (1UL << (63 - 38))
#define CR0_STORAGE_PROTECTION_OVERRIDE (1UL << (63 - 39))

static uint8_t __aligned(PAGE_SIZE) mem1[65536];
static uint8_t __aligned(PAGE_SIZE) mem2[65536];

struct test_default {
	struct kvm_vm *kvm_vm;
	struct test_info vm;
	struct test_info vcpu;
	struct kvm_run *run;
	int size;
};

static struct test_default test_default_init(void *guest_code)
{
	struct kvm_vcpu *vcpu;
	struct test_default t;

	t.size = min((size_t)kvm_check_cap(KVM_CAP_S390_MEM_OP), sizeof(mem1));
	t.kvm_vm = vm_create_with_one_vcpu(&vcpu, guest_code);
	t.vm = (struct test_info) { t.kvm_vm, NULL };
	t.vcpu = (struct test_info) { t.kvm_vm, vcpu };
	t.run = vcpu->run;
	return t;
}

enum stage {
	/* Synced state set by host, e.g. DAT */
	STAGE_INITED,
	/* Guest did nothing */
	STAGE_IDLED,
	/* Guest set storage keys (specifics up to test case) */
	STAGE_SKEYS_SET,
	/* Guest copied memory (locations up to test case) */
	STAGE_COPIED,
	/* End of guest code reached */
	STAGE_DONE,
};

#define HOST_SYNC(info_p, stage)					\
({									\
	struct test_info __info = (info_p);				\
	struct kvm_vcpu *__vcpu = __info.vcpu;				\
	struct ucall uc;						\
	int __stage = (stage);						\
									\
	vcpu_run(__vcpu);						\
	get_ucall(__vcpu, &uc);						\
	if (uc.cmd == UCALL_ABORT) {					\
		REPORT_GUEST_ASSERT_2(uc, "hints: %lu, %lu");		\
	}								\
	ASSERT_EQ(uc.cmd, UCALL_SYNC);					\
	ASSERT_EQ(uc.args[1], __stage);					\
})									\

static void prepare_mem12(void)
{
	int i;

	for (i = 0; i < sizeof(mem1); i++)
		mem1[i] = rand();
	memset(mem2, 0xaa, sizeof(mem2));
}

#define ASSERT_MEM_EQ(p1, p2, size) \
	TEST_ASSERT(!memcmp(p1, p2, size), "Memory contents do not match!")

static void default_write_read(struct test_info copy_cpu, struct test_info mop_cpu,
			       enum mop_target mop_target, uint32_t size, uint8_t key)
{
	prepare_mem12();
	CHECK_N_DO(MOP, mop_cpu, mop_target, WRITE, mem1, size,
		   GADDR_V(mem1), KEY(key));
	HOST_SYNC(copy_cpu, STAGE_COPIED);
	CHECK_N_DO(MOP, mop_cpu, mop_target, READ, mem2, size,
		   GADDR_V(mem2), KEY(key));
	ASSERT_MEM_EQ(mem1, mem2, size);
}

static void default_read(struct test_info copy_cpu, struct test_info mop_cpu,
			 enum mop_target mop_target, uint32_t size, uint8_t key)
{
	prepare_mem12();
	CHECK_N_DO(MOP, mop_cpu, mop_target, WRITE, mem1, size, GADDR_V(mem1));
	HOST_SYNC(copy_cpu, STAGE_COPIED);
	CHECK_N_DO(MOP, mop_cpu, mop_target, READ, mem2, size,
		   GADDR_V(mem2), KEY(key));
	ASSERT_MEM_EQ(mem1, mem2, size);
}

static void default_cmpxchg(struct test_default *test, uint8_t key)
{
	for (int size = 1; size <= 16; size *= 2) {
		for (int offset = 0; offset < 16; offset += size) {
			uint8_t __aligned(16) new[16] = {};
			uint8_t __aligned(16) old[16];
			bool succ;

			prepare_mem12();
			default_write_read(test->vcpu, test->vcpu, LOGICAL, 16, NO_KEY);

			memcpy(&old, mem1, 16);
			MOP(test->vm, ABSOLUTE, CMPXCHG, new + offset,
			    size, GADDR_V(mem1 + offset),
			    CMPXCHG_OLD(old + offset),
			    CMPXCHG_SUCCESS(&succ), KEY(key));
			HOST_SYNC(test->vcpu, STAGE_COPIED);
			MOP(test->vm, ABSOLUTE, READ, mem2, 16, GADDR_V(mem2));
			TEST_ASSERT(succ, "exchange of values should succeed");
			memcpy(mem1 + offset, new + offset, size);
			ASSERT_MEM_EQ(mem1, mem2, 16);

			memcpy(&old, mem1, 16);
			new[offset]++;
			old[offset]++;
			MOP(test->vm, ABSOLUTE, CMPXCHG, new + offset,
			    size, GADDR_V(mem1 + offset),
			    CMPXCHG_OLD(old + offset),
			    CMPXCHG_SUCCESS(&succ), KEY(key));
			HOST_SYNC(test->vcpu, STAGE_COPIED);
			MOP(test->vm, ABSOLUTE, READ, mem2, 16, GADDR_V(mem2));
			TEST_ASSERT(!succ, "exchange of values should not succeed");
			ASSERT_MEM_EQ(mem1, mem2, 16);
			ASSERT_MEM_EQ(&old, mem1, 16);
		}
	}
}

static void guest_copy(void)
{
	GUEST_SYNC(STAGE_INITED);
	memcpy(&mem2, &mem1, sizeof(mem2));
	GUEST_SYNC(STAGE_COPIED);
}

static void test_copy(void)
{
	struct test_default t = test_default_init(guest_copy);

	HOST_SYNC(t.vcpu, STAGE_INITED);

	default_write_read(t.vcpu, t.vcpu, LOGICAL, t.size, NO_KEY);

	kvm_vm_free(t.kvm_vm);
}

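/*
 * Set the storage key of every page covering [addr, addr + len) to @key.
 * LRA translates the virtual address to a real address and indicates via the
 * condition code whether the page is mapped; SSKE then sets the storage key
 * of the addressed frame.
 */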
static void set_storage_key_range(void *addr, size_t len, uint8_t key)
{
	uintptr_t _addr, abs, i;
	int not_mapped = 0;

	_addr = (uintptr_t)addr;
	for (i = _addr & PAGE_MASK; i < _addr + len; i += PAGE_SIZE) {
		abs = i;
		asm volatile (
			       "lra	%[abs], 0(0,%[abs])\n"
			"	jz	0f\n"
			"	llill	%[not_mapped],1\n"
			"	j	1f\n"
			"0:	sske	%[key], %[abs]\n"
			"1:"
			: [abs] "+&a" (abs), [not_mapped] "+r" (not_mapped)
			: [key] "r" (key)
			: "cc"
		);
		GUEST_ASSERT_EQ(not_mapped, 0);
	}
}

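/*
 * In a storage key, bits 0-3 are the access-control key and bit 4 is the
 * fetch-protection bit; 0x90 therefore means key 9 with fetch protection
 * disabled.
 */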
static void guest_copy_key(void)
{
	set_storage_key_range(mem1, sizeof(mem1), 0x90);
	set_storage_key_range(mem2, sizeof(mem2), 0x90);
	GUEST_SYNC(STAGE_SKEYS_SET);

	for (;;) {
		memcpy(&mem2, &mem1, sizeof(mem2));
		GUEST_SYNC(STAGE_COPIED);
	}
}

static void test_copy_key(void)
{
	struct test_default t = test_default_init(guest_copy_key);

	HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);

	/* vm, no key */
	default_write_read(t.vcpu, t.vm, ABSOLUTE, t.size, NO_KEY);

	/* vm/vcpu, matching key or key 0 */
	default_write_read(t.vcpu, t.vcpu, LOGICAL, t.size, 0);
	default_write_read(t.vcpu, t.vcpu, LOGICAL, t.size, 9);
	default_write_read(t.vcpu, t.vm, ABSOLUTE, t.size, 0);
	default_write_read(t.vcpu, t.vm, ABSOLUTE, t.size, 9);
	/*
	 * There used to be different code paths for key handling depending on
	 * whether the region crossed a page boundary.
	 * There currently are not, but the more tests the merrier.
	 */
	default_write_read(t.vcpu, t.vcpu, LOGICAL, 1, 0);
	default_write_read(t.vcpu, t.vcpu, LOGICAL, 1, 9);
	default_write_read(t.vcpu, t.vm, ABSOLUTE, 1, 0);
	default_write_read(t.vcpu, t.vm, ABSOLUTE, 1, 9);

	/* vm/vcpu, mismatching keys on read, but no fetch protection */
	default_read(t.vcpu, t.vcpu, LOGICAL, t.size, 2);
	default_read(t.vcpu, t.vm, ABSOLUTE, t.size, 2);

	kvm_vm_free(t.kvm_vm);
}

static void test_cmpxchg_key(void)
{
	struct test_default t = test_default_init(guest_copy_key);

	HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);

	default_cmpxchg(&t, NO_KEY);
	default_cmpxchg(&t, 0);
	default_cmpxchg(&t, 9);

	kvm_vm_free(t.kvm_vm);
}

static __uint128_t cut_to_size(int size, __uint128_t val)
{
	switch (size) {
	case 1:
		return (uint8_t)val;
	case 2:
		return (uint16_t)val;
	case 4:
		return (uint32_t)val;
	case 8:
		return (uint64_t)val;
	case 16:
		return val;
	}
	GUEST_ASSERT_1(false, "Invalid size");
	return 0;
}

static bool popcount_eq(__uint128_t a, __uint128_t b)
{
	unsigned int count_a, count_b;

	count_a = __builtin_popcountl((uint64_t)(a >> 64)) +
		  __builtin_popcountl((uint64_t)a);
	count_b = __builtin_popcountl((uint64_t)(b >> 64)) +
		  __builtin_popcountl((uint64_t)b);
	return count_a == count_b;
}

static __uint128_t rotate(int size, __uint128_t val, int amount)
{
	unsigned int bits = size * 8;

	amount = (amount + bits) % bits;
	val = cut_to_size(size, val);
	return (val << (bits - amount)) | (val >> amount);
}

const unsigned int max_block = 16;

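/*
 * choose_block() and permutate_bits() derive a block size/offset and a bit
 * permutation deterministically from the iteration counter, using different
 * multiplier/increment constants for guest and host so the two sides pick
 * different, but reproducible, operations.
 */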
static void choose_block(bool guest, int i, int *size, int *offset)
{
	unsigned int rand;

	rand = i;
	if (guest) {
		rand = rand * 19 + 11;
		*size = 1 << ((rand % 3) + 2);
		rand = rand * 19 + 11;
		*offset = (rand % max_block) & ~(*size - 1);
	} else {
		rand = rand * 17 + 5;
		*size = 1 << (rand % 5);
		rand = rand * 17 + 5;
		*offset = (rand % max_block) & ~(*size - 1);
	}
}

static __uint128_t permutate_bits(bool guest, int i, int size, __uint128_t old)
{
	unsigned int rand;
	int amount;
	bool swap;

	rand = i;
	rand = rand * 3 + 1;
	if (guest)
		rand = rand * 3 + 1;
	swap = rand % 2 == 0;
	if (swap) {
		int i, j;
		__uint128_t new;
		uint8_t byte0, byte1;

		rand = rand * 3 + 1;
		i = rand % size;
		rand = rand * 3 + 1;
		j = rand % size;
		if (i == j)
			return old;
		new = rotate(16, old, i * 8);
		byte0 = new & 0xff;
		new &= ~0xff;
		new = rotate(16, new, -i * 8);
		new = rotate(16, new, j * 8);
		byte1 = new & 0xff;
		new = (new & ~0xff) | byte0;
		new = rotate(16, new, -j * 8);
		new = rotate(16, new, i * 8);
		new = new | byte1;
		new = rotate(16, new, -i * 8);
		return new;
	}
	rand = rand * 3 + 1;
	amount = rand % (size * 8);
	return rotate(size, old, amount);
}

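/*
 * Atomic compare-and-swap in the guest using the CS, CSG and CDSG
 * instructions.  On mismatch, *old_addr is updated to the current memory
 * contents, mirroring the semantics of the cmpxchg memop.
 */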
static bool _cmpxchg(int size, void *target, __uint128_t *old_addr, __uint128_t new)
{
	bool ret;

	switch (size) {
	case 4: {
		uint32_t old = *old_addr;

		asm volatile ("cs %[old],%[new],%[address]"
			      : [old] "+d" (old),
				[address] "+Q" (*(uint32_t *)(target))
			      : [new] "d" ((uint32_t)new)
			      : "cc"
		);
		ret = old == (uint32_t)*old_addr;
		*old_addr = old;
		return ret;
	}
	case 8: {
		uint64_t old = *old_addr;

		asm volatile ("csg %[old],%[new],%[address]"
			      : [old] "+d" (old),
				[address] "+Q" (*(uint64_t *)(target))
			      : [new] "d" ((uint64_t)new)
			      : "cc"
		);
		ret = old == (uint64_t)*old_addr;
		*old_addr = old;
		return ret;
	}
	case 16: {
		__uint128_t old = *old_addr;

		asm volatile ("cdsg %[old],%[new],%[address]"
			      : [old] "+d" (old),
				[address] "+Q" (*(__uint128_t *)(target))
			      : [new] "d" (new)
			      : "cc"
		);
		ret = old == *old_addr;
		*old_addr = old;
		return ret;
	}
	}
	GUEST_ASSERT_1(false, "Invalid size");
	return 0;
}

const unsigned int cmpxchg_iter_outer = 100, cmpxchg_iter_inner = 10000;

static void guest_cmpxchg_key(void)
{
	int size, offset;
	__uint128_t old, new;

	set_storage_key_range(mem1, max_block, 0x10);
	set_storage_key_range(mem2, max_block, 0x10);
	GUEST_SYNC(STAGE_SKEYS_SET);

	for (int i = 0; i < cmpxchg_iter_outer; i++) {
		do {
			old = 1;
		} while (!_cmpxchg(16, mem1, &old, 0));
		for (int j = 0; j < cmpxchg_iter_inner; j++) {
			choose_block(true, i + j, &size, &offset);
			do {
				new = permutate_bits(true, i + j, size, old);
			} while (!_cmpxchg(size, mem2 + offset, &old, new));
		}
	}

	GUEST_SYNC(STAGE_DONE);
}

static void *run_guest(void *data)
{
	struct test_info *info = data;

	HOST_SYNC(*info, STAGE_DONE);
	return NULL;
}

static char *quad_to_char(__uint128_t *quad, int size)
{
	return ((char *)quad) + (sizeof(*quad) - size);
}

static void test_cmpxchg_key_concurrent(void)
{
	struct test_default t = test_default_init(guest_cmpxchg_key);
	int size, offset;
	__uint128_t old, new;
	bool success;
	pthread_t thread;

	HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);
	prepare_mem12();
	MOP(t.vcpu, LOGICAL, WRITE, mem1, max_block, GADDR_V(mem2));
	pthread_create(&thread, NULL, run_guest, &t.vcpu);

	for (int i = 0; i < cmpxchg_iter_outer; i++) {
		do {
			old = 0;
			new = 1;
			MOP(t.vm, ABSOLUTE, CMPXCHG, &new,
			    sizeof(new), GADDR_V(mem1),
			    CMPXCHG_OLD(&old),
			    CMPXCHG_SUCCESS(&success), KEY(1));
		} while (!success);
		for (int j = 0; j < cmpxchg_iter_inner; j++) {
			choose_block(false, i + j, &size, &offset);
			do {
				new = permutate_bits(false, i + j, size, old);
				MOP(t.vm, ABSOLUTE, CMPXCHG, quad_to_char(&new, size),
				    size, GADDR_V(mem2 + offset),
				    CMPXCHG_OLD(quad_to_char(&old, size)),
				    CMPXCHG_SUCCESS(&success), KEY(1));
			} while (!success);
		}
	}

	pthread_join(thread, NULL);

	MOP(t.vcpu, LOGICAL, READ, mem2, max_block, GADDR_V(mem2));
	TEST_ASSERT(popcount_eq(*(__uint128_t *)mem1, *(__uint128_t *)mem2),
		    "Must retain number of set bits");

	kvm_vm_free(t.kvm_vm);
}

static void guest_copy_key_fetch_prot(void)
{
	/*
	 * For some reason combining the first sync with override enablement
	 * results in an exception when calling HOST_SYNC.
	 */
	GUEST_SYNC(STAGE_INITED);
	/* Storage protection override applies to both store and fetch. */
	set_storage_key_range(mem1, sizeof(mem1), 0x98);
	set_storage_key_range(mem2, sizeof(mem2), 0x98);
	GUEST_SYNC(STAGE_SKEYS_SET);

	for (;;) {
		memcpy(&mem2, &mem1, sizeof(mem2));
		GUEST_SYNC(STAGE_COPIED);
	}
}

static void test_copy_key_storage_prot_override(void)
{
	struct test_default t = test_default_init(guest_copy_key_fetch_prot);

	HOST_SYNC(t.vcpu, STAGE_INITED);
	t.run->s.regs.crs[0] |= CR0_STORAGE_PROTECTION_OVERRIDE;
	t.run->kvm_dirty_regs = KVM_SYNC_CRS;
	HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);

	/* vcpu, mismatching keys, storage protection override in effect */
	default_write_read(t.vcpu, t.vcpu, LOGICAL, t.size, 2);

	kvm_vm_free(t.kvm_vm);
}

static void test_copy_key_fetch_prot(void)
{
	struct test_default t = test_default_init(guest_copy_key_fetch_prot);

	HOST_SYNC(t.vcpu, STAGE_INITED);
	HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);

	/* vm/vcpu, matching key, fetch protection in effect */
	default_read(t.vcpu, t.vcpu, LOGICAL, t.size, 9);
	default_read(t.vcpu, t.vm, ABSOLUTE, t.size, 9);

	kvm_vm_free(t.kvm_vm);
}

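/*
 * On a key-protection violation the memop ioctl returns the positive program
 * interruption code instead of -1; 4 is the protection exception code.
 */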
#define ERR_PROT_MOP(...)							\
({										\
	int rv;									\
										\
	rv = ERR_MOP(__VA_ARGS__);						\
	TEST_ASSERT(rv == 4, "Should result in protection exception");		\
})

static void guest_error_key(void)
{
	GUEST_SYNC(STAGE_INITED);
	set_storage_key_range(mem1, PAGE_SIZE, 0x18);
	set_storage_key_range(mem1 + PAGE_SIZE, sizeof(mem1) - PAGE_SIZE, 0x98);
	GUEST_SYNC(STAGE_SKEYS_SET);
	GUEST_SYNC(STAGE_IDLED);
}

static void test_errors_key(void)
{
	struct test_default t = test_default_init(guest_error_key);

	HOST_SYNC(t.vcpu, STAGE_INITED);
	HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);

	/* vm/vcpu, mismatching keys, fetch protection in effect */
	CHECK_N_DO(ERR_PROT_MOP, t.vcpu, LOGICAL, WRITE, mem1, t.size, GADDR_V(mem1), KEY(2));
	CHECK_N_DO(ERR_PROT_MOP, t.vcpu, LOGICAL, READ, mem2, t.size, GADDR_V(mem1), KEY(2));
	CHECK_N_DO(ERR_PROT_MOP, t.vm, ABSOLUTE, WRITE, mem1, t.size, GADDR_V(mem1), KEY(2));
	CHECK_N_DO(ERR_PROT_MOP, t.vm, ABSOLUTE, READ, mem2, t.size, GADDR_V(mem1), KEY(2));

	kvm_vm_free(t.kvm_vm);
}

static void test_errors_cmpxchg_key(void)
{
	struct test_default t = test_default_init(guest_copy_key_fetch_prot);
	int i;

	HOST_SYNC(t.vcpu, STAGE_INITED);
	HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);

	for (i = 1; i <= 16; i *= 2) {
		__uint128_t old = 0;

		ERR_PROT_MOP(t.vm, ABSOLUTE, CMPXCHG, mem2, i, GADDR_V(mem2),
			     CMPXCHG_OLD(&old), KEY(2));
	}

	kvm_vm_free(t.kvm_vm);
}

static void test_termination(void)
{
	struct test_default t = test_default_init(guest_error_key);
	uint64_t prefix;
	uint64_t teid;
	uint64_t teid_mask = BIT(63 - 56) | BIT(63 - 60) | BIT(63 - 61);
	uint64_t psw[2];

	HOST_SYNC(t.vcpu, STAGE_INITED);
	HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);

	/* vcpu, mismatching keys after first page */
	ERR_PROT_MOP(t.vcpu, LOGICAL, WRITE, mem1, t.size, GADDR_V(mem1), KEY(1), INJECT);
	/*
	 * The memop injected a program exception and the test needs to check the
	 * Translation-Exception Identification (TEID). It is necessary to run
	 * the guest in order to be able to read the TEID from guest memory.
	 * Set the guest program new PSW, so the guest state is not clobbered.
	 */
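	/*
	 * 464 and 168 are the offsets of the program new PSW and the TEID in
	 * the prefix area (lowcore).
	 */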
	prefix = t.run->s.regs.prefix;
	psw[0] = t.run->psw_mask;
	psw[1] = t.run->psw_addr;
	MOP(t.vm, ABSOLUTE, WRITE, psw, sizeof(psw), GADDR(prefix + 464));
	HOST_SYNC(t.vcpu, STAGE_IDLED);
	MOP(t.vm, ABSOLUTE, READ, &teid, sizeof(teid), GADDR(prefix + 168));
	/* Bits 56, 60, 61 form a code, 0 being the only one allowing for termination */
	ASSERT_EQ(teid & teid_mask, 0);

	kvm_vm_free(t.kvm_vm);
}

static void test_errors_key_storage_prot_override(void)
{
	struct test_default t = test_default_init(guest_copy_key_fetch_prot);

	HOST_SYNC(t.vcpu, STAGE_INITED);
	t.run->s.regs.crs[0] |= CR0_STORAGE_PROTECTION_OVERRIDE;
	t.run->kvm_dirty_regs = KVM_SYNC_CRS;
	HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);

	/* vm, mismatching keys, storage protection override not applicable to vm */
	CHECK_N_DO(ERR_PROT_MOP, t.vm, ABSOLUTE, WRITE, mem1, t.size, GADDR_V(mem1), KEY(2));
	CHECK_N_DO(ERR_PROT_MOP, t.vm, ABSOLUTE, READ, mem2, t.size, GADDR_V(mem2), KEY(2));

	kvm_vm_free(t.kvm_vm);
}

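/* Last page of the 64-bit address space, used to test address wraparound. */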
const uint64_t last_page_addr = -PAGE_SIZE;

static void guest_copy_key_fetch_prot_override(void)
{
	int i;
	char *page_0 = 0;

	GUEST_SYNC(STAGE_INITED);
	set_storage_key_range(0, PAGE_SIZE, 0x18);
	set_storage_key_range((void *)last_page_addr, PAGE_SIZE, 0x0);
	asm volatile ("sske %[key],%[addr]\n" :: [addr] "r"(0L), [key] "r"(0x18) : "cc");
	GUEST_SYNC(STAGE_SKEYS_SET);

	for (;;) {
		for (i = 0; i < PAGE_SIZE; i++)
			page_0[i] = mem1[i];
		GUEST_SYNC(STAGE_COPIED);
	}
}

static void test_copy_key_fetch_prot_override(void)
{
	struct test_default t = test_default_init(guest_copy_key_fetch_prot_override);
	vm_vaddr_t guest_0_page, guest_last_page;

	guest_0_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, 0);
	guest_last_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, last_page_addr);
	if (guest_0_page != 0 || guest_last_page != last_page_addr) {
		print_skip("did not allocate guest pages at required positions");
		goto out;
	}

	HOST_SYNC(t.vcpu, STAGE_INITED);
	t.run->s.regs.crs[0] |= CR0_FETCH_PROTECTION_OVERRIDE;
	t.run->kvm_dirty_regs = KVM_SYNC_CRS;
	HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);

	/* vcpu, mismatching keys on fetch, fetch protection override applies */
	prepare_mem12();
	MOP(t.vcpu, LOGICAL, WRITE, mem1, PAGE_SIZE, GADDR_V(mem1));
	HOST_SYNC(t.vcpu, STAGE_COPIED);
	CHECK_N_DO(MOP, t.vcpu, LOGICAL, READ, mem2, 2048, GADDR_V(guest_0_page), KEY(2));
	ASSERT_MEM_EQ(mem1, mem2, 2048);

	/*
	 * vcpu, mismatching keys on fetch, fetch protection override applies,
	 * wraparound
	 */
	prepare_mem12();
	MOP(t.vcpu, LOGICAL, WRITE, mem1, 2 * PAGE_SIZE, GADDR_V(guest_last_page));
	HOST_SYNC(t.vcpu, STAGE_COPIED);
	CHECK_N_DO(MOP, t.vcpu, LOGICAL, READ, mem2, PAGE_SIZE + 2048,
		   GADDR_V(guest_last_page), KEY(2));
	ASSERT_MEM_EQ(mem1, mem2, 2048);

out:
	kvm_vm_free(t.kvm_vm);
}

static void test_errors_key_fetch_prot_override_not_enabled(void)
{
	struct test_default t = test_default_init(guest_copy_key_fetch_prot_override);
	vm_vaddr_t guest_0_page, guest_last_page;

	guest_0_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, 0);
	guest_last_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, last_page_addr);
	if (guest_0_page != 0 || guest_last_page != last_page_addr) {
		print_skip("did not allocate guest pages at required positions");
		goto out;
	}
	HOST_SYNC(t.vcpu, STAGE_INITED);
	HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);

	/* vcpu, mismatching keys on fetch, fetch protection override not enabled */
	CHECK_N_DO(ERR_PROT_MOP, t.vcpu, LOGICAL, READ, mem2, 2048, GADDR_V(0), KEY(2));

out:
	kvm_vm_free(t.kvm_vm);
}

static void test_errors_key_fetch_prot_override_enabled(void)
{
	struct test_default t = test_default_init(guest_copy_key_fetch_prot_override);
	vm_vaddr_t guest_0_page, guest_last_page;

	guest_0_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, 0);
	guest_last_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, last_page_addr);
	if (guest_0_page != 0 || guest_last_page != last_page_addr) {
		print_skip("did not allocate guest pages at required positions");
		goto out;
	}
	HOST_SYNC(t.vcpu, STAGE_INITED);
	t.run->s.regs.crs[0] |= CR0_FETCH_PROTECTION_OVERRIDE;
	t.run->kvm_dirty_regs = KVM_SYNC_CRS;
	HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);

	/*
	 * vcpu, mismatching keys on fetch,
	 * fetch protection override does not apply because the memory range is exceeded
	 */
	CHECK_N_DO(ERR_PROT_MOP, t.vcpu, LOGICAL, READ, mem2, 2048 + 1, GADDR_V(0), KEY(2));
	CHECK_N_DO(ERR_PROT_MOP, t.vcpu, LOGICAL, READ, mem2, PAGE_SIZE + 2048 + 1,
		   GADDR_V(guest_last_page), KEY(2));
	/* vm, fetch protection override does not apply */
	CHECK_N_DO(ERR_PROT_MOP, t.vm, ABSOLUTE, READ, mem2, 2048, GADDR(0), KEY(2));
	CHECK_N_DO(ERR_PROT_MOP, t.vm, ABSOLUTE, READ, mem2, 2048, GADDR_V(guest_0_page), KEY(2));

out:
	kvm_vm_free(t.kvm_vm);
}

static void guest_idle(void)
{
	GUEST_SYNC(STAGE_INITED); /* for consistency's sake */
	for (;;)
		GUEST_SYNC(STAGE_IDLED);
}

static void _test_errors_common(struct test_info info, enum mop_target target, int size)
{
	int rv;

	/* Bad size: */
	rv = ERR_MOP(info, target, WRITE, mem1, -1, GADDR_V(mem1));
	TEST_ASSERT(rv == -1 && errno == E2BIG, "ioctl allows insane sizes");

	/* Zero size: */
	rv = ERR_MOP(info, target, WRITE, mem1, 0, GADDR_V(mem1));
	TEST_ASSERT(rv == -1 && (errno == EINVAL || errno == ENOMEM),
		    "ioctl allows 0 as size");

	/* Bad flags: */
	rv = ERR_MOP(info, target, WRITE, mem1, size, GADDR_V(mem1), SET_FLAGS(-1));
	TEST_ASSERT(rv == -1 && errno == EINVAL, "ioctl allows all flags");

	/* Bad guest address: */
	rv = ERR_MOP(info, target, WRITE, mem1, size, GADDR((void *)~0xfffUL), CHECK_ONLY);
	TEST_ASSERT(rv > 0, "ioctl does not report bad guest memory address with CHECK_ONLY");
	rv = ERR_MOP(info, target, WRITE, mem1, size, GADDR((void *)~0xfffUL));
	TEST_ASSERT(rv > 0, "ioctl does not report bad guest memory address on write");

	/* Bad host address: */
	rv = ERR_MOP(info, target, WRITE, 0, size, GADDR_V(mem1));
	TEST_ASSERT(rv == -1 && errno == EFAULT,
		    "ioctl does not report bad host memory address");

	/* Bad key: */
	rv = ERR_MOP(info, target, WRITE, mem1, size, GADDR_V(mem1), KEY(17));
	TEST_ASSERT(rv == -1 && errno == EINVAL, "ioctl allows invalid key");
}

static void test_errors(void)
{
	struct test_default t = test_default_init(guest_idle);
	int rv;

	HOST_SYNC(t.vcpu, STAGE_INITED);

	_test_errors_common(t.vcpu, LOGICAL, t.size);
	_test_errors_common(t.vm, ABSOLUTE, t.size);

	/* Bad operation: */
	rv = ERR_MOP(t.vcpu, INVALID, WRITE, mem1, t.size, GADDR_V(mem1));
	TEST_ASSERT(rv == -1 && errno == EINVAL, "ioctl allows bad operations");
	/* virtual addresses are not translated when passing INVALID */
	rv = ERR_MOP(t.vm, INVALID, WRITE, mem1, PAGE_SIZE, GADDR(0));
	TEST_ASSERT(rv == -1 && errno == EINVAL, "ioctl allows bad operations");

	/* Bad access register: */
	t.run->psw_mask &= ~(3UL << (63 - 17));
	t.run->psw_mask |= 1UL << (63 - 17);	/* Enable AR mode */
	HOST_SYNC(t.vcpu, STAGE_IDLED);		/* To sync new state to SIE block */
	rv = ERR_MOP(t.vcpu, LOGICAL, WRITE, mem1, t.size, GADDR_V(mem1), AR(17));
	TEST_ASSERT(rv == -1 && errno == EINVAL, "ioctl allows ARs > 15");
	t.run->psw_mask &= ~(3UL << (63 - 17));	/* Disable AR mode */
	HOST_SYNC(t.vcpu, STAGE_IDLED);		/* Run to sync new state */

	/* Check that the SIDA calls are rejected for non-protected guests */
	rv = ERR_MOP(t.vcpu, SIDA, READ, mem1, 8, GADDR(0), SIDA_OFFSET(0x1c0));
	TEST_ASSERT(rv == -1 && errno == EINVAL,
		    "ioctl does not reject SIDA_READ in non-protected mode");
	rv = ERR_MOP(t.vcpu, SIDA, WRITE, mem1, 8, GADDR(0), SIDA_OFFSET(0x1c0));
	TEST_ASSERT(rv == -1 && errno == EINVAL,
		    "ioctl does not reject SIDA_WRITE in non-protected mode");

	kvm_vm_free(t.kvm_vm);
}

static void test_errors_cmpxchg(void)
{
	struct test_default t = test_default_init(guest_idle);
	__uint128_t old;
	int rv, i, power = 1;

	HOST_SYNC(t.vcpu, STAGE_INITED);

	for (i = 0; i < 32; i++) {
		if (i == power) {
			power *= 2;
			continue;
		}
		rv = ERR_MOP(t.vm, ABSOLUTE, CMPXCHG, mem1, i, GADDR_V(mem1),
			     CMPXCHG_OLD(&old));
		TEST_ASSERT(rv == -1 && errno == EINVAL,
			    "ioctl allows bad size for cmpxchg");
	}
	for (i = 1; i <= 16; i *= 2) {
		rv = ERR_MOP(t.vm, ABSOLUTE, CMPXCHG, mem1, i, GADDR((void *)~0xfffUL),
			     CMPXCHG_OLD(&old));
		TEST_ASSERT(rv > 0, "ioctl allows bad guest address for cmpxchg");
	}
	for (i = 2; i <= 16; i *= 2) {
		rv = ERR_MOP(t.vm, ABSOLUTE, CMPXCHG, mem1, i, GADDR_V(mem1 + 1),
			     CMPXCHG_OLD(&old));
		TEST_ASSERT(rv == -1 && errno == EINVAL,
			    "ioctl allows bad alignment for cmpxchg");
	}

	kvm_vm_free(t.kvm_vm);
}

int main(int argc, char *argv[])
{
	int extension_cap, idx;

	TEST_REQUIRE(kvm_has_cap(KVM_CAP_S390_MEM_OP));
	extension_cap = kvm_check_cap(KVM_CAP_S390_MEM_OP_EXTENSION);

	struct testdef {
		const char *name;
		void (*test)(void);
		bool requirements_met;
	} testlist[] = {
		{
			.name = "simple copy",
			.test = test_copy,
			.requirements_met = true,
		},
		{
			.name = "generic error checks",
			.test = test_errors,
			.requirements_met = true,
		},
		{
			.name = "copy with storage keys",
			.test = test_copy_key,
			.requirements_met = extension_cap > 0,
		},
		{
			.name = "cmpxchg with storage keys",
			.test = test_cmpxchg_key,
			.requirements_met = extension_cap & 0x2,
		},
		{
			.name = "concurrently cmpxchg with storage keys",
			.test = test_cmpxchg_key_concurrent,
			.requirements_met = extension_cap & 0x2,
		},
		{
			.name = "copy with key storage protection override",
			.test = test_copy_key_storage_prot_override,
			.requirements_met = extension_cap > 0,
		},
		{
			.name = "copy with key fetch protection",
			.test = test_copy_key_fetch_prot,
			.requirements_met = extension_cap > 0,
		},
		{
			.name = "copy with key fetch protection override",
			.test = test_copy_key_fetch_prot_override,
			.requirements_met = extension_cap > 0,
		},
		{
			.name = "error checks with key",
			.test = test_errors_key,
			.requirements_met = extension_cap > 0,
		},
		{
			.name = "error checks for cmpxchg with key",
			.test = test_errors_cmpxchg_key,
			.requirements_met = extension_cap & 0x2,
		},
		{
			.name = "error checks for cmpxchg",
			.test = test_errors_cmpxchg,
			.requirements_met = extension_cap & 0x2,
		},
		{
			.name = "termination",
			.test = test_termination,
			.requirements_met = extension_cap > 0,
		},
		{
			.name = "error checks with key storage protection override",
			.test = test_errors_key_storage_prot_override,
			.requirements_met = extension_cap > 0,
		},
		{
			.name = "error checks without key fetch prot override",
			.test = test_errors_key_fetch_prot_override_not_enabled,
			.requirements_met = extension_cap > 0,
		},
		{
			.name = "error checks with key fetch prot override",
			.test = test_errors_key_fetch_prot_override_enabled,
			.requirements_met = extension_cap > 0,
		},
	};

	ksft_print_header();
	ksft_set_plan(ARRAY_SIZE(testlist));

	for (idx = 0; idx < ARRAY_SIZE(testlist); idx++) {
		if (testlist[idx].requirements_met) {
			testlist[idx].test();
			ksft_test_result_pass("%s\n", testlist[idx].name);
		} else {
			ksft_test_result_skip("%s - requirements not met (kernel has extension cap %#x)\n",
					      testlist[idx].name, extension_cap);
		}
	}

	ksft_finished();	/* Print results and exit() accordingly */
}