// SPDX-License-Identifier: GPL-2.0-or-later

#include <linux/init.h>
#include <linux/log2.h>
#include <kunit/test.h>

#include <asm/guest-state-buffer.h>
#include <asm/kvm_ppc.h>

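/* Check that a new guest state buffer gets a header and a power-of-two capacity */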
static void test_creating_buffer(struct kunit *test)
{
        struct kvmppc_gs_buff *gsb;
        size_t size = 0x100;

        gsb = kvmppc_gsb_new(size, 0, 0, GFP_KERNEL);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, gsb);

        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, gsb->hdr);

        KUNIT_EXPECT_EQ(test, gsb->capacity, roundup_pow_of_two(size));
        KUNIT_EXPECT_EQ(test, gsb->len, sizeof(__be32));

        kvmppc_gsb_free(gsb);
}

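/* Add elements directly and via the typed wrappers, then iterate and verify them */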
static void test_adding_element(struct kunit *test)
{
        const struct kvmppc_gs_elem *head, *curr;
        union {
                __vector128 v;
                u64 dw[2];
        } u;
        int rem;
        struct kvmppc_gs_buff *gsb;
        size_t size = 0x1000;
        int i, rc;
        u64 data;

        gsb = kvmppc_gsb_new(size, 0, 0, GFP_KERNEL);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, gsb);

        /* Single elements, direct use of __kvmppc_gse_put() */
        data = 0xdeadbeef;
        rc = __kvmppc_gse_put(gsb, KVMPPC_GSID_GPR(0), 8, &data);
        KUNIT_EXPECT_GE(test, rc, 0);

        head = kvmppc_gsb_data(gsb);
        KUNIT_EXPECT_EQ(test, kvmppc_gse_iden(head), KVMPPC_GSID_GPR(0));
        KUNIT_EXPECT_EQ(test, kvmppc_gse_len(head), 8);
        data = 0;
        memcpy(&data, kvmppc_gse_data(head), 8);
        KUNIT_EXPECT_EQ(test, data, 0xdeadbeef);

        /* Multiple elements, simple wrapper */
        rc = kvmppc_gse_put_u64(gsb, KVMPPC_GSID_GPR(1), 0xcafef00d);
        KUNIT_EXPECT_GE(test, rc, 0);

        u.dw[0] = 0x1;
        u.dw[1] = 0x2;
        rc = kvmppc_gse_put_vector128(gsb, KVMPPC_GSID_VSRS(0), &u.v);
        KUNIT_EXPECT_GE(test, rc, 0);
        u.dw[0] = 0x0;
        u.dw[1] = 0x0;

        kvmppc_gsb_for_each_elem(i, curr, gsb, rem) {
                switch (i) {
                case 0:
                        KUNIT_EXPECT_EQ(test, kvmppc_gse_iden(curr),
                                        KVMPPC_GSID_GPR(0));
                        KUNIT_EXPECT_EQ(test, kvmppc_gse_len(curr), 8);
                        KUNIT_EXPECT_EQ(test, kvmppc_gse_get_be64(curr),
                                        0xdeadbeef);
                        break;
                case 1:
                        KUNIT_EXPECT_EQ(test, kvmppc_gse_iden(curr),
                                        KVMPPC_GSID_GPR(1));
                        KUNIT_EXPECT_EQ(test, kvmppc_gse_len(curr), 8);
                        KUNIT_EXPECT_EQ(test, kvmppc_gse_get_u64(curr),
                                        0xcafef00d);
                        break;
                case 2:
                        KUNIT_EXPECT_EQ(test, kvmppc_gse_iden(curr),
                                        KVMPPC_GSID_VSRS(0));
                        KUNIT_EXPECT_EQ(test, kvmppc_gse_len(curr), 16);
                        kvmppc_gse_get_vector128(curr, &u.v);
                        KUNIT_EXPECT_EQ(test, u.dw[0], 0x1);
                        KUNIT_EXPECT_EQ(test, u.dw[1], 0x2);
                        break;
                }
        }
        KUNIT_EXPECT_EQ(test, i, 3);

        kvmppc_gsb_reset(gsb);
        KUNIT_EXPECT_EQ(test, kvmppc_gsb_nelems(gsb), 0);
        KUNIT_EXPECT_EQ(test, kvmppc_gsb_len(gsb),
                        sizeof(struct kvmppc_gs_header));

        kvmppc_gsb_free(gsb);
}

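/* Put a value, parse the buffer, and look the element back up through the parser */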
static void test_gs_parsing(struct kunit *test)
{
        struct kvmppc_gs_elem *gse;
        struct kvmppc_gs_parser gsp = { 0 };
        struct kvmppc_gs_buff *gsb;
        size_t size = 0x1000;
        u64 tmp1, tmp2;

        gsb = kvmppc_gsb_new(size, 0, 0, GFP_KERNEL);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, gsb);

        tmp1 = 0xdeadbeefull;
        kvmppc_gse_put_u64(gsb, KVMPPC_GSID_GPR(0), tmp1);

        KUNIT_EXPECT_GE(test, kvmppc_gse_parse(&gsp, gsb), 0);

        gse = kvmppc_gsp_lookup(&gsp, KVMPPC_GSID_GPR(0));
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, gse);

        tmp2 = kvmppc_gse_get_u64(gse);
        KUNIT_EXPECT_EQ(test, tmp2, 0xdeadbeefull);

        kvmppc_gsb_free(gsb);
}

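/* Set and clear every known identity and check the bitmap iterator visits each one */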
static void test_gs_bitmap(struct kunit *test)
{
        struct kvmppc_gs_bitmap gsbm = { 0 };
        struct kvmppc_gs_bitmap gsbm1 = { 0 };
        struct kvmppc_gs_bitmap gsbm2 = { 0 };
        u16 iden;
        int i, j;

        i = 0;
        for (u16 iden = KVMPPC_GSID_HOST_STATE_SIZE;
             iden <= KVMPPC_GSID_PROCESS_TABLE; iden++) {
                kvmppc_gsbm_set(&gsbm, iden);
                kvmppc_gsbm_set(&gsbm1, iden);
                KUNIT_EXPECT_TRUE(test, kvmppc_gsbm_test(&gsbm, iden));
                kvmppc_gsbm_clear(&gsbm, iden);
                KUNIT_EXPECT_FALSE(test, kvmppc_gsbm_test(&gsbm, iden));
                i++;
        }

        for (u16 iden = KVMPPC_GSID_L0_GUEST_HEAP;
             iden <= KVMPPC_GSID_L0_GUEST_PGTABLE_RECLAIM; iden++) {
                kvmppc_gsbm_set(&gsbm, iden);
                kvmppc_gsbm_set(&gsbm1, iden);
                KUNIT_EXPECT_TRUE(test, kvmppc_gsbm_test(&gsbm, iden));
                kvmppc_gsbm_clear(&gsbm, iden);
                KUNIT_EXPECT_FALSE(test, kvmppc_gsbm_test(&gsbm, iden));
                i++;
        }

        for (u16 iden = KVMPPC_GSID_RUN_INPUT; iden <= KVMPPC_GSID_VPA;
             iden++) {
                kvmppc_gsbm_set(&gsbm, iden);
                kvmppc_gsbm_set(&gsbm1, iden);
                KUNIT_EXPECT_TRUE(test, kvmppc_gsbm_test(&gsbm, iden));
                kvmppc_gsbm_clear(&gsbm, iden);
                KUNIT_EXPECT_FALSE(test, kvmppc_gsbm_test(&gsbm, iden));
                i++;
        }

        for (u16 iden = KVMPPC_GSID_GPR(0); iden <= KVMPPC_GSE_DW_REGS_END; iden++) {
                kvmppc_gsbm_set(&gsbm, iden);
                kvmppc_gsbm_set(&gsbm1, iden);
                KUNIT_EXPECT_TRUE(test, kvmppc_gsbm_test(&gsbm, iden));
                kvmppc_gsbm_clear(&gsbm, iden);
                KUNIT_EXPECT_FALSE(test, kvmppc_gsbm_test(&gsbm, iden));
                i++;
        }

        for (u16 iden = KVMPPC_GSID_CR; iden <= KVMPPC_GSID_PSPB; iden++) {
                kvmppc_gsbm_set(&gsbm, iden);
                kvmppc_gsbm_set(&gsbm1, iden);
                KUNIT_EXPECT_TRUE(test, kvmppc_gsbm_test(&gsbm, iden));
                kvmppc_gsbm_clear(&gsbm, iden);
                KUNIT_EXPECT_FALSE(test, kvmppc_gsbm_test(&gsbm, iden));
                i++;
        }

        for (u16 iden = KVMPPC_GSID_VSRS(0); iden <= KVMPPC_GSID_VSRS(63);
             iden++) {
                kvmppc_gsbm_set(&gsbm, iden);
                kvmppc_gsbm_set(&gsbm1, iden);
                KUNIT_EXPECT_TRUE(test, kvmppc_gsbm_test(&gsbm, iden));
                kvmppc_gsbm_clear(&gsbm, iden);
                KUNIT_EXPECT_FALSE(test, kvmppc_gsbm_test(&gsbm, iden));
                i++;
        }

        for (u16 iden = KVMPPC_GSID_HDAR; iden <= KVMPPC_GSID_ASDR; iden++) {
                kvmppc_gsbm_set(&gsbm, iden);
                kvmppc_gsbm_set(&gsbm1, iden);
                KUNIT_EXPECT_TRUE(test, kvmppc_gsbm_test(&gsbm, iden));
                kvmppc_gsbm_clear(&gsbm, iden);
                KUNIT_EXPECT_FALSE(test, kvmppc_gsbm_test(&gsbm, iden));
                i++;
        }

        j = 0;
        kvmppc_gsbm_for_each(&gsbm1, iden)
        {
                kvmppc_gsbm_set(&gsbm2, iden);
                j++;
        }
        KUNIT_EXPECT_EQ(test, i, j);
        KUNIT_EXPECT_MEMEQ(test, &gsbm1, &gsbm2, sizeof(gsbm1));
}

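/* Test data struct filled and refreshed by the test1 message ops */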
struct kvmppc_gs_msg_test1_data {
        u64 a;
        u32 b;
        struct kvmppc_gs_part_table c;
        struct kvmppc_gs_proc_table d;
        struct kvmppc_gs_buff_info e;
};

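/* Total buffer size needed for the elements used by the test1 message */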
static size_t test1_get_size(struct kvmppc_gs_msg *gsm)
{
        size_t size = 0;
        u16 ids[] = {
                KVMPPC_GSID_PARTITION_TABLE,
                KVMPPC_GSID_PROCESS_TABLE,
                KVMPPC_GSID_RUN_INPUT,
                KVMPPC_GSID_GPR(0),
                KVMPPC_GSID_CR,
        };

        for (int i = 0; i < ARRAY_SIZE(ids); i++)
                size += kvmppc_gse_total_size(kvmppc_gsid_size(ids[i]));
        return size;
}

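/* Pack the test1 data into the guest state buffer for each included identity */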
static int test1_fill_info(struct kvmppc_gs_buff *gsb,
                           struct kvmppc_gs_msg *gsm)
{
        struct kvmppc_gs_msg_test1_data *data = gsm->data;

        if (kvmppc_gsm_includes(gsm, KVMPPC_GSID_GPR(0)))
                kvmppc_gse_put_u64(gsb, KVMPPC_GSID_GPR(0), data->a);

        if (kvmppc_gsm_includes(gsm, KVMPPC_GSID_CR))
                kvmppc_gse_put_u32(gsb, KVMPPC_GSID_CR, data->b);

        if (kvmppc_gsm_includes(gsm, KVMPPC_GSID_PARTITION_TABLE))
                kvmppc_gse_put_part_table(gsb, KVMPPC_GSID_PARTITION_TABLE,
                                          data->c);

        if (kvmppc_gsm_includes(gsm, KVMPPC_GSID_PROCESS_TABLE))
                kvmppc_gse_put_proc_table(gsb, KVMPPC_GSID_PROCESS_TABLE,
                                          data->d);

        if (kvmppc_gsm_includes(gsm, KVMPPC_GSID_RUN_INPUT))
                kvmppc_gse_put_buff_info(gsb, KVMPPC_GSID_RUN_INPUT, data->e);

        return 0;
}

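/* Parse the buffer and copy the GPR(0) and CR values back into the test1 data */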
static int test1_refresh_info(struct kvmppc_gs_msg *gsm,
                              struct kvmppc_gs_buff *gsb)
{
        struct kvmppc_gs_parser gsp = { 0 };
        struct kvmppc_gs_msg_test1_data *data = gsm->data;
        struct kvmppc_gs_elem *gse;
        int rc;

        rc = kvmppc_gse_parse(&gsp, gsb);
        if (rc < 0)
                return rc;

        gse = kvmppc_gsp_lookup(&gsp, KVMPPC_GSID_GPR(0));
        if (gse)
                data->a = kvmppc_gse_get_u64(gse);

        gse = kvmppc_gsp_lookup(&gsp, KVMPPC_GSID_CR);
        if (gse)
                data->b = kvmppc_gse_get_u32(gse);

        return 0;
}

static struct kvmppc_gs_msg_ops gs_msg_test1_ops = {
        .get_size = test1_get_size,
        .fill_info = test1_fill_info,
        .refresh_info = test1_refresh_info,
};

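/* Round-trip the test1 data through a guest state message: fill, clear, refresh */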
static void test_gs_msg(struct kunit *test)
{
        struct kvmppc_gs_msg_test1_data test1_data = {
                .a = 0xdeadbeef,
                .b = 0x1,
        };
        struct kvmppc_gs_msg *gsm;
        struct kvmppc_gs_buff *gsb;

        gsm = kvmppc_gsm_new(&gs_msg_test1_ops, &test1_data, GSM_SEND,
                             GFP_KERNEL);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, gsm);

        gsb = kvmppc_gsb_new(kvmppc_gsm_size(gsm), 0, 0, GFP_KERNEL);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, gsb);

        kvmppc_gsm_include(gsm, KVMPPC_GSID_PARTITION_TABLE);
        kvmppc_gsm_include(gsm, KVMPPC_GSID_PROCESS_TABLE);
        kvmppc_gsm_include(gsm, KVMPPC_GSID_RUN_INPUT);
        kvmppc_gsm_include(gsm, KVMPPC_GSID_GPR(0));
        kvmppc_gsm_include(gsm, KVMPPC_GSID_CR);

        kvmppc_gsm_fill_info(gsm, gsb);

        memset(&test1_data, 0, sizeof(test1_data));

        kvmppc_gsm_refresh_info(gsm, gsb);
        KUNIT_EXPECT_EQ(test, test1_data.a, 0xdeadbeef);
        KUNIT_EXPECT_EQ(test, test1_data.b, 0x1);

        kvmppc_gsb_free(gsb);
        kvmppc_gsm_free(gsm);
}

/* Test data struct for hostwide/L0 counters */
struct kvmppc_gs_msg_test_hostwide_data {
        u64 guest_heap;
        u64 guest_heap_max;
        u64 guest_pgtable_size;
        u64 guest_pgtable_size_max;
        u64 guest_pgtable_reclaim;
};

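/* Total buffer size needed for the hostwide/L0 counter elements */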
static size_t test_hostwide_get_size(struct kvmppc_gs_msg *gsm)
{
        size_t size = 0;
        u16 ids[] = {
                KVMPPC_GSID_L0_GUEST_HEAP,
                KVMPPC_GSID_L0_GUEST_HEAP_MAX,
                KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE,
                KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE_MAX,
                KVMPPC_GSID_L0_GUEST_PGTABLE_RECLAIM
        };

        for (int i = 0; i < ARRAY_SIZE(ids); i++)
                size += kvmppc_gse_total_size(kvmppc_gsid_size(ids[i]));
        return size;
}

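/* Pack the hostwide counter values into the guest state buffer */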
static int test_hostwide_fill_info(struct kvmppc_gs_buff *gsb,
                                   struct kvmppc_gs_msg *gsm)
{
        struct kvmppc_gs_msg_test_hostwide_data *data = gsm->data;

        if (kvmppc_gsm_includes(gsm, KVMPPC_GSID_L0_GUEST_HEAP))
                kvmppc_gse_put_u64(gsb, KVMPPC_GSID_L0_GUEST_HEAP,
                                   data->guest_heap);
        if (kvmppc_gsm_includes(gsm, KVMPPC_GSID_L0_GUEST_HEAP_MAX))
                kvmppc_gse_put_u64(gsb, KVMPPC_GSID_L0_GUEST_HEAP_MAX,
                                   data->guest_heap_max);
        if (kvmppc_gsm_includes(gsm, KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE))
                kvmppc_gse_put_u64(gsb, KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE,
                                   data->guest_pgtable_size);
        if (kvmppc_gsm_includes(gsm, KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE_MAX))
                kvmppc_gse_put_u64(gsb, KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE_MAX,
                                   data->guest_pgtable_size_max);
        if (kvmppc_gsm_includes(gsm, KVMPPC_GSID_L0_GUEST_PGTABLE_RECLAIM))
                kvmppc_gse_put_u64(gsb, KVMPPC_GSID_L0_GUEST_PGTABLE_RECLAIM,
                                   data->guest_pgtable_reclaim);

        return 0;
}

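/* Parse the buffer and copy the hostwide counters back into the test data */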
static int test_hostwide_refresh_info(struct kvmppc_gs_msg *gsm,
                                      struct kvmppc_gs_buff *gsb)
{
        struct kvmppc_gs_parser gsp = { 0 };
        struct kvmppc_gs_msg_test_hostwide_data *data = gsm->data;
        struct kvmppc_gs_elem *gse;
        int rc;

        rc = kvmppc_gse_parse(&gsp, gsb);
        if (rc < 0)
                return rc;

        gse = kvmppc_gsp_lookup(&gsp, KVMPPC_GSID_L0_GUEST_HEAP);
        if (gse)
                data->guest_heap = kvmppc_gse_get_u64(gse);

        gse = kvmppc_gsp_lookup(&gsp, KVMPPC_GSID_L0_GUEST_HEAP_MAX);
        if (gse)
                data->guest_heap_max = kvmppc_gse_get_u64(gse);

        gse = kvmppc_gsp_lookup(&gsp, KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE);
        if (gse)
                data->guest_pgtable_size = kvmppc_gse_get_u64(gse);

        gse = kvmppc_gsp_lookup(&gsp, KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE_MAX);
        if (gse)
                data->guest_pgtable_size_max = kvmppc_gse_get_u64(gse);

        gse = kvmppc_gsp_lookup(&gsp, KVMPPC_GSID_L0_GUEST_PGTABLE_RECLAIM);
        if (gse)
                data->guest_pgtable_reclaim = kvmppc_gse_get_u64(gse);

        return 0;
}

static struct kvmppc_gs_msg_ops gs_msg_test_hostwide_ops = {
        .get_size = test_hostwide_get_size,
        .fill_info = test_hostwide_fill_info,
        .refresh_info = test_hostwide_refresh_info,
};

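/* Round-trip the hostwide counters through a guest state message: fill, clear, refresh */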
static void test_gs_hostwide_msg(struct kunit *test)
{
        struct kvmppc_gs_msg_test_hostwide_data test_data = {
                .guest_heap = 0xdeadbeef,
                .guest_heap_max = ~0ULL,
                .guest_pgtable_size = 0xff,
                .guest_pgtable_size_max = 0xffffff,
                .guest_pgtable_reclaim = 0xdeadbeef,
        };
        struct kvmppc_gs_msg *gsm;
        struct kvmppc_gs_buff *gsb;

        gsm = kvmppc_gsm_new(&gs_msg_test_hostwide_ops, &test_data, GSM_SEND,
                             GFP_KERNEL);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, gsm);

        gsb = kvmppc_gsb_new(kvmppc_gsm_size(gsm), 0, 0, GFP_KERNEL);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, gsb);

        kvmppc_gsm_include(gsm, KVMPPC_GSID_L0_GUEST_HEAP);
        kvmppc_gsm_include(gsm, KVMPPC_GSID_L0_GUEST_HEAP_MAX);
        kvmppc_gsm_include(gsm, KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE);
        kvmppc_gsm_include(gsm, KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE_MAX);
        kvmppc_gsm_include(gsm, KVMPPC_GSID_L0_GUEST_PGTABLE_RECLAIM);

        kvmppc_gsm_fill_info(gsm, gsb);

        memset(&test_data, 0, sizeof(test_data));

        kvmppc_gsm_refresh_info(gsm, gsb);
        KUNIT_EXPECT_EQ(test, test_data.guest_heap, 0xdeadbeef);
        KUNIT_EXPECT_EQ(test, test_data.guest_heap_max, ~0ULL);
        KUNIT_EXPECT_EQ(test, test_data.guest_pgtable_size, 0xff);
        KUNIT_EXPECT_EQ(test, test_data.guest_pgtable_size_max, 0xffffff);
        KUNIT_EXPECT_EQ(test, test_data.guest_pgtable_reclaim, 0xdeadbeef);

        kvmppc_gsb_free(gsb);
        kvmppc_gsm_free(gsm);
}

/* Test if the H_GUEST_GET_STATE for hostwide counters works */
static void test_gs_hostwide_counters(struct kunit *test)
{
        struct kvmppc_gs_msg_test_hostwide_data test_data;
        struct kvmppc_gs_parser gsp = { 0 };

        struct kvmppc_gs_msg *gsm;
        struct kvmppc_gs_buff *gsb;
        struct kvmppc_gs_elem *gse;
        int rc;

        if (!kvmhv_on_pseries())
                kunit_skip(test, "This test needs a kvm-hv guest");

        gsm = kvmppc_gsm_new(&gs_msg_test_hostwide_ops, &test_data, GSM_SEND,
                             GFP_KERNEL);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, gsm);

        gsb = kvmppc_gsb_new(kvmppc_gsm_size(gsm), 0, 0, GFP_KERNEL);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, gsb);

        kvmppc_gsm_include(gsm, KVMPPC_GSID_L0_GUEST_HEAP);
        kvmppc_gsm_include(gsm, KVMPPC_GSID_L0_GUEST_HEAP_MAX);
        kvmppc_gsm_include(gsm, KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE);
        kvmppc_gsm_include(gsm, KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE_MAX);
        kvmppc_gsm_include(gsm, KVMPPC_GSID_L0_GUEST_PGTABLE_RECLAIM);

        kvmppc_gsm_fill_info(gsm, gsb);

        /* With the HOST_WIDE flag, guestid and vcpuid are ignored */
        rc = kvmppc_gsb_recv(gsb, KVMPPC_GS_FLAGS_HOST_WIDE);
        KUNIT_ASSERT_EQ(test, rc, 0);

        /* Check that parsing the guest state buffer succeeds */
        rc = kvmppc_gse_parse(&gsp, gsb);
        KUNIT_ASSERT_EQ(test, rc, 0);

        /* Look up each counter in the parsed GSB and report its value */
        gse = kvmppc_gsp_lookup(&gsp, KVMPPC_GSID_L0_GUEST_HEAP);
        KUNIT_ASSERT_NOT_NULL_MSG(test, gse, "L0 Heap counter missing");
        kunit_info(test, "Guest Heap Size=%llu bytes",
                   kvmppc_gse_get_u64(gse));

        gse = kvmppc_gsp_lookup(&gsp, KVMPPC_GSID_L0_GUEST_HEAP_MAX);
        KUNIT_ASSERT_NOT_NULL_MSG(test, gse, "L0 Heap counter max missing");
        kunit_info(test, "Guest Heap Size Max=%llu bytes",
                   kvmppc_gse_get_u64(gse));

        gse = kvmppc_gsp_lookup(&gsp, KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE);
        KUNIT_ASSERT_NOT_NULL_MSG(test, gse, "L0 page-table size missing");
        kunit_info(test, "Guest Page-table Size=%llu bytes",
                   kvmppc_gse_get_u64(gse));

        gse = kvmppc_gsp_lookup(&gsp, KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE_MAX);
        KUNIT_ASSERT_NOT_NULL_MSG(test, gse, "L0 page-table size-max missing");
        kunit_info(test, "Guest Page-table Size Max=%llu bytes",
                   kvmppc_gse_get_u64(gse));

        gse = kvmppc_gsp_lookup(&gsp, KVMPPC_GSID_L0_GUEST_PGTABLE_RECLAIM);
        KUNIT_ASSERT_NOT_NULL_MSG(test, gse, "L0 page-table reclaim size missing");
        kunit_info(test, "Guest Page-table Reclaim Size=%llu bytes",
                   kvmppc_gse_get_u64(gse));

        kvmppc_gsm_free(gsm);
        kvmppc_gsb_free(gsb);
}

static struct kunit_case guest_state_buffer_testcases[] = {
        KUNIT_CASE(test_creating_buffer),
        KUNIT_CASE(test_adding_element),
        KUNIT_CASE(test_gs_bitmap),
        KUNIT_CASE(test_gs_parsing),
        KUNIT_CASE(test_gs_msg),
        KUNIT_CASE(test_gs_hostwide_msg),
        KUNIT_CASE(test_gs_hostwide_counters),
        {}
};

static struct kunit_suite guest_state_buffer_test_suite = {
        .name = "guest_state_buffer_test",
        .test_cases = guest_state_buffer_testcases,
};

kunit_test_suites(&guest_state_buffer_test_suite);

MODULE_DESCRIPTION("KUnit tests for Guest State Buffer APIs");
MODULE_LICENSE("GPL");