// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 * Copyright (c) 2022 Ventana Micro Systems Inc.
 */

#include <linux/bitops.h>
#include <linux/kvm_host.h>

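/*
 * The major opcode lives in instruction bits [6:2]; SYSTEM is encoded
 * as 0x73, so (0x73 & INSN_OPCODE_MASK) >> INSN_OPCODE_SHIFT == 28.
 */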
#define INSN_OPCODE_MASK	0x007c
#define INSN_OPCODE_SHIFT	2
#define INSN_OPCODE_SYSTEM	28

#define INSN_MASK_WFI		0xffffffff
#define INSN_MATCH_WFI		0x10500073

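/*
 * For the Zicsr and scalar load/store encodings below, the 0x707f mask
 * selects the opcode (bits [6:0]) and funct3 (bits [14:12]) fields.
 */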
#define INSN_MATCH_CSRRW	0x1073
#define INSN_MASK_CSRRW		0x707f
#define INSN_MATCH_CSRRS	0x2073
#define INSN_MASK_CSRRS		0x707f
#define INSN_MATCH_CSRRC	0x3073
#define INSN_MASK_CSRRC		0x707f
#define INSN_MATCH_CSRRWI	0x5073
#define INSN_MASK_CSRRWI	0x707f
#define INSN_MATCH_CSRRSI	0x6073
#define INSN_MASK_CSRRSI	0x707f
#define INSN_MATCH_CSRRCI	0x7073
#define INSN_MASK_CSRRCI	0x707f

#define INSN_MATCH_LB		0x3
#define INSN_MASK_LB		0x707f
#define INSN_MATCH_LH		0x1003
#define INSN_MASK_LH		0x707f
#define INSN_MATCH_LW		0x2003
#define INSN_MASK_LW		0x707f
#define INSN_MATCH_LD		0x3003
#define INSN_MASK_LD		0x707f
#define INSN_MATCH_LBU		0x4003
#define INSN_MASK_LBU		0x707f
#define INSN_MATCH_LHU		0x5003
#define INSN_MASK_LHU		0x707f
#define INSN_MATCH_LWU		0x6003
#define INSN_MASK_LWU		0x707f
#define INSN_MATCH_SB		0x23
#define INSN_MASK_SB		0x707f
#define INSN_MATCH_SH		0x1023
#define INSN_MASK_SH		0x707f
#define INSN_MATCH_SW		0x2023
#define INSN_MASK_SW		0x707f
#define INSN_MATCH_SD		0x3023
#define INSN_MASK_SD		0x707f

#define INSN_MATCH_C_LD		0x6000
#define INSN_MASK_C_LD		0xe003
#define INSN_MATCH_C_SD		0xe000
#define INSN_MASK_C_SD		0xe003
#define INSN_MATCH_C_LW		0x4000
#define INSN_MASK_C_LW		0xe003
#define INSN_MATCH_C_SW		0xc000
#define INSN_MASK_C_SW		0xe003
#define INSN_MATCH_C_LDSP	0x6002
#define INSN_MASK_C_LDSP	0xe003
#define INSN_MATCH_C_SDSP	0xe002
#define INSN_MASK_C_SDSP	0xe003
#define INSN_MATCH_C_LWSP	0x4002
#define INSN_MASK_C_LWSP	0xe003
#define INSN_MATCH_C_SWSP	0xc002
#define INSN_MASK_C_SWSP	0xe003

#define INSN_16BIT_MASK		0x3

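/*
 * Standard RISC-V encodings use bits [1:0] == 11 for 32-bit
 * instructions; anything else is treated as a 16-bit compressed
 * encoding here.
 */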
#define INSN_IS_16BIT(insn)	(((insn) & INSN_16BIT_MASK) != INSN_16BIT_MASK)

#define INSN_LEN(insn)		(INSN_IS_16BIT(insn) ? 2 : 4)

#ifdef CONFIG_64BIT
#define LOG_REGBYTES		3
#else
#define LOG_REGBYTES		2
#endif
#define REGBYTES		(1 << LOG_REGBYTES)

#define SH_RD			7
#define SH_RS1			15
#define SH_RS2			20
#define SH_RS2C			2
#define MASK_RX			0x1f

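/*
 * RV_X() extracts an n-bit field starting at bit s. The RVC_*_IMM()
 * helpers reassemble the scattered immediate fields of compressed
 * loads/stores into byte offsets.
 */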
#define RV_X(x, s, n)		(((x) >> (s)) & ((1 << (n)) - 1))
#define RVC_LW_IMM(x)		((RV_X(x, 6, 1) << 2) | \
				 (RV_X(x, 10, 3) << 3) | \
				 (RV_X(x, 5, 1) << 6))
#define RVC_LD_IMM(x)		((RV_X(x, 10, 3) << 3) | \
				 (RV_X(x, 5, 2) << 6))
#define RVC_LWSP_IMM(x)		((RV_X(x, 4, 3) << 2) | \
				 (RV_X(x, 12, 1) << 5) | \
				 (RV_X(x, 2, 2) << 6))
#define RVC_LDSP_IMM(x)		((RV_X(x, 5, 2) << 3) | \
				 (RV_X(x, 12, 1) << 5) | \
				 (RV_X(x, 2, 3) << 6))
#define RVC_SWSP_IMM(x)		((RV_X(x, 9, 4) << 2) | \
				 (RV_X(x, 7, 2) << 6))
#define RVC_SDSP_IMM(x)		((RV_X(x, 10, 3) << 3) | \
				 (RV_X(x, 7, 3) << 6))
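/* Compressed 3-bit register fields name x8..x15, hence the "8 +" below */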
#define RVC_RS1S(insn)		(8 + RV_X(insn, SH_RD, 3))
#define RVC_RS2S(insn)		(8 + RV_X(insn, SH_RS2C, 3))
#define RVC_RS2(insn)		RV_X(insn, SH_RS2C, 5)

#define SHIFT_RIGHT(x, y)		\
	((y) < 0 ? ((x) << -(y)) : ((x) >> (y)))

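/*
 * REG_OFFSET() turns the register number found at bit position "pos"
 * into a byte offset into the saved register file; REG_PTR() then
 * yields a pointer into the guest context, which lays out the GPRs
 * first.
 */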
#define REG_MASK			\
	((1 << (5 + LOG_REGBYTES)) - (1 << LOG_REGBYTES))

#define REG_OFFSET(insn, pos)		\
	(SHIFT_RIGHT((insn), (pos) - LOG_REGBYTES) & REG_MASK)

#define REG_PTR(insn, pos, regs)	\
	((ulong *)((ulong)(regs) + REG_OFFSET(insn, pos)))

#define GET_FUNCT3(insn)	(((insn) >> 12) & 7)

#define GET_RS1(insn, regs)	(*REG_PTR(insn, SH_RS1, regs))
#define GET_RS2(insn, regs)	(*REG_PTR(insn, SH_RS2, regs))
#define GET_RS1S(insn, regs)	(*REG_PTR(RVC_RS1S(insn), 0, regs))
#define GET_RS2S(insn, regs)	(*REG_PTR(RVC_RS2S(insn), 0, regs))
#define GET_RS2C(insn, regs)	(*REG_PTR(insn, SH_RS2C, regs))
#define GET_SP(regs)		(*REG_PTR(2, 0, regs))
#define SET_RD(insn, regs, val)	(*REG_PTR(insn, SH_RD, regs) = (val))
#define IMM_I(insn)		((s32)(insn) >> 20)
#define IMM_S(insn)		(((s32)(insn) >> 25 << 5) | \
				 (s32)(((insn) >> 7) & 0x1f))

struct insn_func {
	unsigned long mask;
	unsigned long match;
	/*
	 * Possible return values are as follows:
	 * 1) Returns < 0 for error case
	 * 2) Returns 0 for exit to user-space
	 * 3) Returns 1 to continue with next sepc
	 * 4) Returns 2 to continue with same sepc
	 * 5) Returns 3 to inject illegal instruction trap and continue
	 * 6) Returns 4 to inject virtual instruction trap and continue
	 *
	 * Use enum kvm_insn_return for return values
	 */
	int (*func)(struct kvm_vcpu *vcpu, struct kvm_run *run, ulong insn);
};

static int truly_illegal_insn(struct kvm_vcpu *vcpu, struct kvm_run *run,
			      ulong insn)
{
	struct kvm_cpu_trap utrap = { 0 };

	/* Redirect trap to Guest VCPU */
	utrap.sepc = vcpu->arch.guest_context.sepc;
	utrap.scause = EXC_INST_ILLEGAL;
	utrap.stval = insn;
	utrap.htval = 0;
	utrap.htinst = 0;
	kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);

	return 1;
}

static int truly_virtual_insn(struct kvm_vcpu *vcpu, struct kvm_run *run,
			      ulong insn)
{
	struct kvm_cpu_trap utrap = { 0 };

	/* Redirect trap to Guest VCPU */
	utrap.sepc = vcpu->arch.guest_context.sepc;
	utrap.scause = EXC_VIRTUAL_INST_FAULT;
	utrap.stval = insn;
	utrap.htval = 0;
	utrap.htinst = 0;
	kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);

	return 1;
}

/**
 * kvm_riscv_vcpu_wfi -- Emulate wait for interrupt (WFI) behaviour
 *
 * @vcpu: The VCPU pointer
 */
void kvm_riscv_vcpu_wfi(struct kvm_vcpu *vcpu)
{
	if (!kvm_arch_vcpu_runnable(vcpu)) {
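		/*
		 * Drop the SRCU read lock so that memslot updates are
		 * not blocked while this VCPU sleeps in kvm_vcpu_halt().
		 */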
		kvm_vcpu_srcu_read_unlock(vcpu);
		kvm_vcpu_halt(vcpu);
		kvm_vcpu_srcu_read_lock(vcpu);
	}
}

static int wfi_insn(struct kvm_vcpu *vcpu, struct kvm_run *run, ulong insn)
{
	vcpu->stat.wfi_exit_stat++;
	kvm_riscv_vcpu_wfi(vcpu);
	return KVM_INSN_CONTINUE_NEXT_SEPC;
}

struct csr_func {
	unsigned int base;
	unsigned int count;
	/*
	 * Possible return values are the same as for the "func"
	 * callback in "struct insn_func".
	 */
	int (*func)(struct kvm_vcpu *vcpu, unsigned int csr_num,
		    unsigned long *val, unsigned long new_val,
		    unsigned long wr_mask);
};

static const struct csr_func csr_funcs[] = {
	KVM_RISCV_VCPU_HPMCOUNTER_CSR_FUNCS
};

/**
 * kvm_riscv_vcpu_csr_return -- Handle CSR read/write after user space
 *				emulation or in-kernel emulation
 *
 * @vcpu: The VCPU pointer
 * @run: The VCPU run struct containing the CSR data
 *
 * Returns > 0 upon failure and 0 upon success
 */
int kvm_riscv_vcpu_csr_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	ulong insn;

	if (vcpu->arch.csr_decode.return_handled)
		return 0;
	vcpu->arch.csr_decode.return_handled = 1;

	/* Update destination register for CSR reads */
	insn = vcpu->arch.csr_decode.insn;
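	/* rd == x0 is hardwired to zero, so skip the register update */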
	if ((insn >> SH_RD) & MASK_RX)
		SET_RD(insn, &vcpu->arch.guest_context,
		       run->riscv_csr.ret_value);

	/* Move to next instruction */
	vcpu->arch.guest_context.sepc += INSN_LEN(insn);

	return 0;
}

static int csr_insn(struct kvm_vcpu *vcpu, struct kvm_run *run, ulong insn)
{
	int i, rc = KVM_INSN_ILLEGAL_TRAP;
	unsigned int csr_num = insn >> SH_RS2;
	unsigned int rs1_num = (insn >> SH_RS1) & MASK_RX;
	ulong rs1_val = GET_RS1(insn, &vcpu->arch.guest_context);
	const struct csr_func *tcfn, *cfn = NULL;
	ulong val = 0, wr_mask = 0, new_val = 0;

	/*
	 * Decode the CSR instruction: the CSR number is the I-type
	 * immediate (insn >> 20), and (new_val, wr_mask) are derived so
	 * that the update is always
	 * csr = (csr & ~wr_mask) | (new_val & wr_mask). CSRRW writes all
	 * bits, CSRRS sets the rs1 bits, CSRRC clears them, and the *I
	 * forms use the 5-bit rs1 field as an immediate.
	 */
	switch (GET_FUNCT3(insn)) {
	case GET_FUNCT3(INSN_MATCH_CSRRW):
		wr_mask = -1UL;
		new_val = rs1_val;
		break;
	case GET_FUNCT3(INSN_MATCH_CSRRS):
		wr_mask = rs1_val;
		new_val = -1UL;
		break;
	case GET_FUNCT3(INSN_MATCH_CSRRC):
		wr_mask = rs1_val;
		new_val = 0;
		break;
	case GET_FUNCT3(INSN_MATCH_CSRRWI):
		wr_mask = -1UL;
		new_val = rs1_num;
		break;
	case GET_FUNCT3(INSN_MATCH_CSRRSI):
		wr_mask = rs1_num;
		new_val = -1UL;
		break;
	case GET_FUNCT3(INSN_MATCH_CSRRCI):
		wr_mask = rs1_num;
		new_val = 0;
		break;
	default:
		return rc;
	}

	/* Save instruction decode info */
	vcpu->arch.csr_decode.insn = insn;
	vcpu->arch.csr_decode.return_handled = 0;

	/* Update CSR details in kvm_run struct */
	run->riscv_csr.csr_num = csr_num;
	run->riscv_csr.new_value = new_val;
	run->riscv_csr.write_mask = wr_mask;
	run->riscv_csr.ret_value = 0;

	/* Find in-kernel CSR function */
	for (i = 0; i < ARRAY_SIZE(csr_funcs); i++) {
		tcfn = &csr_funcs[i];
		if ((tcfn->base <= csr_num) &&
		    (csr_num < (tcfn->base + tcfn->count))) {
			cfn = tcfn;
			break;
		}
	}

	/* First try in-kernel CSR emulation */
	if (cfn && cfn->func) {
		rc = cfn->func(vcpu, csr_num, &val, new_val, wr_mask);
		if (rc > KVM_INSN_EXIT_TO_USER_SPACE) {
			if (rc == KVM_INSN_CONTINUE_NEXT_SEPC) {
				run->riscv_csr.ret_value = val;
				vcpu->stat.csr_exit_kernel++;
				kvm_riscv_vcpu_csr_return(vcpu, run);
				rc = KVM_INSN_CONTINUE_SAME_SEPC;
			}
			return rc;
		}
	}

	/* Exit to user-space for CSR emulation */
	if (rc <= KVM_INSN_EXIT_TO_USER_SPACE) {
		vcpu->stat.csr_exit_user++;
		run->exit_reason = KVM_EXIT_RISCV_CSR;
	}

	return rc;
}

static const struct insn_func system_opcode_funcs[] = {
	{
		.mask = INSN_MASK_CSRRW,
		.match = INSN_MATCH_CSRRW,
		.func = csr_insn,
	},
	{
		.mask = INSN_MASK_CSRRS,
		.match = INSN_MATCH_CSRRS,
		.func = csr_insn,
	},
	{
		.mask = INSN_MASK_CSRRC,
		.match = INSN_MATCH_CSRRC,
		.func = csr_insn,
	},
	{
		.mask = INSN_MASK_CSRRWI,
		.match = INSN_MATCH_CSRRWI,
		.func = csr_insn,
	},
	{
		.mask = INSN_MASK_CSRRSI,
		.match = INSN_MATCH_CSRRSI,
		.func = csr_insn,
	},
	{
		.mask = INSN_MASK_CSRRCI,
		.match = INSN_MATCH_CSRRCI,
		.func = csr_insn,
	},
	{
		.mask = INSN_MASK_WFI,
		.match = INSN_MATCH_WFI,
		.func = wfi_insn,
	},
};

static int system_opcode_insn(struct kvm_vcpu *vcpu, struct kvm_run *run,
			      ulong insn)
{
	int i, rc = KVM_INSN_ILLEGAL_TRAP;
	const struct insn_func *ifn;

	for (i = 0; i < ARRAY_SIZE(system_opcode_funcs); i++) {
		ifn = &system_opcode_funcs[i];
		if ((insn & ifn->mask) == ifn->match) {
			rc = ifn->func(vcpu, run, insn);
			break;
		}
	}

	switch (rc) {
	case KVM_INSN_ILLEGAL_TRAP:
		return truly_illegal_insn(vcpu, run, insn);
	case KVM_INSN_VIRTUAL_TRAP:
		return truly_virtual_insn(vcpu, run, insn);
	case KVM_INSN_CONTINUE_NEXT_SEPC:
		vcpu->arch.guest_context.sepc += INSN_LEN(insn);
		break;
	default:
		break;
	}

	return (rc <= 0) ? rc : 1;
}

/**
 * kvm_riscv_vcpu_virtual_insn -- Handle virtual instruction trap
 *
 * @vcpu: The VCPU pointer
 * @run: The VCPU run struct containing the mmio data
 * @trap: Trap details
 *
 * Returns > 0 to continue run-loop.
 * Returns 0 to exit run-loop and handle in user-space.
 * Returns < 0 to report failure and exit run-loop.
 */
int kvm_riscv_vcpu_virtual_insn(struct kvm_vcpu *vcpu, struct kvm_run *run,
				struct kvm_cpu_trap *trap)
{
	unsigned long insn = trap->stval;
	struct kvm_cpu_trap utrap = { 0 };
	struct kvm_cpu_context *ct;

	if (unlikely(INSN_IS_16BIT(insn))) {
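		/*
		 * stval may hold zero when the hardware did not capture
		 * the trapping instruction, so fetch it from guest memory
		 * at sepc using an unprivileged read.
		 */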
		if (insn == 0) {
			ct = &vcpu->arch.guest_context;
			insn = kvm_riscv_vcpu_unpriv_read(vcpu, true,
							  ct->sepc,
							  &utrap);
			if (utrap.scause) {
				utrap.sepc = ct->sepc;
				kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);
				return 1;
			}
		}
		if (INSN_IS_16BIT(insn))
			return truly_illegal_insn(vcpu, run, insn);
	}

	switch ((insn & INSN_OPCODE_MASK) >> INSN_OPCODE_SHIFT) {
	case INSN_OPCODE_SYSTEM:
		return system_opcode_insn(vcpu, run, insn);
	default:
		return truly_illegal_insn(vcpu, run, insn);
	}
}

/**
 * kvm_riscv_vcpu_mmio_load -- Emulate MMIO load instruction
 *
 * @vcpu: The VCPU pointer
 * @run: The VCPU run struct containing the mmio data
 * @fault_addr: Guest physical address to load
 * @htinst: Transformed encoding of the load instruction
 *
 * Returns > 0 to continue run-loop.
 * Returns 0 to exit run-loop and handle in user-space.
 * Returns < 0 to report failure and exit run-loop.
 */
int kvm_riscv_vcpu_mmio_load(struct kvm_vcpu *vcpu, struct kvm_run *run,
			     unsigned long fault_addr,
			     unsigned long htinst)
{
	u8 data_buf[8];
	unsigned long insn;
	int shift = 0, len = 0, insn_len = 0;
	struct kvm_cpu_trap utrap = { 0 };
	struct kvm_cpu_context *ct = &vcpu->arch.guest_context;

	/* Determine trapped instruction */
	if (htinst & 0x1) {
		/*
		 * Bit[0] == 1 implies trapped instruction value is
		 * transformed instruction or custom instruction.
		 */
		insn = htinst | INSN_16BIT_MASK;
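		/*
		 * A transformed instruction decodes like a normal 32-bit
		 * encoding once bits [1:0] are restored; bit[1] of htinst
		 * still records whether the trapping instruction itself
		 * was compressed, which determines insn_len below.
		 */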
		insn_len = (htinst & BIT(1)) ? INSN_LEN(insn) : 2;
	} else {
		/*
		 * Bit[0] == 0 implies trapped instruction value is
		 * zero or special value.
		 */
		insn = kvm_riscv_vcpu_unpriv_read(vcpu, true, ct->sepc,
						  &utrap);
		if (utrap.scause) {
			/* Redirect trap if we failed to read instruction */
			utrap.sepc = ct->sepc;
			kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);
			return 1;
		}
		insn_len = INSN_LEN(insn);
	}

	/*
	 * Decode the access width ("len", in bytes) and the "shift" that
	 * kvm_riscv_vcpu_mmio_return() later applies when placing the
	 * loaded value into the destination register.
	 */
	if ((insn & INSN_MASK_LW) == INSN_MATCH_LW) {
		len = 4;
		shift = 8 * (sizeof(ulong) - len);
	} else if ((insn & INSN_MASK_LB) == INSN_MATCH_LB) {
		len = 1;
		shift = 8 * (sizeof(ulong) - len);
	} else if ((insn & INSN_MASK_LBU) == INSN_MATCH_LBU) {
		len = 1;
		shift = 8 * (sizeof(ulong) - len);
#ifdef CONFIG_64BIT
	} else if ((insn & INSN_MASK_LD) == INSN_MATCH_LD) {
		len = 8;
		shift = 8 * (sizeof(ulong) - len);
	} else if ((insn & INSN_MASK_LWU) == INSN_MATCH_LWU) {
		len = 4;
#endif
	} else if ((insn & INSN_MASK_LH) == INSN_MATCH_LH) {
		len = 2;
		shift = 8 * (sizeof(ulong) - len);
	} else if ((insn & INSN_MASK_LHU) == INSN_MATCH_LHU) {
		len = 2;
#ifdef CONFIG_64BIT
	} else if ((insn & INSN_MASK_C_LD) == INSN_MATCH_C_LD) {
		len = 8;
		shift = 8 * (sizeof(ulong) - len);
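		/*
		 * Fold the compressed rd' field into the regular rd
		 * position so that SET_RD() later picks the right GPR.
		 */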
		insn = RVC_RS2S(insn) << SH_RD;
	} else if ((insn & INSN_MASK_C_LDSP) == INSN_MATCH_C_LDSP &&
		   ((insn >> SH_RD) & 0x1f)) {
		len = 8;
		shift = 8 * (sizeof(ulong) - len);
#endif
	} else if ((insn & INSN_MASK_C_LW) == INSN_MATCH_C_LW) {
		len = 4;
		shift = 8 * (sizeof(ulong) - len);
		insn = RVC_RS2S(insn) << SH_RD;
	} else if ((insn & INSN_MASK_C_LWSP) == INSN_MATCH_C_LWSP &&
		   ((insn >> SH_RD) & 0x1f)) {
		len = 4;
		shift = 8 * (sizeof(ulong) - len);
	} else {
		return -EOPNOTSUPP;
	}

	/* Fault address should be aligned to length of MMIO */
	if (fault_addr & (len - 1))
		return -EIO;

	/* Save instruction decode info */
	vcpu->arch.mmio_decode.insn = insn;
	vcpu->arch.mmio_decode.insn_len = insn_len;
	vcpu->arch.mmio_decode.shift = shift;
	vcpu->arch.mmio_decode.len = len;
	vcpu->arch.mmio_decode.return_handled = 0;

	/* Update MMIO details in kvm_run struct */
	run->mmio.is_write = false;
	run->mmio.phys_addr = fault_addr;
	run->mmio.len = len;

	/* Try to handle MMIO access in the kernel */
	if (!kvm_io_bus_read(vcpu, KVM_MMIO_BUS, fault_addr, len, data_buf)) {
		/* Successfully handled MMIO access in the kernel so resume */
		memcpy(run->mmio.data, data_buf, len);
		vcpu->stat.mmio_exit_kernel++;
		kvm_riscv_vcpu_mmio_return(vcpu, run);
		return 1;
	}

	/* Exit to userspace for MMIO emulation */
	vcpu->stat.mmio_exit_user++;
	run->exit_reason = KVM_EXIT_MMIO;

	return 0;
}

/**
 * kvm_riscv_vcpu_mmio_store -- Emulate MMIO store instruction
 *
 * @vcpu: The VCPU pointer
 * @run: The VCPU run struct containing the mmio data
 * @fault_addr: Guest physical address to store
 * @htinst: Transformed encoding of the store instruction
 *
 * Returns > 0 to continue run-loop.
 * Returns 0 to exit run-loop and handle in user-space.
 * Returns < 0 to report failure and exit run-loop.
 */
int kvm_riscv_vcpu_mmio_store(struct kvm_vcpu *vcpu, struct kvm_run *run,
			      unsigned long fault_addr,
			      unsigned long htinst)
{
	u8 data8;
	u16 data16;
	u32 data32;
	u64 data64;
	ulong data;
	unsigned long insn;
	int len = 0, insn_len = 0;
	struct kvm_cpu_trap utrap = { 0 };
	struct kvm_cpu_context *ct = &vcpu->arch.guest_context;

	/* Determine trapped instruction */
	if (htinst & 0x1) {
		/*
		 * Bit[0] == 1 implies trapped instruction value is
		 * transformed instruction or custom instruction.
		 */
		insn = htinst | INSN_16BIT_MASK;
		insn_len = (htinst & BIT(1)) ? INSN_LEN(insn) : 2;
	} else {
		/*
		 * Bit[0] == 0 implies trapped instruction value is
		 * zero or special value.
		 */
		insn = kvm_riscv_vcpu_unpriv_read(vcpu, true, ct->sepc,
						  &utrap);
		if (utrap.scause) {
			/* Redirect trap if we failed to read instruction */
			utrap.sepc = ct->sepc;
			kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);
			return 1;
		}
		insn_len = INSN_LEN(insn);
	}

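	/*
	 * Read the store data assuming a regular rs2 field; the
	 * compressed encodings below re-read it from the rs2'/rs2
	 * fields, and narrower accesses truncate via data8/16/32.
	 */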
	data = GET_RS2(insn, &vcpu->arch.guest_context);
	data8 = data16 = data32 = data64 = data;

	if ((insn & INSN_MASK_SW) == INSN_MATCH_SW) {
		len = 4;
	} else if ((insn & INSN_MASK_SB) == INSN_MATCH_SB) {
		len = 1;
#ifdef CONFIG_64BIT
	} else if ((insn & INSN_MASK_SD) == INSN_MATCH_SD) {
		len = 8;
#endif
	} else if ((insn & INSN_MASK_SH) == INSN_MATCH_SH) {
		len = 2;
#ifdef CONFIG_64BIT
	} else if ((insn & INSN_MASK_C_SD) == INSN_MATCH_C_SD) {
		len = 8;
		data64 = GET_RS2S(insn, &vcpu->arch.guest_context);
	} else if ((insn & INSN_MASK_C_SDSP) == INSN_MATCH_C_SDSP &&
		   ((insn >> SH_RD) & 0x1f)) {
		len = 8;
		data64 = GET_RS2C(insn, &vcpu->arch.guest_context);
#endif
	} else if ((insn & INSN_MASK_C_SW) == INSN_MATCH_C_SW) {
		len = 4;
		data32 = GET_RS2S(insn, &vcpu->arch.guest_context);
	} else if ((insn & INSN_MASK_C_SWSP) == INSN_MATCH_C_SWSP &&
		   ((insn >> SH_RD) & 0x1f)) {
		len = 4;
		data32 = GET_RS2C(insn, &vcpu->arch.guest_context);
	} else {
		return -EOPNOTSUPP;
	}

	/* Fault address should be aligned to length of MMIO */
	if (fault_addr & (len - 1))
		return -EIO;

	/* Save instruction decode info */
	vcpu->arch.mmio_decode.insn = insn;
	vcpu->arch.mmio_decode.insn_len = insn_len;
	vcpu->arch.mmio_decode.shift = 0;
	vcpu->arch.mmio_decode.len = len;
	vcpu->arch.mmio_decode.return_handled = 0;

	/* Copy data to kvm_run instance */
	switch (len) {
	case 1:
		*((u8 *)run->mmio.data) = data8;
		break;
	case 2:
		*((u16 *)run->mmio.data) = data16;
		break;
	case 4:
		*((u32 *)run->mmio.data) = data32;
		break;
	case 8:
		*((u64 *)run->mmio.data) = data64;
		break;
	default:
		return -EOPNOTSUPP;
	}

	/* Update MMIO details in kvm_run struct */
	run->mmio.is_write = true;
	run->mmio.phys_addr = fault_addr;
	run->mmio.len = len;

	/* Try to handle MMIO access in the kernel */
	if (!kvm_io_bus_write(vcpu, KVM_MMIO_BUS,
			      fault_addr, len, run->mmio.data)) {
		/* Successfully handled MMIO access in the kernel so resume */
		vcpu->stat.mmio_exit_kernel++;
		kvm_riscv_vcpu_mmio_return(vcpu, run);
		return 1;
	}

	/* Exit to userspace for MMIO emulation */
	vcpu->stat.mmio_exit_user++;
	run->exit_reason = KVM_EXIT_MMIO;

	return 0;
}

/**
 * kvm_riscv_vcpu_mmio_return -- Handle MMIO loads after user space emulation
 *				 or in-kernel IO emulation
 *
 * @vcpu: The VCPU pointer
 * @run: The VCPU run struct containing the mmio data
 */
int kvm_riscv_vcpu_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	u8 data8;
	u16 data16;
	u32 data32;
	u64 data64;
	ulong insn;
	int len, shift;

	if (vcpu->arch.mmio_decode.return_handled)
		return 0;

	vcpu->arch.mmio_decode.return_handled = 1;
	insn = vcpu->arch.mmio_decode.insn;

	if (run->mmio.is_write)
		goto done;

	len = vcpu->arch.mmio_decode.len;
	shift = vcpu->arch.mmio_decode.shift;

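	/*
	 * Replay the recorded width and shift so that the value produced
	 * by user space or the in-kernel device lands in rd just as the
	 * original load instruction would have written it.
	 */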
	switch (len) {
	case 1:
		data8 = *((u8 *)run->mmio.data);
		SET_RD(insn, &vcpu->arch.guest_context,
		       (ulong)data8 << shift >> shift);
		break;
	case 2:
		data16 = *((u16 *)run->mmio.data);
		SET_RD(insn, &vcpu->arch.guest_context,
		       (ulong)data16 << shift >> shift);
		break;
	case 4:
		data32 = *((u32 *)run->mmio.data);
		SET_RD(insn, &vcpu->arch.guest_context,
		       (ulong)data32 << shift >> shift);
		break;
	case 8:
		data64 = *((u64 *)run->mmio.data);
		SET_RD(insn, &vcpu->arch.guest_context,
		       (ulong)data64 << shift >> shift);
		break;
	default:
		return -EOPNOTSUPP;
	}

done:
	/* Move to next instruction */
	vcpu->arch.guest_context.sepc += vcpu->arch.mmio_decode.insn_len;

	return 0;
}