/*
 * Copyright (C) 2018-2022 Intel Corporation.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <types.h>
#include <asm/per_cpu.h>
#include <asm/mmu.h>
#include <asm/guest/vcpu.h>
#include <asm/vmx.h>
#include <asm/guest/vm.h>
#include <asm/init.h>
#include <logmsg.h>
#include <dump.h>
#include <reloc.h>

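/* Maximum number of stack frames walked when printing the host call trace */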
#define CALL_TRACE_HIERARCHY_MAX    20U
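/* Size, in bytes, of the stack region dumped by the stack dump routines */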
#define DUMP_STACK_SIZE 0x200U

static spinlock_t exception_spinlock = { .head = 0U, .tail = 0U, };
/*
 * Human-readable exception descriptors, indexed by vector number.
 */
static const char *const excp_names[32] = {
	[0] = "Divide Error",
	[1] = "RESERVED",
	[2] = "NMI",
	[3] = "Breakpoint",
	[4] = "Overflow",
	[5] = "BOUND range exceeded",
	[6] = "Invalid Opcode",
	[7] = "Device Not Available",
	[8] = "Double Fault",
	[9] = "Coprocessor Segment Overrun",
	[10] = "Invalid TSS",
	[11] = "Segment Not Present",
	[12] = "Stack Segment Fault",
	[13] = "General Protection",
	[14] = "Page Fault",
	[15] = "Intel Reserved",
	[16] = "x87 FPU Floating Point Error",
	[17] = "Alignment Check",
	[18] = "Machine Check",
	[19] = "SIMD Floating Point Exception",
	[20] = "Virtualization Exception",
	[21] = "Intel Reserved",
	[22] = "Intel Reserved",
	[23] = "Intel Reserved",
	[24] = "Intel Reserved",
	[25] = "Intel Reserved",
	[26] = "Intel Reserved",
	[27] = "Intel Reserved",
	[28] = "Intel Reserved",
	[29] = "Intel Reserved",
	[30] = "Intel Reserved",
	[31] = "Intel Reserved"
};

/*
 * Global variable for saving registers on exception.
 * Do not make crash_ctx static.
 * crash_ctx is meant for offline analysis after a system crash, not for
 * runtime use. Since crash_ctx is only written and never read, the compiler
 * would treat it as an unused variable if it were static and would not
 * generate code for it.
 */
struct intr_excp_ctx *crash_ctx;

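/* Log the vCPU's RIP, RSP, RFLAGS, control registers and general-purpose registers */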
static void dump_guest_reg(struct acrn_vcpu *vcpu)
{
	uint16_t pcpu_id = pcpuid_from_vcpu(vcpu);

	pr_acrnlog("\n\n================================================");
	pr_acrnlog("================================\n\n");
	pr_acrnlog("Guest Registers:\r\n");
	pr_acrnlog("=	VM ID %d ==== vCPU ID %hu ===  pCPU ID %d ===="
			"world %d =============\r\n",
			vcpu->vm->vm_id, vcpu->vcpu_id, pcpu_id,
			vcpu->arch.cur_context);
	pr_acrnlog("=	RIP=0x%016lx  RSP=0x%016lx RFLAGS=0x%016lx\r\n",
			vcpu_get_rip(vcpu),
			vcpu_get_gpreg(vcpu, CPU_REG_RSP),
			vcpu_get_rflags(vcpu));
	pr_acrnlog("=	CR0=0x%016lx  CR2=0x%016lx  CR3=0x%016lx\r\n",
			vcpu_get_cr0(vcpu),
			vcpu_get_cr2(vcpu),
			exec_vmread(VMX_GUEST_CR3));
	pr_acrnlog("=	RAX=0x%016lx  RBX=0x%016lx  RCX=0x%016lx\r\n",
			vcpu_get_gpreg(vcpu, CPU_REG_RAX),
			vcpu_get_gpreg(vcpu, CPU_REG_RBX),
			vcpu_get_gpreg(vcpu, CPU_REG_RCX));
	pr_acrnlog("=	RDX=0x%016lx  RDI=0x%016lx  RSI=0x%016lx\r\n",
			vcpu_get_gpreg(vcpu, CPU_REG_RDX),
			vcpu_get_gpreg(vcpu, CPU_REG_RDI),
			vcpu_get_gpreg(vcpu, CPU_REG_RSI));
	pr_acrnlog("=	RBP=0x%016lx  R8=0x%016lx  R9=0x%016lx\r\n",
			vcpu_get_gpreg(vcpu, CPU_REG_RBP),
			vcpu_get_gpreg(vcpu, CPU_REG_R8),
			vcpu_get_gpreg(vcpu, CPU_REG_R9));
	pr_acrnlog("=	R10=0x%016lx  R11=0x%016lx  R12=0x%016lx\r\n",
			vcpu_get_gpreg(vcpu, CPU_REG_R10),
			vcpu_get_gpreg(vcpu, CPU_REG_R11),
			vcpu_get_gpreg(vcpu, CPU_REG_R12));
	pr_acrnlog("=	R13=0x%016lx  R14=0x%016lx  R15=0x%016lx\r\n",
			vcpu_get_gpreg(vcpu, CPU_REG_R13),
			vcpu_get_gpreg(vcpu, CPU_REG_R14),
			vcpu_get_gpreg(vcpu, CPU_REG_R15));
	pr_acrnlog("\r\n");
}

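/*
 * Dump DUMP_STACK_SIZE bytes of the guest stack, read from the guest RSP
 * via copy_from_gva(), four 64-bit words per output line.
 */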
static void dump_guest_stack(struct acrn_vcpu *vcpu)
{
	uint32_t i;
	uint64_t tmp[DUMP_STACK_SIZE], fault_addr;
	uint32_t err_code = 0U;

	if (copy_from_gva(vcpu, tmp, vcpu_get_gpreg(vcpu, CPU_REG_RSP),
		DUMP_STACK_SIZE, &err_code, &fault_addr) < 0) {
		pr_acrnlog("\r\nUnable to Copy Guest Stack:\r\n");
		return;
	}

	pr_acrnlog("\r\nGuest Stack:\r\n");
	pr_acrnlog("Dump stack for vcpu %hu, from gva 0x%016lx\r\n",
			vcpu->vcpu_id, vcpu_get_gpreg(vcpu, CPU_REG_RSP));
	for (i = 0U; i < (DUMP_STACK_SIZE >> 5U); i++) {
		pr_acrnlog("guest_rsp(0x%lx):  0x%016lx  0x%016lx 0x%016lx  0x%016lx\r\n",
				(vcpu_get_gpreg(vcpu, CPU_REG_RSP) + (i * 32U)),
				tmp[i * 4U], tmp[(i * 4U) + 1U],
				tmp[(i * 4U) + 2U], tmp[(i * 4U) + 3U]);
	}
	pr_acrnlog("\r\n");
}

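/* Dump the registers and stack of the vCPU currently running on the given pCPU, if any */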
static void dump_guest_context(uint16_t pcpu_id)
{
	struct acrn_vcpu *vcpu = get_running_vcpu(pcpu_id);

	if (vcpu != NULL) {
		dump_guest_reg(vcpu);
		dump_guest_stack(vcpu);
	}
}

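/*
 * Dump the raw host stack at 'rsp', then walk the frame-pointer chain
 * starting at 'rbp_arg' to print the host call trace. The relocation delta
 * is logged so the raw addresses can be mapped back to link-time symbols.
 */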
static void show_host_call_trace(uint64_t rsp, uint64_t rbp_arg, uint16_t pcpu_id)
{
	uint64_t rbp = rbp_arg, return_address;
	uint32_t i = 0U;
	uint32_t cb_hierarchy = 0U;
	uint64_t *sp = (uint64_t *)rsp;

	pr_acrnlog("\r\n delta = (actual_load_address - CONFIG_HV_RAM_START) = 0x%llx\r\n", get_hv_image_delta());
	pr_acrnlog("\r\nHost Stack: CPU_ID = %hu\r\n", pcpu_id);
	for (i = 0U; i < (DUMP_STACK_SIZE >> 5U); i++) {
		pr_acrnlog("addr(0x%lx)	0x%016lx  0x%016lx  0x%016lx  0x%016lx\r\n",
			(rsp + (i * 32U)), sp[i * 4U],
			sp[(i * 4U) + 1U], sp[(i * 4U) + 2U],
			sp[(i * 4U) + 3U]);
	}
	pr_acrnlog("\r\n");

	pr_acrnlog("Host Call Trace:\r\n");

	/* If the compiler option -fno-omit-frame-pointer is enabled, the
	 * x86_64 stack layout on a function call looks like this:
	 *
	 *                  |                    |
	 *       rbp+8      |  return address    |
	 *       rbp        |  rbp               |    push rbp
	 *                  |                    |    mov rsp, rbp
	 *
	 *       rsp        |                    |
	 *
	 * Each saved rbp points to the caller's frame, so the trace walks
	 * the chain of frame pointers until it reaches SP_BOTTOM_MAGIC at
	 * the stack bottom or exceeds CALL_TRACE_HIERARCHY_MAX frames.
	 *
	 * If an address in the chain is invalid, dereferencing it causes a
	 * hypervisor page fault, which then halts the system. */
	while (cb_hierarchy < CALL_TRACE_HIERARCHY_MAX) {
		return_address = *(uint64_t *)(rbp + sizeof(uint64_t));
		if (return_address == SP_BOTTOM_MAGIC) {
			break;
		}
		pr_acrnlog("----> 0x%016lx\r\n", return_address);
		/* Follow the saved frame pointer to the caller's frame */
		rbp = *(uint64_t *)rbp;
		cb_hierarchy++;
	}
	pr_acrnlog("\r\n");
}

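/*
 * Log an assertion failure with the host call trace and guest context,
 * then halt the pCPU in a pause loop.
 */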
void asm_assert(int32_t line, const char *file, const char *txt)
{
	uint16_t pcpu_id = get_pcpu_id();
	uint64_t rsp = cpu_rsp_get();
	uint64_t rbp = cpu_rbp_get();

	pr_acrnlog("Assertion failed in file %s, line %d : %s",
			file, line, txt);
	show_host_call_trace(rsp, rbp, pcpu_id);
	dump_guest_context(pcpu_id);
	do {
		asm_pause();
	} while (1);
}

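/*
 * Print the host register state captured in the interrupt/exception
 * context frame, including the exception name for vectors below 0x20.
 */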
void dump_intr_excp_frame(const struct intr_excp_ctx *ctx)
{
	const char *name = "Not defined";
	uint64_t cr2_val;

	pr_acrnlog("\n\n================================================");
	pr_acrnlog("================================\n=\n");
	if (ctx->vector < 0x20UL) {
		name = excp_names[ctx->vector];
		pr_acrnlog("= Unhandled exception: %d (%s)\n", ctx->vector, name);
	}

	/* Dump host registers */
	pr_acrnlog("\r\nHost Registers:\r\n");
	pr_acrnlog("=  Vector=0x%016llX  RIP=0x%016llX\n",
			ctx->vector, ctx->rip);
	pr_acrnlog("=     RAX=0x%016llX  RBX=0x%016llX  RCX=0x%016llX\n",
			ctx->gp_regs.rax, ctx->gp_regs.rbx, ctx->gp_regs.rcx);
	pr_acrnlog("=     RDX=0x%016llX  RDI=0x%016llX  RSI=0x%016llX\n",
			ctx->gp_regs.rdx, ctx->gp_regs.rdi, ctx->gp_regs.rsi);
	pr_acrnlog("=     RSP=0x%016llX  RBP=0x%016llX  RBX=0x%016llX\n",
			ctx->rsp, ctx->gp_regs.rbp, ctx->gp_regs.rbx);
	pr_acrnlog("=      R8=0x%016llX   R9=0x%016llX  R10=0x%016llX\n",
			ctx->gp_regs.r8, ctx->gp_regs.r9, ctx->gp_regs.r10);
	pr_acrnlog("=     R11=0x%016llX  R12=0x%016llX  R13=0x%016llX\n",
			ctx->gp_regs.r11, ctx->gp_regs.r12, ctx->gp_regs.r13);
	pr_acrnlog("=  RFLAGS=0x%016llX  R14=0x%016llX  R15=0x%016llX\n",
			ctx->rflags, ctx->gp_regs.r14, ctx->gp_regs.r15);
	pr_acrnlog("= ERRCODE=0x%016llX   CS=0x%016llX   SS=0x%016llX\n",
			ctx->error_code, ctx->cs, ctx->ss);
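	/* CR2 holds the linear address that triggered the most recent page fault */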
	asm volatile ("movq %%cr2, %0" : "=r" (cr2_val));
	pr_acrnlog("= CR2=0x%016llX", cr2_val);
	pr_acrnlog("\r\n");

	pr_acrnlog("=====================================================");
	pr_acrnlog("===========================\n");
}

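/*
 * Top-level exception dump: logs host and guest context under
 * exception_spinlock so that output from different pCPUs does not
 * interleave, and preserves the register context in crash_ctx for
 * offline analysis.
 */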
void dump_exception(struct intr_excp_ctx *ctx, uint16_t pcpu_id)
{
	/* Obtain lock to ensure the exception dump doesn't get corrupted */
	spinlock_obtain(&exception_spinlock);

	/* Dump host context */
	dump_intr_excp_frame(ctx);
	/* Show host stack */
	show_host_call_trace(ctx->gp_regs.rsp, ctx->gp_regs.rbp, pcpu_id);
	/* Dump guest context */
	dump_guest_context(pcpu_id);

	/* Save registers; flush caches so the context reaches memory for offline analysis */
	crash_ctx = ctx;
	flush_invalidate_all_cache();

	/* Release lock to let other CPUs handle exceptions */
	spinlock_release(&exception_spinlock);
}