/*
 * xen/arch/arm/vsmc.c
 *
 * Generic handler for SMC and HVC calls according to
 * ARM SMC calling convention
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */


#include <xen/lib.h>
#include <xen/types.h>
#include <public/arch-arm/smccc.h>
#include <asm/monitor.h>
#include <asm/psci.h>
#include <asm/regs.h>
#include <asm/smccc.h>
#include <asm/traps.h>

/* Number of functions currently supported by Hypervisor Service. */
#define XEN_SMCCC_FUNCTION_COUNT 3

/* Number of functions currently supported by Standard Secure Service Calls. */
#define SSSC_SMCCC_FUNCTION_COUNT 13

static bool fill_uid(struct cpu_user_regs *regs, xen_uuid_t uuid)
{
    int n;

    /*
     * UID is returned in registers r0..r3, four bytes per register,
     * first byte is stored in low-order bits of a register.
     * (ARM DEN 0028B page 14)
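     * For example, r0 = uuid.a[3] << 24 | uuid.a[2] << 16 |
     *              uuid.a[1] << 8 | uuid.a[0].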
     */
    for ( n = 0; n < 4; n++ )
    {
        const uint8_t *bytes = uuid.a + n * 4;
        uint32_t r;

        r = bytes[0];
        r |= bytes[1] << 8;
        r |= bytes[2] << 16;
        r |= bytes[3] << 24;

        set_user_reg(regs, n, r);
    }

    return true;
}

static bool fill_revision(struct cpu_user_regs *regs, uint32_t major,
                          uint32_t minor)
{
    /*
     * Revision is returned in registers r0 and r1.
     * r0 stores major part of the version
     * r1 stores minor part of the version
     * (ARM DEN 0028B page 15)
     */
    set_user_reg(regs, 0, major);
    set_user_reg(regs, 1, minor);

    return true;
}

static bool fill_function_call_count(struct cpu_user_regs *regs, uint32_t cnt)
{
    /*
     * Function call count is returned as any other return value in register r0
     * (ARM DEN 0028B page 17)
     */
    set_user_reg(regs, 0, cnt);

    return true;
}

/* SMCCC interface for the hypervisor. Reports information about Xen itself. */
static bool handle_hypervisor(struct cpu_user_regs *regs)
{
    switch ( smccc_get_fn(get_user_reg(regs, 0)) )
    {
    case ARM_SMCCC_FUNC_CALL_COUNT:
        return fill_function_call_count(regs, XEN_SMCCC_FUNCTION_COUNT);
    case ARM_SMCCC_FUNC_CALL_UID:
        return fill_uid(regs, XEN_SMCCC_UID);
    case ARM_SMCCC_FUNC_CALL_REVISION:
        return fill_revision(regs, XEN_SMCCC_MAJOR_REVISION,
                             XEN_SMCCC_MINOR_REVISION);
    default:
        return false;
    }
}

#define PSCI_SET_RESULT(reg, val) set_user_reg(reg, 0, val)
#define PSCI_ARG(reg, n) get_user_reg(reg, n)

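/*
 * PSCI_ARG32 is used for parameters that are defined as 32-bit values: only
 * the lower half of the register is meaningful, so the upper bits are
 * discarded on 64-bit builds.
 */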
#ifdef CONFIG_ARM_64
#define PSCI_ARG32(reg, n) (uint32_t)(get_user_reg(reg, n))
#else
#define PSCI_ARG32(reg, n) PSCI_ARG(reg, n)
#endif

/* Existing (pre-SMCCC) APIs. This includes the PSCI 0.1 interface. */
static bool handle_existing_apis(struct cpu_user_regs *regs)
{
    /* Only the lower 32 bits are significant (ARM DEN 0028B, page 12). */
    switch ( (uint32_t)get_user_reg(regs, 0) )
    {
    case PSCI_cpu_off:
    {
        uint32_t pstate = PSCI_ARG32(regs, 1);

        perfc_incr(vpsci_cpu_off);
        PSCI_SET_RESULT(regs, do_psci_cpu_off(pstate));
        return true;
    }
    case PSCI_cpu_on:
    {
        uint32_t vcpuid = PSCI_ARG32(regs, 1);
        register_t epoint = PSCI_ARG(regs, 2);

        perfc_incr(vpsci_cpu_on);
        PSCI_SET_RESULT(regs, do_psci_cpu_on(vcpuid, epoint));
        return true;
    }
    default:
        return false;
    }
}

/* PSCI 0.2 interface and other Standard Secure Service Calls */
static bool handle_sssc(struct cpu_user_regs *regs)
{
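    /* Only the lower 32 bits of the function identifier are significant. */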
    uint32_t fid = (uint32_t)get_user_reg(regs, 0);

    switch ( smccc_get_fn(fid) )
    {
    case PSCI_0_2_FN_PSCI_VERSION:
        perfc_incr(vpsci_version);
        PSCI_SET_RESULT(regs, do_psci_0_2_version());
        return true;

    case PSCI_0_2_FN_CPU_OFF:
        perfc_incr(vpsci_cpu_off);
        PSCI_SET_RESULT(regs, do_psci_0_2_cpu_off());
        return true;

    case PSCI_0_2_FN_MIGRATE_INFO_TYPE:
        perfc_incr(vpsci_migrate_info_type);
        PSCI_SET_RESULT(regs, do_psci_0_2_migrate_info_type());
        return true;

    case PSCI_0_2_FN_MIGRATE_INFO_UP_CPU:
        perfc_incr(vpsci_migrate_info_up_cpu);
        PSCI_SET_RESULT(regs, do_psci_0_2_migrate_info_up_cpu());
        return true;

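    /*
     * SYSTEM_OFF and SYSTEM_RESET are not expected to return to the caller;
     * if execution somehow continues, report PSCI_INTERNAL_FAILURE.
     */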
    case PSCI_0_2_FN_SYSTEM_OFF:
        perfc_incr(vpsci_system_off);
        do_psci_0_2_system_off();
        PSCI_SET_RESULT(regs, PSCI_INTERNAL_FAILURE);
        return true;

    case PSCI_0_2_FN_SYSTEM_RESET:
        perfc_incr(vpsci_system_reset);
        do_psci_0_2_system_reset();
        PSCI_SET_RESULT(regs, PSCI_INTERNAL_FAILURE);
        return true;

    case PSCI_0_2_FN_CPU_ON:
    {
        register_t vcpuid = PSCI_ARG(regs, 1);
        register_t epoint = PSCI_ARG(regs, 2);
        register_t cid = PSCI_ARG(regs, 3);

        perfc_incr(vpsci_cpu_on);
        PSCI_SET_RESULT(regs, do_psci_0_2_cpu_on(vcpuid, epoint, cid));
        return true;
    }

    case PSCI_0_2_FN_CPU_SUSPEND:
    {
        uint32_t pstate = PSCI_ARG32(regs, 1);
        register_t epoint = PSCI_ARG(regs, 2);
        register_t cid = PSCI_ARG(regs, 3);

        perfc_incr(vpsci_cpu_suspend);
        PSCI_SET_RESULT(regs, do_psci_0_2_cpu_suspend(pstate, epoint, cid));
        return true;
    }

    case PSCI_0_2_FN_AFFINITY_INFO:
    {
        register_t taff = PSCI_ARG(regs, 1);
        uint32_t laff = PSCI_ARG32(regs, 2);

        perfc_incr(vpsci_cpu_affinity_info);
        PSCI_SET_RESULT(regs, do_psci_0_2_affinity_info(taff, laff));
        return true;
    }

    case PSCI_0_2_FN_MIGRATE:
    {
        uint32_t tcpu = PSCI_ARG32(regs, 1);

        perfc_incr(vpsci_cpu_migrate);
        PSCI_SET_RESULT(regs, do_psci_0_2_migrate(tcpu));
        return true;
    }

    case ARM_SMCCC_FUNC_CALL_COUNT:
        return fill_function_call_count(regs, SSSC_SMCCC_FUNCTION_COUNT);

    case ARM_SMCCC_FUNC_CALL_UID:
        return fill_uid(regs, SSSC_SMCCC_UID);

    case ARM_SMCCC_FUNC_CALL_REVISION:
        return fill_revision(regs, SSSC_SMCCC_MAJOR_REVISION,
                             SSSC_SMCCC_MINOR_REVISION);

    default:
        return false;
    }
}

/*
 * vsmccc_handle_call() - handle SMC/HVC call according to ARM SMCCC.
 * Returns true if it was a valid SMCCC call (even if the function number
 * was unknown).
 */
static bool vsmccc_handle_call(struct cpu_user_regs *regs)
{
    bool handled = false;
    const union hsr hsr = { .bits = regs->hsr };
    register_t funcid = get_user_reg(regs, 0);

    /*
     * Check the immediate value for HVC32, HVC64 and SMC64.
     * It is not so easy to check the immediate value for SMC32,
     * because it is not stored in the HSR.ISS field. To get the immediate
     * value we would need to disassemble the instruction at the current pc,
     * which is expensive. So we will assume that it is 0x0.
     */
    switch ( hsr.ec )
    {
    case HSR_EC_HVC32:
#ifdef CONFIG_ARM_64
    case HSR_EC_HVC64:
    case HSR_EC_SMC64:
#endif
        if ( (hsr.iss & HSR_XXC_IMM_MASK) != 0 )
            return false;
        break;
    case HSR_EC_SMC32:
        break;
    default:
        return false;
    }

    /* 64-bit calls are allowed only from 64-bit domains. */
    if ( smccc_is_conv_64(funcid) && is_32bit_domain(current->domain) )
    {
        set_user_reg(regs, 0, ARM_SMCCC_ERR_UNKNOWN_FUNCTION);
        return true;
    }

    /*
     * Special case: identifier range for existing APIs.
     * This range is described in SMCCC (ARM DEN 0028B, page 16),
     * but it does not conform to the standard function identifier
     * encoding.
     */
    if ( funcid >= ARM_SMCCC_RESERVED_RANGE_START &&
         funcid <= ARM_SMCCC_RESERVED_RANGE_END )
        handled = handle_existing_apis(regs);
    else
    {
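        /*
         * Dispatch on the service owner encoded in the function identifier
         * (the SMCCC owning entity number).
         */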
        switch ( smccc_get_owner(funcid) )
        {
        case ARM_SMCCC_OWNER_HYPERVISOR:
            handled = handle_hypervisor(regs);
            break;
        case ARM_SMCCC_OWNER_STANDARD:
            handled = handle_sssc(regs);
            break;
        }
    }

    if ( !handled )
    {
        gprintk(XENLOG_INFO, "Unhandled SMC/HVC: %08"PRIregister"\n", funcid);

        /* Inform caller that function is not supported. */
        set_user_reg(regs, 0, ARM_SMCCC_ERR_UNKNOWN_FUNCTION);
    }

    return true;
}

void do_trap_smc(struct cpu_user_regs *regs, const union hsr hsr)
{
    int rc = 0;

    if ( !check_conditional_instr(regs, hsr) )
    {
        advance_pc(regs, hsr);
        return;
    }

    /* If monitor is enabled, let it handle the call. */
    if ( current->domain->arch.monitor.privileged_call_enabled )
        rc = monitor_smc();

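    /*
     * A return value of 1 means the call has been forwarded to the monitor
     * subscriber, so it must not be handled here as well.
     */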
    if ( rc == 1 )
        return;

    /*
     * Use standard routines to handle the call.
     * vsmccc_handle_call() will return false if this call is not
     * SMCCC compatible (e.g. immediate value != 0). As it is not
     * compatible, we can't be sure that the guest will understand
     * ARM_SMCCC_ERR_UNKNOWN_FUNCTION.
     */
    if ( vsmccc_handle_call(regs) )
        advance_pc(regs, hsr);
    else
        inject_undef_exception(regs, hsr);
}

void do_trap_hvc_smccc(struct cpu_user_regs *regs)
{
    const union hsr hsr = { .bits = regs->hsr };

    /*
     * vsmccc_handle_call() will return false if this call is not
     * SMCCC compatible (e.g. immediate value != 0). As it is not
     * compatible, we can't be sure that the guest will understand
     * ARM_SMCCC_ERR_UNKNOWN_FUNCTION.
     */
    if ( !vsmccc_handle_call(regs) )
        inject_undef_exception(regs, hsr);
}


/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * indent-tabs-mode: nil
 * End:
 */