// © 2021 Qualcomm Innovation Center, Inc. All rights reserved.
//
// SPDX-License-Identifier: BSD-3-Clause

#include <hyptypes.h>

#include <thread.h>

#include <events/smccc.h>

#include "event_handlers.h"
#include "smccc_hypercall.h"

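// Decode and dispatch an SMCCC call.
//
// The SMCCC function ID is taken from the caller's x0, and the call is
// dispatched to the fast or yielding event handlers for the 32-bit or
// 64-bit calling convention as appropriate. Returns true if a handler
// accepted the call and the result registers have been updated.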
static bool
smccc_handle_call(bool is_hvc) EXCLUDE_PREEMPT_DISABLED
{
	bool handled;
	thread_t *current = thread_get_self();
	smccc_function_id_t function_id =
		smccc_function_id_cast((uint32_t)current->vcpu_regs_gpr.x[0]);

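	// Per the SMC Calling Convention, the reserved bits of the
	// function ID must be zero; if any are set, report the call as
	// unknown.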
	uint32_t res0 = smccc_function_id_get_res0(&function_id);
	if (res0 != 0U) {
		current->vcpu_regs_gpr.x[0] =
			(register_t)SMCCC_UNKNOWN_FUNCTION64;
		handled = true;
		goto out;
	}

	// TODO: The SMCCC handling below needs to be refactored to permit
	// registering ranges of service IDs, rather than registering
	// individual calls directly. The current approach allows unknown
	// call IDs to go unhandled and fall through to a later module,
	// which is undesirable.
	//
	// SMCCC-based hypercalls need function ID range-based handling,
	// so the wrapper is currently called directly here.
	if (smccc_handle_hypercall_wrapper(function_id, is_hvc)) {
		handled = true;
		goto out;
	}

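	// SMC64/HVC64 calls pass arguments in x1-x6 with a client ID in
	// x7, and return results in x0-x3.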
	if (smccc_function_id_get_is_smc64(&function_id)) {
		uint64_t ret0 = (uint64_t)current->vcpu_regs_gpr.x[0];
		uint64_t ret1 = (uint64_t)current->vcpu_regs_gpr.x[1];
		uint64_t ret2 = (uint64_t)current->vcpu_regs_gpr.x[2];
		uint64_t ret3 = (uint64_t)current->vcpu_regs_gpr.x[3];

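		// Fast calls run to completion atomically; yielding calls
		// may be interrupted and later resumed by the caller.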
		if (smccc_function_id_get_is_fast(&function_id)) {
			handled = trigger_smccc_dispatch_fast_64_event(
				smccc_function_id_get_owner_id(&function_id),
				smccc_function_id_get_function(&function_id),
				is_hvc, (uint64_t)current->vcpu_regs_gpr.x[1],
				(uint64_t)current->vcpu_regs_gpr.x[2],
				(uint64_t)current->vcpu_regs_gpr.x[3],
				(uint64_t)current->vcpu_regs_gpr.x[4],
				(uint64_t)current->vcpu_regs_gpr.x[5],
				(uint64_t)current->vcpu_regs_gpr.x[6],
				smccc_client_id_cast(
					(uint32_t)current->vcpu_regs_gpr.x[7]),
				&ret0, &ret1, &ret2, &ret3);
		} else {
			handled = trigger_smccc_dispatch_yielding_64_event(
				smccc_function_id_get_owner_id(&function_id),
				smccc_function_id_get_function(&function_id),
				is_hvc, (uint64_t)current->vcpu_regs_gpr.x[1],
				(uint64_t)current->vcpu_regs_gpr.x[2],
				(uint64_t)current->vcpu_regs_gpr.x[3],
				(uint64_t)current->vcpu_regs_gpr.x[4],
				(uint64_t)current->vcpu_regs_gpr.x[5],
				(uint64_t)current->vcpu_regs_gpr.x[6],
				smccc_client_id_cast(
					(uint32_t)current->vcpu_regs_gpr.x[7]),
				&ret0, &ret1, &ret2, &ret3);
		}

		if (handled) {
			current->vcpu_regs_gpr.x[0] = (register_t)ret0;
			current->vcpu_regs_gpr.x[1] = (register_t)ret1;
			current->vcpu_regs_gpr.x[2] = (register_t)ret2;
			current->vcpu_regs_gpr.x[3] = (register_t)ret3;
		}
	} else {
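		// SMC32/HVC32 calls: arguments and results are truncated
		// to 32 bits, as required by the 32-bit calling convention.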
		uint32_t ret0 = (uint32_t)current->vcpu_regs_gpr.x[0];
		uint32_t ret1 = (uint32_t)current->vcpu_regs_gpr.x[1];
		uint32_t ret2 = (uint32_t)current->vcpu_regs_gpr.x[2];
		uint32_t ret3 = (uint32_t)current->vcpu_regs_gpr.x[3];

		if (smccc_function_id_get_is_fast(&function_id)) {
			handled = trigger_smccc_dispatch_fast_32_event(
				smccc_function_id_get_owner_id(&function_id),
				smccc_function_id_get_function(&function_id),
				is_hvc, (uint32_t)current->vcpu_regs_gpr.x[1],
				(uint32_t)current->vcpu_regs_gpr.x[2],
				(uint32_t)current->vcpu_regs_gpr.x[3],
				(uint32_t)current->vcpu_regs_gpr.x[4],
				(uint32_t)current->vcpu_regs_gpr.x[5],
				(uint32_t)current->vcpu_regs_gpr.x[6],
				smccc_client_id_cast(
					(uint32_t)current->vcpu_regs_gpr.x[7]),
				&ret0, &ret1, &ret2, &ret3);
		} else {
			handled = trigger_smccc_dispatch_yielding_32_event(
				smccc_function_id_get_owner_id(&function_id),
				smccc_function_id_get_function(&function_id),
				is_hvc, (uint32_t)current->vcpu_regs_gpr.x[1],
				(uint32_t)current->vcpu_regs_gpr.x[2],
				(uint32_t)current->vcpu_regs_gpr.x[3],
				(uint32_t)current->vcpu_regs_gpr.x[4],
				(uint32_t)current->vcpu_regs_gpr.x[5],
				(uint32_t)current->vcpu_regs_gpr.x[6],
				smccc_client_id_cast(
					(uint32_t)current->vcpu_regs_gpr.x[7]),
				&ret0, &ret1, &ret2, &ret3);
		}

		if (handled) {
			current->vcpu_regs_gpr.x[0] = (register_t)ret0;
			current->vcpu_regs_gpr.x[1] = (register_t)ret1;
			current->vcpu_regs_gpr.x[2] = (register_t)ret2;
			current->vcpu_regs_gpr.x[3] = (register_t)ret3;
		}
	}

out:
	return handled;
}

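// SMCCC applies only to SMC and HVC instructions with an immediate of
// zero; traps with other immediate values are left for other modules.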
bool
smccc_handle_vcpu_trap_smc64(ESR_EL2_ISS_SMC64_t iss)
{
	bool handled = false;

	if (ESR_EL2_ISS_SMC64_get_imm16(&iss) == (uint16_t)0U) {
		handled = smccc_handle_call(false);
	}

	return handled;
}

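// As for SMC64 above: only HVC calls with a zero immediate are SMCCC.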
bool
smccc_handle_vcpu_trap_hvc64(ESR_EL2_ISS_HVC_t iss)
{
	bool handled = false;

	if (ESR_EL2_ISS_HVC_get_imm16(&iss) == (uint16_t)0U) {
		handled = smccc_handle_call(true);
	}

	return handled;
}

bool
smccc_handle_vcpu_trap_default(void)
{
	// Always fall back to returning SMCCC_UNKNOWN_FUNCTION64 (-1);
	// otherwise an exception would be delivered to the VCPU.
	thread_t *current = thread_get_self();
	current->vcpu_regs_gpr.x[0] = (register_t)SMCCC_UNKNOWN_FUNCTION64;

	return true;
}