/*
 * Copyright 2024 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/arch/other_world.h"

#include "hf/api.h"
#include "hf/ffa/indirect_messaging.h"
#include "hf/ffa_internal.h"
#include "hf/vcpu.h"

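/**
 * If the target vCPU belongs to the other world (i.e. it is an SP), forward
 * the FFA_RUN call to the SPMC and return its response through ret.
 *
 * Returns true if the call was forwarded, false if the target belongs to the
 * current world and must be handled locally.
 */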
bool ffa_cpu_cycles_run_forward(ffa_id_t vm_id, ffa_vcpu_index_t vcpu_idx,
				struct ffa_value *ret)
{
	/*
	 * A VM's FFA_RUN request should be forwarded to the SPMC if the
	 * target is an SP.
	 */
	if (!vm_id_is_current_world(vm_id)) {
		*ret = arch_other_world_call_ext((struct ffa_value){
			.func = FFA_RUN_32,
			.arg1 = ffa_vm_vcpu(vm_id, vcpu_idx)});
		return true;
	}

	return false;
}

/**
 * Check if current VM can resume target VM/SP using FFA_RUN ABI.
 */
bool ffa_cpu_cycles_run_checks(struct vcpu_locked current_locked,
			       ffa_id_t target_vm_id, ffa_vcpu_index_t vcpu_idx,
			       struct ffa_value *run_ret, struct vcpu **next)
{
	(void)next;
	(void)vcpu_idx;

	/* Only the primary VM can switch vCPUs. */
	if (!vm_is_primary(current_locked.vcpu->vm)) {
		run_ret->arg2 = FFA_DENIED;
		return false;
	}

	/* Only secondary VM vCPUs can be run. */
	if (target_vm_id == HF_PRIMARY_VM_ID) {
		return false;
	}

	return true;
}

/**
 * The invocation of FFA_MSG_WAIT at the non-secure virtual FF-A instance is
 * made compliant with v1.0 of the FF-A specification. It serves as a blocking
 * call.
 */
struct ffa_value ffa_cpu_cycles_msg_wait_prepare(
	struct vcpu_locked current_locked, struct vcpu **next)
{
	return ffa_indirect_msg_recv(true, current_locked, next);
}

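/**
 * Report, via next_state, the state the calling vCPU should transition to for
 * the given CPU cycle management ABI func.
 *
 * Returns false if func is not one of the supported ABIs.
 */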
bool ffa_cpu_cycles_check_runtime_state_transition(
	struct vcpu_locked current_locked, ffa_id_t vm_id,
	ffa_id_t receiver_vm_id, struct vcpu_locked receiver_locked,
	uint32_t func, enum vcpu_state *next_state)
{
	(void)current_locked;
	(void)vm_id;
	(void)receiver_vm_id;
	(void)receiver_locked;

	switch (func) {
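	/*
	 * For these ABIs the calling vCPU expects to be resumed later, so it
	 * transitions to the blocked state.
	 */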
	case FFA_YIELD_32:
		[[fallthrough]];
	case FFA_MSG_SEND_DIRECT_REQ_64:
	case FFA_MSG_SEND_DIRECT_REQ_32:
	case FFA_MSG_SEND_DIRECT_REQ2_64:
	case FFA_RUN_32:
		*next_state = VCPU_STATE_BLOCKED;
		return true;
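	/*
	 * For these ABIs the calling vCPU has finished its current work and
	 * waits for a new message or event, so it transitions to the waiting
	 * state.
	 */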
	case FFA_MSG_WAIT_32:
		[[fallthrough]];
	case FFA_MSG_SEND_DIRECT_RESP_64:
	case FFA_MSG_SEND_DIRECT_RESP_32:
	case FFA_MSG_SEND_DIRECT_RESP2_64:
		*next_state = VCPU_STATE_WAITING;
		return true;
	default:
		return false;
	}
}

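/**
 * Scheduling modes are not supported at the non-secure virtual FF-A instance,
 * so there is nothing to initialise when a vCPU is brought into execution via
 * FFA_RUN.
 */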
void ffa_cpu_cycles_init_schedule_mode_ffa_run(
	struct vcpu_locked current_locked, struct vcpu_locked target_locked)
{
	/* Scheduling mode not supported in the Hypervisor/VMs. */
	(void)current_locked;
	(void)target_locked;
}

/**
 * Prepare to yield execution back to the VM that allocated the CPU cycles and
 * move to the BLOCKED state.
 */
struct ffa_value ffa_cpu_cycles_yield_prepare(struct vcpu_locked current_locked,
					      struct vcpu **next,
					      uint32_t timeout_low,
					      uint32_t timeout_high)
{
	struct vcpu *current = current_locked.vcpu;
	struct ffa_value ret = {
		.func = FFA_YIELD_32,
		.arg1 = ffa_vm_vcpu(current->vm->id, vcpu_index(current)),
		.arg2 = timeout_low,
		.arg3 = timeout_high,
	};

	/*
	 * Return execution to primary VM.
	 */
	*next = api_switch_to_primary(current_locked, ret, VCPU_STATE_BLOCKED);

	return (struct ffa_value){.func = FFA_SUCCESS_32};
}

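/**
 * Handle FFA_ERROR_32 issued by the current vCPU. The interface is not
 * handled at the non-secure virtual FF-A instance, so FFA_NOT_SUPPORTED is
 * returned.
 */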
struct ffa_value ffa_cpu_cycles_error_32(struct vcpu *current,
					 struct vcpu **next,
					 enum ffa_error error_code)
{
	(void)current;
	(void)next;
	(void)error_code;
	/* TODO: Interface not handled in hypervisor. */
	return ffa_error(FFA_NOT_SUPPORTED);
}