/*
 * Copyright (c) 2020-2022, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <errno.h>
#include <inttypes.h>
#include <stdint.h>

#include <lib/el3_runtime/context_mgmt.h>
#include <lib/spinlock.h>
#include "spmd_private.h"

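/*
 * Bookkeeping for the entry point used to boot secondary cores into the SPMC.
 * The entry point is registered at most once (through FFA_SECONDARY_EP_REGISTER)
 * and is protected by a spinlock against concurrent updates.
 */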
static struct {
        bool secondary_ep_locked;
        uintptr_t secondary_ep;
        spinlock_t lock;
} g_spmd_pm;

/*******************************************************************************
 * spmd_pm_secondary_ep_register
 ******************************************************************************/
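/*
 * Record the secondary core entry point requested by the SPMC. The address
 * must be a PA within the SPMC binary image and may only be registered once;
 * otherwise FFA_ERROR_INVALID_PARAMETER is returned.
 */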
int spmd_pm_secondary_ep_register(uintptr_t entry_point)
{
        int ret = FFA_ERROR_INVALID_PARAMETER;

        spin_lock(&g_spmd_pm.lock);

        if (g_spmd_pm.secondary_ep_locked == true) {
                goto out;
        }

        /*
         * Check entry_point address is a PA within
         * load_address <= entry_point < load_address + binary_size
         */
        if (!spmd_check_address_in_binary_image(entry_point)) {
                ERROR("%s entry point is not within image boundaries\n",
                      __func__);
                goto out;
        }

        g_spmd_pm.secondary_ep = entry_point;
        g_spmd_pm.secondary_ep_locked = true;

        VERBOSE("%s %lx\n", __func__, entry_point);

        ret = 0;

out:
        spin_unlock(&g_spmd_pm.lock);

        return ret;
}

/*******************************************************************************
 * This CPU has been turned on. Enter SPMC to initialise S-EL1 or S-EL2. As part
 * of its initialization path, the SPMC will initialize any SPs that it manages.
 * Entry into SPMC is done after initialising minimal architectural state that
 * guarantees safe execution.
 ******************************************************************************/
static void spmd_cpu_on_finish_handler(u_register_t unused)
{
        spmd_spm_core_context_t *ctx = spmd_get_context();
        unsigned int linear_id = plat_my_core_pos();
        el3_state_t *el3_state;
        uintptr_t entry_point;
        uint64_t rc;

        assert(ctx != NULL);
        assert(ctx->state != SPMC_STATE_ON);

        spin_lock(&g_spmd_pm.lock);

        /*
         * Allow for the possibility that the SPMC did not call
         * FFA_SECONDARY_EP_REGISTER, in which case the primary core
         * address is re-used for booting secondary cores.
         */
        if (g_spmd_pm.secondary_ep_locked == true) {
                /*
                 * The CPU context has already been initialized at boot time
                 * (in spmd_spmc_init by a call to cm_setup_context). Only the
                 * target core entry point is adjusted below, based on the
                 * address passed via FFA_SECONDARY_EP_REGISTER.
                 */
                entry_point = g_spmd_pm.secondary_ep;
                el3_state = get_el3state_ctx(&ctx->cpu_ctx);
                write_ctx_reg(el3_state, CTX_ELR_EL3, entry_point);
        }

        spin_unlock(&g_spmd_pm.lock);

        /* Mark CPU as initiating ON operation. */
        ctx->state = SPMC_STATE_ON_PENDING;

        rc = spmd_spm_core_sync_entry(ctx);
        if (rc != 0ULL) {
                ERROR("%s failed (%" PRIu64 ") on CPU%u\n", __func__, rc,
                      linear_id);
                ctx->state = SPMC_STATE_OFF;
                return;
        }

        ctx->state = SPMC_STATE_ON;

        VERBOSE("CPU %u on!\n", linear_id);
}

/*******************************************************************************
 * spmd_cpu_off_handler
 ******************************************************************************/
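/*
 * This CPU is being turned off. Signal the event to the SPMC through an
 * SPMD-to-SPMC framework direct message request conveying PSCI_CPU_OFF, enter
 * the SPMC synchronously, and expect the matching direct message response
 * before marking the core as off.
 */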
static int32_t spmd_cpu_off_handler(u_register_t unused)
{
        spmd_spm_core_context_t *ctx = spmd_get_context();
        unsigned int linear_id = plat_my_core_pos();
        uint64_t rc;

        assert(ctx != NULL);
        assert(ctx->state != SPMC_STATE_OFF);

        /* Build an SPMD to SPMC direct message request. */
        spmd_build_spmc_message(get_gpregs_ctx(&ctx->cpu_ctx),
                                FFA_FWK_MSG_PSCI, PSCI_CPU_OFF);

        rc = spmd_spm_core_sync_entry(ctx);
        if (rc != 0ULL) {
                ERROR("%s failed (%" PRIu64 ") on CPU%u\n", __func__, rc, linear_id);
        }

        /* Expect a direct message response from the SPMC. */
        u_register_t ffa_resp_func = read_ctx_reg(get_gpregs_ctx(&ctx->cpu_ctx),
                                                  CTX_GPREG_X0);
        if (ffa_resp_func != FFA_MSG_SEND_DIRECT_RESP_SMC32) {
                ERROR("%s invalid SPMC response (%lx).\n",
                      __func__, ffa_resp_func);
                return -EINVAL;
        }

        ctx->state = SPMC_STATE_OFF;

        VERBOSE("CPU %u off!\n", linear_id);

        return 0;
}

/*******************************************************************************
 * Structure populated by the SPM Dispatcher to perform any bookkeeping before
 * PSCI executes a power mgmt. operation.
 ******************************************************************************/
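/*
 * Note: these hooks are expected to be registered with the generic PSCI layer
 * during SPMD setup (typically via psci_register_spd_pm_hook()), so that they
 * run when a core completes CPU_ON or requests CPU_OFF.
 */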
const spd_pm_ops_t spmd_pm = {
        .svc_on_finish = spmd_cpu_on_finish_handler,
        .svc_off = spmd_cpu_off_handler
};