/*
 * Copyright 2024 NXP
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/drivers/firmware/scmi/protocol.h>
#include <zephyr/drivers/firmware/scmi/transport.h>
#include <zephyr/logging/log.h>
#include <zephyr/device.h>
#include "mailbox.h"

LOG_MODULE_REGISTER(scmi_core);

#define SCMI_CHAN_LOCK_TIMEOUT_USEC 500
#define SCMI_CHAN_SEM_TIMEOUT_USEC 500

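/*
 * Convert an SCMI protocol status code to a negative errno value,
 * or 0 on SCMI_SUCCESS.
 */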
int scmi_status_to_errno(int scmi_status)
{
	switch (scmi_status) {
	case SCMI_SUCCESS:
		return 0;
	case SCMI_NOT_SUPPORTED:
		return -EOPNOTSUPP;
	case SCMI_INVALID_PARAMETERS:
		return -EINVAL;
	case SCMI_DENIED:
		return -EACCES;
	case SCMI_NOT_FOUND:
		return -ENOENT;
	case SCMI_OUT_OF_RANGE:
		return -ERANGE;
	case SCMI_IN_USE:
	case SCMI_BUSY:
		return -EBUSY;
	case SCMI_PROTOCOL_ERROR:
		return -EPROTO;
	case SCMI_COMMS_ERROR:
	case SCMI_GENERIC_ERROR:
	case SCMI_HARDWARE_ERROR:
	default:
		return -EIO;
	}
}

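/*
 * Channel callback invoked by the transport when a reply arrives. It wakes
 * up the thread blocked in scmi_send_message_interrupt(). Skipped pre-kernel,
 * where semaphores cannot be used.
 */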
static void scmi_core_reply_cb(struct scmi_channel *chan)
{
	if (!k_is_pre_kernel()) {
		k_sem_give(&chan->sem);
	}
}

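/*
 * Prepare a TX channel for use: initialize its lock and reply semaphore,
 * register the reply callback and perform the transport-specific setup.
 * A channel already marked ready is left untouched.
 */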
static int scmi_core_setup_chan(const struct device *transport,
				struct scmi_channel *chan, bool tx)
{
	int ret;

	if (!chan) {
		return -EINVAL;
	}

	if (chan->ready) {
		return 0;
	}

	/* no support for RX channels ATM */
	if (!tx) {
		return -ENOTSUP;
	}

	k_mutex_init(&chan->lock);
	k_sem_init(&chan->sem, 0, 1);

	chan->cb = scmi_core_reply_cb;

	/* setup transport-related channel data */
	ret = scmi_transport_setup_chan(transport, chan, tx);
	if (ret < 0) {
		LOG_ERR("failed to setup channel");
		return ret;
	}

	/* Protocols might share a channel. In such cases, this flag
	 * stops the shared channel from being set up again.
	 */
	chan->ready = true;

	return 0;
}

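/*
 * Enable or disable the completion interrupt for a channel: update the
 * completion-interrupt flag in the shared-memory area and enable/disable the
 * reply mailbox channel, falling back to the TX channel when no dedicated
 * reply channel is configured. Returns the result of mbox_set_enabled_dt().
 */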
static int scmi_interrupt_enable(struct scmi_channel *chan, bool enable)
{
	struct scmi_mbox_channel *mbox_chan;
	struct mbox_dt_spec *tx_reply;
	uint32_t comp_int;

	mbox_chan = chan->data;
	comp_int = enable ? SCMI_SHMEM_CHAN_FLAG_IRQ_BIT : 0;

	if (mbox_chan->tx_reply.dev) {
		tx_reply = &mbox_chan->tx_reply;
	} else {
		tx_reply = &mbox_chan->tx;
	}

	/* update the completion interrupt flag in shared memory */
	scmi_shmem_update_flags(mbox_chan->shmem, SCMI_SHMEM_CHAN_FLAG_IRQ_BIT, comp_int);

	return mbox_set_enabled_dt(tx_reply, enable);
}

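/*
 * Send a message and busy-wait for the reply. The completion interrupt is
 * disabled for the duration of the transfer and restored afterwards. Used
 * when kernel primitives (and possibly interrupts) are unavailable.
 */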
static int scmi_send_message_polling(struct scmi_protocol *proto,
				     struct scmi_message *msg,
				     struct scmi_message *reply)
{
	int ret;
	int status;

	/*
	 * The SCMI communication interrupt is enabled by default during
	 * setup_chan to support interrupt-driven communication. When using
	 * polling mode it must be disabled to avoid unnecessary interrupts
	 * and to ensure proper polling behavior.
	 */
	status = scmi_interrupt_enable(proto->tx, false);

	ret = scmi_transport_send_message(proto->transport, proto->tx, msg);
	if (ret < 0) {
		goto cleanup;
	}

	/* No kernel primitives, we're forced to poll here.
	 *
	 * Cortex-M quirk: no interrupts at this point => no timer =>
	 * no timeout mechanism => this can block the whole system.
	 *
	 * Polling mode repeatedly checks the chan_status field in shared
	 * memory to detect whether the remote side has completed message
	 * processing.
	 *
	 * TODO: is there a better way to handle this?
	 */
	while (!scmi_transport_channel_is_free(proto->transport, proto->tx)) {
	}

	ret = scmi_transport_read_message(proto->transport, proto->tx, reply);
	if (ret < 0) {
		/* still run cleanup so the completion interrupt is restored */
		goto cleanup;
	}

cleanup:
	/* restore the completion interrupt only if disabling it succeeded */
	if (status >= 0) {
		scmi_interrupt_enable(proto->tx, true);
	}

	return ret;
}

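/*
 * Send a message and sleep until the reply callback signals the channel
 * semaphore. The channel lock serializes senders; both the lock and the
 * semaphore waits are bounded by the timeouts defined above.
 */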
static int scmi_send_message_interrupt(struct scmi_protocol *proto,
				       struct scmi_message *msg,
				       struct scmi_message *reply)
{
	int ret = 0;

	if (!proto->tx) {
		return -ENODEV;
	}

	/* wait for channel to be free */
	ret = k_mutex_lock(&proto->tx->lock, K_USEC(SCMI_CHAN_LOCK_TIMEOUT_USEC));
	if (ret < 0) {
		LOG_ERR("failed to acquire chan lock");
		return ret;
	}

	ret = scmi_transport_send_message(proto->transport, proto->tx, msg);
	if (ret < 0) {
		LOG_ERR("failed to send message");
		goto out_release_mutex;
	}

	/* only one protocol instance can wait for a message reply at a time */
	ret = k_sem_take(&proto->tx->sem, K_USEC(SCMI_CHAN_SEM_TIMEOUT_USEC));
	if (ret < 0) {
		LOG_ERR("failed to wait for msg reply");
		goto out_release_mutex;
	}

	ret = scmi_transport_read_message(proto->transport, proto->tx, reply);
	if (ret < 0) {
		LOG_ERR("failed to read reply");
		goto out_release_mutex;
	}

out_release_mutex:
	k_mutex_unlock(&proto->tx->lock);

	return ret;
}

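/*
 * Send an SCMI message on a protocol's TX channel and wait for the reply,
 * either by polling the shared-memory channel status (use_polling == true)
 * or by blocking until the reply interrupt fires. The channel must have been
 * set up during transport initialization.
 */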
int scmi_send_message(struct scmi_protocol *proto, struct scmi_message *msg,
		      struct scmi_message *reply, bool use_polling)
{
	if (!proto->tx) {
		return -ENODEV;
	}

	if (!proto->tx->ready) {
		return -EINVAL;
	}

	if (use_polling) {
		return scmi_send_message_polling(proto, msg, reply);
	} else {
		return scmi_send_message_interrupt(proto, msg, reply);
	}
}

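/*
 * Bind every protocol registered in the scmi_protocol iterable section to the
 * transport and set up its TX channel. When the transport does not provide
 * static channels, a channel is requested dynamically for each protocol.
 */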
static int scmi_core_protocol_setup(const struct device *transport)
{
	int ret;

	STRUCT_SECTION_FOREACH(scmi_protocol, it) {
		it->transport = transport;

#ifndef CONFIG_ARM_SCMI_TRANSPORT_HAS_STATIC_CHANNELS
		/* no static channel allocation, attempt dynamic binding */
		it->tx = scmi_transport_request_channel(transport, it->id, true);
#endif /* CONFIG_ARM_SCMI_TRANSPORT_HAS_STATIC_CHANNELS */

		if (!it->tx) {
			return -ENODEV;
		}

		ret = scmi_core_setup_chan(transport, it->tx, true);
		if (ret < 0) {
			return ret;
		}
	}

	return 0;
}

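/*
 * Initialize the transport layer, then bind and set up all registered
 * protocols. Typically invoked from the transport driver's init routine.
 */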
int scmi_core_transport_init(const struct device *transport)
{
	int ret;

	ret = scmi_transport_init(transport);
	if (ret < 0) {
		return ret;
	}

	return scmi_core_protocol_setup(transport);
}