// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2018 NXP
 * Author: Dong Aisheng <aisheng.dong@nxp.com>
 *
 * Implementation of the SCU IPC functions using MUs (client side).
 */

#include <linux/err.h>
#include <linux/firmware/imx/ipc.h>
#include <linux/firmware/imx/sci.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>

#define SCU_MU_CHAN_NUM		8
#define MAX_RX_TIMEOUT		(msecs_to_jiffies(30))

struct imx_sc_chan {
	struct imx_sc_ipc *sc_ipc;

	struct mbox_client cl;
	struct mbox_chan *ch;
	int idx;
	struct completion tx_done;
};

struct imx_sc_ipc {
	/* SCU uses 4 Tx and 4 Rx channels */
	struct imx_sc_chan chans[SCU_MU_CHAN_NUM];
	struct device *dev;
	struct mutex lock;
	struct completion done;
	bool fast_ipc;

	/* temporarily store the SCU msg */
	u32 *msg;
	u8 rx_size;
	u8 count;
};

/*
 * This type is used to indicate the error response for most functions.
 */
enum imx_sc_error_codes {
	IMX_SC_ERR_NONE = 0,		/* Success */
	IMX_SC_ERR_VERSION = 1,		/* Incompatible API version */
	IMX_SC_ERR_CONFIG = 2,		/* Configuration error */
	IMX_SC_ERR_PARM = 3,		/* Bad parameter */
	IMX_SC_ERR_NOACCESS = 4,	/* Permission error (no access) */
	IMX_SC_ERR_LOCKED = 5,		/* Permission error (locked) */
	IMX_SC_ERR_UNAVAILABLE = 6,	/* Unavailable (out of resources) */
	IMX_SC_ERR_NOTFOUND = 7,	/* Not found */
	IMX_SC_ERR_NOPOWER = 8,		/* No power */
	IMX_SC_ERR_IPC = 9,		/* Generic IPC error */
	IMX_SC_ERR_BUSY = 10,		/* Resource is currently busy/active */
	IMX_SC_ERR_FAIL = 11,		/* General I/O failure */
	IMX_SC_ERR_LAST
};

static int imx_sc_linux_errmap[IMX_SC_ERR_LAST] = {
	0,	 /* IMX_SC_ERR_NONE */
	-EINVAL, /* IMX_SC_ERR_VERSION */
	-EINVAL, /* IMX_SC_ERR_CONFIG */
	-EINVAL, /* IMX_SC_ERR_PARM */
	-EACCES, /* IMX_SC_ERR_NOACCESS */
	-EACCES, /* IMX_SC_ERR_LOCKED */
	-ERANGE, /* IMX_SC_ERR_UNAVAILABLE */
	-EEXIST, /* IMX_SC_ERR_NOTFOUND */
	-EPERM,	 /* IMX_SC_ERR_NOPOWER */
	-EPIPE,	 /* IMX_SC_ERR_IPC */
	-EBUSY,	 /* IMX_SC_ERR_BUSY */
	-EIO,	 /* IMX_SC_ERR_FAIL */
};

static struct imx_sc_ipc *imx_sc_ipc_handle;

static inline int imx_sc_to_linux_errno(int errno)
{
	if (errno >= IMX_SC_ERR_NONE && errno < IMX_SC_ERR_LAST)
		return imx_sc_linux_errmap[errno];
	return -EIO;
}

/*
 * Get the default handle used by SCU
 */
int imx_scu_get_handle(struct imx_sc_ipc **ipc)
{
	if (!imx_sc_ipc_handle)
		return -EPROBE_DEFER;

	*ipc = imx_sc_ipc_handle;
	return 0;
}
EXPORT_SYMBOL(imx_scu_get_handle);

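/*
 * Illustrative client usage (a minimal sketch, not taken from this file):
 *
 *	struct imx_sc_ipc *ipc;
 *	int ret;
 *
 *	ret = imx_scu_get_handle(&ipc);
 *	if (ret)
 *		return ret;	// -EPROBE_DEFER until the SCU IPC is ready
 */
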
/* Callback called when a word of a message is acked, e.g. read by the SCU */
static void imx_scu_tx_done(struct mbox_client *cl, void *mssg, int r)
{
	struct imx_sc_chan *sc_chan = container_of(cl, struct imx_sc_chan, cl);

	complete(&sc_chan->tx_done);
}

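/*
 * RX path: with the legacy (non-fast) IPC, each RX channel delivers one
 * 32-bit word of the response and the words are reassembled into
 * sc_ipc->msg by channel index. With fast IPC the whole response arrives
 * through a single channel and is copied out in one pass.
 */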
static void imx_scu_rx_callback(struct mbox_client *c, void *msg)
{
	struct imx_sc_chan *sc_chan = container_of(c, struct imx_sc_chan, cl);
	struct imx_sc_ipc *sc_ipc = sc_chan->sc_ipc;
	struct imx_sc_rpc_msg *hdr;
	u32 *data = msg;
	int i;

	if (!sc_ipc->msg) {
		dev_warn(sc_ipc->dev, "unexpected rx idx %d 0x%08x, ignore!\n",
			 sc_chan->idx, *data);
		return;
	}

	if (sc_ipc->fast_ipc) {
		hdr = msg;
		sc_ipc->rx_size = hdr->size;
		sc_ipc->msg[0] = *data++;

		for (i = 1; i < sc_ipc->rx_size; i++)
			sc_ipc->msg[i] = *data++;

		complete(&sc_ipc->done);

		return;
	}

	if (sc_chan->idx == 0) {
		hdr = msg;
		sc_ipc->rx_size = hdr->size;
		dev_dbg(sc_ipc->dev, "msg rx size %u\n", sc_ipc->rx_size);
		if (sc_ipc->rx_size > 4)
			dev_warn(sc_ipc->dev, "RPC does not support receiving over 4 words: %u\n",
				 sc_ipc->rx_size);
	}

	sc_ipc->msg[sc_chan->idx] = *data;
	sc_ipc->count++;

	dev_dbg(sc_ipc->dev, "mu %u msg %u 0x%x\n", sc_chan->idx,
		sc_ipc->count, *data);

	if ((sc_ipc->rx_size != 0) && (sc_ipc->count == sc_ipc->rx_size))
		complete(&sc_ipc->done);
}

static int imx_scu_ipc_write(struct imx_sc_ipc *sc_ipc, void *msg)
{
	struct imx_sc_rpc_msg hdr = *(struct imx_sc_rpc_msg *)msg;
	struct imx_sc_chan *sc_chan;
	u32 *data = msg;
	int ret;
	int size;
	int i;

	/* Check size */
	if (hdr.size > IMX_SC_RPC_MAX_MSG)
		return -EINVAL;

	dev_dbg(sc_ipc->dev, "RPC SVC %u FUNC %u SIZE %u\n", hdr.svc,
		hdr.func, hdr.size);

	size = sc_ipc->fast_ipc ? 1 : hdr.size;
	for (i = 0; i < size; i++) {
		sc_chan = &sc_ipc->chans[i % 4];

		/*
		 * The SCU requires that all message words are written
		 * sequentially, but the Linux MU driver implements multiple
		 * independent channels for each register, so ordering between
		 * different channels must be ensured by the SCU API interface.
		 *
		 * Wait for tx_done before every send to ensure that no
		 * queueing happens at the mailbox channel level.
		 */
		if (!sc_ipc->fast_ipc) {
			wait_for_completion(&sc_chan->tx_done);
			reinit_completion(&sc_chan->tx_done);
		}

		ret = mbox_send_message(sc_chan->ch, &data[i]);
		if (ret < 0)
			return ret;
	}

	return 0;
}

/*
 * RPC command/response
 */
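/*
 * Illustrative calling sketch (the message layout and function ID below are
 * hypothetical and must match the SCU firmware API actually being invoked):
 *
 *	struct imx_sc_msg_example {
 *		struct imx_sc_rpc_msg hdr;
 *		u32 param;
 *	} __packed __aligned(4);
 *
 *	struct imx_sc_msg_example msg = { 0 };
 *
 *	msg.hdr.ver  = IMX_SC_RPC_VERSION;
 *	msg.hdr.svc  = IMX_SC_RPC_SVC_MISC;
 *	msg.hdr.func = EXAMPLE_FUNC_ID;		// hypothetical function ID
 *	msg.hdr.size = 2;			// header word + one payload word
 *	msg.param = some_value;
 *	ret = imx_scu_call_rpc(ipc, &msg, true);
 */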
int imx_scu_call_rpc(struct imx_sc_ipc *sc_ipc, void *msg, bool have_resp)
{
	uint8_t saved_svc, saved_func;
	struct imx_sc_rpc_msg *hdr;
	int ret;

	if (WARN_ON(!sc_ipc || !msg))
		return -EINVAL;

	mutex_lock(&sc_ipc->lock);
	reinit_completion(&sc_ipc->done);

	if (have_resp) {
		sc_ipc->msg = msg;
		saved_svc = ((struct imx_sc_rpc_msg *)msg)->svc;
		saved_func = ((struct imx_sc_rpc_msg *)msg)->func;
	}
	sc_ipc->count = 0;
	ret = imx_scu_ipc_write(sc_ipc, msg);
	if (ret < 0) {
		dev_err(sc_ipc->dev, "RPC send msg failed: %d\n", ret);
		goto out;
	}

	if (have_resp) {
		if (!wait_for_completion_timeout(&sc_ipc->done,
						 MAX_RX_TIMEOUT)) {
			dev_err(sc_ipc->dev, "RPC send msg timeout\n");
			mutex_unlock(&sc_ipc->lock);
			return -ETIMEDOUT;
		}

		/* the response status is stored in the hdr->func field */
		hdr = msg;
		ret = hdr->func;
		/*
		 * Some special SCU firmware APIs do NOT have a return value
		 * in hdr->func, but they do have response data. Those special
		 * APIs are defined as void functions in the SCU firmware, so
		 * they should always be treated as successful.
		 */
		if ((saved_svc == IMX_SC_RPC_SVC_MISC) &&
		    (saved_func == IMX_SC_MISC_FUNC_UNIQUE_ID ||
		     saved_func == IMX_SC_MISC_FUNC_GET_BUTTON_STATUS))
			ret = 0;
	}

out:
	sc_ipc->msg = NULL;
	mutex_unlock(&sc_ipc->lock);

	dev_dbg(sc_ipc->dev, "RPC SVC done\n");

	return imx_sc_to_linux_errno(ret);
}
EXPORT_SYMBOL(imx_scu_call_rpc);

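/*
 * Probe: request the MU mailbox channels described by the "mboxes" property.
 * The legacy binding uses four TX and four RX channels (tx0..tx3/rx0..rx3),
 * while the "fsl,imx8-mu-scu" fast IPC binding uses a single tx0/rx0 pair.
 */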
static int imx_scu_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct imx_sc_ipc *sc_ipc;
	struct imx_sc_chan *sc_chan;
	struct mbox_client *cl;
	char *chan_name;
	struct of_phandle_args args;
	int num_channel;
	int ret;
	int i;

	sc_ipc = devm_kzalloc(dev, sizeof(*sc_ipc), GFP_KERNEL);
	if (!sc_ipc)
		return -ENOMEM;

	ret = of_parse_phandle_with_args(pdev->dev.of_node, "mboxes",
					 "#mbox-cells", 0, &args);
	if (ret)
		return ret;

	sc_ipc->fast_ipc = of_device_is_compatible(args.np, "fsl,imx8-mu-scu");

	num_channel = sc_ipc->fast_ipc ? 2 : SCU_MU_CHAN_NUM;
	for (i = 0; i < num_channel; i++) {
		if (i < num_channel / 2)
			chan_name = kasprintf(GFP_KERNEL, "tx%d", i);
		else
			chan_name = kasprintf(GFP_KERNEL, "rx%d",
					      i - num_channel / 2);

		if (!chan_name)
			return -ENOMEM;

		sc_chan = &sc_ipc->chans[i];
		cl = &sc_chan->cl;
		cl->dev = dev;
		cl->tx_block = false;
		cl->knows_txdone = true;
		cl->rx_callback = imx_scu_rx_callback;

		if (!sc_ipc->fast_ipc) {
			/* Initialize the tx_done completion as "done" */
			cl->tx_done = imx_scu_tx_done;
			init_completion(&sc_chan->tx_done);
			complete(&sc_chan->tx_done);
		}

		sc_chan->sc_ipc = sc_ipc;
		sc_chan->idx = i % (num_channel / 2);
		sc_chan->ch = mbox_request_channel_byname(cl, chan_name);
		if (IS_ERR(sc_chan->ch)) {
			ret = PTR_ERR(sc_chan->ch);
			if (ret != -EPROBE_DEFER)
				dev_err(dev, "Failed to request mbox chan %s ret %d\n",
					chan_name, ret);
			kfree(chan_name);
			return ret;
		}

		dev_dbg(dev, "request mbox chan %s\n", chan_name);
		/* chan_name is no longer used by the mailbox framework */
		kfree(chan_name);
	}

	sc_ipc->dev = dev;
	mutex_init(&sc_ipc->lock);
	init_completion(&sc_ipc->done);

	imx_sc_ipc_handle = sc_ipc;

	ret = imx_scu_soc_init(dev);
	if (ret)
		dev_warn(dev, "failed to initialize SoC info: %d\n", ret);

	ret = imx_scu_enable_general_irq_channel(dev);
	if (ret)
		dev_warn(dev,
			 "failed to enable general irq channel: %d\n", ret);

	dev_info(dev, "NXP i.MX SCU Initialized\n");

	return devm_of_platform_populate(dev);
}

static const struct of_device_id imx_scu_match[] = {
	{ .compatible = "fsl,imx-scu", },
	{ /* Sentinel */ }
};

static struct platform_driver imx_scu_driver = {
	.driver = {
		.name = "imx-scu",
		.of_match_table = imx_scu_match,
	},
	.probe = imx_scu_probe,
};
builtin_platform_driver(imx_scu_driver);

MODULE_AUTHOR("Dong Aisheng <aisheng.dong@nxp.com>");
MODULE_DESCRIPTION("IMX SCU firmware protocol driver");
MODULE_LICENSE("GPL v2");