// SPDX-License-Identifier: GPL-2.0-only
/*
 * cnl-sst.c - DSP library functions for CNL platform
 *
 * Copyright (C) 2016-17, Intel Corporation.
 *
 * Author: Guneshwor Singh <guneshwor.o.singh@intel.com>
 *
 * Modified from:
 *	HDA DSP library functions for SKL platform
 *	Copyright (C) 2014-15, Intel Corporation.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */

#include <linux/module.h>
#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/device.h>

#include "../common/sst-dsp.h"
#include "../common/sst-dsp-priv.h"
#include "../common/sst-ipc.h"
#include "cnl-sst-dsp.h"
#include "skl.h"

#define CNL_FW_ROM_INIT		0x1
#define CNL_FW_INIT		0x5
#define CNL_IPC_PURGE		0x01004000
#define CNL_INIT_TIMEOUT	300
#define CNL_BASEFW_TIMEOUT	3000

#define CNL_ADSP_SRAM0_BASE	0x80000

/* Firmware status window */
#define CNL_ADSP_FW_STATUS	CNL_ADSP_SRAM0_BASE
#define CNL_ADSP_ERROR_CODE	(CNL_ADSP_FW_STATUS + 0x4)

#define CNL_INSTANCE_ID		0
#define CNL_BASE_FW_MODULE_ID	0
#define CNL_ADSP_FW_HDR_OFFSET	0x2000
#define CNL_ROM_CTRL_DMA_ID	0x9

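/*
 * Copy the stripped firmware image into the host DMA buffer, power up and
 * start DSP core 0, issue the IPC purge request and wait for the ROM to
 * report init completion.
 */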
static int cnl_prepare_fw(struct sst_dsp *ctx, const void *fwdata, u32 fwsize)
{
	int ret, stream_tag;

	stream_tag = ctx->dsp_ops.prepare(ctx->dev, 0x40, fwsize, &ctx->dmab);
	if (stream_tag <= 0) {
		dev_err(ctx->dev, "dma prepare failed: %#x\n", stream_tag);
		return stream_tag;
	}

	ctx->dsp_ops.stream_tag = stream_tag;
	memcpy(ctx->dmab.area, fwdata, fwsize);

	ret = skl_dsp_core_power_up(ctx, SKL_DSP_CORE0_MASK);
	if (ret < 0) {
		dev_err(ctx->dev, "dsp core0 power up failed\n");
		ret = -EIO;
		goto base_fw_load_failed;
	}

	/* purge FW request */
	sst_dsp_shim_write(ctx, CNL_ADSP_REG_HIPCIDR,
			   CNL_ADSP_REG_HIPCIDR_BUSY | (CNL_IPC_PURGE |
			   ((stream_tag - 1) << CNL_ROM_CTRL_DMA_ID)));

	ret = skl_dsp_start_core(ctx, SKL_DSP_CORE0_MASK);
	if (ret < 0) {
		dev_err(ctx->dev, "start dsp core failed: %d\n", ret);
		ret = -EIO;
		goto base_fw_load_failed;
	}

	ret = sst_dsp_register_poll(ctx, CNL_ADSP_REG_HIPCIDA,
				    CNL_ADSP_REG_HIPCIDA_DONE,
				    CNL_ADSP_REG_HIPCIDA_DONE,
				    BXT_INIT_TIMEOUT, "HIPCIDA Done");
	if (ret < 0) {
		dev_err(ctx->dev, "timeout for purge request: %d\n", ret);
		goto base_fw_load_failed;
	}

	/* enable interrupt */
	cnl_ipc_int_enable(ctx);
	cnl_ipc_op_int_enable(ctx);

	ret = sst_dsp_register_poll(ctx, CNL_ADSP_FW_STATUS, CNL_FW_STS_MASK,
				    CNL_FW_ROM_INIT, CNL_INIT_TIMEOUT,
				    "rom load");
	if (ret < 0) {
		dev_err(ctx->dev, "rom init timeout, ret: %d\n", ret);
		goto base_fw_load_failed;
	}

	return 0;

base_fw_load_failed:
	ctx->dsp_ops.cleanup(ctx->dev, &ctx->dmab, stream_tag);
	cnl_dsp_disable_core(ctx, SKL_DSP_CORE0_MASK);

	return ret;
}

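/*
 * Trigger the host DMA stream that carries the base firmware and poll the
 * firmware status register until the firmware reports it has booted, then
 * stop the stream and release the DMA buffer.
 */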
static int sst_transfer_fw_host_dma(struct sst_dsp *ctx)
{
	int ret;

	ctx->dsp_ops.trigger(ctx->dev, true, ctx->dsp_ops.stream_tag);
	ret = sst_dsp_register_poll(ctx, CNL_ADSP_FW_STATUS, CNL_FW_STS_MASK,
				    CNL_FW_INIT, CNL_BASEFW_TIMEOUT,
				    "firmware boot");

	ctx->dsp_ops.trigger(ctx->dev, false, ctx->dsp_ops.stream_tag);
	ctx->dsp_ops.cleanup(ctx->dev, &ctx->dmab, ctx->dsp_ops.stream_tag);

	return ret;
}

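/*
 * Request the base firmware image, parse its module UUIDs on first boot,
 * strip the extended manifest and download the image to the DSP, then wait
 * for the FW_READY notification from the firmware.
 */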
static int cnl_load_base_firmware(struct sst_dsp *ctx)
{
	struct firmware stripped_fw;
	struct skl_dev *cnl = ctx->thread_context;
	int ret, i;

	if (!ctx->fw) {
		ret = request_firmware(&ctx->fw, ctx->fw_name, ctx->dev);
		if (ret < 0) {
			dev_err(ctx->dev, "request firmware failed: %d\n", ret);
			goto cnl_load_base_firmware_failed;
		}
	}

	/* parse uuids if first boot */
	if (cnl->is_first_boot) {
		ret = snd_skl_parse_uuids(ctx, ctx->fw,
					  CNL_ADSP_FW_HDR_OFFSET, 0);
		if (ret < 0)
			goto cnl_load_base_firmware_failed;
	}

	stripped_fw.data = ctx->fw->data;
	stripped_fw.size = ctx->fw->size;
	skl_dsp_strip_extended_manifest(&stripped_fw);

	for (i = 0; i < BXT_FW_ROM_INIT_RETRY; i++) {
		ret = cnl_prepare_fw(ctx, stripped_fw.data, stripped_fw.size);
		if (!ret)
			break;
		dev_dbg(ctx->dev, "prepare firmware failed: %d\n", ret);
	}

	if (ret < 0)
		goto cnl_load_base_firmware_failed;

	ret = sst_transfer_fw_host_dma(ctx);
	if (ret < 0) {
		dev_err(ctx->dev, "transfer firmware failed: %d\n", ret);
		cnl_dsp_disable_core(ctx, SKL_DSP_CORE0_MASK);
		goto cnl_load_base_firmware_failed;
	}

	ret = wait_event_timeout(cnl->boot_wait, cnl->boot_complete,
				 msecs_to_jiffies(SKL_IPC_BOOT_MSECS));
	if (ret == 0) {
		dev_err(ctx->dev, "FW ready timed out\n");
		cnl_dsp_disable_core(ctx, SKL_DSP_CORE0_MASK);
		ret = -EIO;
		goto cnl_load_base_firmware_failed;
	}

	cnl->fw_loaded = true;

	return 0;

cnl_load_base_firmware_failed:
	dev_err(ctx->dev, "firmware load failed: %d\n", ret);
	release_firmware(ctx->fw);
	ctx->fw = NULL;

	return ret;
}

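/*
 * Bring a DSP core to the D0 (running) state. If the base firmware is not
 * loaded, reload it first. Core 0 waits for the firmware boot notification;
 * other cores are powered up through a set_dx IPC to the base firmware.
 */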
static int cnl_set_dsp_D0(struct sst_dsp *ctx, unsigned int core_id)
{
	struct skl_dev *cnl = ctx->thread_context;
	unsigned int core_mask = SKL_DSP_CORE_MASK(core_id);
	struct skl_ipc_dxstate_info dx;
	int ret;

	if (!cnl->fw_loaded) {
		cnl->boot_complete = false;
		ret = cnl_load_base_firmware(ctx);
		if (ret < 0) {
			dev_err(ctx->dev, "fw reload failed: %d\n", ret);
			return ret;
		}

		cnl->cores.state[core_id] = SKL_DSP_RUNNING;
		return ret;
	}

	ret = cnl_dsp_enable_core(ctx, core_mask);
	if (ret < 0) {
		dev_err(ctx->dev, "enable dsp core %d failed: %d\n",
			core_id, ret);
		goto err;
	}

	if (core_id == SKL_DSP_CORE0_ID) {
		/* enable interrupt */
		cnl_ipc_int_enable(ctx);
		cnl_ipc_op_int_enable(ctx);
		cnl->boot_complete = false;

		ret = wait_event_timeout(cnl->boot_wait, cnl->boot_complete,
					 msecs_to_jiffies(SKL_IPC_BOOT_MSECS));
		if (ret == 0) {
			dev_err(ctx->dev,
				"dsp boot timeout, status=%#x error=%#x\n",
				sst_dsp_shim_read(ctx, CNL_ADSP_FW_STATUS),
				sst_dsp_shim_read(ctx, CNL_ADSP_ERROR_CODE));
			ret = -ETIMEDOUT;
			goto err;
		}
	} else {
		dx.core_mask = core_mask;
		dx.dx_mask = core_mask;

		ret = skl_ipc_set_dx(&cnl->ipc, CNL_INSTANCE_ID,
				     CNL_BASE_FW_MODULE_ID, &dx);
		if (ret < 0) {
			dev_err(ctx->dev, "set_dx failed, core: %d ret: %d\n",
				core_id, ret);
			goto err;
		}
	}
	cnl->cores.state[core_id] = SKL_DSP_RUNNING;

	return 0;
err:
	cnl_dsp_disable_core(ctx, core_mask);

	return ret;
}

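/*
 * Put a DSP core into the D3 state: ask the base firmware to enter D3 via a
 * set_dx IPC, disable IPC interrupts for core 0, then power the core down.
 */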
static int cnl_set_dsp_D3(struct sst_dsp *ctx, unsigned int core_id)
{
	struct skl_dev *cnl = ctx->thread_context;
	unsigned int core_mask = SKL_DSP_CORE_MASK(core_id);
	struct skl_ipc_dxstate_info dx;
	int ret;

	dx.core_mask = core_mask;
	dx.dx_mask = SKL_IPC_D3_MASK;

	ret = skl_ipc_set_dx(&cnl->ipc, CNL_INSTANCE_ID,
			     CNL_BASE_FW_MODULE_ID, &dx);
	if (ret < 0) {
		dev_err(ctx->dev,
			"dsp core %d to d3 failed; continue reset\n",
			core_id);
		cnl->fw_loaded = false;
	}

	/* disable interrupts if core 0 */
	if (core_id == SKL_DSP_CORE0_ID) {
		skl_ipc_op_int_disable(ctx);
		skl_ipc_int_disable(ctx);
	}

	ret = cnl_dsp_disable_core(ctx, core_mask);
	if (ret < 0) {
		dev_err(ctx->dev, "disable dsp core %d failed: %d\n",
			core_id, ret);
		return ret;
	}

	cnl->cores.state[core_id] = SKL_DSP_RESET;

	return ret;
}

static unsigned int cnl_get_errno(struct sst_dsp *ctx)
{
	return sst_dsp_shim_read(ctx, CNL_ADSP_ERROR_CODE);
}

static const struct skl_dsp_fw_ops cnl_fw_ops = {
	.set_state_D0 = cnl_set_dsp_D0,
	.set_state_D3 = cnl_set_dsp_D3,
	.load_fw = cnl_load_base_firmware,
	.get_fw_errcode = cnl_get_errno,
};

static struct sst_ops cnl_ops = {
	.irq_handler = cnl_dsp_sst_interrupt,
	.write = sst_shim32_write,
	.read = sst_shim32_read,
	.free = cnl_dsp_free,
};

#define CNL_IPC_GLB_NOTIFY_RSP_SHIFT	29
#define CNL_IPC_GLB_NOTIFY_RSP_MASK	0x1
#define CNL_IPC_GLB_NOTIFY_RSP_TYPE(x)	(((x) >> CNL_IPC_GLB_NOTIFY_RSP_SHIFT) \
					 & CNL_IPC_GLB_NOTIFY_RSP_MASK)

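/*
 * Threaded IPC interrupt handler: acknowledge replies signalled through
 * HIPCIDA, dispatch incoming replies/notifications from HIPCTDR/HIPCTDD,
 * and kick the IPC work to send any pending messages.
 */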
static irqreturn_t cnl_dsp_irq_thread_handler(int irq, void *context)
{
	struct sst_dsp *dsp = context;
	struct skl_dev *cnl = dsp->thread_context;
	struct sst_generic_ipc *ipc = &cnl->ipc;
	struct skl_ipc_header header = {0};
	u32 hipcida, hipctdr, hipctdd;
	int ipc_irq = 0;

	/* here we handle ipc interrupts only */
	if (!(dsp->intr_status & CNL_ADSPIS_IPC))
		return IRQ_NONE;

	hipcida = sst_dsp_shim_read_unlocked(dsp, CNL_ADSP_REG_HIPCIDA);
	hipctdr = sst_dsp_shim_read_unlocked(dsp, CNL_ADSP_REG_HIPCTDR);
	hipctdd = sst_dsp_shim_read_unlocked(dsp, CNL_ADSP_REG_HIPCTDD);

	/* reply message from dsp */
	if (hipcida & CNL_ADSP_REG_HIPCIDA_DONE) {
		sst_dsp_shim_update_bits(dsp, CNL_ADSP_REG_HIPCCTL,
					 CNL_ADSP_REG_HIPCCTL_DONE, 0);

		/* clear done bit - tell dsp operation is complete */
		sst_dsp_shim_update_bits_forced(dsp, CNL_ADSP_REG_HIPCIDA,
			CNL_ADSP_REG_HIPCIDA_DONE, CNL_ADSP_REG_HIPCIDA_DONE);

		ipc_irq = 1;

		/* unmask done interrupt */
		sst_dsp_shim_update_bits(dsp, CNL_ADSP_REG_HIPCCTL,
			CNL_ADSP_REG_HIPCCTL_DONE, CNL_ADSP_REG_HIPCCTL_DONE);
	}

	/* new message from dsp */
	if (hipctdr & CNL_ADSP_REG_HIPCTDR_BUSY) {
		header.primary = hipctdr;
		header.extension = hipctdd;
		dev_dbg(dsp->dev, "IPC irq: firmware response primary: %x\n",
			header.primary);
		dev_dbg(dsp->dev, "IPC irq: firmware response extension: %x\n",
			header.extension);

		if (CNL_IPC_GLB_NOTIFY_RSP_TYPE(header.primary)) {
			/* Handle Immediate reply from DSP Core */
			skl_ipc_process_reply(ipc, header);
		} else {
			dev_dbg(dsp->dev, "IPC irq: notification from firmware\n");
			skl_ipc_process_notification(ipc, header);
		}
		/* clear busy interrupt */
		sst_dsp_shim_update_bits_forced(dsp, CNL_ADSP_REG_HIPCTDR,
			CNL_ADSP_REG_HIPCTDR_BUSY, CNL_ADSP_REG_HIPCTDR_BUSY);

		/* set done bit to ack dsp */
		sst_dsp_shim_update_bits_forced(dsp, CNL_ADSP_REG_HIPCTDA,
			CNL_ADSP_REG_HIPCTDA_DONE, CNL_ADSP_REG_HIPCTDA_DONE);
		ipc_irq = 1;
	}

	if (ipc_irq == 0)
		return IRQ_NONE;

	cnl_ipc_int_enable(dsp);

	/* continue to send any remaining messages */
	schedule_work(&ipc->kwork);

	return IRQ_HANDLED;
}

static struct sst_dsp_device cnl_dev = {
	.thread = cnl_dsp_irq_thread_handler,
	.ops = &cnl_ops,
};

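/*
 * Send an IPC message to the DSP: copy any payload to the outbox, write the
 * header extension to HIPCIDD and the primary word (with the BUSY bit set)
 * to HIPCIDR to raise the request.
 */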
static void cnl_ipc_tx_msg(struct sst_generic_ipc *ipc, struct ipc_message *msg)
{
	struct skl_ipc_header *header = (struct skl_ipc_header *)(&msg->tx.header);

	if (msg->tx.size)
		sst_dsp_outbox_write(ipc->dsp, msg->tx.data, msg->tx.size);
	sst_dsp_shim_write_unlocked(ipc->dsp, CNL_ADSP_REG_HIPCIDD,
				    header->extension);
	sst_dsp_shim_write_unlocked(ipc->dsp, CNL_ADSP_REG_HIPCIDR,
				    header->primary | CNL_ADSP_REG_HIPCIDR_BUSY);
}

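/*
 * A set BUSY bit in HIPCIDR means the previous host-to-DSP request has not
 * been accepted yet, so a new message must not be sent.
 */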
static bool cnl_ipc_is_dsp_busy(struct sst_dsp *dsp)
{
	u32 hipcidr;

	hipcidr = sst_dsp_shim_read_unlocked(dsp, CNL_ADSP_REG_HIPCIDR);

	return (hipcidr & CNL_ADSP_REG_HIPCIDR_BUSY);
}

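/*
 * Set up the generic IPC context (mailbox sizes, work queue) and install the
 * CNL-specific tx_msg and is_dsp_busy callbacks.
 */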
static int cnl_ipc_init(struct device *dev, struct skl_dev *cnl)
{
	struct sst_generic_ipc *ipc;
	int err;

	ipc = &cnl->ipc;
	ipc->dsp = cnl->dsp;
	ipc->dev = dev;

	ipc->tx_data_max_size = CNL_ADSP_W1_SZ;
	ipc->rx_data_max_size = CNL_ADSP_W0_UP_SZ;

	err = sst_ipc_init(ipc);
	if (err)
		return err;

	/*
	 * overriding tx_msg and is_dsp_busy since
	 * ipc registers are different for cnl
	 */
	ipc->ops.tx_msg = cnl_ipc_tx_msg;
	ipc->ops.tx_data_copy = skl_ipc_tx_data_copy;
	ipc->ops.is_dsp_busy = cnl_ipc_is_dsp_busy;

	return 0;
}

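/*
 * Allocate and initialize the CNL DSP context: map the shim and SRAM
 * windows, set up the mailbox and IPC, and acquire the DSP interrupt.
 */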
int cnl_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq,
		     const char *fw_name, struct skl_dsp_loader_ops dsp_ops,
		     struct skl_dev **dsp)
{
	struct skl_dev *cnl;
	struct sst_dsp *sst;
	int ret;

	ret = skl_sst_ctx_init(dev, irq, fw_name, dsp_ops, dsp, &cnl_dev);
	if (ret < 0) {
		dev_err(dev, "%s: no device\n", __func__);
		return ret;
	}

	cnl = *dsp;
	sst = cnl->dsp;
	sst->fw_ops = cnl_fw_ops;
	sst->addr.lpe = mmio_base;
	sst->addr.shim = mmio_base;
	sst->addr.sram0_base = CNL_ADSP_SRAM0_BASE;
	sst->addr.sram1_base = CNL_ADSP_SRAM1_BASE;
	sst->addr.w0_stat_sz = CNL_ADSP_W0_STAT_SZ;
	sst->addr.w0_up_sz = CNL_ADSP_W0_UP_SZ;

	sst_dsp_mailbox_init(sst, (CNL_ADSP_SRAM0_BASE + CNL_ADSP_W0_STAT_SZ),
			     CNL_ADSP_W0_UP_SZ, CNL_ADSP_SRAM1_BASE,
			     CNL_ADSP_W1_SZ);

	ret = cnl_ipc_init(dev, cnl);
	if (ret) {
		skl_dsp_free(sst);
		return ret;
	}

	cnl->boot_complete = false;
	init_waitqueue_head(&cnl->boot_wait);

	return skl_dsp_acquire_irq(sst);
}
EXPORT_SYMBOL_GPL(cnl_sst_dsp_init);

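/*
 * Load the base firmware, initialize the per-core power-state bookkeeping
 * and clear the first-boot flag.
 */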
int cnl_sst_init_fw(struct device *dev, struct skl_dev *skl)
{
	int ret;
	struct sst_dsp *sst = skl->dsp;

	ret = skl->dsp->fw_ops.load_fw(sst);
	if (ret < 0) {
		dev_err(dev, "load base fw failed: %d\n", ret);
		return ret;
	}

	skl_dsp_init_core_state(sst);

	skl->is_first_boot = false;

	return 0;
}
EXPORT_SYMBOL_GPL(cnl_sst_init_fw);

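/*
 * Release the firmware image, the UUID list and the IPC context, then free
 * the DSP context itself.
 */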
void cnl_sst_dsp_cleanup(struct device *dev, struct skl_dev *skl)
{
	if (skl->dsp->fw)
		release_firmware(skl->dsp->fw);

	skl_freeup_uuid_list(skl);
	cnl_ipc_free(&skl->ipc);

	skl->dsp->ops->free(skl->dsp);
}
EXPORT_SYMBOL_GPL(cnl_sst_dsp_cleanup);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Intel Cannonlake IPC driver");