// SPDX-License-Identifier: GPL-2.0-only
/*
 * LocalPlus Bus FIFO driver for the Freescale MPC52xx.
 *
 * Copyright (C) 2009 Secret Lab Technologies Ltd.
 *
 * Todo:
 * - Add support for multiple requests to be queued.
 */

#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <asm/io.h>
#include <asm/mpc52xx.h>
#include <asm/time.h>

#include <linux/fsl/bestcomm/bestcomm.h>
#include <linux/fsl/bestcomm/bestcomm_priv.h>
#include <linux/fsl/bestcomm/gen_bd.h>

MODULE_AUTHOR("Grant Likely <grant.likely@secretlab.ca>");
MODULE_DESCRIPTION("MPC5200 LocalPlus FIFO device driver");
MODULE_LICENSE("GPL");

#define LPBFIFO_REG_PACKET_SIZE		(0x00)
#define LPBFIFO_REG_START_ADDRESS	(0x04)
#define LPBFIFO_REG_CONTROL		(0x08)
#define LPBFIFO_REG_ENABLE		(0x0C)
#define LPBFIFO_REG_BYTES_DONE_STATUS	(0x14)
#define LPBFIFO_REG_FIFO_DATA		(0x40)
#define LPBFIFO_REG_FIFO_STATUS		(0x44)
#define LPBFIFO_REG_FIFO_CONTROL	(0x48)
#define LPBFIFO_REG_FIFO_ALARM		(0x4C)

struct mpc52xx_lpbfifo {
	struct device *dev;
	phys_addr_t regs_phys;
	void __iomem *regs;
	int irq;
	spinlock_t lock;

	struct bcom_task *bcom_tx_task;
	struct bcom_task *bcom_rx_task;
	struct bcom_task *bcom_cur_task;

	/* Current state data */
	struct mpc52xx_lpbfifo_request *req;
	int dma_irqs_enabled;
};

/* The MPC5200 has only one FIFO, so we only need one instance structure */
static struct mpc52xx_lpbfifo lpbfifo;

/**
 * mpc52xx_lpbfifo_kick - Trigger the next block of data to be transferred
 *
 * @req: Pointer to request structure
 */
static void mpc52xx_lpbfifo_kick(struct mpc52xx_lpbfifo_request *req)
{
	size_t transfer_size = req->size - req->pos;
	struct bcom_bd *bd;
	void __iomem *reg;
	u32 *data;
	int i;
	int bit_fields;
	int dma = !(req->flags & MPC52XX_LPBFIFO_FLAG_NO_DMA);
	int write = req->flags & MPC52XX_LPBFIFO_FLAG_WRITE;
	int poll_dma = req->flags & MPC52XX_LPBFIFO_FLAG_POLL_DMA;

	/* Set and clear the reset bits; the User Manual recommends this */
	out_be32(lpbfifo.regs + LPBFIFO_REG_ENABLE, 0x01010000);

	/* set master enable bit */
	out_be32(lpbfifo.regs + LPBFIFO_REG_ENABLE, 0x00000001);
	if (!dma) {
		/* While the FIFO can be set up for transfer sizes as large
		 * as 16M-1, the FIFO itself is only 512 bytes deep and it
		 * does not generate interrupts for FIFO full events (only
		 * transfer complete will raise an IRQ). Therefore, when not
		 * using Bestcomm to drive the FIFO it either needs to be
		 * polled, or transfers need to be constrained to the size
		 * of the FIFO.
		 *
		 * This driver restricts the size of the transfer.
		 */
		if (transfer_size > 512)
			transfer_size = 512;

		/* Load the FIFO with data */
		if (write) {
			reg = lpbfifo.regs + LPBFIFO_REG_FIFO_DATA;
			data = req->data + req->pos;
			for (i = 0; i < transfer_size; i += 4)
				out_be32(reg, *data++);
		}

		/* Unmask both error and completion irqs */
		out_be32(lpbfifo.regs + LPBFIFO_REG_ENABLE, 0x00000301);
	} else {
		/* Choose the correct direction
		 *
		 * Configure the watermarks so DMA will always complete correctly.
		 * It may be worth experimenting with the ALARM value to see if
		 * there is a performance impact. However, if it is wrong there
		 * is a risk of DMA not transferring the last chunk of data
		 */
		if (write) {
			out_be32(lpbfifo.regs + LPBFIFO_REG_FIFO_ALARM, 0x1e4);
			out_8(lpbfifo.regs + LPBFIFO_REG_FIFO_CONTROL, 7);
			lpbfifo.bcom_cur_task = lpbfifo.bcom_tx_task;
		} else {
			out_be32(lpbfifo.regs + LPBFIFO_REG_FIFO_ALARM, 0x1ff);
			out_8(lpbfifo.regs + LPBFIFO_REG_FIFO_CONTROL, 0);
			lpbfifo.bcom_cur_task = lpbfifo.bcom_rx_task;

			if (poll_dma) {
				if (lpbfifo.dma_irqs_enabled) {
					disable_irq(bcom_get_task_irq(lpbfifo.bcom_rx_task));
					lpbfifo.dma_irqs_enabled = 0;
				}
			} else {
				if (!lpbfifo.dma_irqs_enabled) {
					enable_irq(bcom_get_task_irq(lpbfifo.bcom_rx_task));
					lpbfifo.dma_irqs_enabled = 1;
				}
			}
		}

		bd = bcom_prepare_next_buffer(lpbfifo.bcom_cur_task);
		bd->status = transfer_size;
		if (!write) {
			/*
			 * In the DMA read case, the DMA doesn't complete,
			 * possibly due to incorrect watermarks in the ALARM
			 * and CONTROL regs. For now instead of trying to
			 * determine the right watermarks that will make this
			 * work, just increase the number of bytes the FIFO is
			 * expecting.
			 *
			 * When submitting another operation, the FIFO will get
			 * reset, so the condition of the FIFO waiting for a
			 * non-existent 4 bytes will get cleared.
			 */
			transfer_size += 4; /* BLECH! */
		}
		bd->data[0] = req->data_phys + req->pos;
		bcom_submit_next_buffer(lpbfifo.bcom_cur_task, NULL);

		/* error irq & master enabled bit */
		bit_fields = 0x00000201;

		/* Unmask irqs */
		if (write && (!poll_dma))
			bit_fields |= 0x00000100; /* completion irq too */
		out_be32(lpbfifo.regs + LPBFIFO_REG_ENABLE, bit_fields);
	}

	/* Set transfer size, width, chip select and READ mode */
	out_be32(lpbfifo.regs + LPBFIFO_REG_START_ADDRESS,
		 req->offset + req->pos);
	out_be32(lpbfifo.regs + LPBFIFO_REG_PACKET_SIZE, transfer_size);

	bit_fields = req->cs << 24 | 0x000008;
	if (!write)
		bit_fields |= 0x010000; /* read mode */
	out_be32(lpbfifo.regs + LPBFIFO_REG_CONTROL, bit_fields);

	/* Kick it off */
	if (!lpbfifo.req->defer_xfer_start)
		out_8(lpbfifo.regs + LPBFIFO_REG_PACKET_SIZE, 0x01);
	if (dma)
		bcom_enable(lpbfifo.bcom_cur_task);
}

/**
 * mpc52xx_lpbfifo_irq - IRQ handler for LPB FIFO
 * @irq: IRQ number to be handled
 * @dev_id: device ID cookie
 *
 * On transmit, the dma completion irq triggers before the fifo completion
 * triggers. Handle the dma completion here instead of the LPB FIFO Bestcomm
 * task completion irq because everything is not really done until the LPB FIFO
 * completion irq triggers.
 *
 * In other words:
 * For DMA, on receive, the "Fat Lady" is the Bestcomm completion irq. On
 * transmit, the fifo completion irq is the "Fat Lady". The opera (or in this
 * case the DMA/FIFO operation) is not finished until the "Fat Lady" sings.
 *
 * Reasons for entering this routine:
 * 1) PIO mode rx and tx completion irq
 * 2) DMA interrupt mode tx completion irq
 * 3) DMA polled mode tx
 *
 * Exit conditions:
 * 1) Transfer aborted
 * 2) FIFO complete without DMA; more data to do
 * 3) FIFO complete without DMA; all data transferred
 * 4) FIFO complete using DMA
 *
 * Condition 1 can occur regardless of whether or not DMA is used.
 * It requires executing the callback to report the error and exiting
 * immediately.
 *
 * Condition 2 requires programming the FIFO with the next block of data
 *
 * Condition 3 requires executing the callback to report completion
 *
 * Condition 4 means the same as 3, except that we also retrieve the bcom
 * buffer so DMA doesn't get clogged up.
 *
 * To make things trickier, the spinlock must be dropped before
 * executing the callback, otherwise we could end up with a deadlock
 * or nested spinlock condition. The out path is non-trivial, so
 * extra fiddling is done to make sure all paths lead to the same
 * outbound code.
 *
 * Return: irqreturn code (%IRQ_HANDLED)
 */
static irqreturn_t mpc52xx_lpbfifo_irq(int irq, void *dev_id)
{
	struct mpc52xx_lpbfifo_request *req;
	u32 status = in_8(lpbfifo.regs + LPBFIFO_REG_BYTES_DONE_STATUS);
	void __iomem *reg;
	u32 *data;
	int count, i;
	int do_callback = 0;
	u32 ts;
	unsigned long flags;
	int dma, write, poll_dma;

	spin_lock_irqsave(&lpbfifo.lock, flags);
	ts = mftb();

	req = lpbfifo.req;
	if (!req) {
		spin_unlock_irqrestore(&lpbfifo.lock, flags);
		pr_err("bogus LPBFIFO IRQ\n");
		return IRQ_HANDLED;
	}

	dma = !(req->flags & MPC52XX_LPBFIFO_FLAG_NO_DMA);
	write = req->flags & MPC52XX_LPBFIFO_FLAG_WRITE;
	poll_dma = req->flags & MPC52XX_LPBFIFO_FLAG_POLL_DMA;

	if (dma && !write) {
		spin_unlock_irqrestore(&lpbfifo.lock, flags);
		pr_err("bogus LPBFIFO IRQ (dma and not writing)\n");
		return IRQ_HANDLED;
	}

	if ((status & 0x01) == 0) {
		goto out;
	}

	/* check abort bit */
	if (status & 0x10) {
		out_be32(lpbfifo.regs + LPBFIFO_REG_ENABLE, 0x01010000);
		do_callback = 1;
		goto out;
	}

	/* Read result from hardware */
	count = in_be32(lpbfifo.regs + LPBFIFO_REG_BYTES_DONE_STATUS);
	count &= 0x00ffffff;

	if (!dma && !write) {
		/* copy the data out of the FIFO */
		reg = lpbfifo.regs + LPBFIFO_REG_FIFO_DATA;
		data = req->data + req->pos;
		for (i = 0; i < count; i += 4)
			*data++ = in_be32(reg);
	}

	/* Update transfer position and count */
	req->pos += count;

	/* Decide what to do next */
	if (req->size - req->pos)
		mpc52xx_lpbfifo_kick(req); /* more work to do */
	else
		do_callback = 1;

 out:
	/* Clear the IRQ */
	out_8(lpbfifo.regs + LPBFIFO_REG_BYTES_DONE_STATUS, 0x01);

	if (dma && (status & 0x11)) {
		/*
		 * Count the DMA as complete only when the FIFO completion
		 * status or abort bits are set.
		 *
		 * (status & 0x01) should always be the case except sometimes
		 * when using polled DMA.
		 *
		 * (status & 0x10) {transfer aborted}: This case needs more
		 * testing.
		 */
		bcom_retrieve_buffer(lpbfifo.bcom_cur_task, &status, NULL);
	}
	req->last_byte = ((u8 *)req->data)[req->size - 1];

	/* When the do_callback flag is set, the transfer is finished,
	 * so mark the FIFO as idle */
	if (do_callback)
		lpbfifo.req = NULL;

	if (irq != 0) /* don't increment on polled case */
		req->irq_count++;

	req->irq_ticks += mftb() - ts;
	spin_unlock_irqrestore(&lpbfifo.lock, flags);

	/* Spinlock is released; it is now safe to call the callback */
	if (do_callback && req->callback)
		req->callback(req);

	return IRQ_HANDLED;
}

/**
 * mpc52xx_lpbfifo_bcom_irq - IRQ handler for LPB FIFO Bestcomm task
 * @irq: IRQ number to be handled
 * @dev_id: device ID cookie
 *
 * Only used when receiving data.
 *
 * Return: irqreturn code (%IRQ_HANDLED)
 */
static irqreturn_t mpc52xx_lpbfifo_bcom_irq(int irq, void *dev_id)
{
	struct mpc52xx_lpbfifo_request *req;
	unsigned long flags;
	u32 status;
	u32 ts;

	spin_lock_irqsave(&lpbfifo.lock, flags);
	ts = mftb();

	req = lpbfifo.req;
	if (!req || (req->flags & MPC52XX_LPBFIFO_FLAG_NO_DMA)) {
		spin_unlock_irqrestore(&lpbfifo.lock, flags);
		return IRQ_HANDLED;
	}

	if (irq != 0) /* don't increment on polled case */
		req->irq_count++;

	if (!bcom_buffer_done(lpbfifo.bcom_cur_task)) {
		spin_unlock_irqrestore(&lpbfifo.lock, flags);

		req->buffer_not_done_cnt++;
		if ((req->buffer_not_done_cnt % 1000) == 0)
			pr_err("transfer stalled\n");

		return IRQ_HANDLED;
	}

	bcom_retrieve_buffer(lpbfifo.bcom_cur_task, &status, NULL);

	req->last_byte = ((u8 *)req->data)[req->size - 1];

	req->pos = status & 0x00ffffff;

	/* Mark the FIFO as idle */
	lpbfifo.req = NULL;

	/* Release the lock before calling out to the callback. */
	req->irq_ticks += mftb() - ts;
	spin_unlock_irqrestore(&lpbfifo.lock, flags);

	if (req->callback)
		req->callback(req);

	return IRQ_HANDLED;
}

/**
 * mpc52xx_lpbfifo_poll - Poll for DMA completion
 */
void mpc52xx_lpbfifo_poll(void)
{
	struct mpc52xx_lpbfifo_request *req = lpbfifo.req;
	int dma = !(req->flags & MPC52XX_LPBFIFO_FLAG_NO_DMA);
	int write = req->flags & MPC52XX_LPBFIFO_FLAG_WRITE;

	/*
	 * For more information, see comments on the "Fat Lady"
	 */
	if (dma && write)
		mpc52xx_lpbfifo_irq(0, NULL);
	else
		mpc52xx_lpbfifo_bcom_irq(0, NULL);
}
EXPORT_SYMBOL(mpc52xx_lpbfifo_poll);
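
/*
 * Usage sketch (assumptions, for illustration only): a caller that set
 * MPC52XX_LPBFIFO_FLAG_POLL_DMA on its request is not signalled by a
 * completion interrupt and is expected to keep calling
 * mpc52xx_lpbfifo_poll() until its completion callback runs, e.g.:
 *
 *	while (!my_done)		// hypothetical flag set by the callback
 *		mpc52xx_lpbfifo_poll();
 *
 * The flag name above is made up for the example.
 */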

/**
 * mpc52xx_lpbfifo_submit - Submit an LPB FIFO transfer request.
 * @req: Pointer to request structure
 *
 * Return: %0 on success, -errno code on error
 */
int mpc52xx_lpbfifo_submit(struct mpc52xx_lpbfifo_request *req)
{
	unsigned long flags;

	if (!lpbfifo.regs)
		return -ENODEV;

	spin_lock_irqsave(&lpbfifo.lock, flags);

	/* If the req pointer is already set, then a transfer is in progress */
	if (lpbfifo.req) {
		spin_unlock_irqrestore(&lpbfifo.lock, flags);
		return -EBUSY;
	}

	/* Setup the transfer */
	lpbfifo.req = req;
	req->irq_count = 0;
	req->irq_ticks = 0;
	req->buffer_not_done_cnt = 0;
	req->pos = 0;

	mpc52xx_lpbfifo_kick(req);
	spin_unlock_irqrestore(&lpbfifo.lock, flags);
	return 0;
}
EXPORT_SYMBOL(mpc52xx_lpbfifo_submit);
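
/*
 * Usage sketch (assumptions, for illustration only; the chip select, offset
 * and buffer names below are made up): a typical caller fills out a request
 * and submits it, then waits for its completion callback:
 *
 *	static void my_lpb_done(struct mpc52xx_lpbfifo_request *req)
 *	{
 *		// hypothetical completion handler
 *	}
 *
 *	struct mpc52xx_lpbfifo_request req = {
 *		.cs		= 2,		// LocalPlus chip select
 *		.offset		= 0x0,		// offset within that chip select
 *		.data		= buf,		// kernel virtual address
 *		.data_phys	= buf_phys,	// bus address for Bestcomm DMA
 *		.size		= len,
 *		.flags		= MPC52XX_LPBFIFO_FLAG_WRITE,
 *		.callback	= my_lpb_done,
 *	};
 *
 *	rc = mpc52xx_lpbfifo_submit(&req);
 */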
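
/**
 * mpc52xx_lpbfifo_start_xfer - Start a previously submitted transfer
 * @req: Pointer to the request passed to mpc52xx_lpbfifo_submit() with
 *       @req->defer_xfer_start set
 *
 * Return: %0 on success, -EBUSY if another (non-deferred) transfer is in
 * progress, or -ENODEV if the FIFO is not available.
 */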
int mpc52xx_lpbfifo_start_xfer(struct mpc52xx_lpbfifo_request *req)
{
	unsigned long flags;

	if (!lpbfifo.regs)
		return -ENODEV;

	spin_lock_irqsave(&lpbfifo.lock, flags);

	/*
	 * If the req pointer is already set and a transfer was
	 * started on submit, then this transfer is in progress
	 */
	if (lpbfifo.req && !lpbfifo.req->defer_xfer_start) {
		spin_unlock_irqrestore(&lpbfifo.lock, flags);
		return -EBUSY;
	}

	/*
	 * If the req was previously submitted but not
	 * started, start it now
	 */
	if (lpbfifo.req && lpbfifo.req == req &&
	    lpbfifo.req->defer_xfer_start) {
		out_8(lpbfifo.regs + LPBFIFO_REG_PACKET_SIZE, 0x01);
	}

	spin_unlock_irqrestore(&lpbfifo.lock, flags);
	return 0;
}
EXPORT_SYMBOL(mpc52xx_lpbfifo_start_xfer);
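
/**
 * mpc52xx_lpbfifo_abort - Abort a transfer request
 * @req: Pointer to the request to abort
 *
 * If @req is the transfer currently in progress, reset the Bestcomm tasks,
 * put the FIFO into reset and mark it idle. Requests that are not currently
 * active are ignored.
 */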
void mpc52xx_lpbfifo_abort(struct mpc52xx_lpbfifo_request *req)
{
	unsigned long flags;

	spin_lock_irqsave(&lpbfifo.lock, flags);
	if (lpbfifo.req == req) {
		/* Put it into reset and clear the state */
		bcom_gen_bd_rx_reset(lpbfifo.bcom_rx_task);
		bcom_gen_bd_tx_reset(lpbfifo.bcom_tx_task);
		out_be32(lpbfifo.regs + LPBFIFO_REG_ENABLE, 0x01010000);
		lpbfifo.req = NULL;
	}
	spin_unlock_irqrestore(&lpbfifo.lock, flags);
}
EXPORT_SYMBOL(mpc52xx_lpbfifo_abort);
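
/* Map the FIFO registers, hook up the FIFO and Bestcomm RX interrupts, and
 * allocate the Bestcomm RX/TX tasks used to drive the FIFO by DMA. */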
static int mpc52xx_lpbfifo_probe(struct platform_device *op)
{
	struct resource res;
	int rc = -ENOMEM;

	if (lpbfifo.dev != NULL)
		return -ENOSPC;

	lpbfifo.irq = irq_of_parse_and_map(op->dev.of_node, 0);
	if (!lpbfifo.irq)
		return -ENODEV;

	if (of_address_to_resource(op->dev.of_node, 0, &res))
		return -ENODEV;
	lpbfifo.regs_phys = res.start;
	lpbfifo.regs = of_iomap(op->dev.of_node, 0);
	if (!lpbfifo.regs)
		return -ENOMEM;

	spin_lock_init(&lpbfifo.lock);

	/* Put FIFO into reset */
	out_be32(lpbfifo.regs + LPBFIFO_REG_ENABLE, 0x01010000);

	/* Register the interrupt handler */
	rc = request_irq(lpbfifo.irq, mpc52xx_lpbfifo_irq, 0,
			 "mpc52xx-lpbfifo", &lpbfifo);
	if (rc)
		goto err_irq;

	/* Request the Bestcomm receive (fifo --> memory) task and IRQ */
	lpbfifo.bcom_rx_task =
		bcom_gen_bd_rx_init(2, res.start + LPBFIFO_REG_FIFO_DATA,
				    BCOM_INITIATOR_SCLPC, BCOM_IPR_SCLPC,
				    16*1024*1024);
	if (!lpbfifo.bcom_rx_task)
		goto err_bcom_rx;

	rc = request_irq(bcom_get_task_irq(lpbfifo.bcom_rx_task),
			 mpc52xx_lpbfifo_bcom_irq, 0,
			 "mpc52xx-lpbfifo-rx", &lpbfifo);
	if (rc)
		goto err_bcom_rx_irq;

	lpbfifo.dma_irqs_enabled = 1;

	/* Request the Bestcomm transmit (memory --> fifo) task and IRQ */
	lpbfifo.bcom_tx_task =
		bcom_gen_bd_tx_init(2, res.start + LPBFIFO_REG_FIFO_DATA,
				    BCOM_INITIATOR_SCLPC, BCOM_IPR_SCLPC);
	if (!lpbfifo.bcom_tx_task)
		goto err_bcom_tx;

	lpbfifo.dev = &op->dev;
	return 0;

 err_bcom_tx:
	free_irq(bcom_get_task_irq(lpbfifo.bcom_rx_task), &lpbfifo);
 err_bcom_rx_irq:
	bcom_gen_bd_rx_release(lpbfifo.bcom_rx_task);
 err_bcom_rx:
	free_irq(lpbfifo.irq, &lpbfifo);
 err_irq:
	iounmap(lpbfifo.regs);
	lpbfifo.regs = NULL;

	dev_err(&op->dev, "mpc52xx_lpbfifo_probe() failed\n");
	return -ENODEV;
}

static int mpc52xx_lpbfifo_remove(struct platform_device *op)
{
	if (lpbfifo.dev != &op->dev)
		return 0;

	/* Put FIFO in reset */
	out_be32(lpbfifo.regs + LPBFIFO_REG_ENABLE, 0x01010000);

	/* Release the bestcomm transmit task */
	free_irq(bcom_get_task_irq(lpbfifo.bcom_tx_task), &lpbfifo);
	bcom_gen_bd_tx_release(lpbfifo.bcom_tx_task);

	/* Release the bestcomm receive task */
	free_irq(bcom_get_task_irq(lpbfifo.bcom_rx_task), &lpbfifo);
	bcom_gen_bd_rx_release(lpbfifo.bcom_rx_task);

	free_irq(lpbfifo.irq, &lpbfifo);
	iounmap(lpbfifo.regs);
	lpbfifo.regs = NULL;
	lpbfifo.dev = NULL;

	return 0;
}

static const struct of_device_id mpc52xx_lpbfifo_match[] = {
	{ .compatible = "fsl,mpc5200-lpbfifo", },
	{},
};
MODULE_DEVICE_TABLE(of, mpc52xx_lpbfifo_match);

static struct platform_driver mpc52xx_lpbfifo_driver = {
	.driver = {
		.name = "mpc52xx-lpbfifo",
		.of_match_table = mpc52xx_lpbfifo_match,
	},
	.probe = mpc52xx_lpbfifo_probe,
	.remove = mpc52xx_lpbfifo_remove,
};
module_platform_driver(mpc52xx_lpbfifo_driver);