// SPDX-License-Identifier: GPL-2.0-only
/*
 * Core driver for the High Speed UART DMA
 *
 * Copyright (C) 2015 Intel Corporation
 * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
 *
 * Partially based on the bits found in drivers/tty/serial/mfd.c.
 */

/*
 * DMA channel allocation:
 * 1. Even-numbered channels are used for DMA Read (UART TX), odd-numbered
 *    channels for DMA Write (UART RX).
 * 2. Channels 0/1 are assigned to port 0, channels 2/3 to port 1,
 *    channels 4/5 to port 2, and so on.
 */
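
/*
 * Illustration only (not used by the driver): the scheme above maps a
 * UART port number to its channel pair as
 *
 *	tx_chan = port * 2;		even channel: DMA Read,  UART TX
 *	rx_chan = port * 2 + 1;		odd channel:  DMA Write, UART RX
 */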

#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>

#include "hsu.h"

#define HSU_DMA_BUSWIDTHS \
        BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \
        BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
        BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
        BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
        BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
        BIT(DMA_SLAVE_BUSWIDTH_8_BYTES) | \
        BIT(DMA_SLAVE_BUSWIDTH_16_BYTES)

static inline void hsu_chan_disable(struct hsu_dma_chan *hsuc)
{
        hsu_chan_writel(hsuc, HSU_CH_CR, 0);
}

static inline void hsu_chan_enable(struct hsu_dma_chan *hsuc)
{
        u32 cr = HSU_CH_CR_CHA;

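        /* CHD selects the direction: cleared for DMA Read (UART TX), set for DMA Write (UART RX) */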
        if (hsuc->direction == DMA_MEM_TO_DEV)
                cr &= ~HSU_CH_CR_CHD;
        else if (hsuc->direction == DMA_DEV_TO_MEM)
                cr |= HSU_CH_CR_CHD;

        hsu_chan_writel(hsuc, HSU_CH_CR, cr);
}

static void hsu_dma_chan_start(struct hsu_dma_chan *hsuc)
{
        struct dma_slave_config *config = &hsuc->config;
        struct hsu_dma_desc *desc = hsuc->desc;
        u32 bsr = 0, mtsr = 0;  /* to shut the compiler up */
        u32 dcr = HSU_CH_DCR_CHSOE | HSU_CH_DCR_CHEI;
        unsigned int i, count;

        if (hsuc->direction == DMA_MEM_TO_DEV) {
                bsr = config->dst_maxburst;
                mtsr = config->dst_addr_width;
        } else if (hsuc->direction == DMA_DEV_TO_MEM) {
                bsr = config->src_maxburst;
                mtsr = config->src_addr_width;
        }

        hsu_chan_disable(hsuc);

        hsu_chan_writel(hsuc, HSU_CH_DCR, 0);
        hsu_chan_writel(hsuc, HSU_CH_BSR, bsr);
        hsu_chan_writel(hsuc, HSU_CH_MTSR, mtsr);

        /* Set descriptors */
        count = desc->nents - desc->active;
        for (i = 0; i < count && i < HSU_DMA_CHAN_NR_DESC; i++) {
                hsu_chan_writel(hsuc, HSU_CH_DxSAR(i), desc->sg[i].addr);
                hsu_chan_writel(hsuc, HSU_CH_DxTSR(i), desc->sg[i].len);

                /* Prepare value for DCR */
                dcr |= HSU_CH_DCR_DESCA(i);
                dcr |= HSU_CH_DCR_CHTOI(i);     /* timeout bit, see HSU Errata 1 */

                desc->active++;
        }
        /* Only for the last descriptor in the chain */
        dcr |= HSU_CH_DCR_CHSOD(count - 1);
        dcr |= HSU_CH_DCR_CHDI(count - 1);

        hsu_chan_writel(hsuc, HSU_CH_DCR, dcr);

        hsu_chan_enable(hsuc);
}

static void hsu_dma_stop_channel(struct hsu_dma_chan *hsuc)
{
        hsu_chan_disable(hsuc);
        hsu_chan_writel(hsuc, HSU_CH_DCR, 0);
}

static void hsu_dma_start_channel(struct hsu_dma_chan *hsuc)
{
        hsu_dma_chan_start(hsuc);
}

static void hsu_dma_start_transfer(struct hsu_dma_chan *hsuc)
{
        struct virt_dma_desc *vdesc;

        /* Get the next descriptor */
        vdesc = vchan_next_desc(&hsuc->vchan);
        if (!vdesc) {
                hsuc->desc = NULL;
                return;
        }

        list_del(&vdesc->node);
        hsuc->desc = to_hsu_dma_desc(vdesc);

        /* Start the channel with a new descriptor */
        hsu_dma_start_channel(hsuc);
}

/*
 * hsu_dma_get_status() - get DMA channel status
 * @chip: HSUART DMA chip
 * @nr: DMA channel number
 * @status: pointer for DMA Channel Status Register value
 *
 * Description:
 * The function reads and clears the DMA Channel Status Register, checks
 * if it was a timeout interrupt and returns a corresponding value.
 *
 * Caller should provide a valid pointer for the DMA Channel Status
 * Register value that will be returned in @status.
 *
 * Return:
 * 1 for DMA timeout status, 0 for other DMA status, or a negative error
 * code for invalid parameters or no interrupt pending.
 */
int hsu_dma_get_status(struct hsu_dma_chip *chip, unsigned short nr,
                       u32 *status)
{
        struct hsu_dma_chan *hsuc;
        unsigned long flags;
        u32 sr;

        /* Sanity check */
        if (nr >= chip->hsu->nr_channels)
                return -EINVAL;

        hsuc = &chip->hsu->chan[nr];

        /*
         * Regardless of the situation, the IRQ status must be read and
         * cleared. There is a hardware bug, see Errata 5, HSD 2900918.
         */
        spin_lock_irqsave(&hsuc->vchan.lock, flags);
        sr = hsu_chan_readl(hsuc, HSU_CH_SR);
        spin_unlock_irqrestore(&hsuc->vchan.lock, flags);

        /* Check if any interrupt is pending */
        sr &= ~(HSU_CH_SR_DESCE_ANY | HSU_CH_SR_CDESC_ANY);
        if (!sr)
                return -EIO;

        /* On a timeout IRQ we need to wait a bit, see Errata 2 */
        if (sr & HSU_CH_SR_DESCTO_ANY)
                udelay(2);

        /*
         * At this point, at least one of Descriptor Time Out, Channel Error
         * or Descriptor Done bits must be set. Clear the Descriptor Time Out
         * bits and if sr is still non-zero, it must be channel error or
         * descriptor done which are higher priority than timeout and handled
         * in hsu_dma_do_irq(). Else, it must be a timeout.
         */
        sr &= ~HSU_CH_SR_DESCTO_ANY;

        *status = sr;

        return sr ? 0 : 1;
}
EXPORT_SYMBOL_GPL(hsu_dma_get_status);

/*
 * hsu_dma_do_irq() - DMA interrupt handler
 * @chip: HSUART DMA chip
 * @nr: DMA channel number
 * @status: Channel Status Register value
 *
 * Description:
 * This function handles Channel Error and Descriptor Done interrupts.
 * This function should be called after determining that the DMA interrupt
 * is not a normal timeout interrupt, i.e. hsu_dma_get_status() returned 0.
 *
 * Return:
 * 0 for an invalid channel number, 1 otherwise.
 */
int hsu_dma_do_irq(struct hsu_dma_chip *chip, unsigned short nr, u32 status)
{
        struct dma_chan_percpu *stat;
        struct hsu_dma_chan *hsuc;
        struct hsu_dma_desc *desc;
        unsigned long flags;

        /* Sanity check */
        if (nr >= chip->hsu->nr_channels)
                return 0;

        hsuc = &chip->hsu->chan[nr];
        stat = this_cpu_ptr(hsuc->vchan.chan.local);

        spin_lock_irqsave(&hsuc->vchan.lock, flags);
        desc = hsuc->desc;
        if (desc) {
                if (status & HSU_CH_SR_CHE) {
                        desc->status = DMA_ERROR;
                } else if (desc->active < desc->nents) {
                        hsu_dma_start_channel(hsuc);
                } else {
                        vchan_cookie_complete(&desc->vdesc);
                        desc->status = DMA_COMPLETE;
                        stat->bytes_transferred += desc->length;
                        hsu_dma_start_transfer(hsuc);
                }
        }
        spin_unlock_irqrestore(&hsuc->vchan.lock, flags);

        return 1;
}
EXPORT_SYMBOL_GPL(hsu_dma_do_irq);
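
/*
 * Usage sketch (illustrative only, not part of this driver): a chip-level
 * interrupt handler is expected to classify each channel's status with
 * hsu_dma_get_status() and hand everything that is not a plain timeout to
 * hsu_dma_do_irq(). The handler name is an assumption of the example.
 */
#if 0   /* example, not compiled */
static irqreturn_t example_hsu_irq(int irq, void *dev)
{
        struct hsu_dma_chip *chip = dev;
        irqreturn_t ret = IRQ_NONE;
        unsigned short i;
        u32 status;
        int err;

        for (i = 0; i < chip->hsu->nr_channels; i++) {
                err = hsu_dma_get_status(chip, i, &status);
                if (err > 0)            /* timeout, serviced by the UART driver */
                        ret |= IRQ_HANDLED;
                else if (err == 0)      /* channel error or descriptor done */
                        ret |= hsu_dma_do_irq(chip, i, status);
                /* err < 0: nothing pending on this channel */
        }

        return ret;
}
#endif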

static struct hsu_dma_desc *hsu_dma_alloc_desc(unsigned int nents)
{
        struct hsu_dma_desc *desc;

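        /* The prep path may run in atomic context, hence GFP_NOWAIT */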
        desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
        if (!desc)
                return NULL;

        desc->sg = kcalloc(nents, sizeof(*desc->sg), GFP_NOWAIT);
        if (!desc->sg) {
                kfree(desc);
                return NULL;
        }

        return desc;
}

static void hsu_dma_desc_free(struct virt_dma_desc *vdesc)
{
        struct hsu_dma_desc *desc = to_hsu_dma_desc(vdesc);

        kfree(desc->sg);
        kfree(desc);
}

static struct dma_async_tx_descriptor *hsu_dma_prep_slave_sg(
                struct dma_chan *chan, struct scatterlist *sgl,
                unsigned int sg_len, enum dma_transfer_direction direction,
                unsigned long flags, void *context)
{
        struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
        struct hsu_dma_desc *desc;
        struct scatterlist *sg;
        unsigned int i;

        desc = hsu_dma_alloc_desc(sg_len);
        if (!desc)
                return NULL;

        for_each_sg(sgl, sg, sg_len, i) {
                desc->sg[i].addr = sg_dma_address(sg);
                desc->sg[i].len = sg_dma_len(sg);

                desc->length += sg_dma_len(sg);
        }

        desc->nents = sg_len;
        desc->direction = direction;
        /* desc->active = 0 by kzalloc */
        desc->status = DMA_IN_PROGRESS;

        return vchan_tx_prep(&hsuc->vchan, &desc->vdesc, flags);
}
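
/*
 * Client-side sketch (illustrative only): how a UART driver might drive this
 * engine through the generic dmaengine API for an RX (DMA Write) transfer.
 * The function name, the "rx" channel name and the burst size are
 * assumptions of the example.
 */
#if 0   /* example, not compiled */
static int example_uart_rx_dma(struct device *dev, phys_addr_t fifo_addr,
                               struct scatterlist *sgl, unsigned int nents)
{
        struct dma_slave_config cfg = {
                .src_addr = fifo_addr,  /* hypothetical UART FIFO address */
                .src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
                .src_maxburst = 32,
        };
        struct dma_async_tx_descriptor *tx;
        struct dma_chan *chan;

        chan = dma_request_chan(dev, "rx");
        if (IS_ERR(chan))
                return PTR_ERR(chan);

        dmaengine_slave_config(chan, &cfg);

        tx = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_DEV_TO_MEM,
                                     DMA_PREP_INTERRUPT);
        if (!tx)
                return -ENOMEM;         /* hsu_dma_prep_slave_sg() returned NULL */

        dmaengine_submit(tx);
        dma_async_issue_pending(chan);  /* lands in hsu_dma_issue_pending() */

        return 0;
}
#endif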

static void hsu_dma_issue_pending(struct dma_chan *chan)
{
        struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
        unsigned long flags;

        spin_lock_irqsave(&hsuc->vchan.lock, flags);
        if (vchan_issue_pending(&hsuc->vchan) && !hsuc->desc)
                hsu_dma_start_transfer(hsuc);
        spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
}

static size_t hsu_dma_active_desc_size(struct hsu_dma_chan *hsuc)
{
        struct hsu_dma_desc *desc = hsuc->desc;
        size_t bytes = 0;
        int i;

        for (i = desc->active; i < desc->nents; i++)
                bytes += desc->sg[i].len;

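        /* Add what the hardware descriptor slots still have left to transfer */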
        i = HSU_DMA_CHAN_NR_DESC - 1;
        do {
                bytes += hsu_chan_readl(hsuc, HSU_CH_DxTSR(i));
        } while (--i >= 0);

        return bytes;
}

static enum dma_status hsu_dma_tx_status(struct dma_chan *chan,
        dma_cookie_t cookie, struct dma_tx_state *state)
{
        struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
        struct virt_dma_desc *vdesc;
        enum dma_status status;
        size_t bytes;
        unsigned long flags;

        status = dma_cookie_status(chan, cookie, state);
        if (status == DMA_COMPLETE)
                return status;

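        /*
         * Residue: the in-flight descriptor is asked of the hardware, while
         * a descriptor still sitting in the queue has its full length left.
         */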
        spin_lock_irqsave(&hsuc->vchan.lock, flags);
        vdesc = vchan_find_desc(&hsuc->vchan, cookie);
        if (hsuc->desc && cookie == hsuc->desc->vdesc.tx.cookie) {
                bytes = hsu_dma_active_desc_size(hsuc);
                dma_set_residue(state, bytes);
                status = hsuc->desc->status;
        } else if (vdesc) {
                bytes = to_hsu_dma_desc(vdesc)->length;
                dma_set_residue(state, bytes);
        }
        spin_unlock_irqrestore(&hsuc->vchan.lock, flags);

        return status;
}

static int hsu_dma_slave_config(struct dma_chan *chan,
                                struct dma_slave_config *config)
{
        struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);

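        /* Stored here, consumed later by hsu_dma_chan_start() */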
        memcpy(&hsuc->config, config, sizeof(hsuc->config));

        return 0;
}

static int hsu_dma_pause(struct dma_chan *chan)
{
        struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
        unsigned long flags;

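        /* Pause simply disables the channel; hsu_dma_resume() re-enables it */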
        spin_lock_irqsave(&hsuc->vchan.lock, flags);
        if (hsuc->desc && hsuc->desc->status == DMA_IN_PROGRESS) {
                hsu_chan_disable(hsuc);
                hsuc->desc->status = DMA_PAUSED;
        }
        spin_unlock_irqrestore(&hsuc->vchan.lock, flags);

        return 0;
}

static int hsu_dma_resume(struct dma_chan *chan)
{
        struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
        unsigned long flags;

        spin_lock_irqsave(&hsuc->vchan.lock, flags);
        if (hsuc->desc && hsuc->desc->status == DMA_PAUSED) {
                hsuc->desc->status = DMA_IN_PROGRESS;
                hsu_chan_enable(hsuc);
        }
        spin_unlock_irqrestore(&hsuc->vchan.lock, flags);

        return 0;
}

static int hsu_dma_terminate_all(struct dma_chan *chan)
{
        struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
        unsigned long flags;
        LIST_HEAD(head);

        spin_lock_irqsave(&hsuc->vchan.lock, flags);

        hsu_dma_stop_channel(hsuc);
        if (hsuc->desc) {
                hsu_dma_desc_free(&hsuc->desc->vdesc);
                hsuc->desc = NULL;
        }

        vchan_get_all_descriptors(&hsuc->vchan, &head);
        spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
        vchan_dma_desc_free_list(&hsuc->vchan, &head);

        return 0;
}

static void hsu_dma_free_chan_resources(struct dma_chan *chan)
{
        vchan_free_chan_resources(to_virt_chan(chan));
}

static void hsu_dma_synchronize(struct dma_chan *chan)
{
        struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);

        vchan_synchronize(&hsuc->vchan);
}

int hsu_dma_probe(struct hsu_dma_chip *chip)
{
        struct hsu_dma *hsu;
        void __iomem *addr = chip->regs + chip->offset;
        unsigned short i;
        int ret;

        hsu = devm_kzalloc(chip->dev, sizeof(*hsu), GFP_KERNEL);
        if (!hsu)
                return -ENOMEM;

        chip->hsu = hsu;

        /* Calculate nr_channels from the IO space length */
        hsu->nr_channels = (chip->length - chip->offset) / HSU_DMA_CHAN_LENGTH;

        hsu->chan = devm_kcalloc(chip->dev, hsu->nr_channels,
                                 sizeof(*hsu->chan), GFP_KERNEL);
        if (!hsu->chan)
                return -ENOMEM;

        INIT_LIST_HEAD(&hsu->dma.channels);
        for (i = 0; i < hsu->nr_channels; i++) {
                struct hsu_dma_chan *hsuc = &hsu->chan[i];

                hsuc->vchan.desc_free = hsu_dma_desc_free;
                vchan_init(&hsuc->vchan, &hsu->dma);

                hsuc->direction = (i & 0x1) ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;
                hsuc->reg = addr + i * HSU_DMA_CHAN_LENGTH;
        }

        dma_cap_set(DMA_SLAVE, hsu->dma.cap_mask);
        dma_cap_set(DMA_PRIVATE, hsu->dma.cap_mask);

        hsu->dma.device_free_chan_resources = hsu_dma_free_chan_resources;

        hsu->dma.device_prep_slave_sg = hsu_dma_prep_slave_sg;

        hsu->dma.device_issue_pending = hsu_dma_issue_pending;
        hsu->dma.device_tx_status = hsu_dma_tx_status;

        hsu->dma.device_config = hsu_dma_slave_config;
        hsu->dma.device_pause = hsu_dma_pause;
        hsu->dma.device_resume = hsu_dma_resume;
        hsu->dma.device_terminate_all = hsu_dma_terminate_all;
        hsu->dma.device_synchronize = hsu_dma_synchronize;

        hsu->dma.src_addr_widths = HSU_DMA_BUSWIDTHS;
        hsu->dma.dst_addr_widths = HSU_DMA_BUSWIDTHS;
        hsu->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
        hsu->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

        hsu->dma.dev = chip->dev;

        dma_set_max_seg_size(hsu->dma.dev, HSU_CH_DxTSR_MASK);

        ret = dma_async_device_register(&hsu->dma);
        if (ret)
                return ret;

        dev_info(chip->dev, "Found HSU DMA, %d channels\n", hsu->nr_channels);
        return 0;
}
EXPORT_SYMBOL_GPL(hsu_dma_probe);

int hsu_dma_remove(struct hsu_dma_chip *chip)
{
        struct hsu_dma *hsu = chip->hsu;
        unsigned short i;

        dma_async_device_unregister(&hsu->dma);

        for (i = 0; i < hsu->nr_channels; i++) {
                struct hsu_dma_chan *hsuc = &hsu->chan[i];

                tasklet_kill(&hsuc->vchan.task);
        }

        return 0;
}
EXPORT_SYMBOL_GPL(hsu_dma_remove);
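
/*
 * Glue-driver sketch (illustrative only): a bus driver is expected to fill
 * struct hsu_dma_chip and pair hsu_dma_probe() with hsu_dma_remove(). The
 * function name, the BAR index and the use of HSU_PCI_CHAN_OFFSET follow
 * the PCI glue and are assumptions of the example.
 */
#if 0   /* example, not compiled */
static int example_glue_probe(struct pci_dev *pdev)
{
        struct hsu_dma_chip *chip;

        chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
        if (!chip)
                return -ENOMEM;

        chip->dev = &pdev->dev;
        chip->regs = pcim_iomap_table(pdev)[0];         /* BAR 0 is an assumption */
        chip->length = pci_resource_len(pdev, 0);
        chip->offset = HSU_PCI_CHAN_OFFSET;

        return hsu_dma_probe(chip);     /* hsu_dma_remove(chip) on teardown */
}
#endif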

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("High Speed UART DMA core driver");
MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
