/*
 * Copyright (c) 2006-2021, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author        Notes
 * 2010-11-13     weety     first version
 */


#include <edma.h>

/* Offsets matching "struct edmacc_param" */
#define PARM_OPT        0x00
#define PARM_SRC        0x04
#define PARM_A_B_CNT        0x08
#define PARM_DST        0x0c
#define PARM_SRC_DST_BIDX   0x10
#define PARM_LINK_BCNTRLD   0x14
#define PARM_SRC_DST_CIDX   0x18
#define PARM_CCNT       0x1c

#define PARM_SIZE       0x20

/* Offsets for EDMA CC global channel registers and their shadows */
#define SH_ER       0x00    /* 64 bits */
#define SH_ECR      0x08    /* 64 bits */
#define SH_ESR      0x10    /* 64 bits */
#define SH_CER      0x18    /* 64 bits */
#define SH_EER      0x20    /* 64 bits */
#define SH_EECR     0x28    /* 64 bits */
#define SH_EESR     0x30    /* 64 bits */
#define SH_SER      0x38    /* 64 bits */
#define SH_SECR     0x40    /* 64 bits */
#define SH_IER      0x50    /* 64 bits */
#define SH_IECR     0x58    /* 64 bits */
#define SH_IESR     0x60    /* 64 bits */
#define SH_IPR      0x68    /* 64 bits */
#define SH_ICR      0x70    /* 64 bits */
#define SH_IEVAL    0x78
#define SH_QER      0x80
#define SH_QEER     0x84
#define SH_QEECR    0x88
#define SH_QEESR    0x8c
#define SH_QSER     0x90
#define SH_QSECR    0x94
#define SH_SIZE     0x200

/* Offsets for EDMA CC global registers */
#define EDMA_REV    0x0000
#define EDMA_CCCFG  0x0004
#define EDMA_QCHMAP 0x0200  /* 8 registers */
#define EDMA_DMAQNUM    0x0240  /* 8 registers (4 on OMAP-L1xx) */
#define EDMA_QDMAQNUM   0x0260
#define EDMA_QUETCMAP   0x0280
#define EDMA_QUEPRI 0x0284
#define EDMA_EMR    0x0300  /* 64 bits */
#define EDMA_EMCR   0x0308  /* 64 bits */
#define EDMA_QEMR   0x0310
#define EDMA_QEMCR  0x0314
#define EDMA_CCERR  0x0318
#define EDMA_CCERRCLR   0x031c
#define EDMA_EEVAL  0x0320
#define EDMA_DRAE   0x0340  /* 4 x 64 bits */
#define EDMA_QRAE   0x0380  /* 4 registers */
#define EDMA_QUEEVTENTRY    0x0400  /* 2 x 16 registers */
#define EDMA_QSTAT  0x0600  /* 2 registers */
#define EDMA_QWMTHRA    0x0620
#define EDMA_QWMTHRB    0x0624
#define EDMA_CCSTAT 0x0640

#define EDMA_M      0x1000  /* global channel registers */
#define EDMA_ECR    0x1008
#define EDMA_ECRH   0x100C
#define EDMA_SHADOW0    0x2000  /* 4 regions shadowing global channels */
#define EDMA_PARM   0x4000  /* 128 param entries */

#define PARM_OFFSET(param_no)   (EDMA_PARM + ((param_no) << 5))

#define EDMA_DCHMAP 0x0100  /* 64 registers */
#define CHMAP_EXIST BIT(24)

#define EDMA_MAX_DMACH           64
#define EDMA_MAX_PARAMENTRY     512

#define EDMA_CC0_BASE_REG   0x01c00000
#define EDMA_TC0_BASE_REG   0x01c10000
#define EDMA_TC1_BASE_REG   0x01c10400
#define EDMA_TC2_BASE_REG   0x01c10800
#define EDMA_TC3_BASE_REG   0x01c10c00

#define min_t(type, x, y) ({            \
    type __min1 = (x);          \
    type __min2 = (y);          \
    __min1 < __min2 ? __min1 : __min2; })



/*****************************************************************************/

static void volatile *edmacc_regs_base[EDMA_MAX_CC];

static inline unsigned int edma_read(unsigned ctlr, int offset)
{
    return (unsigned int)davinci_readl(edmacc_regs_base[ctlr] + offset);
}

static inline void edma_write(unsigned ctlr, int offset, int val)
{
    davinci_writel(val, edmacc_regs_base[ctlr] + offset);
}
static inline void edma_modify(unsigned ctlr, int offset, unsigned and,
        unsigned or)
{
    unsigned val = edma_read(ctlr, offset);
    val &= and;
    val |= or;
    edma_write(ctlr, offset, val);
}
static inline void edma_and(unsigned ctlr, int offset, unsigned and)
{
    unsigned val = edma_read(ctlr, offset);
    val &= and;
    edma_write(ctlr, offset, val);
}
static inline void edma_or(unsigned ctlr, int offset, unsigned or)
{
    unsigned val = edma_read(ctlr, offset);
    val |= or;
    edma_write(ctlr, offset, val);
}
static inline unsigned int edma_read_array(unsigned ctlr, int offset, int i)
{
    return edma_read(ctlr, offset + (i << 2));
}
static inline void edma_write_array(unsigned ctlr, int offset, int i,
        unsigned val)
{
    edma_write(ctlr, offset + (i << 2), val);
}
static inline void edma_modify_array(unsigned ctlr, int offset, int i,
        unsigned and, unsigned or)
{
    edma_modify(ctlr, offset + (i << 2), and, or);
}
static inline void edma_or_array(unsigned ctlr, int offset, int i, unsigned or)
{
    edma_or(ctlr, offset + (i << 2), or);
}
static inline void edma_or_array2(unsigned ctlr, int offset, int i, int j,
        unsigned or)
{
    edma_or(ctlr, offset + ((i*2 + j) << 2), or);
}
static inline void edma_write_array2(unsigned ctlr, int offset, int i, int j,
        unsigned val)
{
    edma_write(ctlr, offset + ((i*2 + j) << 2), val);
}
static inline unsigned int edma_shadow0_read(unsigned ctlr, int offset)
{
    return edma_read(ctlr, EDMA_SHADOW0 + offset);
}
static inline unsigned int edma_shadow0_read_array(unsigned ctlr, int offset,
        int i)
{
    return edma_read(ctlr, EDMA_SHADOW0 + offset + (i << 2));
}
static inline void edma_shadow0_write(unsigned ctlr, int offset, unsigned val)
{
    edma_write(ctlr, EDMA_SHADOW0 + offset, val);
}
static inline void edma_shadow0_write_array(unsigned ctlr, int offset, int i,
        unsigned val)
{
    edma_write(ctlr, EDMA_SHADOW0 + offset + (i << 2), val);
}
static inline unsigned int edma_parm_read(unsigned ctlr, int offset,
        int param_no)
{
    return edma_read(ctlr, EDMA_PARM + offset + (param_no << 5));
}
static inline void edma_parm_write(unsigned ctlr, int offset, int param_no,
        unsigned val)
{
    edma_write(ctlr, EDMA_PARM + offset + (param_no << 5), val);
}
static inline void edma_parm_modify(unsigned ctlr, int offset, int param_no,
        unsigned and, unsigned or)
{
    edma_modify(ctlr, EDMA_PARM + offset + (param_no << 5), and, or);
}
static inline void edma_parm_and(unsigned ctlr, int offset, int param_no,
        unsigned and)
{
    edma_and(ctlr, EDMA_PARM + offset + (param_no << 5), and);
}
static inline void edma_parm_or(unsigned ctlr, int offset, int param_no,
        unsigned or)
{
    edma_or(ctlr, EDMA_PARM + offset + (param_no << 5), or);
}
#if 0
static inline void set_bits(int offset, int len, unsigned long *p)
{
    for (; len > 0; len--)
        set_bit(offset + (len - 1), p);
}

static inline void clear_bits(int offset, int len, unsigned long *p)
{
    for (; len > 0; len--)
        clear_bit(offset + (len - 1), p);
}
#endif
/*****************************************************************************/

#define BIT(nr)         (1UL << (nr))
#define BITS_PER_LONG 32
#define BIT_MASK(nr)        (1UL << ((nr) % BITS_PER_LONG))
#define BIT_WORD(nr)        ((nr) / BITS_PER_LONG)
#define BITS_PER_BYTE       8
#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))

#define BITS_TO_LONGS(nr)   DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))

#define DECLARE_BITMAP(name,bits) \
    unsigned long name[BITS_TO_LONGS(bits)]

/**
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static inline int test_bit(int nr, const volatile unsigned long *addr)
{
    return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
}

static inline void clear_bit(int nr, volatile unsigned long *addr)
{
    unsigned long mask = BIT_MASK(nr);
    unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
    rt_base_t level;

    level = rt_hw_interrupt_disable();
    *p &= ~mask;
    rt_hw_interrupt_enable(level);
}

static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
{
    unsigned long mask = BIT_MASK(nr);
    unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
    unsigned long old;
    rt_base_t level;

    level = rt_hw_interrupt_disable();
    old = *p;
    *p = old | mask;
    rt_hw_interrupt_enable(level);

    return (old & mask) != 0;
}

static inline void set_bit(int nr, volatile unsigned long *addr)
{
    unsigned long mask = BIT_MASK(nr);
    unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
    rt_base_t level;

    level = rt_hw_interrupt_disable();
    *p |= mask;
    rt_hw_interrupt_enable(level);
}
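
/* Illustrative sketch (not part of the driver): these interrupt-protected
 * bit operations compose into the resource-allocator pattern used below for
 * the channel and PaRAM bitmaps.  The map name and size are made up for the
 * example.
 */
#if 0
static DECLARE_BITMAP(example_map, 64);

static int example_alloc(void)
{
    int i;

    for (i = 0; i < 64; i++)
        if (!test_and_set_bit(i, example_map))  /* claim atomically */
            return i;
    return -RT_EBUSY;                           /* everything in use */
}

static void example_free(int i)
{
    clear_bit(i, example_map);                  /* release for reuse */
}
#endif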

/*
 * Little endian assembly bitops.  nr = 0 -> byte 0 bit 0.
 */
extern int _find_first_zero_bit_le(const void * p, unsigned size);
extern int _find_next_zero_bit_le(const void * p, int size, int offset);
extern int _find_first_bit_le(const unsigned long *p, unsigned size);
extern int _find_next_bit_le(const unsigned long *p, int size, int offset);

/*
 * These are the little endian, atomic definitions.
 */
#define find_first_zero_bit(p,sz)   _find_first_zero_bit_le(p,sz)
#define find_next_zero_bit(p,sz,off)    _find_next_zero_bit_le(p,sz,off)
#define find_first_bit(p,sz)        _find_first_bit_le(p,sz)
#define find_next_bit(p,sz,off)     _find_next_bit_le(p,sz,off)


/* actual number of DMA channels and slots on this silicon */
struct edma {
    /* how many dma resources of each type */
    unsigned    num_channels;
    unsigned    num_region;
    unsigned    num_slots;
    unsigned    num_tc;
    unsigned    num_cc;
    enum dma_event_q    default_queue;

    /* list of channels with no event trigger; terminated by "-1" */
    const rt_int8_t *noevent;

    /* The edma_inuse bit for each PaRAM slot is clear unless the
     * channel is in use ... by ARM or DSP, for QDMA, or whatever.
     */
    DECLARE_BITMAP(edma_inuse, EDMA_MAX_PARAMENTRY);

    /* The edma_unused bit for each channel is set unless the
     * channel is in use on this platform.  It is cleared by
     * SoC-specific initialization code.
     */
    DECLARE_BITMAP(edma_unused, EDMA_MAX_DMACH);

    unsigned    irq_res_start;
    unsigned    irq_res_end;

    struct dma_interrupt_data {
        void (*callback)(unsigned channel, unsigned short ch_status,
                void *data);
        void *data;
    } intr_data[EDMA_MAX_DMACH];
};

static struct edma *edma_cc[EDMA_MAX_CC];
static int arch_num_cc;

/* dummy param set used to (re)initialize parameter RAM slots */
static const struct edmacc_param dummy_paramset = {
    .link_bcntrld = 0xffff,
    .ccnt = 1,
};

/*****************************************************************************/

static void map_dmach_queue(unsigned ctlr, unsigned ch_no,
        enum dma_event_q queue_no)
{
    int bit = (ch_no & 0x7) * 4;

    /* default to low priority queue */
    if (queue_no == EVENTQ_DEFAULT)
        queue_no = edma_cc[ctlr]->default_queue;

    queue_no &= 7;
    edma_modify_array(ctlr, EDMA_DMAQNUM, (ch_no >> 3),
            ~(0x7 << bit), queue_no << bit);
}

static void map_queue_tc(unsigned ctlr, int queue_no, int tc_no)
{
    int bit = queue_no * 4;
    edma_modify(ctlr, EDMA_QUETCMAP, ~(0x7 << bit), ((tc_no & 0x7) << bit));
}

static void assign_priority_to_queue(unsigned ctlr, int queue_no,
        int priority)
{
    int bit = queue_no * 4;
    edma_modify(ctlr, EDMA_QUEPRI, ~(0x7 << bit),
            ((priority & 0x7) << bit));
}

/**
 * map_dmach_param - Maps channel number to param entry number
 *
 * This maps the DMA channel number to a param entry number.  In
 * other words, using the DMA channel mapping registers a param entry
 * can be mapped to any channel.
 *
 * Callers are responsible for ensuring the channel mapping logic is
 * included in that particular EDMA variant (e.g. dm646x).
 *
 */
static void map_dmach_param(unsigned ctlr)
{
    int i;
    for (i = 0; i < EDMA_MAX_DMACH; i++)
        edma_write_array(ctlr, EDMA_DCHMAP, i, (i << 5));
}

static inline void
setup_dma_interrupt(unsigned lch,
    void (*callback)(unsigned channel, rt_uint16_t ch_status, void *data),
    void *data)
{
    unsigned ctlr;

    ctlr = EDMA_CTLR(lch);
    lch = EDMA_CHAN_SLOT(lch);

    if (!callback)
        edma_shadow0_write_array(ctlr, SH_IECR, lch >> 5,
                BIT(lch & 0x1f));

    edma_cc[ctlr]->intr_data[lch].callback = callback;
    edma_cc[ctlr]->intr_data[lch].data = data;

    if (callback) {
        edma_shadow0_write_array(ctlr, SH_ICR, lch >> 5,
                BIT(lch & 0x1f));
        edma_shadow0_write_array(ctlr, SH_IESR, lch >> 5,
                BIT(lch & 0x1f));
    }
}

static int irq2ctlr(int irq)
{
    if (irq >= edma_cc[0]->irq_res_start && irq <= edma_cc[0]->irq_res_end)
        return 0;
    else if (irq >= edma_cc[1]->irq_res_start &&
        irq <= edma_cc[1]->irq_res_end)
        return 1;

    return -1;
}

/******************************************************************************
 *
 * DMA interrupt handler
 *
 *****************************************************************************/
static void dma_irq_handler(int irq, void *data)
{
    int i;
    int ctlr;
    unsigned int cnt = 0;

    ctlr = irq2ctlr(irq);
    if (ctlr < 0)
        return;

    edma_dbg("dma_irq_handler\n");

    if ((edma_shadow0_read_array(ctlr, SH_IPR, 0) == 0) &&
        (edma_shadow0_read_array(ctlr, SH_IPR, 1) == 0))
        return;

    while (1) {
        int j;
        if (edma_shadow0_read_array(ctlr, SH_IPR, 0) &
                edma_shadow0_read_array(ctlr, SH_IER, 0))
            j = 0;
        else if (edma_shadow0_read_array(ctlr, SH_IPR, 1) &
                edma_shadow0_read_array(ctlr, SH_IER, 1))
            j = 1;
        else
            break;
        edma_dbg("IPR%d %08x\n", j,
                edma_shadow0_read_array(ctlr, SH_IPR, j));
        for (i = 0; i < 32; i++) {
            int k = (j << 5) + i;
            if ((edma_shadow0_read_array(ctlr, SH_IPR, j) & BIT(i))
                    && (edma_shadow0_read_array(ctlr,
                            SH_IER, j) & BIT(i))) {
                /* Clear the corresponding IPR bits */
                edma_shadow0_write_array(ctlr, SH_ICR, j,
                            BIT(i));
                if (edma_cc[ctlr]->intr_data[k].callback)
                    edma_cc[ctlr]->intr_data[k].callback(
                        k, DMA_COMPLETE,
                        edma_cc[ctlr]->intr_data[k].data);
            }
        }
        cnt++;
        if (cnt > 10)
            break;
    }
    edma_shadow0_write(ctlr, SH_IEVAL, 1);
    return;
}

/******************************************************************************
 *
 * DMA error interrupt handler
 *
 *****************************************************************************/
static void dma_ccerr_handler(int irq, void *data)
{
    int i;
    int ctlr;
    unsigned int cnt = 0;

    ctlr = irq2ctlr(irq);
    if (ctlr < 0)
        return;

    edma_dbg("dma_ccerr_handler\n");

    if ((edma_read_array(ctlr, EDMA_EMR, 0) == 0) &&
        (edma_read_array(ctlr, EDMA_EMR, 1) == 0) &&
        (edma_read(ctlr, EDMA_QEMR) == 0) &&
        (edma_read(ctlr, EDMA_CCERR) == 0))
        return;

    while (1) {
        int j = -1;
        if (edma_read_array(ctlr, EDMA_EMR, 0))
            j = 0;
        else if (edma_read_array(ctlr, EDMA_EMR, 1))
            j = 1;
        if (j >= 0) {
            edma_dbg("EMR%d %08x\n", j,
                    edma_read_array(ctlr, EDMA_EMR, j));
            for (i = 0; i < 32; i++) {
                int k = (j << 5) + i;
                if (edma_read_array(ctlr, EDMA_EMR, j) &
                            BIT(i)) {
                    /* Clear the corresponding EMR bits */
                    edma_write_array(ctlr, EDMA_EMCR, j,
                            BIT(i));
                    /* Clear any SER */
                    edma_shadow0_write_array(ctlr, SH_SECR,
                                j, BIT(i));
                    if (edma_cc[ctlr]->intr_data[k].callback) {
                        edma_cc[ctlr]->intr_data[k].callback(
                            k, DMA_CC_ERROR,
                            edma_cc[ctlr]->intr_data[k].data);
                    }
                }
            }
        } else if (edma_read(ctlr, EDMA_QEMR)) {
            edma_dbg("QEMR %02x\n",
                edma_read(ctlr, EDMA_QEMR));
            for (i = 0; i < 8; i++) {
                if (edma_read(ctlr, EDMA_QEMR) & BIT(i)) {
                    /* Clear the corresponding QEMR bits */
                    edma_write(ctlr, EDMA_QEMCR, BIT(i));
                    edma_shadow0_write(ctlr, SH_QSECR,
                                BIT(i));

                    /* NOTE:  not reported!! */
                }
            }
        } else if (edma_read(ctlr, EDMA_CCERR)) {
            edma_dbg("CCERR %08x\n",
                edma_read(ctlr, EDMA_CCERR));
            /* FIXME:  CCERR.BIT(16) ignored!  much better
             * to just write CCERRCLR with CCERR value...
             */
            for (i = 0; i < 8; i++) {
                if (edma_read(ctlr, EDMA_CCERR) & BIT(i)) {
                    /* Clear the corresponding CCERR bits */
                    edma_write(ctlr, EDMA_CCERRCLR, BIT(i));

                    /* NOTE:  not reported!! */
                }
            }
        }
        if ((edma_read_array(ctlr, EDMA_EMR, 0) == 0) &&
            (edma_read_array(ctlr, EDMA_EMR, 1) == 0) &&
            (edma_read(ctlr, EDMA_QEMR) == 0) &&
            (edma_read(ctlr, EDMA_CCERR) == 0))
            break;
        cnt++;
        if (cnt > 10)
            break;
    }
    edma_write(ctlr, EDMA_EEVAL, 1);
    return;
}

/******************************************************************************
 *
 * Transfer controller error interrupt handlers
 *
 *****************************************************************************/

#define tc_errs_handled RT_FALSE    /* disabled as long as they're NOPs */

static void dma_tc0err_handler(int irq, void *data)
{
    edma_dbg("dma_tc0err_handler\n");
    return;
}

static void dma_tc1err_handler(int irq, void *data)
{
    edma_dbg("dma_tc1err_handler\n");
    return;
}

static int reserve_contiguous_slots(int ctlr, unsigned int id,
                     unsigned int num_slots,
                     unsigned int start_slot)
{
    int i, j;
    unsigned int count = num_slots;
    int stop_slot = start_slot;
    DECLARE_BITMAP(tmp_inuse, EDMA_MAX_PARAMENTRY);

    for (i = start_slot; i < edma_cc[ctlr]->num_slots; ++i) {
        j = EDMA_CHAN_SLOT(i);
        if (!test_and_set_bit(j, edma_cc[ctlr]->edma_inuse)) {
            /* Record our current beginning slot */
            if (count == num_slots)
                stop_slot = i;

            count--;
            set_bit(j, tmp_inuse);

            if (count == 0)
                break;
        } else {
            clear_bit(j, tmp_inuse);

            if (id == EDMA_CONT_PARAMS_FIXED_EXACT) {
                stop_slot = i;
                break;
            } else {
                count = num_slots;
            }
        }
    }

    /*
     * We have to clear any bits that we set
     * if we run out of parameter RAM slots, i.e. we do find a set
     * of contiguous parameter RAM slots but do not find the exact number
     * requested as we may reach the total number of parameter RAM slots
     */
    if (i == edma_cc[ctlr]->num_slots)
        stop_slot = i;

    for (j = start_slot; j < stop_slot; j++)
        if (test_bit(j, tmp_inuse))
            clear_bit(j, edma_cc[ctlr]->edma_inuse);

    if (count)
        return -RT_EBUSY;

    for (j = i - num_slots + 1; j <= i; ++j)
        rt_memcpy((void *)(edmacc_regs_base[ctlr] + PARM_OFFSET(j)),
            &dummy_paramset, PARM_SIZE);

    return EDMA_CTLR_CHAN(ctlr, i - num_slots + 1);
}

#if 0
static int prepare_unused_channel_list(struct device *dev, void *data)
{
    struct platform_device *pdev = to_platform_device(dev);
    int i, ctlr;

    for (i = 0; i < pdev->num_resources; i++) {
        if ((pdev->resource[i].flags & IORESOURCE_DMA) &&
                (int)pdev->resource[i].start >= 0) {
            ctlr = EDMA_CTLR(pdev->resource[i].start);
            clear_bit(EDMA_CHAN_SLOT(pdev->resource[i].start),
                    edma_cc[ctlr]->edma_unused);
        }
    }

    return 0;
}
#endif

/*-----------------------------------------------------------------------*/

static rt_bool_t unused_chan_list_done;

/* Resource alloc/free:  dma channels, parameter RAM slots */

/**
 * edma_alloc_channel - allocate DMA channel and paired parameter RAM
 * @channel: specific channel to allocate; negative for "any unmapped channel"
 * @callback: optional; to be issued on DMA completion or errors
 * @data: passed to callback
 * @eventq_no: an EVENTQ_* constant, used to choose which Transfer
 *  Controller (TC) executes requests using this channel.  Use
 *  EVENTQ_DEFAULT unless you really need a high priority queue.
 *
 * This allocates a DMA channel and its associated parameter RAM slot.
 * The parameter RAM is initialized to hold a dummy transfer.
 *
 * Normal use is to pass a specific channel number as @channel, to make
 * use of hardware events mapped to that channel.  When the channel will
 * be used only for software triggering or event chaining, channels not
 * mapped to hardware events (or mapped to unused events) are preferable.
 *
 * DMA transfers start from a channel using edma_start(), or by
 * chaining.  When the transfer described in that channel's parameter RAM
 * slot completes, that slot's data may be reloaded through a link.
 *
 * DMA errors are only reported to the @callback associated with the
 * channel driving that transfer, but transfer completion callbacks can
 * be sent to another channel under control of the TCC field in
 * the option word of the transfer's parameter RAM set.  Drivers must not
 * use DMA transfer completion callbacks for channels they did not allocate.
 * (The same applies to TCC codes used in transfer chaining.)
 *
 * Returns the number of the channel, else negative errno.
 */
int edma_alloc_channel(int channel,
        void (*callback)(unsigned channel, rt_uint16_t ch_status, void *data),
        void *data,
        enum dma_event_q eventq_no)
{
    unsigned i, done = 0, ctlr = 0;
    int ret = 0;
#if 0
    if (!unused_chan_list_done) {
        /*
         * Scan all the platform devices to find out the EDMA channels
         * used and clear them in the unused list, making the rest
         * available for ARM usage.
         */
        ret = bus_for_each_dev(&platform_bus_type, NULL, NULL,
                prepare_unused_channel_list);
        if (ret < 0)
            return ret;

        unused_chan_list_done = true;
    }
#endif

    if (channel >= 0) {
        ctlr = EDMA_CTLR(channel);
        channel = EDMA_CHAN_SLOT(channel);
        clear_bit(channel, edma_cc[ctlr]->edma_unused);
    }

    if (channel < 0) {
        for (i = 0; i < arch_num_cc; i++) {
            channel = 0;
            for (;;) {
                channel = find_next_bit(edma_cc[i]->edma_unused,
                        edma_cc[i]->num_channels,
                        channel);
                if (channel == edma_cc[i]->num_channels)
                    break;
                if (!test_and_set_bit(channel,
                        edma_cc[i]->edma_inuse)) {
                    done = 1;
                    ctlr = i;
                    break;
                }
                channel++;
            }
            if (done)
                break;
        }
        if (!done)
            return -RT_ENOMEM;
    } else if (channel >= edma_cc[ctlr]->num_channels) {
        return -RT_ERROR;
    } else if (test_and_set_bit(channel, edma_cc[ctlr]->edma_inuse)) {
        return -RT_EBUSY;
    }

    /* ensure access through shadow region 0 */
    edma_or_array2(ctlr, EDMA_DRAE, 0, channel >> 5, BIT(channel & 0x1f));

    /* ensure no events are pending */
    edma_stop(EDMA_CTLR_CHAN(ctlr, channel));
    rt_memcpy((void *)(edmacc_regs_base[ctlr] + PARM_OFFSET(channel)),
            &dummy_paramset, PARM_SIZE);

    if (callback)
        setup_dma_interrupt(EDMA_CTLR_CHAN(ctlr, channel),
                    callback, data);

    map_dmach_queue(ctlr, channel, eventq_no);

    return EDMA_CTLR_CHAN(ctlr, channel);
}
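
/* Illustrative sketch (not part of the driver): a minimal software-triggered
 * memory-to-memory copy built on the API in this file.  The callback, buffer
 * addresses and length are made up; INCR, W8BIT, ASYNC and DMA_COMPLETE are
 * assumed to come from edma.h, as in the doc comments in this file.  Note
 * that for the completion callback to fire, the slot's OPT word must also
 * have its interrupt-enable and TCC bits set (see edma_read_slot() /
 * edma_write_slot() below); those bit names come from the EDMA3 TRM and are
 * assumed here.
 */
#if 0
static void copy_done(unsigned channel, rt_uint16_t ch_status, void *data)
{
    if (ch_status == DMA_COMPLETE)
        rt_kprintf("EDMA: channel %d finished\n", channel);
}

static int copy_example(rt_uint32_t src, rt_uint32_t dst, rt_uint16_t len)
{
    /* negative @channel: "any unmapped channel", per the doc above */
    int ch = edma_alloc_channel(-1, copy_done, RT_NULL, EVENTQ_DEFAULT);

    if (ch < 0)
        return ch;

    edma_set_src(ch, src, INCR, W8BIT);
    edma_set_dest(ch, dst, INCR, W8BIT);
    edma_set_src_index(ch, 0, 0);   /* single array, no indexing needed */
    edma_set_dest_index(ch, 0, 0);
    /* one trigger moves a single "array" of len bytes (len <= 65535) */
    edma_set_transfer_params(ch, len, 1, 1, 0, ASYNC);

    return edma_start(ch);          /* unmapped channel: software trigger */
}
#endif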


/**
 * edma_free_channel - deallocate DMA channel
 * @channel: dma channel returned from edma_alloc_channel()
 *
 * This deallocates the DMA channel and associated parameter RAM slot
 * allocated by edma_alloc_channel().
 *
 * Callers are responsible for ensuring the channel is inactive, and
 * will not be reactivated by linking, chaining, or software calls to
 * edma_start().
 */
void edma_free_channel(unsigned channel)
{
    unsigned ctlr;

    ctlr = EDMA_CTLR(channel);
    channel = EDMA_CHAN_SLOT(channel);

    if (channel >= edma_cc[ctlr]->num_channels)
        return;

    setup_dma_interrupt(channel, RT_NULL, RT_NULL);
    /* REVISIT should probably take out of shadow region 0 */

    rt_memcpy((void *)(edmacc_regs_base[ctlr] + PARM_OFFSET(channel)),
            &dummy_paramset, PARM_SIZE);
    clear_bit(channel, edma_cc[ctlr]->edma_inuse);
}


/**
 * edma_alloc_slot - allocate DMA parameter RAM
 * @slot: specific slot to allocate; negative for "any unused slot"
 *
 * This allocates a parameter RAM slot, initializing it to hold a
 * dummy transfer.  Slots allocated using this routine have not been
 * mapped to a hardware DMA channel, and will normally be used by
 * linking to them from a slot associated with a DMA channel.
 *
 * Normal use is to pass EDMA_SLOT_ANY as the @slot, but specific
 * slots may be allocated on behalf of DSP firmware.
 *
 * Returns the number of the slot, else negative errno.
 */
int edma_alloc_slot(unsigned ctlr, int slot)
{
    if (slot >= 0)
        slot = EDMA_CHAN_SLOT(slot);

    if (slot < 0) {
        slot = edma_cc[ctlr]->num_channels;
        for (;;) {
            slot = find_next_zero_bit(edma_cc[ctlr]->edma_inuse,
                    edma_cc[ctlr]->num_slots, slot);
            if (slot == edma_cc[ctlr]->num_slots)
                return -RT_ENOMEM;
            if (!test_and_set_bit(slot, edma_cc[ctlr]->edma_inuse))
                break;
        }
    } else if (slot < edma_cc[ctlr]->num_channels ||
            slot >= edma_cc[ctlr]->num_slots) {
        return -RT_ERROR;
    } else if (test_and_set_bit(slot, edma_cc[ctlr]->edma_inuse)) {
        return -RT_EBUSY;
    }

    rt_memcpy((void *)(edmacc_regs_base[ctlr] + PARM_OFFSET(slot)),
            &dummy_paramset, PARM_SIZE);

    return EDMA_CTLR_CHAN(ctlr, slot);
}


/**
 * edma_free_slot - deallocate DMA parameter RAM
 * @slot: parameter RAM slot returned from edma_alloc_slot()
 *
 * This deallocates the parameter RAM slot allocated by edma_alloc_slot().
 * Callers are responsible for ensuring the slot is inactive, and will
 * not be activated.
 */
void edma_free_slot(unsigned slot)
{
    unsigned ctlr;

    ctlr = EDMA_CTLR(slot);
    slot = EDMA_CHAN_SLOT(slot);

    if (slot < edma_cc[ctlr]->num_channels ||
        slot >= edma_cc[ctlr]->num_slots)
        return;

    rt_memcpy((void *)(edmacc_regs_base[ctlr] + PARM_OFFSET(slot)),
            &dummy_paramset, PARM_SIZE);
    clear_bit(slot, edma_cc[ctlr]->edma_inuse);
}



/**
 * edma_alloc_cont_slots - allocate contiguous parameter RAM slots
 * The API will return the starting point of a set of
 * contiguous parameter RAM slots that have been requested
 *
 * @id: can only be EDMA_CONT_PARAMS_ANY, EDMA_CONT_PARAMS_FIXED_EXACT
 * or EDMA_CONT_PARAMS_FIXED_NOT_EXACT
 * @count: number of contiguous parameter RAM slots
 * @slot: the start value of the parameter RAM slot that should be passed
 * if @id is EDMA_CONT_PARAMS_FIXED_EXACT or EDMA_CONT_PARAMS_FIXED_NOT_EXACT
 *
 * If @id is EDMA_CONT_PARAMS_ANY then the API starts looking for a set of
 * contiguous parameter RAM slots from parameter RAM 64 in the case of
 * DaVinci SOCs and 32 in the case of DA8xx SOCs.
 *
 * If @id is EDMA_CONT_PARAMS_FIXED_EXACT then the API starts looking for a
 * set of contiguous parameter RAM slots from the @slot that is passed as an
 * argument to the API.
 *
 * If @id is EDMA_CONT_PARAMS_FIXED_NOT_EXACT then the API initially starts
 * looking for a set of contiguous parameter RAM slots from the @slot that is
 * passed as an argument to the API.  On failure the API will try to find a
 * set of contiguous parameter RAM slots from the remaining parameter RAM
 * slots
 */
int edma_alloc_cont_slots(unsigned ctlr, unsigned int id, int slot, int count)
{
    /*
     * The start slot requested should be greater than
     * the number of channels and lesser than the total number
     * of slots
     */
    if ((id != EDMA_CONT_PARAMS_ANY) &&
        (slot < edma_cc[ctlr]->num_channels ||
        slot >= edma_cc[ctlr]->num_slots))
        return -RT_ERROR;

    /*
     * The number of parameter RAM slots requested cannot be less than 1
     * and cannot be more than the number of slots minus the number of
     * channels
     */
    if (count < 1 || count >
        (edma_cc[ctlr]->num_slots - edma_cc[ctlr]->num_channels))
        return -RT_ERROR;

    switch (id) {
    case EDMA_CONT_PARAMS_ANY:
        return reserve_contiguous_slots(ctlr, id, count,
                         edma_cc[ctlr]->num_channels);
    case EDMA_CONT_PARAMS_FIXED_EXACT:
    case EDMA_CONT_PARAMS_FIXED_NOT_EXACT:
        return reserve_contiguous_slots(ctlr, id, count, slot);
    default:
        return -RT_ERROR;
    }

}
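
/* Illustrative sketch (not part of the driver): the three allocation modes
 * above, with a made-up controller number and slot count.  A fragment, not
 * a complete unit; edma_free_cont_slots() is defined just below.
 */
#if 0
static void cont_slots_example(void)
{
    /* any 4 contiguous slots on controller 0 */
    int any = edma_alloc_cont_slots(0, EDMA_CONT_PARAMS_ANY, 0, 4);

    /* exactly slots 70..73, or -RT_EBUSY if any of them is taken */
    int exact = edma_alloc_cont_slots(0, EDMA_CONT_PARAMS_FIXED_EXACT,
                        70, 4);

    /* prefer a run starting at 70, fall back to any later run */
    int relaxed = edma_alloc_cont_slots(0,
                        EDMA_CONT_PARAMS_FIXED_NOT_EXACT, 70, 4);

    if (any >= 0)
        edma_free_cont_slots(any, 4);
}
#endif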


/**
 * edma_free_cont_slots - deallocate DMA parameter RAM slots
 * @slot: first parameter RAM of a set of parameter RAM slots to be freed
 * @count: the number of contiguous parameter RAM slots to be freed
 *
 * This deallocates the parameter RAM slots allocated by
 * edma_alloc_cont_slots.
 * Callers/applications need to keep track of sets of contiguous
 * parameter RAM slots that have been allocated using the edma_alloc_cont_slots
 * API.
 * Callers are responsible for ensuring the slots are inactive, and will
 * not be activated.
 */
int edma_free_cont_slots(unsigned slot, int count)
{
    unsigned ctlr, slot_to_free;
    int i;

    ctlr = EDMA_CTLR(slot);
    slot = EDMA_CHAN_SLOT(slot);

    if (slot < edma_cc[ctlr]->num_channels ||
        slot >= edma_cc[ctlr]->num_slots ||
        count < 1)
        return -RT_ERROR;

    for (i = slot; i < slot + count; ++i) {
        ctlr = EDMA_CTLR(i);
        slot_to_free = EDMA_CHAN_SLOT(i);

        rt_memcpy((void *)(edmacc_regs_base[ctlr] + PARM_OFFSET(slot_to_free)),
            &dummy_paramset, PARM_SIZE);
        clear_bit(slot_to_free, edma_cc[ctlr]->edma_inuse);
    }

    return 0;
}


/*-----------------------------------------------------------------------*/

/* Parameter RAM operations (i) -- read/write partial slots */

/**
 * edma_set_src - set initial DMA source address in parameter RAM slot
 * @slot: parameter RAM slot being configured
 * @src_port: physical address of source (memory, controller FIFO, etc)
 * @mode: INCR, except in very rare cases
 * @width: ignored unless @mode is FIFO, else specifies the
 *  width to use when addressing the fifo (e.g. W8BIT, W32BIT)
 *
 * Note that the source address is modified during the DMA transfer
 * according to edma_set_src_index().
 */
void edma_set_src(unsigned slot, rt_uint32_t src_port,
                enum address_mode mode, enum fifo_width width)
{
    unsigned ctlr;

    ctlr = EDMA_CTLR(slot);
    slot = EDMA_CHAN_SLOT(slot);

    if (slot < edma_cc[ctlr]->num_slots) {
        unsigned int i = edma_parm_read(ctlr, PARM_OPT, slot);

        if (mode) {
            /* set SAM and program FWID */
            i = (i & ~(EDMA_FWID)) | (SAM | ((width & 0x7) << 8));
        } else {
            /* clear SAM */
            i &= ~SAM;
        }
        edma_parm_write(ctlr, PARM_OPT, slot, i);

        /* set the source port address
           in source register of param structure */
        edma_parm_write(ctlr, PARM_SRC, slot, src_port);
    }
}


/**
 * edma_set_dest - set initial DMA destination address in parameter RAM slot
 * @slot: parameter RAM slot being configured
 * @dest_port: physical address of destination (memory, controller FIFO, etc)
 * @mode: INCR, except in very rare cases
 * @width: ignored unless @mode is FIFO, else specifies the
 *  width to use when addressing the fifo (e.g. W8BIT, W32BIT)
 *
 * Note that the destination address is modified during the DMA transfer
 * according to edma_set_dest_index().
 */
void edma_set_dest(unsigned slot, rt_uint32_t dest_port,
                 enum address_mode mode, enum fifo_width width)
{
    unsigned ctlr;

    ctlr = EDMA_CTLR(slot);
    slot = EDMA_CHAN_SLOT(slot);

    if (slot < edma_cc[ctlr]->num_slots) {
        unsigned int i = edma_parm_read(ctlr, PARM_OPT, slot);

        if (mode) {
            /* set DAM and program FWID */
            i = (i & ~(EDMA_FWID)) | (DAM | ((width & 0x7) << 8));
        } else {
            /* clear DAM */
            i &= ~DAM;
        }
        edma_parm_write(ctlr, PARM_OPT, slot, i);
        /* set the destination port address
           in dest register of param structure */
        edma_parm_write(ctlr, PARM_DST, slot, dest_port);
    }
}


/**
 * edma_get_position - returns the current transfer points
 * @slot: parameter RAM slot being examined
 * @src: pointer to source port position
 * @dst: pointer to destination port position
 *
 * Returns current source and destination addresses for a particular
 * parameter RAM slot.  Its channel should not be active when this is called.
 */
void edma_get_position(unsigned slot, rt_uint32_t *src, rt_uint32_t *dst)
{
    struct edmacc_param temp;
    unsigned ctlr;

    ctlr = EDMA_CTLR(slot);
    slot = EDMA_CHAN_SLOT(slot);

    edma_read_slot(EDMA_CTLR_CHAN(ctlr, slot), &temp);
    if (src != RT_NULL)
        *src = temp.src;
    if (dst != RT_NULL)
        *dst = temp.dst;
}


/**
 * edma_set_src_index - configure DMA source address indexing
 * @slot: parameter RAM slot being configured
 * @src_bidx: byte offset between source arrays in a frame
 * @src_cidx: byte offset between source frames in a block
 *
 * Offsets are specified to support either contiguous or discontiguous
 * memory transfers, or repeated access to a hardware register, as needed.
 * When accessing hardware registers, both offsets are normally zero.
 */
void edma_set_src_index(unsigned slot, rt_int16_t src_bidx, rt_int16_t src_cidx)
{
    unsigned ctlr;

    ctlr = EDMA_CTLR(slot);
    slot = EDMA_CHAN_SLOT(slot);

    if (slot < edma_cc[ctlr]->num_slots) {
        edma_parm_modify(ctlr, PARM_SRC_DST_BIDX, slot,
                0xffff0000, src_bidx);
        edma_parm_modify(ctlr, PARM_SRC_DST_CIDX, slot,
                0xffff0000, src_cidx);
    }
}


/**
 * edma_set_dest_index - configure DMA destination address indexing
 * @slot: parameter RAM slot being configured
 * @dest_bidx: byte offset between destination arrays in a frame
 * @dest_cidx: byte offset between destination frames in a block
 *
 * Offsets are specified to support either contiguous or discontiguous
 * memory transfers, or repeated access to a hardware register, as needed.
 * When accessing hardware registers, both offsets are normally zero.
 */
void edma_set_dest_index(unsigned slot, rt_int16_t dest_bidx, rt_int16_t dest_cidx)
{
    unsigned ctlr;

    ctlr = EDMA_CTLR(slot);
    slot = EDMA_CHAN_SLOT(slot);

    if (slot < edma_cc[ctlr]->num_slots) {
        edma_parm_modify(ctlr, PARM_SRC_DST_BIDX, slot,
                0x0000ffff, dest_bidx << 16);
        edma_parm_modify(ctlr, PARM_SRC_DST_CIDX, slot,
                0x0000ffff, dest_cidx << 16);
    }
}


/**
 * edma_set_transfer_params - configure DMA transfer parameters
 * @slot: parameter RAM slot being configured
 * @acnt: how many bytes per array (at least one)
 * @bcnt: how many arrays per frame (at least one)
 * @ccnt: how many frames per block (at least one)
 * @bcnt_rld: used only for A-Synchronized transfers; this specifies
 *  the value to reload into bcnt when it decrements to zero
 * @sync_mode: ASYNC or ABSYNC
 *
 * See the EDMA3 documentation to understand how to configure and link
 * transfers using the fields in PaRAM slots.  If you are not doing it
 * all at once with edma_write_slot(), you will use this routine
 * plus two calls each for source and destination, setting the initial
 * address and saying how to index that address.
 *
 * An example of an A-Synchronized transfer is a serial link using a
 * single word shift register.  In that case, @acnt would be equal to
 * that word size; the serial controller issues a DMA synchronization
 * event to transfer each word, and memory access by the DMA transfer
 * controller will be word-at-a-time.
 *
 * An example of an AB-Synchronized transfer is a device using a FIFO.
 * In that case, @acnt equals the FIFO width and @bcnt equals its depth.
 * The controller with the FIFO issues DMA synchronization events when
 * the FIFO threshold is reached, and the DMA transfer controller will
 * transfer one frame to (or from) the FIFO.  It will probably use
 * efficient burst modes to access memory.
 */
void edma_set_transfer_params(unsigned slot,
        rt_uint16_t acnt, rt_uint16_t bcnt, rt_uint16_t ccnt,
        rt_uint16_t bcnt_rld, enum sync_dimension sync_mode)
{
    unsigned ctlr;

    ctlr = EDMA_CTLR(slot);
    slot = EDMA_CHAN_SLOT(slot);

    if (slot < edma_cc[ctlr]->num_slots) {
        edma_parm_modify(ctlr, PARM_LINK_BCNTRLD, slot,
                0x0000ffff, bcnt_rld << 16);
        if (sync_mode == ASYNC)
            edma_parm_and(ctlr, PARM_OPT, slot, ~SYNCDIM);
        else
            edma_parm_or(ctlr, PARM_OPT, slot, SYNCDIM);
        /* Set the acount, bcount, ccount registers */
        edma_parm_write(ctlr, PARM_A_B_CNT, slot, (bcnt << 16) | acnt);
        edma_parm_write(ctlr, PARM_CCNT, slot, ccnt);
    }
}
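
/* Illustrative sketch (not part of the driver): counting one 512-byte
 * peripheral transfer both ways.  The numbers are examples only, and the
 * slot handle is assumed to come from edma_alloc_slot() or
 * edma_alloc_channel().
 *
 * A-synchronized: one event moves one array.  acnt = 4 bytes per array,
 * bcnt = 128 arrays, ccnt = 1, so 128 events move 512 bytes.
 *
 * AB-synchronized: one event moves one frame.  With a 32-byte FIFO burst,
 * acnt = 4, bcnt = 8 (one 32-byte frame), ccnt = 16, so 16 events move
 * the same 512 bytes.
 */
#if 0
    edma_set_transfer_params(slot, 4, 128, 1, 128, ASYNC);  /* event per array */
    edma_set_transfer_params(slot, 4, 8, 16, 0, ABSYNC);    /* event per frame */
#endif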


/**
 * edma_link - link one parameter RAM slot to another
 * @from: parameter RAM slot originating the link
 * @to: parameter RAM slot which is the link target
 *
 * The originating slot should not be part of any active DMA transfer.
 */
void edma_link(unsigned from, unsigned to)
{
    unsigned ctlr_from, ctlr_to;

    ctlr_from = EDMA_CTLR(from);
    from = EDMA_CHAN_SLOT(from);
    ctlr_to = EDMA_CTLR(to);
    to = EDMA_CHAN_SLOT(to);

    if (from >= edma_cc[ctlr_from]->num_slots)
        return;
    if (to >= edma_cc[ctlr_to]->num_slots)
        return;
    edma_parm_modify(ctlr_from, PARM_LINK_BCNTRLD, from, 0xffff0000,
                PARM_OFFSET(to));
}
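
/* Illustrative sketch (not part of the driver): the classic ping-pong
 * reload pattern built on edma_link().  Two extra PaRAM slots describe
 * alternating buffers and link to each other, so the channel reloads
 * automatically as each transfer completes.  The callback variable and
 * the slot programming (via edma_write_slot() below) are assumed.
 */
#if 0
    int ch   = edma_alloc_channel(-1, callback, RT_NULL, EVENTQ_DEFAULT);
    int ping = edma_alloc_slot(EDMA_CTLR(ch), EDMA_SLOT_ANY);
    int pong = edma_alloc_slot(EDMA_CTLR(ch), EDMA_SLOT_ANY);

    /* ... program ch, ping and pong with edma_write_slot() ... */

    edma_link(ch, ping);    /* channel slot reloads from "ping" */
    edma_link(ping, pong);  /* ... then "pong" ... */
    edma_link(pong, ping);  /* ... and back to "ping" */
#endif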


/**
 * edma_unlink - cut link from one parameter RAM slot
 * @from: parameter RAM slot originating the link
 *
 * The originating slot should not be part of any active DMA transfer.
 * Its link is set to 0xffff.
 */
void edma_unlink(unsigned from)
{
    unsigned ctlr;

    ctlr = EDMA_CTLR(from);
    from = EDMA_CHAN_SLOT(from);

    if (from >= edma_cc[ctlr]->num_slots)
        return;
    edma_parm_or(ctlr, PARM_LINK_BCNTRLD, from, 0xffff);
}


/*-----------------------------------------------------------------------*/

/* Parameter RAM operations (ii) -- read/write whole parameter sets */

/**
 * edma_write_slot - write parameter RAM data for slot
 * @slot: number of parameter RAM slot being modified
 * @param: data to be written into parameter RAM slot
 *
 * Use this to assign all parameters of a transfer at once.  This
 * allows more efficient setup of transfers than issuing multiple
 * calls to set up those parameters in small pieces, and provides
 * complete control over all transfer options.
 */
void edma_write_slot(unsigned slot, const struct edmacc_param *param)
{
    unsigned ctlr;

    ctlr = EDMA_CTLR(slot);
    slot = EDMA_CHAN_SLOT(slot);

    if (slot >= edma_cc[ctlr]->num_slots)
        return;
    rt_memcpy((void *)(edmacc_regs_base[ctlr] + PARM_OFFSET(slot)), param,
            PARM_SIZE);
}


/**
 * edma_read_slot - read parameter RAM data from slot
 * @slot: number of parameter RAM slot being copied
 * @param: where to store copy of parameter RAM data
 *
 * Use this to read data from a parameter RAM slot, perhaps to
 * save them as a template for later reuse.
 */
void edma_read_slot(unsigned slot, struct edmacc_param *param)
{
    unsigned ctlr;

    ctlr = EDMA_CTLR(slot);
    slot = EDMA_CHAN_SLOT(slot);

    if (slot >= edma_cc[ctlr]->num_slots)
        return;
    rt_memcpy(param, (void *)(edmacc_regs_base[ctlr] + PARM_OFFSET(slot)),
            PARM_SIZE);
}
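
/* Illustrative sketch (not part of the driver): a read-modify-write of a
 * whole slot to set OPT bits that the partial-slot helpers above do not
 * cover.  The "opt" field name follows struct edmacc_param; TCINTEN and
 * EDMA_TCC() are assumed to be provided by edma.h, per the EDMA3 TRM, and
 * the slot/ch variables are assumed to come from the allocators above.
 */
#if 0
    struct edmacc_param p;

    edma_read_slot(slot, &p);
    /* interrupt on completion, reported against this channel's TCC code */
    p.opt |= TCINTEN | EDMA_TCC(EDMA_CHAN_SLOT(ch));
    edma_write_slot(slot, &p);
#endif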


/*-----------------------------------------------------------------------*/

/* Various EDMA channel control operations */

/**
 * edma_pause - pause dma on a channel
 * @channel: on which edma_start() has been called
 *
 * This temporarily disables EDMA hardware events on the specified channel,
 * preventing them from triggering new transfers on its behalf.
 */
void edma_pause(unsigned channel)
{
    unsigned ctlr;

    ctlr = EDMA_CTLR(channel);
    channel = EDMA_CHAN_SLOT(channel);

    if (channel < edma_cc[ctlr]->num_channels) {
        unsigned int mask = BIT(channel & 0x1f);

        edma_shadow0_write_array(ctlr, SH_EECR, channel >> 5, mask);
    }
}


/**
 * edma_resume - resumes dma on a paused channel
 * @channel: on which edma_pause() has been called
 *
 * This re-enables EDMA hardware events on the specified channel.
 */
void edma_resume(unsigned channel)
{
    unsigned ctlr;

    ctlr = EDMA_CTLR(channel);
    channel = EDMA_CHAN_SLOT(channel);

    if (channel < edma_cc[ctlr]->num_channels) {
        unsigned int mask = BIT(channel & 0x1f);

        edma_shadow0_write_array(ctlr, SH_EESR, channel >> 5, mask);
    }
}


/**
 * edma_start - start dma on a channel
 * @channel: channel being activated
 *
 * Channels with event associations will be triggered by their hardware
 * events, and channels without such associations will be triggered by
 * software.  (At this writing there is no interface for using software
 * triggers except with channels that don't support hardware triggers.)
 *
 * Returns zero on success, else negative errno.
 */
int edma_start(unsigned channel)
{
    unsigned ctlr;

    ctlr = EDMA_CTLR(channel);
    channel = EDMA_CHAN_SLOT(channel);

    if (channel < edma_cc[ctlr]->num_channels) {
        int j = channel >> 5;
        unsigned int mask = BIT(channel & 0x1f);

        /* EDMA channels without event association */
        if (test_bit(channel, edma_cc[ctlr]->edma_unused)) {
            edma_dbg("EDMA: ESR%d %08x\n", j,
                edma_shadow0_read_array(ctlr, SH_ESR, j));
            edma_shadow0_write_array(ctlr, SH_ESR, j, mask);
            return 0;
        }

        /* EDMA channel with event association */
        edma_dbg("EDMA: ER%d %08x\n", j,
            edma_shadow0_read_array(ctlr, SH_ER, j));
        /* Clear any pending event or error */
        edma_write_array(ctlr, EDMA_ECR, j, mask);
        edma_write_array(ctlr, EDMA_EMCR, j, mask);
        /* Clear any SER */
        edma_shadow0_write_array(ctlr, SH_SECR, j, mask);
        edma_shadow0_write_array(ctlr, SH_EESR, j, mask);
        edma_dbg("EDMA: EER%d %08x\n", j,
            edma_shadow0_read_array(ctlr, SH_EER, j));
        return 0;
    }

    return -RT_ERROR;
}


/**
 * edma_stop - stops dma on the channel passed
 * @channel: channel being deactivated
 *
 * Any active transfer on @channel is paused and all pending
 * hardware events are cleared.  The current transfer
 * may not be resumed, and the channel's Parameter RAM should be
 * reinitialized before being reused.
 */
void edma_stop(unsigned channel)
{
    unsigned ctlr;

    ctlr = EDMA_CTLR(channel);
    channel = EDMA_CHAN_SLOT(channel);

    if (channel < edma_cc[ctlr]->num_channels) {
        int j = channel >> 5;
        unsigned int mask = BIT(channel & 0x1f);

        edma_shadow0_write_array(ctlr, SH_EECR, j, mask);
        edma_shadow0_write_array(ctlr, SH_ECR, j, mask);
        edma_shadow0_write_array(ctlr, SH_SECR, j, mask);
        edma_write_array(ctlr, EDMA_EMCR, j, mask);

        edma_dbg("EDMA: EER%d %08x\n", j,
                edma_shadow0_read_array(ctlr, SH_EER, j));

        /* REVISIT:  consider guarding against inappropriate event
         * chaining by overwriting with dummy_paramset.
         */
    }
}


/******************************************************************************
 *
 * It cleans the ParamEntry and brings EDMA back to its initial state if the
 * media has been removed before EDMA has finished.  It is useful for
 * removable media.
 * Arguments:
 *      channel   - channel number
 *
 * FIXME this should not be needed ... edma_stop() should suffice.
 *
 *****************************************************************************/

void edma_clean_channel(unsigned channel)
{
    unsigned ctlr;

    ctlr = EDMA_CTLR(channel);
    channel = EDMA_CHAN_SLOT(channel);

    if (channel < edma_cc[ctlr]->num_channels) {
        int j = (channel >> 5);
        unsigned int mask = BIT(channel & 0x1f);

        edma_dbg("EDMA: EMR%d %08x\n", j,
                edma_read_array(ctlr, EDMA_EMR, j));
        edma_shadow0_write_array(ctlr, SH_ECR, j, mask);
        /* Clear the corresponding EMR bits */
        edma_write_array(ctlr, EDMA_EMCR, j, mask);
        /* Clear any SER */
        edma_shadow0_write_array(ctlr, SH_SECR, j, mask);
        edma_write(ctlr, EDMA_CCERRCLR, BIT(16) | BIT(1) | BIT(0));
    }
}


/*
 * edma_clear_event - clear an outstanding event on the DMA channel
 * Arguments:
 *  channel - channel number
 */
void edma_clear_event(unsigned channel)
{
    unsigned ctlr;

    ctlr = EDMA_CTLR(channel);
    channel = EDMA_CHAN_SLOT(channel);

    if (channel >= edma_cc[ctlr]->num_channels)
        return;
    if (channel < 32)
        edma_write(ctlr, EDMA_ECR, BIT(channel));
    else
        edma_write(ctlr, EDMA_ECRH, BIT(channel - 32));
}


/*-----------------------------------------------------------------------*/

int edma_init(struct edma_soc_info **info)
{
    //struct edma_soc_info  **info = pdev->dev.platform_data;
    const rt_int8_t     (*queue_priority_mapping)[2];
    const rt_int8_t     (*queue_tc_mapping)[2];
    int         i;
    int         status = -1;

    RT_ASSERT(info != RT_NULL);

    psc_change_state(DAVINCI_DM365_LPSC_TPCC, PSC_ENABLE);
    psc_change_state(DAVINCI_DM365_LPSC_TPTC0, PSC_ENABLE);
    psc_change_state(DAVINCI_DM365_LPSC_TPTC1, PSC_ENABLE);
    psc_change_state(DAVINCI_DM365_LPSC_TPTC2, PSC_ENABLE);
    psc_change_state(DAVINCI_DM365_LPSC_TPTC3, PSC_ENABLE);

    edmacc_regs_base[0] = (void *)EDMA_CC0_BASE_REG;

    edma_cc[0] = rt_malloc(sizeof(struct edma));
    if (!edma_cc[0]) {
        status = -RT_ENOMEM;
        goto fail;
    }
    rt_memset(edma_cc[0], 0, sizeof(struct edma));

    edma_cc[0]->num_channels = min_t(unsigned, info[0]->n_channel,
                        EDMA_MAX_DMACH);
    edma_cc[0]->num_slots = min_t(unsigned, info[0]->n_slot,
                        EDMA_MAX_PARAMENTRY);
    edma_cc[0]->num_cc = min_t(unsigned, info[0]->n_cc,
                        EDMA_MAX_CC);

    edma_cc[0]->default_queue = info[0]->default_queue;
    if (!edma_cc[0]->default_queue)
        edma_cc[0]->default_queue = EVENTQ_1;

    edma_dbg("DMA REG BASE ADDR=%p\n",
        edmacc_regs_base[0]);

    for (i = 0; i < edma_cc[0]->num_slots; i++)
        rt_memcpy((void *)(edmacc_regs_base[0] + PARM_OFFSET(i)),
                &dummy_paramset, PARM_SIZE);

    /* Mark all channels as unused */
    rt_memset(edma_cc[0]->edma_unused, 0xff,
        sizeof(edma_cc[0]->edma_unused));

    edma_cc[0]->irq_res_start = IRQ_CCINT0;
    rt_hw_interrupt_install(IRQ_CCINT0, dma_irq_handler, RT_NULL, "edma");
    rt_hw_interrupt_umask(IRQ_CCINT0);

    edma_cc[0]->irq_res_end = IRQ_CCERRINT;
    rt_hw_interrupt_install(IRQ_CCERRINT, dma_ccerr_handler, RT_NULL, "edma_error");
    rt_hw_interrupt_umask(IRQ_CCERRINT);

    /* Everything lives on transfer controller 1 until otherwise
     * specified. This way, long transfers on the low priority queue
     * started by the codec engine will not cause audio defects.
     */
    for (i = 0; i < edma_cc[0]->num_channels; i++)
        map_dmach_queue(0, i, EVENTQ_1);

    queue_tc_mapping = info[0]->queue_tc_mapping;
    queue_priority_mapping = info[0]->queue_priority_mapping;

    /* Event queue to TC mapping */
    for (i = 0; queue_tc_mapping[i][0] != -1; i++)
        map_queue_tc(0, queue_tc_mapping[i][0],
                queue_tc_mapping[i][1]);

    /* Event queue priority mapping */
    for (i = 0; queue_priority_mapping[i][0] != -1; i++)
        assign_priority_to_queue(0,
                    queue_priority_mapping[i][0],
                    queue_priority_mapping[i][1]);

    /* Map the channel to param entry if channel mapping logic
     * exists
     */
    if (edma_read(0, EDMA_CCCFG) & CHMAP_EXIST)
        map_dmach_param(0);

    for (i = 0; i < info[0]->n_region; i++) {
        edma_write_array2(0, EDMA_DRAE, i, 0, 0x0);
        edma_write_array2(0, EDMA_DRAE, i, 1, 0x0);
        edma_write_array(0, EDMA_QRAE, i, 0x0);
    }
    arch_num_cc++;

    if (tc_errs_handled) {
        rt_hw_interrupt_install(IRQ_TCERRINT0, dma_tc0err_handler, RT_NULL,
                    "edma_tc0");
        rt_hw_interrupt_umask(IRQ_TCERRINT0);
        rt_hw_interrupt_install(IRQ_TCERRINT, dma_tc1err_handler, RT_NULL,
                    "edma_tc1");
        rt_hw_interrupt_umask(IRQ_TCERRINT);
    }

    return 0;

fail:
    rt_free(edma_cc[0]);

    return status;
}