// SPDX-License-Identifier: GPL-2.0+
/*
 * Enhanced Direct Memory Access (EDMA3) Controller
 *
 * (C) Copyright 2014
 *     Texas Instruments Incorporated, <www.ti.com>
 *
 * Author: Ivan Khoronzhuk <ivan.khoronzhuk@ti.com>
 */

#include <asm/cache.h>
#include <asm/io.h>
#include <common.h>
#include <dm.h>
#include <dma-uclass.h>
#include <linux/dma-mapping.h>
#include <asm/omap_common.h>
#include <asm/ti-common/ti-edma3.h>

#define EDMA3_SL_BASE(slot)			(0x4000 + ((slot) << 5))
#define EDMA3_SL_MAX_NUM			512
#define EDMA3_SLOPT_FIFO_WIDTH_MASK		(0x7 << 8)

#define EDMA3_QCHMAP(ch)			(0x0200 + ((ch) << 2))
#define EDMA3_CHMAP_PARSET_MASK			0x1ff
#define EDMA3_CHMAP_PARSET_SHIFT		0x5
#define EDMA3_CHMAP_TRIGWORD_SHIFT		0x2

#define EDMA3_QEMCR				0x314
#define EDMA3_IPR				0x1068
#define EDMA3_IPRH				0x106c
#define EDMA3_ICR				0x1070
#define EDMA3_ICRH				0x1074
#define EDMA3_QEECR				0x1088
#define EDMA3_QEESR				0x108c
#define EDMA3_QSECR				0x1094

#define EDMA_FILL_BUFFER_SIZE			512

struct ti_edma3_priv {
	u32 base;
};

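/*
 * Bounce buffer used as the DMA source by __edma3_fill(): it is memset()
 * to the fill value and read repeatedly by the transfer.
 */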
static u8 edma_fill_buffer[EDMA_FILL_BUFFER_SIZE] __aligned(ARCH_DMA_MINALIGN);

/**
 * qedma3_start - start QDMA on a channel
 * @base: base address of edma
 * @cfg: pointer to struct edma3_channel_config holding the PaRAM slot to
 * associate with, the chnum (QDMA channel number 0-7), the transfer
 * complete code, and the trigger slot word, which must correspond to the
 * word number in struct edma3_slot_layout whose write generates the event.
 */
void qedma3_start(u32 base, struct edma3_channel_config *cfg)
{
	u32 qchmap;

	/* Clear the pending interrupt bit */
	if (cfg->complete_code < 32)
		__raw_writel(1 << cfg->complete_code, base + EDMA3_ICR);
	else
		__raw_writel(1 << (cfg->complete_code - 32), base + EDMA3_ICRH);

	/* Map the PaRAM set and trigger word to the QDMA channel */
	qchmap = ((EDMA3_CHMAP_PARSET_MASK & cfg->slot)
		  << EDMA3_CHMAP_PARSET_SHIFT) |
		  (cfg->trigger_slot_word << EDMA3_CHMAP_TRIGWORD_SHIFT);

	__raw_writel(qchmap, base + EDMA3_QCHMAP(cfg->chnum));

	/* Clear missed event if set */
	__raw_writel(1 << cfg->chnum, base + EDMA3_QSECR);
	__raw_writel(1 << cfg->chnum, base + EDMA3_QEMCR);

	/* Enable the QDMA channel event */
	__raw_writel(1 << cfg->chnum, base + EDMA3_QEESR);
}
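
/*
 * Typical usage (illustrative sketch, mirroring __edma3_transfer() below):
 * bind a configured PaRAM slot to QDMA channel 0 and arm it so that the
 * write to the slot's DST word (the trigger word) starts the transfer:
 *
 *	struct edma3_channel_config cfg = {
 *		.slot = slot,
 *		.chnum = 0,
 *		.complete_code = 0,
 *		.trigger_slot_word = EDMA3_TWORD(dst),
 *	};
 *
 *	qedma3_start(base, &cfg);
 *	edma3_set_dest_addr(base, cfg.slot, dst_addr);
 */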

/**
 * edma3_set_dest - set initial DMA destination address in parameter RAM slot
 * @base: base address of edma
 * @slot: parameter RAM slot being configured
 * @dst: physical address of destination (memory, controller FIFO, etc)
 * @mode: INCR, except in very rare cases
 * @width: ignored unless @mode is FIFO; when FIFO, specifies the
 *	width to use when addressing the FIFO (e.g. W8BIT, W32BIT)
 *
 * Note that the destination address is modified during the DMA transfer
 * according to edma3_set_dest_index().
 */
void edma3_set_dest(u32 base, int slot, u32 dst, enum edma3_address_mode mode,
		    enum edma3_fifo_width width)
{
	u32 opt;
	struct edma3_slot_layout *rg;

	rg = (struct edma3_slot_layout *)(base + EDMA3_SL_BASE(slot));

	opt = __raw_readl(&rg->opt);
	if (mode == FIFO)
		opt = (opt & ~EDMA3_SLOPT_FIFO_WIDTH_MASK) |
		       (EDMA3_SLOPT_DST_ADDR_CONST_MODE |
			EDMA3_SLOPT_FIFO_WIDTH_SET(width));
	else
		opt &= ~EDMA3_SLOPT_DST_ADDR_CONST_MODE;

	__raw_writel(opt, &rg->opt);
	__raw_writel(dst, &rg->dst);
}

/**
 * edma3_set_dest_index - configure DMA destination address indexing
 * @base: base address of edma
 * @slot: parameter RAM slot being configured
 * @bidx: byte offset between destination arrays in a frame
 * @cidx: byte offset between destination frames in a block
 *
 * Offsets are specified to support either contiguous or discontiguous
 * memory transfers, or repeated access to a hardware register, as needed.
 * When accessing hardware registers, both offsets are normally zero.
 */
void edma3_set_dest_index(u32 base, unsigned slot, int bidx, int cidx)
{
	u32 src_dst_bidx;
	u32 src_dst_cidx;
	struct edma3_slot_layout *rg;

	rg = (struct edma3_slot_layout *)(base + EDMA3_SL_BASE(slot));

	src_dst_bidx = __raw_readl(&rg->src_dst_bidx);
	src_dst_cidx = __raw_readl(&rg->src_dst_cidx);

	__raw_writel((src_dst_bidx & 0x0000ffff) | (bidx << 16),
		     &rg->src_dst_bidx);
	__raw_writel((src_dst_cidx & 0x0000ffff) | (cidx << 16),
		     &rg->src_dst_cidx);
}
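
/*
 * For a contiguous memory-to-memory copy the destination index is simply
 * the array size, so consecutive arrays land back to back (sketch, as in
 * __edma3_transfer() below):
 *
 *	edma3_set_dest_index(base, slot, acnt, 0);
 */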

/**
 * edma3_set_dest_addr - set destination address for slot only
 */
void edma3_set_dest_addr(u32 base, int slot, u32 dst)
{
	struct edma3_slot_layout *rg;

	rg = (struct edma3_slot_layout *)(base + EDMA3_SL_BASE(slot));
	__raw_writel(dst, &rg->dst);
}

/**
 * edma3_set_src - set initial DMA source address in parameter RAM slot
 * @base: base address of edma
 * @slot: parameter RAM slot being configured
 * @src: physical address of source (memory, controller FIFO, etc)
 * @mode: INCR, except in very rare cases
 * @width: ignored unless @mode is FIFO; when FIFO, specifies the
 *	width to use when addressing the FIFO (e.g. W8BIT, W32BIT)
 *
 * Note that the source address is modified during the DMA transfer
 * according to edma3_set_src_index().
 */
void edma3_set_src(u32 base, int slot, u32 src, enum edma3_address_mode mode,
		   enum edma3_fifo_width width)
{
	u32 opt;
	struct edma3_slot_layout *rg;

	rg = (struct edma3_slot_layout *)(base + EDMA3_SL_BASE(slot));

	opt = __raw_readl(&rg->opt);
	if (mode == FIFO)
		opt = (opt & ~EDMA3_SLOPT_FIFO_WIDTH_MASK) |
		       (EDMA3_SLOPT_DST_ADDR_CONST_MODE |
			EDMA3_SLOPT_FIFO_WIDTH_SET(width));
	else
		opt &= ~EDMA3_SLOPT_DST_ADDR_CONST_MODE;

	__raw_writel(opt, &rg->opt);
	__raw_writel(src, &rg->src);
}

/**
 * edma3_set_src_index - configure DMA source address indexing
 * @base: base address of edma
 * @slot: parameter RAM slot being configured
 * @bidx: byte offset between source arrays in a frame
 * @cidx: byte offset between source frames in a block
 *
 * Offsets are specified to support either contiguous or discontiguous
 * memory transfers, or repeated access to a hardware register, as needed.
 * When accessing hardware registers, both offsets are normally zero.
 */
void edma3_set_src_index(u32 base, unsigned slot, int bidx, int cidx)
{
	u32 src_dst_bidx;
	u32 src_dst_cidx;
	struct edma3_slot_layout *rg;

	rg = (struct edma3_slot_layout *)(base + EDMA3_SL_BASE(slot));

	src_dst_bidx = __raw_readl(&rg->src_dst_bidx);
	src_dst_cidx = __raw_readl(&rg->src_dst_cidx);

	__raw_writel((src_dst_bidx & 0xffff0000) | (bidx & 0xffff),
		     &rg->src_dst_bidx);
	__raw_writel((src_dst_cidx & 0xffff0000) | (cidx & 0xffff),
		     &rg->src_dst_cidx);
}

/**
 * edma3_set_src_addr - set source address for slot only
 */
void edma3_set_src_addr(u32 base, int slot, u32 src)
{
	struct edma3_slot_layout *rg;

	rg = (struct edma3_slot_layout *)(base + EDMA3_SL_BASE(slot));
	__raw_writel(src, &rg->src);
}

/**
 * edma3_set_transfer_params - configure DMA transfer parameters
 * @base: base address of edma
 * @slot: parameter RAM slot being configured
 * @acnt: how many bytes per array (at least one)
 * @bcnt: how many arrays per frame (at least one)
 * @ccnt: how many frames per block (at least one)
 * @bcnt_rld: used only for A-Synchronized transfers; this specifies
 *	the value to reload into bcnt when it decrements to zero
 * @sync_mode: ASYNC or ABSYNC
 *
 * See the EDMA3 documentation to understand how to configure and link
 * transfers using the fields in PaRAM slots.  If you are not doing it
 * all at once with edma3_write_slot(), you will use this routine
 * plus two calls each for source and destination, setting the initial
 * address and saying how to index that address.
 *
 * An example of an A-Synchronized transfer is a serial link using a
 * single word shift register.  In that case, @acnt would be equal to
 * that word size; the serial controller issues a DMA synchronization
 * event to transfer each word, and memory access by the DMA transfer
 * controller will be word-at-a-time.
 *
 * An example of an AB-Synchronized transfer is a device using a FIFO.
 * In that case, @acnt equals the FIFO width and @bcnt equals its depth.
 * The controller with the FIFO issues DMA synchronization events when
 * the FIFO threshold is reached, and the DMA transfer controller will
 * transfer one frame to (or from) the FIFO.  It will probably use
 * efficient burst modes to access memory.
 */
void edma3_set_transfer_params(u32 base, int slot, int acnt,
			       int bcnt, int ccnt, u16 bcnt_rld,
			       enum edma3_sync_dimension sync_mode)
{
	u32 opt;
	u32 link_bcntrld;
	struct edma3_slot_layout *rg;

	rg = (struct edma3_slot_layout *)(base + EDMA3_SL_BASE(slot));

	link_bcntrld = __raw_readl(&rg->link_bcntrld);

	__raw_writel((bcnt_rld << 16) | (0x0000ffff & link_bcntrld),
		     &rg->link_bcntrld);

	opt = __raw_readl(&rg->opt);
	if (sync_mode == ASYNC)
		__raw_writel(opt & ~EDMA3_SLOPT_AB_SYNC, &rg->opt);
	else
		__raw_writel(opt | EDMA3_SLOPT_AB_SYNC, &rg->opt);

	/* Set the ACNT, BCNT and CCNT fields */
	__raw_writel((bcnt << 16) | (acnt & 0xffff), &rg->a_b_cnt);
	__raw_writel(0xffff & ccnt, &rg->ccnt);
}
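
/*
 * Illustrative sketch: copying 4 KiB as four 1 KiB arrays in one frame,
 * AB-synchronized, so a single event moves the whole block:
 *
 *	edma3_set_transfer_params(base, slot, 1024, 4, 1, 0, ABSYNC);
 *
 * With ASYNC instead, each event would move only one 1024-byte array and
 * four events would be needed.
 */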

/**
 * edma3_write_slot - write parameter RAM data for slot
 * @base: base address of edma
 * @slot: number of parameter RAM slot being modified
 * @param: data to be written into parameter RAM slot
 *
 * Use this to assign all parameters of a transfer at once.  This
 * allows more efficient setup of transfers than issuing multiple
 * calls to set up those parameters in small pieces, and provides
 * complete control over all transfer options.
 */
void edma3_write_slot(u32 base, int slot, struct edma3_slot_layout *param)
{
	int i;
	u32 *p = (u32 *)param;
	u32 *addr = (u32 *)(base + EDMA3_SL_BASE(slot));

	/* Copy the whole slot, one 32-bit word at a time */
	for (i = 0; i < sizeof(struct edma3_slot_layout) / 4; i++)
		__raw_writel(*p++, addr++);
}

/**
 * edma3_read_slot - read parameter RAM data from slot
 * @base: base address of edma
 * @slot: number of parameter RAM slot being copied
 * @param: where to store copy of parameter RAM data
 *
 * Use this to read data from a parameter RAM slot, perhaps to
 * save it as a template for later reuse.
 */
void edma3_read_slot(u32 base, int slot, struct edma3_slot_layout *param)
{
	int i;
	u32 *p = (u32 *)param;
	u32 *addr = (u32 *)(base + EDMA3_SL_BASE(slot));

	/* Copy the whole slot, one 32-bit word at a time */
	for (i = 0; i < sizeof(struct edma3_slot_layout) / 4; i++)
		*p++ = __raw_readl(addr++);
}

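/**
 * edma3_slot_configure - write a complete parameter set to a PaRAM slot
 * @base: base address of edma
 * @slot: parameter RAM slot being configured
 * @cfg: pointer to struct edma3_slot_config holding the field values
 *
 * Unlike edma3_write_slot(), which copies a raw struct edma3_slot_layout,
 * this packs the individual fields of @cfg into the slot registers.
 */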
void edma3_slot_configure(u32 base, int slot, struct edma3_slot_config *cfg)
{
	struct edma3_slot_layout *rg;

	rg = (struct edma3_slot_layout *)(base + EDMA3_SL_BASE(slot));

	__raw_writel(cfg->opt, &rg->opt);
	__raw_writel(cfg->src, &rg->src);
	__raw_writel((cfg->bcnt << 16) | (cfg->acnt & 0xffff), &rg->a_b_cnt);
	__raw_writel(cfg->dst, &rg->dst);
	__raw_writel((cfg->dst_bidx << 16) |
		     (cfg->src_bidx & 0xffff), &rg->src_dst_bidx);
	__raw_writel((cfg->bcntrld << 16) |
		     (cfg->link & 0xffff), &rg->link_bcntrld);
	__raw_writel((cfg->dst_cidx << 16) |
		     (cfg->src_cidx & 0xffff), &rg->src_dst_cidx);
	__raw_writel(0xffff & cfg->ccnt, &rg->ccnt);
}

/**
 * edma3_check_for_transfer - check if a transfer has completed by testing
 * the interrupt pending bit; clear the pending bit if it is set
 * @base: base address of edma
 * @cfg: pointer to the struct edma3_channel_config that was passed
 * to qedma3_start() when the QDMA channel was started
 *
 * Return 0 if complete, 1 if not.
 */
int edma3_check_for_transfer(u32 base, struct edma3_channel_config *cfg)
{
	u32 inum;
	u32 ipr_base;
	u32 icr_base;

	if (cfg->complete_code < 32) {
		ipr_base = base + EDMA3_IPR;
		icr_base = base + EDMA3_ICR;
		inum = 1 << cfg->complete_code;
	} else {
		ipr_base = base + EDMA3_IPRH;
		icr_base = base + EDMA3_ICRH;
		inum = 1 << (cfg->complete_code - 32);
	}

	/* check complete interrupt */
	if (!(__raw_readl(ipr_base) & inum))
		return 1;

	/* clean up the pending int bit */
	__raw_writel(inum, icr_base);

	return 0;
}

/**
 * qedma3_stop - stop QDMA on the given channel
 * @base: base address of edma
 * @cfg: pointer to the struct edma3_channel_config that was passed
 * to qedma3_start() when the QDMA channel was started
 */
void qedma3_stop(u32 base, struct edma3_channel_config *cfg)
{
	/* Disable the QDMA channel event */
	__raw_writel(1 << cfg->chnum, base + EDMA3_QEECR);

	/* Clean up the interrupt indication */
	if (cfg->complete_code < 32)
		__raw_writel(1 << cfg->complete_code, base + EDMA3_ICR);
	else
		__raw_writel(1 << (cfg->complete_code - 32), base + EDMA3_ICRH);

	/* Clear missed event if set */
	__raw_writel(1 << cfg->chnum, base + EDMA3_QSECR);
	__raw_writel(1 << cfg->chnum, base + EDMA3_QEMCR);

	/* Clear the channel map */
	__raw_writel(0, base + EDMA3_QCHMAP(cfg->chnum));
}

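/**
 * __edma3_transfer - run one QDMA transfer on channel 0
 * @edma3_base_addr: base address of edma
 * @edma_slot_num: parameter RAM slot to use
 * @dst: destination bus address
 * @src: source bus address
 * @len: total number of bytes to transfer
 * @s_len: source buffer length; if @len > @s_len the source is re-read
 *	from the start of the buffer (used by __edma3_fill())
 *
 * The transfer is split into BCNT arrays of ACNT bytes with ACNT capped
 * at 0x7fff; any remainder is moved by a second, smaller transfer.
 */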
void __edma3_transfer(unsigned long edma3_base_addr, unsigned int edma_slot_num,
		      dma_addr_t dst, dma_addr_t src, size_t len, size_t s_len)
{
	struct edma3_slot_config        slot;
	struct edma3_channel_config     edma_channel;
	int                             b_cnt_value = 1;
	int                             rem_bytes  = 0;
	int                             a_cnt_value = len;
	unsigned int                    addr = (unsigned int) (dst);
	unsigned int                    max_acnt  = 0x7FFFU;

	if (len > s_len) {
		b_cnt_value = (len / s_len);
		rem_bytes = (len % s_len);
		a_cnt_value = s_len;
	} else if (len > max_acnt) {
		b_cnt_value = (len / max_acnt);
		rem_bytes  = (len % max_acnt);
		a_cnt_value = max_acnt;
	}

	slot.opt        = 0;
	slot.src        = ((unsigned int) src);
	slot.acnt       = a_cnt_value;
	slot.bcnt       = b_cnt_value;
	slot.ccnt       = 1;
	if (len == s_len)
		slot.src_bidx = a_cnt_value;
	else
		slot.src_bidx = 0;
	slot.dst_bidx   = a_cnt_value;
	slot.src_cidx   = 0;
	slot.dst_cidx   = 0;
	slot.link       = EDMA3_PARSET_NULL_LINK;
	slot.bcntrld    = 0;
	slot.opt        = EDMA3_SLOPT_TRANS_COMP_INT_ENB |
			  EDMA3_SLOPT_COMP_CODE(0) |
			  EDMA3_SLOPT_STATIC | EDMA3_SLOPT_AB_SYNC;

	edma3_slot_configure(edma3_base_addr, edma_slot_num, &slot);
	edma_channel.slot = edma_slot_num;
	edma_channel.chnum = 0;
	edma_channel.complete_code = 0;
	/* set event trigger to dst update */
	edma_channel.trigger_slot_word = EDMA3_TWORD(dst);

	qedma3_start(edma3_base_addr, &edma_channel);
	edma3_set_dest_addr(edma3_base_addr, edma_channel.slot, addr);

	while (edma3_check_for_transfer(edma3_base_addr, &edma_channel))
		;
	qedma3_stop(edma3_base_addr, &edma_channel);

	if (rem_bytes != 0) {
		slot.opt        = 0;
		if (len == s_len)
			slot.src =
				(b_cnt_value * max_acnt) + ((unsigned int) src);
		else
			slot.src = (unsigned int) src;
		slot.acnt       = rem_bytes;
		slot.bcnt       = 1;
		slot.ccnt       = 1;
		slot.src_bidx   = rem_bytes;
		slot.dst_bidx   = rem_bytes;
		slot.src_cidx   = 0;
		slot.dst_cidx   = 0;
		slot.link       = EDMA3_PARSET_NULL_LINK;
		slot.bcntrld    = 0;
		slot.opt        = EDMA3_SLOPT_TRANS_COMP_INT_ENB |
				  EDMA3_SLOPT_COMP_CODE(0) |
				  EDMA3_SLOPT_STATIC | EDMA3_SLOPT_AB_SYNC;
		edma3_slot_configure(edma3_base_addr, edma_slot_num, &slot);
		edma_channel.slot = edma_slot_num;
		edma_channel.chnum = 0;
		edma_channel.complete_code = 0;
		/* set event trigger to dst update */
		edma_channel.trigger_slot_word = EDMA3_TWORD(dst);

		qedma3_start(edma3_base_addr, &edma_channel);
		/* The remainder starts right after the bytes already moved */
		edma3_set_dest_addr(edma3_base_addr, edma_channel.slot, addr +
				    (a_cnt_value * b_cnt_value));
		while (edma3_check_for_transfer(edma3_base_addr, &edma_channel))
			;
		qedma3_stop(edma3_base_addr, &edma_channel);
	}
}

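/**
 * __edma3_fill - fill a memory region with a byte value using QDMA
 * @edma3_base_addr: base address of edma
 * @edma_slot_num: parameter RAM slot to use
 * @dst: destination bus address
 * @val: byte value to fill with
 * @len: number of bytes to fill
 *
 * The value is replicated into edma_fill_buffer, which is then used as a
 * repeating DMA source by __edma3_transfer().
 */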
void __edma3_fill(unsigned long edma3_base_addr, unsigned int edma_slot_num,
		  dma_addr_t dst, u8 val, size_t len)
{
	int xfer_len;
	int max_xfer = EDMA_FILL_BUFFER_SIZE * 65535;
	dma_addr_t source;

	/* Replicate the fill value and flush it out for the DMA to read */
	memset((void *)edma_fill_buffer, val, sizeof(edma_fill_buffer));
	source = dma_map_single(edma_fill_buffer, sizeof(edma_fill_buffer),
				DMA_TO_DEVICE);

	while (len) {
		xfer_len = len;
		if (xfer_len > max_xfer)
			xfer_len = max_xfer;

		__edma3_transfer(edma3_base_addr, edma_slot_num, dst,
				 source, xfer_len,
				 EDMA_FILL_BUFFER_SIZE);
		len -= xfer_len;
		dst += xfer_len;
	}

	dma_unmap_single(source, sizeof(edma_fill_buffer), DMA_TO_DEVICE);
}

#ifndef CONFIG_DMA

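/**
 * edma3_transfer - cache-safe memory-to-memory copy using QDMA
 * @edma3_base_addr: base address of edma
 * @edma_slot_num: parameter RAM slot to use
 * @dst: destination buffer (virtual address)
 * @src: source buffer (virtual address)
 * @len: number of bytes to copy
 *
 * Maps both buffers for DMA, runs the transfer and unmaps them so the
 * caller sees the DMA'd data.
 */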
void edma3_transfer(unsigned long edma3_base_addr, unsigned int edma_slot_num,
		    void *dst, void *src, size_t len)
{
	/* Clean the areas, so no writeback into the RAM races with DMA */
	dma_addr_t destination = dma_map_single(dst, len, DMA_FROM_DEVICE);
	dma_addr_t source = dma_map_single(src, len, DMA_TO_DEVICE);

	__edma3_transfer(edma3_base_addr, edma_slot_num, destination, source,
			 len, len);

	/* Clean+Invalidate the areas after, so we can see DMA'd data */
	dma_unmap_single(destination, len, DMA_FROM_DEVICE);
	dma_unmap_single(source, len, DMA_TO_DEVICE);
}

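/**
 * edma3_fill - cache-safe memory fill using QDMA
 * @edma3_base_addr: base address of edma
 * @edma_slot_num: parameter RAM slot to use
 * @dst: destination buffer (virtual address)
 * @val: byte value to fill with
 * @len: number of bytes to fill
 */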
void edma3_fill(unsigned long edma3_base_addr, unsigned int edma_slot_num,
		void *dst, u8 val, size_t len)
{
	/* Clean the area, so no writeback into the RAM races with DMA */
	dma_addr_t destination = dma_map_single(dst, len, DMA_FROM_DEVICE);

	__edma3_fill(edma3_base_addr, edma_slot_num, destination, val, len);

	/* Clean+Invalidate the area after, so we can see DMA'd data */
	dma_unmap_single(destination, len, DMA_FROM_DEVICE);
}

#else

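/* DMA uclass .transfer() hook: only memory-to-memory copies are supported */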
static int ti_edma3_transfer(struct udevice *dev, int direction,
			     dma_addr_t dst, dma_addr_t src, size_t len)
{
	struct ti_edma3_priv *priv = dev_get_priv(dev);

	/* enable edma3 clocks */
	enable_edma3_clocks();

	switch (direction) {
	case DMA_MEM_TO_MEM:
		__edma3_transfer(priv->base, 1, dst, src, len, len);
		break;
	default:
		pr_err("Transfer type not implemented in DMA driver\n");
		break;
	}

	/* disable edma3 clocks */
	disable_edma3_clocks();

	return 0;
}

static int ti_edma3_of_to_plat(struct udevice *dev)
{
	struct ti_edma3_priv *priv = dev_get_priv(dev);

	priv->base = dev_read_addr(dev);

	return 0;
}

static int ti_edma3_probe(struct udevice *dev)
{
	struct dma_dev_priv *uc_priv = dev_get_uclass_priv(dev);

	uc_priv->supported = DMA_SUPPORTS_MEM_TO_MEM;

	return 0;
}

static const struct dma_ops ti_edma3_ops = {
	.transfer	= ti_edma3_transfer,
};

static const struct udevice_id ti_edma3_ids[] = {
	{ .compatible = "ti,edma3" },
	{ }
};

U_BOOT_DRIVER(ti_edma3) = {
	.name	= "ti_edma3",
	.id	= UCLASS_DMA,
	.of_match = ti_edma3_ids,
	.ops	= &ti_edma3_ops,
	.of_to_plat = ti_edma3_of_to_plat,
	.probe	= ti_edma3_probe,
	.priv_auto	= sizeof(struct ti_edma3_priv),
};
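
/*
 * Client usage sketch (assuming the dma_memcpy() helper declared in
 * include/dma.h, which picks a DMA_SUPPORTS_MEM_TO_MEM capable device):
 *
 *	dma_memcpy(dst_buf, src_buf, length);
 */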
#endif /* CONFIG_DMA */