#include "bflb_dma.h"
#include "bflb_l1c.h"
#include "hardware/dma_reg.h"

/* one terminal-count callback slot per DMA channel */
struct bflb_dma_irq_callback {
    void (*handler)(void *arg);
    void *arg;
};

/* DMA controller base addresses and per-channel callback tables, selected
 * per chip; BL606P/BL808 have three controllers, the others one */
#if defined(BL702) || defined(BL602) || defined(BL702L)
const uint32_t dma_base[] = { 0x4000C000 };
struct bflb_dma_irq_callback dma_callback[1][8];
#elif defined(BL616)
const uint32_t dma_base[] = { 0x2000C000 };
struct bflb_dma_irq_callback dma_callback[1][4];
#elif defined(BL606P) || defined(BL808)
const uint32_t dma_base[] = { 0x2000C000, 0x20071000, 0x30001000 };
struct bflb_dma_irq_callback dma_callback[3][8];
#elif defined(BL628)
const uint32_t dma_base[] = { 0x20081000 };
struct bflb_dma_irq_callback dma_callback[1][8];
#endif

/* shared ISR for controller 0: snapshot and acknowledge the terminal-count
 * flags, then dispatch each flagged channel's registered callback */
void dma0_isr(int irq, void *arg)
{
    uint32_t regval;

    regval = getreg32(dma_base[0] + DMA_INTTCSTATUS_OFFSET);
    putreg32(regval, dma_base[0] + DMA_INTTCCLEAR_OFFSET);

    for (uint8_t i = 0; i < 8; i++) {
        if (regval & (1 << i)) {
            dma_callback[0][i].handler(dma_callback[0][i].arg);
        }
    }
}

#if defined(BL606P) || defined(BL808)
void dma1_isr(int irq, void *arg)
{
    uint32_t regval;

    regval = getreg32(dma_base[1] + DMA_INTTCSTATUS_OFFSET);
    putreg32(regval, dma_base[1] + DMA_INTTCCLEAR_OFFSET);

    for (uint8_t i = 0; i < 8; i++) {
        if (regval & (1 << i)) {
            dma_callback[1][i].handler(dma_callback[1][i].arg);
        }
    }
}

void dma2_isr(int irq, void *arg)
{
    uint32_t regval;

    regval = getreg32(dma_base[2] + DMA_INTTCSTATUS_OFFSET);
    putreg32(regval, dma_base[2] + DMA_INTTCCLEAR_OFFSET);

    for (uint8_t i = 0; i < 8; i++) {
        if (regval & (1 << i)) {
            /* controller 2's callback table, not controller 1's */
            dma_callback[2][i].handler(dma_callback[2][i].arg);
        }
    }
}
#endif

void bflb_dma_channel_init(struct bflb_device_s *dev, const struct bflb_dma_channel_config_s *config)
{
    uint32_t regval;
    uint32_t channel_base;

    channel_base = dev->reg_base;

    /* dma global enable */
    regval = getreg32(dma_base[dev->idx] + DMA_TOP_CONFIG_OFFSET);
    regval |= DMA_E;
    putreg32(regval, dma_base[dev->idx] + DMA_TOP_CONFIG_OFFSET);

    /* dma channel disable */
    regval = getreg32(channel_base + DMA_CxCONFIG_OFFSET);
    regval &= ~DMA_E;
    putreg32(regval, channel_base + DMA_CxCONFIG_OFFSET);

#if defined(BL602)
    regval = 0;
#else
    /* keep only the add/reduce-mode and fix-count bits, clear everything else */
    regval = getreg32(channel_base + DMA_CxCONTROL_OFFSET);
    regval &= DMA_DST_ADD_MODE | DMA_DST_MIN_MODE | DMA_FIX_CNT_MASK;
#endif

    if (config->src_addr_inc) {
        regval |= DMA_SI;
    }
    if (config->dst_addr_inc) {
        regval |= DMA_DI;
    }

    regval |= (config->src_width << DMA_SWIDTH_SHIFT);
    regval |= (config->dst_width << DMA_DWIDTH_SHIFT);
    regval |= (config->src_burst_count << DMA_SBSIZE_SHIFT);
    regval |= (config->dst_burst_count << DMA_DBSIZE_SHIFT);
    putreg32(regval, channel_base + DMA_CxCONTROL_OFFSET);

    regval = getreg32(channel_base + DMA_CxCONFIG_OFFSET);
    regval &= ~DMA_SRCPERIPHERAL_MASK;
    regval &= ~DMA_DSTPERIPHERAL_MASK;
    regval &= ~DMA_FLOWCNTRL_MASK;
    regval &= ~DMA_LLICOUNTER_MASK;
    regval |= (config->src_req << DMA_SRCPERIPHERAL_SHIFT);
    regval |= (config->dst_req << DMA_DSTPERIPHERAL_SHIFT);
    regval |= (config->direction << DMA_FLOWCNTRL_SHIFT);
    putreg32(regval, channel_base + DMA_CxCONFIG_OFFSET);

    /* enable dma error and tc interrupt */
    regval = getreg32(channel_base + DMA_CxCONFIG_OFFSET);
    regval |= (DMA_ITC | DMA_IE);
    putreg32(regval, channel_base + DMA_CxCONFIG_OFFSET);

    /* leave the per-descriptor tc interrupt disabled until a callback is attached */
    regval = getreg32(channel_base + DMA_CxCONTROL_OFFSET);
    regval &= ~DMA_I;
    putreg32(regval, channel_base + DMA_CxCONTROL_OFFSET);

    /* clear irq status */
    putreg32(1 << dev->sub_idx, dma_base[dev->idx] + DMA_INTTCCLEAR_OFFSET);
    putreg32(1 << dev->sub_idx, dma_base[dev->idx] + DMA_INTERRCLR_OFFSET);

#if (defined(BL606P) || defined(BL808)) && (defined(CPU_M0) || defined(CPU_LP))
    bflb_irq_attach(31, dma0_isr, NULL);
    bflb_irq_attach(32, dma1_isr, NULL);
    bflb_irq_enable(31);
    bflb_irq_enable(32);
#elif (defined(BL606P) || defined(BL808)) && defined(CPU_D0)
    /* DMA2 on the D0 core has one interrupt line per channel (40..47);
     * route them all to the same handler */
    for (int irq = 40; irq <= 47; irq++) {
        bflb_irq_attach(irq, dma2_isr, NULL);
        bflb_irq_enable(irq);
    }
#else
    bflb_irq_attach(dev->irq_num, dma0_isr, NULL);
    bflb_irq_enable(dev->irq_num);
#endif
}
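
/*
 * Usage sketch: configuring a channel for a memory-to-memory copy. The
 * device lookup and the enum values are illustrative (they follow this
 * SDK's usual naming; check bflb_dma.h for the exact constants):
 *
 *     struct bflb_device_s *dma0_ch0 = bflb_device_get_by_name("dma0_ch0");
 *     struct bflb_dma_channel_config_s config = {
 *         .direction = DMA_MEMORY_TO_MEMORY,
 *         .src_req = DMA_REQUEST_NONE,
 *         .dst_req = DMA_REQUEST_NONE,
 *         .src_addr_inc = 1,
 *         .dst_addr_inc = 1,
 *         .src_burst_count = DMA_BURST_INCR1,
 *         .dst_burst_count = DMA_BURST_INCR1,
 *         .src_width = DMA_DATA_WIDTH_32BIT,
 *         .dst_width = DMA_DATA_WIDTH_32BIT,
 *     };
 *     bflb_dma_channel_init(dma0_ch0, &config);
 */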

void bflb_dma_lli_config(struct bflb_device_s *dev, struct bflb_dma_channel_lli_pool_s *lli_pool, uint32_t lli_count, uint32_t src_addr, uint32_t dst_addr, uint32_t transfer_offset, uint32_t last_transfer_len)
{
    uint32_t channel_base;
    union bflb_dma_lli_control_s dma_ctrl_cfg;

    channel_base = dev->reg_base;

    dma_ctrl_cfg = (union bflb_dma_lli_control_s)getreg32(channel_base + DMA_CxCONTROL_OFFSET);

    /* every node but the last moves 4064 transfer units (4064, 8128, or
     * 16256 bytes for 8/16/32-bit width) and raises no interrupt */
    dma_ctrl_cfg.bits.TransferSize = 4064;
    dma_ctrl_cfg.bits.I = 0;

    for (uint32_t i = 0; i < lli_count; i++) {
        lli_pool[i].src_addr = src_addr;
        lli_pool[i].dst_addr = dst_addr;
        lli_pool[i].nextlli = 0;

        if (dma_ctrl_cfg.bits.SI) {
            src_addr += transfer_offset;
        }

        if (dma_ctrl_cfg.bits.DI) {
            dst_addr += transfer_offset;
        }

        /* the last node carries the remainder and fires the tc interrupt */
        if (i == lli_count - 1) {
            dma_ctrl_cfg.bits.TransferSize = last_transfer_len;
            dma_ctrl_cfg.bits.I = 1;
        }

        if (i) {
            lli_pool[i - 1].nextlli = (uint32_t)(uintptr_t)&lli_pool[i];
        }

        lli_pool[i].control = dma_ctrl_cfg;
    }
}
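
/*
 * Shape of the chain built above, for lli_count = 3 (schematic):
 *
 *     lli_pool[0] --> lli_pool[1] --> lli_pool[2]
 *     4064 units      4064 units      last_transfer_len units
 *     I = 0           I = 0           I = 1  (fires the tc interrupt)
 *
 * Addresses advance by transfer_offset bytes between nodes only when the
 * corresponding increment bit (SI/DI) is set in the channel control word.
 */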

int bflb_dma_channel_lli_reload(struct bflb_device_s *dev, struct bflb_dma_channel_lli_pool_s *lli_pool, uint32_t max_lli_count, struct bflb_dma_channel_lli_transfer_s *transfer, uint32_t count)
{
    uint32_t channel_base;
    uint32_t actual_transfer_offset = 0;
    uint32_t actual_transfer_len = 0;
    uint32_t last_transfer_len = 0;
    uint32_t current_lli_count = 0;
    uint32_t lli_count_used_offset = 0;
    union bflb_dma_lli_control_s dma_ctrl_cfg;

    channel_base = dev->reg_base;

    dma_ctrl_cfg = (union bflb_dma_lli_control_s)getreg32(channel_base + DMA_CxCONTROL_OFFSET);

    /* byte step between nodes: 4064 transfer units scaled by the source width */
    switch (dma_ctrl_cfg.bits.SWidth) {
        case DMA_DATA_WIDTH_8BIT:
            actual_transfer_offset = 4064;
            break;
        case DMA_DATA_WIDTH_16BIT:
            actual_transfer_offset = 4064 << 1;
            break;
        case DMA_DATA_WIDTH_32BIT:
            actual_transfer_offset = 4064 << 2;
            break;
        default:
            break;
    }

    for (size_t i = 0; i < count; i++) {
        /* convert nbytes to transfer units; reject lengths that do not fit the width */
        switch (dma_ctrl_cfg.bits.SWidth) {
            case DMA_DATA_WIDTH_8BIT:
                actual_transfer_len = transfer[i].nbytes;
                break;
            case DMA_DATA_WIDTH_16BIT:
                if (transfer[i].nbytes % 2) {
                    return -1;
                }
                actual_transfer_len = transfer[i].nbytes >> 1;
                break;
            case DMA_DATA_WIDTH_32BIT:
                if (transfer[i].nbytes % 4) {
                    return -1;
                }
                actual_transfer_len = transfer[i].nbytes >> 2;
                break;
            default:
                break;
        }

        current_lli_count = actual_transfer_len / 4064 + 1;
        last_transfer_len = actual_transfer_len % 4064;

        /* a single node can carry at most 4095 transfer units, so a small
         * remainder is folded back into the previous 4064-unit node */
        if (current_lli_count > 1 && last_transfer_len < (4095 - 4064)) {
            current_lli_count--;
            last_transfer_len += 4064;
        }

        /* check pool capacity before writing any nodes */
        if (lli_count_used_offset + current_lli_count > max_lli_count) {
            return -ENOMEM;
        }

        bflb_dma_lli_config(dev, &lli_pool[lli_count_used_offset], current_lli_count, transfer[i].src_addr, transfer[i].dst_addr, actual_transfer_offset, last_transfer_len);
        if (i) {
            /* chain this transfer's first node onto the previous transfer's tail */
            lli_pool[lli_count_used_offset - 1].nextlli = (uint32_t)(uintptr_t)&lli_pool[lli_count_used_offset];
        }
        lli_count_used_offset += current_lli_count;
    }

    /* prime the channel registers from the head node */
    putreg32(lli_pool[0].src_addr, channel_base + DMA_CxSRCADDR_OFFSET);
    putreg32(lli_pool[0].dst_addr, channel_base + DMA_CxDSTADDR_OFFSET);
    putreg32(lli_pool[0].nextlli, channel_base + DMA_CxLLI_OFFSET);
    putreg32(lli_pool[0].control.WORD, channel_base + DMA_CxCONTROL_OFFSET);
#if defined(BL616) || defined(BL606P) || defined(BL808)
    /* clean cache, DMA does not pass through the cache */
    bflb_l1c_dcache_clean_range((uint32_t *)(uintptr_t)lli_pool, sizeof(struct bflb_dma_channel_lli_pool_s) * lli_count_used_offset);
#endif
    return lli_count_used_offset;
}
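
/*
 * Worked example of the node sizing (illustrative numbers): with an 8-bit
 * source width, transfer[0].nbytes = 10000 gives 10000 / 4064 + 1 = 3 nodes
 * and 10000 % 4064 = 1872 units in the last one, i.e. 4064 + 4064 + 1872.
 * The remainder is folded back into the previous node only when it is below
 * 4095 - 4064 = 31, since one node carries at most 4095 transfer units.
 *
 * Usage sketch (pool size, buffers, and handle are hypothetical):
 *
 *     static struct bflb_dma_channel_lli_pool_s lli_pool[10];
 *     struct bflb_dma_channel_lli_transfer_s transfer = {
 *         .src_addr = (uint32_t)(uintptr_t)src_buf,
 *         .dst_addr = (uint32_t)(uintptr_t)dst_buf,
 *         .nbytes = sizeof(src_buf),
 *     };
 *     int used = bflb_dma_channel_lli_reload(dma0_ch0, lli_pool, 10, &transfer, 1);
 *     if (used > 0) {
 *         bflb_dma_channel_start(dma0_ch0);
 *     }
 */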

void bflb_dma_channel_lli_link_head(struct bflb_device_s *dev,
                                    struct bflb_dma_channel_lli_pool_s *lli_pool,
                                    uint32_t used_lli_count)
{
    uint32_t channel_base;

    channel_base = dev->reg_base;

    /* point the tail back at the head so the chain repeats */
    lli_pool[used_lli_count - 1].nextlli = (uint32_t)(uintptr_t)&lli_pool[0];

    putreg32(lli_pool[0].nextlli, channel_base + DMA_CxLLI_OFFSET);
#if defined(BL616) || defined(BL606P) || defined(BL808)
    /* clean cache, DMA does not pass through the cache */
    bflb_l1c_dcache_clean_range((uint32_t *)lli_pool, sizeof(struct bflb_dma_channel_lli_pool_s) * used_lli_count);
#endif
}
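
/*
 * Ring-buffer sketch: after a reload, linking the tail back to the head
 * makes the chain repeat until the channel is stopped (continues the
 * hypothetical handles from the sketch above):
 *
 *     int used = bflb_dma_channel_lli_reload(dma0_ch0, lli_pool, 10, &transfer, 1);
 *     if (used > 0) {
 *         bflb_dma_channel_lli_link_head(dma0_ch0, lli_pool, (uint32_t)used);
 *         bflb_dma_channel_start(dma0_ch0);
 *     }
 */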

void bflb_dma_channel_start(struct bflb_device_s *dev)
{
    uint32_t regval;
    uint32_t channel_base;

    channel_base = dev->reg_base;

    /* dma channel enable */
    regval = getreg32(channel_base + DMA_CxCONFIG_OFFSET);
    regval |= DMA_E;
    putreg32(regval, channel_base + DMA_CxCONFIG_OFFSET);
}

void bflb_dma_channel_stop(struct bflb_device_s *dev)
{
    uint32_t regval;
    uint32_t channel_base;

    channel_base = dev->reg_base;

    /* dma channel disable */
    regval = getreg32(channel_base + DMA_CxCONFIG_OFFSET);
    regval &= ~DMA_E;
    putreg32(regval, channel_base + DMA_CxCONFIG_OFFSET);
}

bool bflb_dma_channel_isbusy(struct bflb_device_s *dev)
{
    uint32_t regval;
    uint32_t channel_base;

    channel_base = dev->reg_base;

    /* bit 0 is the channel enable; hardware clears it when a finite chain completes */
    regval = getreg32(channel_base + DMA_CxCONFIG_OFFSET);
    return (regval & (1 << 0)) != 0;
}
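
/*
 * Blocking-wait sketch: on these PL08x-style controllers the enable bit is
 * expected to self-clear once a finite chain completes, so a busy-poll is
 * enough for a one-shot copy (handle is hypothetical):
 *
 *     bflb_dma_channel_start(dma0_ch0);
 *     while (bflb_dma_channel_isbusy(dma0_ch0)) {
 *     }
 */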

void bflb_dma_channel_tcint_mask(struct bflb_device_s *dev, bool mask)
{
    uint32_t regval;
    uint32_t channel_base;

    channel_base = dev->reg_base;

    /* the tc interrupt is gated at two levels, DMA_ITC in CxCONFIG and the
     * per-descriptor DMA_I bit in CxCONTROL; flip both together */
    if (mask) {
        regval = getreg32(channel_base + DMA_CxCONFIG_OFFSET);
        regval |= DMA_ITC;
        putreg32(regval, channel_base + DMA_CxCONFIG_OFFSET);

        regval = getreg32(channel_base + DMA_CxCONTROL_OFFSET);
        regval &= ~DMA_I;
        putreg32(regval, channel_base + DMA_CxCONTROL_OFFSET);
    } else {
        regval = getreg32(channel_base + DMA_CxCONFIG_OFFSET);
        regval &= ~DMA_ITC;
        putreg32(regval, channel_base + DMA_CxCONFIG_OFFSET);

        regval = getreg32(channel_base + DMA_CxCONTROL_OFFSET);
        regval |= DMA_I;
        putreg32(regval, channel_base + DMA_CxCONTROL_OFFSET);
    }
}

void bflb_dma_channel_irq_attach(struct bflb_device_s *dev, void (*callback)(void *arg), void *arg)
{
    dma_callback[dev->idx][dev->sub_idx].handler = callback;
    dma_callback[dev->idx][dev->sub_idx].arg = arg;

    bflb_dma_channel_tcint_mask(dev, false);
}
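
/*
 * Completion-callback sketch (names are illustrative). The callback runs in
 * interrupt context from the shared ISR above, so keep it short:
 *
 *     static volatile bool done = false;
 *
 *     static void dma_done(void *arg)
 *     {
 *         *(volatile bool *)arg = true;
 *     }
 *
 *     bflb_dma_channel_irq_attach(dma0_ch0, dma_done, (void *)&done);
 *     bflb_dma_channel_start(dma0_ch0);
 *     while (!done) {
 *     }
 */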

void bflb_dma_channel_irq_detach(struct bflb_device_s *dev)
{
    dma_callback[dev->idx][dev->sub_idx].handler = NULL;
    dma_callback[dev->idx][dev->sub_idx].arg = NULL;

    bflb_dma_channel_tcint_mask(dev, true);
}

bool bflb_dma_channel_get_tcint_status(struct bflb_device_s *dev)
{
    uint32_t regval;

    regval = getreg32(dma_base[dev->idx] + DMA_INTTCSTATUS_OFFSET);
    return (regval & (1 << dev->sub_idx)) != 0;
}

void bflb_dma_channel_tcint_clear(struct bflb_device_s *dev)
{
    putreg32(1 << dev->sub_idx, dma_base[dev->idx] + DMA_INTTCCLEAR_OFFSET);
}
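
/*
 * Manual acknowledge sketch: get_tcint_status()/tcint_clear() read and clear
 * the same DMA_INTTCSTATUS bit the shared ISR dispatches on, so they are
 * mainly useful when no callback is attached. Whether the flag latches while
 * the tc interrupt is masked is part-specific; check the reference manual:
 *
 *     if (bflb_dma_channel_get_tcint_status(dma0_ch0)) {
 *         bflb_dma_channel_tcint_clear(dma0_ch0);
 *     }
 */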

int bflb_dma_feature_control(struct bflb_device_s *dev, int cmd, size_t arg)
{
    int ret = 0;
    uint32_t regval;
    uint32_t channel_base;

    channel_base = dev->reg_base;

    switch (cmd) {
        case DMA_CMD_SET_SRCADDR_INCREMENT:
            regval = getreg32(channel_base + DMA_CxCONTROL_OFFSET);
            if (arg) {
                regval |= DMA_SI;
            } else {
                regval &= ~DMA_SI;
            }
            putreg32(regval, channel_base + DMA_CxCONTROL_OFFSET);
            break;

        case DMA_CMD_SET_DSTADDR_INCREMENT:
            regval = getreg32(channel_base + DMA_CxCONTROL_OFFSET);
            if (arg) {
                regval |= DMA_DI;
            } else {
                regval &= ~DMA_DI;
            }
            putreg32(regval, channel_base + DMA_CxCONTROL_OFFSET);
            break;

#if !defined(BL602)
        case DMA_CMD_SET_ADD_MODE:
            regval = getreg32(channel_base + DMA_CxCONTROL_OFFSET);
            if (arg) {
                regval |= DMA_DST_ADD_MODE;
            } else {
                regval &= ~DMA_DST_ADD_MODE;
            }
            putreg32(regval, channel_base + DMA_CxCONTROL_OFFSET);
            break;

        case DMA_CMD_SET_REDUCE_MODE:
            regval = getreg32(channel_base + DMA_CxCONTROL_OFFSET);
            if (arg) {
                /* enable reduce mode and program the 3-bit fix count from arg */
                regval |= DMA_DST_MIN_MODE;
                regval &= ~DMA_FIX_CNT_MASK;
                regval |= (arg & 0x7) << DMA_FIX_CNT_SHIFT;
            } else {
                regval &= ~DMA_DST_MIN_MODE;
            }
            putreg32(regval, channel_base + DMA_CxCONTROL_OFFSET);
            break;
#endif
        case DMA_CMD_SET_LLI_CONFIG:
            /* copy four words (src, dst, next lli, control) straight into the
             * channel registers */
            arch_memcpy4((uint32_t *)(channel_base + DMA_CxSRCADDR_OFFSET), (uint32_t *)arg, 4);
            break;

        case DMA_CMD_GET_LLI_CONTROL:
            return getreg32(channel_base + DMA_CxCONTROL_OFFSET);

        default:
            ret = -EPERM;
            break;
    }
    return ret;
}
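
/*
 * Feature-control sketch: point an initialized channel at a peripheral FIFO
 * by switching the destination to a fixed (non-incrementing) address
 * (handle is hypothetical):
 *
 *     bflb_dma_feature_control(dma0_ch0, DMA_CMD_SET_DSTADDR_INCREMENT, 0);
 */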