Lines matching refs:chan — identifier cross-reference for the nbpf DMA engine driver. Each entry gives the source line number, the matching code, and the enclosing function; gaps are source lines that do not themselves reference chan.
154 struct nbpf_channel *chan; member
236 struct nbpf_channel chan[]; member
304 static inline u32 nbpf_chan_read(struct nbpf_channel *chan, in nbpf_chan_read() argument
307 u32 data = ioread32(chan->base + offset); in nbpf_chan_read()
308 dev_dbg(chan->dma_chan.device->dev, "%s(0x%p + 0x%x) = 0x%x\n", in nbpf_chan_read()
309 __func__, chan->base, offset, data); in nbpf_chan_read()
313 static inline void nbpf_chan_write(struct nbpf_channel *chan, in nbpf_chan_write() argument
316 iowrite32(data, chan->base + offset); in nbpf_chan_write()
317 dev_dbg(chan->dma_chan.device->dev, "%s(0x%p + 0x%x) = 0x%x\n", in nbpf_chan_write()
318 __func__, chan->base, offset, data); in nbpf_chan_write()
338 static void nbpf_chan_halt(struct nbpf_channel *chan) in nbpf_chan_halt() argument
340 nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_CLREN); in nbpf_chan_halt()
343 static bool nbpf_status_get(struct nbpf_channel *chan) in nbpf_status_get() argument
345 u32 status = nbpf_read(chan->nbpf, NBPF_DSTAT_END); in nbpf_status_get()
347 return status & BIT(chan - chan->nbpf->chan); in nbpf_status_get()
350 static void nbpf_status_ack(struct nbpf_channel *chan) in nbpf_status_ack() argument
352 nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_CLREND); in nbpf_status_ack()
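nbpf_status_get() turns the channel pointer back into a channel index: the channels live in the flexible array nbpf->chan[] (line 236), so the pointer difference selects one bit of the controller-wide end-of-transfer register. A sketch under that reading:

	static bool nbpf_status_get(struct nbpf_channel *chan)
	{
		/* Controller-wide register: one END bit per channel */
		u32 status = nbpf_read(chan->nbpf, NBPF_DSTAT_END);

		/* chan points into nbpf->chan[], so the difference is the index */
		return status & BIT(chan - chan->nbpf->chan);
	}

	static void nbpf_status_ack(struct nbpf_channel *chan)
	{
		/* Acknowledge by clearing the channel's END condition */
		nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_CLREND);
	}

The same index trick appears in reverse at line 362, where nbpf_error_get_channel() maps the lowest set error bit back to a channel with nbpf->chan + __ffs(error).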
362 return nbpf->chan + __ffs(error); in nbpf_error_get_channel()
365 static void nbpf_error_clear(struct nbpf_channel *chan) in nbpf_error_clear() argument
371 nbpf_chan_halt(chan); in nbpf_error_clear()
374 status = nbpf_chan_read(chan, NBPF_CHAN_STAT); in nbpf_error_clear()
381 dev_err(chan->dma_chan.device->dev, in nbpf_error_clear()
384 nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_SWRST); in nbpf_error_clear()
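nbpf_error_clear() halts the channel, waits for the hardware to settle, then issues a software reset. A hedged sketch of the flow; only the calls visible above are taken from the source, while the polled bit name, the retry bound, and the error message are assumptions:

	static void nbpf_error_clear(struct nbpf_channel *chan)
	{
		u32 status;
		int retries = 0;

		/* Stop the channel and make sure the transfer is aborted */
		nbpf_chan_halt(chan);

		do {
			status = nbpf_chan_read(chan, NBPF_CHAN_STAT);
			if (!(status & NBPF_CHAN_STAT_EN))	/* bit name assumed */
				break;
		} while (++retries < 100);

		if (retries >= 100)
			dev_err(chan->dma_chan.device->dev,
				"%s(): channel failed to halt\n", __func__);

		/* Software-reset the channel to clear the error state */
		nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_SWRST);
	}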
389 struct nbpf_channel *chan = desc->chan; in nbpf_start() local
392 nbpf_chan_write(chan, NBPF_CHAN_NXLA, (u32)ldesc->hwdesc_dma_addr); in nbpf_start()
393 nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_SETEN | NBPF_CHAN_CTRL_CLRSUS); in nbpf_start()
394 chan->paused = false; in nbpf_start()
398 nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_STG); in nbpf_start()
400 dev_dbg(chan->nbpf->dma_dev.dev, "%s(): next 0x%x, cur 0x%x\n", __func__, in nbpf_start()
401 nbpf_chan_read(chan, NBPF_CHAN_NXLA), nbpf_chan_read(chan, NBPF_CHAN_CRLA)); in nbpf_start()
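nbpf_start() points the channel at the first hardware descriptor, enables it, and fires a software trigger (STG) for transfers that have no hardware request line, i.e. memcpy. A sketch; the desc->sg list-head name and the TCM test guarding the trigger are assumptions consistent with the fragments:

	static void nbpf_start(struct nbpf_desc *desc)
	{
		struct nbpf_channel *chan = desc->chan;
		struct nbpf_link_desc *ldesc = list_first_entry(&desc->sg,
						struct nbpf_link_desc, node);

		/* Load the DMA address of the first hardware descriptor */
		nbpf_chan_write(chan, NBPF_CHAN_NXLA, (u32)ldesc->hwdesc_dma_addr);
		/* Enable the channel and clear any suspend condition */
		nbpf_chan_write(chan, NBPF_CHAN_CTRL,
				NBPF_CHAN_CTRL_SETEN | NBPF_CHAN_CTRL_CLRSUS);
		chan->paused = false;

		/* Software trigger for memcpy; slave transfers start on DMARQ */
		if (ldesc->hwdesc->config & NBPF_CHAN_CFG_TCM)	/* test assumed */
			nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_STG);

		dev_dbg(chan->nbpf->dma_dev.dev, "%s(): next 0x%x, cur 0x%x\n",
			__func__, nbpf_chan_read(chan, NBPF_CHAN_NXLA),
			nbpf_chan_read(chan, NBPF_CHAN_CRLA));
	}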
406 static void nbpf_chan_prepare(struct nbpf_channel *chan) in nbpf_chan_prepare() argument
408 chan->dmarq_cfg = (chan->flags & NBPF_SLAVE_RQ_HIGH ? NBPF_CHAN_CFG_HIEN : 0) | in nbpf_chan_prepare()
409 (chan->flags & NBPF_SLAVE_RQ_LOW ? NBPF_CHAN_CFG_LOEN : 0) | in nbpf_chan_prepare()
410 (chan->flags & NBPF_SLAVE_RQ_LEVEL ? in nbpf_chan_prepare()
412 chan->terminal; in nbpf_chan_prepare()
415 static void nbpf_chan_prepare_default(struct nbpf_channel *chan) in nbpf_chan_prepare_default() argument
418 chan->dmarq_cfg = NBPF_CHAN_CFG_AM & 0x400; in nbpf_chan_prepare_default()
419 chan->terminal = 0; in nbpf_chan_prepare_default()
420 chan->flags = 0; in nbpf_chan_prepare_default()
423 static void nbpf_chan_configure(struct nbpf_channel *chan) in nbpf_chan_configure() argument
430 nbpf_chan_write(chan, NBPF_CHAN_CFG, NBPF_CHAN_CFG_DMS | chan->dmarq_cfg); in nbpf_chan_configure()
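The three helpers above derive the DMA-request half of the channel configuration from the slave flags, cache it in chan->dmarq_cfg, and latch it into the CFG register. The LEVEL branch is truncated in the listing because the continuation line does not itself reference chan; a plausible reconstruction (the LVL/AM encoding in that branch is an assumption):

	static void nbpf_chan_prepare(struct nbpf_channel *chan)
	{
		chan->dmarq_cfg = (chan->flags & NBPF_SLAVE_RQ_HIGH ? NBPF_CHAN_CFG_HIEN : 0) |
				  (chan->flags & NBPF_SLAVE_RQ_LOW ? NBPF_CHAN_CFG_LOEN : 0) |
				  (chan->flags & NBPF_SLAVE_RQ_LEVEL ?
				   NBPF_CHAN_CFG_LVL | (NBPF_CHAN_CFG_AM & 0x200) : 0) |
				  chan->terminal;
	}

	static void nbpf_chan_prepare_default(struct nbpf_channel *chan)
	{
		/* Memory-to-memory defaults: no request line, no terminal */
		chan->dmarq_cfg = NBPF_CHAN_CFG_AM & 0x400;
		chan->terminal = 0;
		chan->flags = 0;
	}

	static void nbpf_chan_configure(struct nbpf_channel *chan)
	{
		/* Only the DMA-request half varies per slave; DMS is static */
		nbpf_chan_write(chan, NBPF_CHAN_CFG, NBPF_CHAN_CFG_DMS | chan->dmarq_cfg);
	}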
511 struct nbpf_channel *chan = desc->chan; in nbpf_prep_one() local
512 struct device *dev = chan->dma_chan.device->dev; in nbpf_prep_one()
539 mem_xfer = nbpf_xfer_ds(chan->nbpf, size, direction); in nbpf_prep_one()
543 can_burst = chan->slave_src_width >= 3; in nbpf_prep_one()
545 chan->slave_src_burst : chan->slave_src_width); in nbpf_prep_one()
550 if (mem_xfer > chan->slave_src_burst && !can_burst) in nbpf_prep_one()
551 mem_xfer = chan->slave_src_burst; in nbpf_prep_one()
559 slave_xfer = min(mem_xfer, chan->slave_dst_width >= 3 ? in nbpf_prep_one()
560 chan->slave_dst_burst : chan->slave_dst_width); in nbpf_prep_one()
575 hwdesc->config |= chan->dmarq_cfg | (last ? 0 : NBPF_CHAN_CFG_DEM) | in nbpf_prep_one()
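The fragments from nbpf_prep_one() implement per-segment size negotiation. Widths and bursts are stored as power-of-two size codes, so a width code of 3 means 2^3 = 8 bytes (64 bits), which explains the >= 3 tests for burst capability. A sketch of the DMA_DEV_TO_MEM branch under that reading; the comments are interpretation, not the driver's:

	/* Inside nbpf_prep_one(), DMA_DEV_TO_MEM case (fragment) */
	mem_xfer = nbpf_xfer_ds(chan->nbpf, size, direction);

	/* Only slaves at least 64 bits wide may take bursts */
	can_burst = chan->slave_src_width >= 3;
	slave_xfer = min(mem_xfer, can_burst ?
			 chan->slave_src_burst : chan->slave_src_width);
	/*
	 * A narrow slave cannot burst: clamp the memory side too, so both
	 * ends of the transfer agree on the access size
	 */
	if (mem_xfer > chan->slave_src_burst && !can_burst)
		mem_xfer = chan->slave_src_burst;

Line 575 suggests NBPF_CHAN_CFG_DEM masks the END interrupt on every link descriptor except the last, so a multi-segment transfer raises a single completion interrupt.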
588 static size_t nbpf_bytes_left(struct nbpf_channel *chan) in nbpf_bytes_left() argument
590 return nbpf_chan_read(chan, NBPF_CHAN_CUR_TR_BYTE); in nbpf_bytes_left()
603 struct nbpf_channel *chan = nbpf_to_chan(dchan); in nbpf_issue_pending() local
608 spin_lock_irqsave(&chan->lock, flags); in nbpf_issue_pending()
609 if (list_empty(&chan->queued)) in nbpf_issue_pending()
612 list_splice_tail_init(&chan->queued, &chan->active); in nbpf_issue_pending()
614 if (!chan->running) { in nbpf_issue_pending()
615 struct nbpf_desc *desc = list_first_entry(&chan->active, in nbpf_issue_pending()
618 chan->running = desc; in nbpf_issue_pending()
622 spin_unlock_irqrestore(&chan->lock, flags); in nbpf_issue_pending()
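nbpf_issue_pending() is the standard dmaengine two-stage submission: descriptors added by tx_submit() sit on the queued list, and only here are they spliced onto the active list and the hardware kicked if it is idle. A sketch of the locking around it (the unlock label is an assumption):

	static void nbpf_issue_pending(struct dma_chan *dchan)
	{
		struct nbpf_channel *chan = nbpf_to_chan(dchan);
		unsigned long flags;

		spin_lock_irqsave(&chan->lock, flags);
		if (list_empty(&chan->queued))
			goto unlock;

		/* Everything submitted so far becomes runnable */
		list_splice_tail_init(&chan->queued, &chan->active);

		if (!chan->running) {
			struct nbpf_desc *desc = list_first_entry(&chan->active,
							struct nbpf_desc, node);
			nbpf_start(desc);
			chan->running = desc;
		}

	unlock:
		spin_unlock_irqrestore(&chan->lock, flags);
	}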
628 struct nbpf_channel *chan = nbpf_to_chan(dchan); in nbpf_tx_status() local
635 spin_lock_irqsave(&chan->lock, flags); in nbpf_tx_status()
636 running = chan->running ? chan->running->async_tx.cookie : -EINVAL; in nbpf_tx_status()
639 state->residue = nbpf_bytes_left(chan); in nbpf_tx_status()
646 list_for_each_entry(desc, &chan->active, node) in nbpf_tx_status()
653 list_for_each_entry(desc, &chan->queued, node) in nbpf_tx_status()
663 spin_unlock_irqrestore(&chan->lock, flags); in nbpf_tx_status()
666 if (chan->paused) in nbpf_tx_status()
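Residue reporting in nbpf_tx_status(): only the in-flight cookie can be read back from the hardware byte counter (nbpf_bytes_left(), i.e. NBPF_CHAN_CUR_TR_BYTE); a descriptor still waiting on the active or queued list reports its full length. A condensed sketch of the body; desc->length is an assumed field name:

	/* Inside nbpf_tx_status() (fragment) */
	spin_lock_irqsave(&chan->lock, flags);
	running = chan->running ? chan->running->async_tx.cookie : -EINVAL;

	if (cookie == running) {
		/* In flight: the controller counts the remaining bytes */
		state->residue = nbpf_bytes_left(chan);
	} else {
		/* Not started: find the descriptor, report its whole length */
		bool found = false;

		list_for_each_entry(desc, &chan->active, node)
			if (desc->async_tx.cookie == cookie) {
				found = true;
				break;
			}
		if (!found)
			list_for_each_entry(desc, &chan->queued, node)
				if (desc->async_tx.cookie == cookie) {
					found = true;
					break;
				}
		if (found)
			state->residue = desc->length;	/* field name assumed */
	}
	spin_unlock_irqrestore(&chan->lock, flags);

	if (chan->paused)
		status = DMA_PAUSED;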
675 struct nbpf_channel *chan = desc->chan; in nbpf_tx_submit() local
679 spin_lock_irqsave(&chan->lock, flags); in nbpf_tx_submit()
681 list_add_tail(&desc->node, &chan->queued); in nbpf_tx_submit()
682 spin_unlock_irqrestore(&chan->lock, flags); in nbpf_tx_submit()
684 dev_dbg(chan->dma_chan.device->dev, "Entry %s(%d)\n", __func__, cookie); in nbpf_tx_submit()
689 static int nbpf_desc_page_alloc(struct nbpf_channel *chan) in nbpf_desc_page_alloc() argument
691 struct dma_chan *dchan = &chan->dma_chan; in nbpf_desc_page_alloc()
727 desc->chan = chan; in nbpf_desc_page_alloc()
736 spin_lock_irq(&chan->lock); in nbpf_desc_page_alloc()
737 list_splice_tail(&lhead, &chan->free_links); in nbpf_desc_page_alloc()
738 list_splice_tail(&head, &chan->free); in nbpf_desc_page_alloc()
739 list_add(&dpage->node, &chan->desc_page); in nbpf_desc_page_alloc()
740 spin_unlock_irq(&chan->lock); in nbpf_desc_page_alloc()
757 struct nbpf_channel *chan = desc->chan; in nbpf_desc_put() local
761 spin_lock_irqsave(&chan->lock, flags); in nbpf_desc_put()
763 list_move(&ldesc->node, &chan->free_links); in nbpf_desc_put()
765 list_add(&desc->node, &chan->free); in nbpf_desc_put()
766 spin_unlock_irqrestore(&chan->lock, flags); in nbpf_desc_put()
769 static void nbpf_scan_acked(struct nbpf_channel *chan) in nbpf_scan_acked() argument
775 spin_lock_irqsave(&chan->lock, flags); in nbpf_scan_acked()
776 list_for_each_entry_safe(desc, tmp, &chan->done, node) in nbpf_scan_acked()
781 spin_unlock_irqrestore(&chan->lock, flags); in nbpf_scan_acked()
795 static struct nbpf_desc *nbpf_desc_get(struct nbpf_channel *chan, size_t len) in nbpf_desc_get() argument
800 nbpf_scan_acked(chan); in nbpf_desc_get()
802 spin_lock_irq(&chan->lock); in nbpf_desc_get()
807 if (list_empty(&chan->free)) { in nbpf_desc_get()
809 spin_unlock_irq(&chan->lock); in nbpf_desc_get()
810 ret = nbpf_desc_page_alloc(chan); in nbpf_desc_get()
813 spin_lock_irq(&chan->lock); in nbpf_desc_get()
816 desc = list_first_entry(&chan->free, struct nbpf_desc, node); in nbpf_desc_get()
820 if (list_empty(&chan->free_links)) { in nbpf_desc_get()
822 spin_unlock_irq(&chan->lock); in nbpf_desc_get()
823 ret = nbpf_desc_page_alloc(chan); in nbpf_desc_get()
828 spin_lock_irq(&chan->lock); in nbpf_desc_get()
832 ldesc = list_first_entry(&chan->free_links, in nbpf_desc_get()
847 spin_unlock_irq(&chan->lock); in nbpf_desc_get()
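nbpf_desc_get() shows the pattern used when the free list needs a refill: the spinlock is dropped around nbpf_desc_page_alloc() (which takes the same lock itself when splicing, lines 736-740) and the emptiness test is simply retried afterwards. A condensed sketch of just that pattern; the link-descriptor loop over chan->free_links (lines 820-832) follows the same drop-and-retry shape and is elided:

	static struct nbpf_desc *nbpf_desc_get(struct nbpf_channel *chan, size_t len)
	{
		struct nbpf_desc *desc;
		int ret;

		nbpf_scan_acked(chan);	/* recycle completed, acked descriptors */

		spin_lock_irq(&chan->lock);
		while (list_empty(&chan->free)) {
			/* Cannot allocate under a spinlock: drop, refill, retry */
			spin_unlock_irq(&chan->lock);
			ret = nbpf_desc_page_alloc(chan);
			if (ret < 0)
				return NULL;
			spin_lock_irq(&chan->lock);
		}
		desc = list_first_entry(&chan->free, struct nbpf_desc, node);
		list_del(&desc->node);	/* claim it while still locked */
		spin_unlock_irq(&chan->lock);

		return desc;
	}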
852 static void nbpf_chan_idle(struct nbpf_channel *chan) in nbpf_chan_idle() argument
858 spin_lock_irqsave(&chan->lock, flags); in nbpf_chan_idle()
860 list_splice_init(&chan->done, &head); in nbpf_chan_idle()
861 list_splice_init(&chan->active, &head); in nbpf_chan_idle()
862 list_splice_init(&chan->queued, &head); in nbpf_chan_idle()
864 chan->running = NULL; in nbpf_chan_idle()
866 spin_unlock_irqrestore(&chan->lock, flags); in nbpf_chan_idle()
869 dev_dbg(chan->nbpf->dma_dev.dev, "%s(): force-free desc %p cookie %d\n", in nbpf_chan_idle()
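nbpf_chan_idle() empties all three descriptor lists onto a private head under the lock, but frees the descriptors only after dropping it, because nbpf_desc_put() takes the same lock (line 761). A sketch:

	static void nbpf_chan_idle(struct nbpf_channel *chan)
	{
		struct nbpf_desc *desc, *tmp;
		unsigned long flags;
		LIST_HEAD(head);

		spin_lock_irqsave(&chan->lock, flags);
		list_splice_init(&chan->done, &head);
		list_splice_init(&chan->active, &head);
		list_splice_init(&chan->queued, &head);
		chan->running = NULL;
		spin_unlock_irqrestore(&chan->lock, flags);

		/* Free outside the lock: nbpf_desc_put() takes chan->lock */
		list_for_each_entry_safe(desc, tmp, &head, node) {
			dev_dbg(chan->nbpf->dma_dev.dev,
				"%s(): force-free desc %p cookie %d\n", __func__,
				&desc->async_tx, desc->async_tx.cookie);
			nbpf_desc_put(desc);
		}
	}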
878 struct nbpf_channel *chan = nbpf_to_chan(dchan); in nbpf_pause() local
882 chan->paused = true; in nbpf_pause()
883 nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_SETSUS); in nbpf_pause()
885 nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_CLREN); in nbpf_pause()
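Pausing is two register writes: set the suspend bit, then clear the enable bit so the channel stops; chan->paused makes nbpf_tx_status() report DMA_PAUSED (line 666). A sketch:

	static int nbpf_pause(struct dma_chan *dchan)
	{
		struct nbpf_channel *chan = nbpf_to_chan(dchan);

		chan->paused = true;
		nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_SETSUS);
		/* Also drop the enable bit so the channel actually stops */
		nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_CLREN);

		return 0;
	}

nbpf_terminate_all() (lines 892-898) goes further: it halts the channel outright and force-frees everything via nbpf_chan_idle().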
892 struct nbpf_channel *chan = nbpf_to_chan(dchan); in nbpf_terminate_all() local
897 nbpf_chan_halt(chan); in nbpf_terminate_all()
898 nbpf_chan_idle(chan); in nbpf_terminate_all()
906 struct nbpf_channel *chan = nbpf_to_chan(dchan); in nbpf_config() local
916 chan->slave_dst_addr = config->dst_addr; in nbpf_config()
917 chan->slave_dst_width = nbpf_xfer_size(chan->nbpf, in nbpf_config()
919 chan->slave_dst_burst = nbpf_xfer_size(chan->nbpf, in nbpf_config()
922 chan->slave_src_addr = config->src_addr; in nbpf_config()
923 chan->slave_src_width = nbpf_xfer_size(chan->nbpf, in nbpf_config()
925 chan->slave_src_burst = nbpf_xfer_size(chan->nbpf, in nbpf_config()
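nbpf_config() does not touch the hardware; it only caches the slave addresses and converts the client's bus widths and maxburst values into the controller's size encoding via nbpf_xfer_size(), for use later in nbpf_prep_one(). A sketch; the exact nbpf_xfer_size() argument order is inferred from the fragments:

	static int nbpf_config(struct dma_chan *dchan,
			       struct dma_slave_config *config)
	{
		struct nbpf_channel *chan = nbpf_to_chan(dchan);

		/* Cache only: applied when a transfer is prepared */
		chan->slave_dst_addr = config->dst_addr;
		chan->slave_dst_width = nbpf_xfer_size(chan->nbpf,
						       config->dst_addr_width, 1);
		chan->slave_dst_burst = nbpf_xfer_size(chan->nbpf,
						       config->dst_addr_width,
						       config->dst_maxburst);
		chan->slave_src_addr = config->src_addr;
		chan->slave_src_width = nbpf_xfer_size(chan->nbpf,
						       config->src_addr_width, 1);
		chan->slave_src_burst = nbpf_xfer_size(chan->nbpf,
						       config->src_addr_width,
						       config->src_maxburst);

		return 0;
	}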
932 static struct dma_async_tx_descriptor *nbpf_prep_sg(struct nbpf_channel *chan, in nbpf_prep_sg() argument
964 desc = nbpf_desc_get(chan, len); in nbpf_prep_sg()
1005 struct nbpf_channel *chan = nbpf_to_chan(dchan); in nbpf_prep_memcpy() local
1021 return nbpf_prep_sg(chan, &src_sg, &dst_sg, 1, in nbpf_prep_memcpy()
1029 struct nbpf_channel *chan = nbpf_to_chan(dchan); in nbpf_prep_slave_sg() local
1038 sg_dma_address(&slave_sg) = chan->slave_dst_addr; in nbpf_prep_slave_sg()
1039 return nbpf_prep_sg(chan, sgl, &slave_sg, sg_len, in nbpf_prep_slave_sg()
1043 sg_dma_address(&slave_sg) = chan->slave_src_addr; in nbpf_prep_slave_sg()
1044 return nbpf_prep_sg(chan, &slave_sg, sgl, sg_len, in nbpf_prep_slave_sg()
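nbpf_prep_slave_sg() reuses the generic nbpf_prep_sg() by synthesizing a one-entry scatterlist for the device side, pointed at the slave address cached by nbpf_config(); the direction decides which side of the transfer the synthetic entry becomes. nbpf_prep_memcpy() (lines 1005-1021) does the same with two single-entry lists. A sketch (the sg_init_table() call is an assumption consistent with the fragments):

	static struct dma_async_tx_descriptor *nbpf_prep_slave_sg(
		struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
		enum dma_transfer_direction direction, unsigned long flags,
		void *context)
	{
		struct nbpf_channel *chan = nbpf_to_chan(dchan);
		struct scatterlist slave_sg;

		sg_init_table(&slave_sg, 1);

		switch (direction) {
		case DMA_MEM_TO_DEV:
			sg_dma_address(&slave_sg) = chan->slave_dst_addr;
			return nbpf_prep_sg(chan, sgl, &slave_sg, sg_len,
					    direction, flags);
		case DMA_DEV_TO_MEM:
			sg_dma_address(&slave_sg) = chan->slave_src_addr;
			return nbpf_prep_sg(chan, &slave_sg, sgl, sg_len,
					    direction, flags);
		default:
			return NULL;
		}
	}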
1054 struct nbpf_channel *chan = nbpf_to_chan(dchan); in nbpf_alloc_chan_resources() local
1057 INIT_LIST_HEAD(&chan->free); in nbpf_alloc_chan_resources()
1058 INIT_LIST_HEAD(&chan->free_links); in nbpf_alloc_chan_resources()
1059 INIT_LIST_HEAD(&chan->queued); in nbpf_alloc_chan_resources()
1060 INIT_LIST_HEAD(&chan->active); in nbpf_alloc_chan_resources()
1061 INIT_LIST_HEAD(&chan->done); in nbpf_alloc_chan_resources()
1063 ret = nbpf_desc_page_alloc(chan); in nbpf_alloc_chan_resources()
1068 chan->terminal); in nbpf_alloc_chan_resources()
1070 nbpf_chan_configure(chan); in nbpf_alloc_chan_resources()
1077 struct nbpf_channel *chan = nbpf_to_chan(dchan); in nbpf_free_chan_resources() local
1082 nbpf_chan_halt(chan); in nbpf_free_chan_resources()
1083 nbpf_chan_idle(chan); in nbpf_free_chan_resources()
1085 nbpf_chan_prepare_default(chan); in nbpf_free_chan_resources()
1087 list_for_each_entry_safe(dpage, tmp, &chan->desc_page, node) { in nbpf_free_chan_resources()
1105 struct nbpf_channel *chan; in nbpf_of_xlate() local
1117 chan = nbpf_to_chan(dchan); in nbpf_of_xlate()
1119 chan->terminal = dma_spec->args[0]; in nbpf_of_xlate()
1120 chan->flags = dma_spec->args[1]; in nbpf_of_xlate()
1122 nbpf_chan_prepare(chan); in nbpf_of_xlate()
1123 nbpf_chan_configure(chan); in nbpf_of_xlate()
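The devicetree binding maps straight onto the channel: args[0] is the terminal (hardware request line) and args[1] the DMARQ polarity/level flags, after which the cached dmarq_cfg is recomputed and written out. A sketch; dma_get_any_slave_channel() as the channel source and the args_count check are assumptions:

	static struct dma_chan *nbpf_of_xlate(struct of_phandle_args *dma_spec,
					      struct of_dma *ofdma)
	{
		struct nbpf_device *nbpf = ofdma->of_dma_data;
		struct nbpf_channel *chan;
		struct dma_chan *dchan;

		if (dma_spec->args_count != 2)
			return NULL;

		dchan = dma_get_any_slave_channel(&nbpf->dma_dev); /* assumed */
		if (!dchan)
			return NULL;

		chan = nbpf_to_chan(dchan);
		chan->terminal = dma_spec->args[0];	/* request line */
		chan->flags = dma_spec->args[1];	/* DMARQ polarity/level */

		nbpf_chan_prepare(chan);
		nbpf_chan_configure(chan);

		return dchan;
	}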
1130 struct nbpf_channel *chan = from_tasklet(chan, t, tasklet); in nbpf_chan_tasklet() local
1134 while (!list_empty(&chan->done)) { in nbpf_chan_tasklet()
1137 spin_lock_irq(&chan->lock); in nbpf_chan_tasklet()
1139 list_for_each_entry_safe(desc, tmp, &chan->done, node) { in nbpf_chan_tasklet()
1150 spin_unlock_irq(&chan->lock); in nbpf_chan_tasklet()
1162 spin_unlock_irq(&chan->lock); in nbpf_chan_tasklet()
1183 spin_unlock_irq(&chan->lock); in nbpf_chan_tasklet()
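The three unlock sites above (lines 1150, 1162, 1183) show the tasklet's shape: it picks a completed descriptor from chan->done under the lock, completes the cookie, then drops the lock before invoking the client callback, with the outer while re-scanning in case new completions arrived meanwhile. A simplified sketch of that shape; the real function additionally defers recycling until the client acks the descriptor (see nbpf_scan_acked(), lines 769-781), which is elided here:

	static void nbpf_chan_tasklet(struct tasklet_struct *t)
	{
		struct nbpf_channel *chan = from_tasklet(chan, t, tasklet);
		struct nbpf_desc *desc;

		while (!list_empty(&chan->done)) {
			struct dmaengine_desc_callback cb;

			spin_lock_irq(&chan->lock);
			if (list_empty(&chan->done)) {	/* raced with terminate */
				spin_unlock_irq(&chan->lock);
				break;
			}
			desc = list_first_entry(&chan->done, struct nbpf_desc, node);
			dma_cookie_complete(&desc->async_tx);
			/* Snapshot the callback while still holding the lock */
			dmaengine_desc_get_callback(&desc->async_tx, &cb);
			list_del(&desc->node);		/* simplification */
			spin_unlock_irq(&chan->lock);

			/* Callback runs unlocked; it may submit new transfers */
			dmaengine_desc_callback_invoke(&cb, NULL);
			nbpf_desc_put(desc);		/* simplification */
		}
	}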
1194 struct nbpf_channel *chan = dev; in nbpf_chan_irq() local
1195 bool done = nbpf_status_get(chan); in nbpf_chan_irq()
1203 nbpf_status_ack(chan); in nbpf_chan_irq()
1205 dev_dbg(&chan->dma_chan.dev->device, "%s()\n", __func__); in nbpf_chan_irq()
1207 spin_lock(&chan->lock); in nbpf_chan_irq()
1208 desc = chan->running; in nbpf_chan_irq()
1217 list_move_tail(&desc->node, &chan->done); in nbpf_chan_irq()
1218 chan->running = NULL; in nbpf_chan_irq()
1220 if (!list_empty(&chan->active)) { in nbpf_chan_irq()
1221 desc = list_first_entry(&chan->active, in nbpf_chan_irq()
1224 chan->running = desc; in nbpf_chan_irq()
1228 spin_unlock(&chan->lock); in nbpf_chan_irq()
1231 tasklet_schedule(&chan->tasklet); in nbpf_chan_irq()
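The END interrupt handler does the minimum in hard-IRQ context: acknowledge, retire chan->running onto the done list, start the next active descriptor if one is queued, and defer the callbacks to the tasklet. A sketch consistent with the fragments; the IRQ_NONE path for a spurious or shared interrupt is an assumption:

	static irqreturn_t nbpf_chan_irq(int irq, void *dev)
	{
		struct nbpf_channel *chan = dev;
		bool done = nbpf_status_get(chan);
		struct nbpf_desc *desc;

		if (!done)
			return IRQ_NONE;	/* not this channel (assumed) */

		nbpf_status_ack(chan);

		spin_lock(&chan->lock);
		desc = chan->running;
		if (WARN_ON(!desc)) {		/* guard assumed */
			spin_unlock(&chan->lock);
			return IRQ_HANDLED;
		}

		/* Retire the finished transfer, kick the next one if queued */
		list_move_tail(&desc->node, &chan->done);
		chan->running = NULL;

		if (!list_empty(&chan->active)) {
			desc = list_first_entry(&chan->active,
						struct nbpf_desc, node);
			nbpf_start(desc);
			chan->running = desc;
		}
		spin_unlock(&chan->lock);

		/* Client callbacks run later in softirq context */
		tasklet_schedule(&chan->tasklet);

		return IRQ_HANDLED;
	}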
1247 struct nbpf_channel *chan = nbpf_error_get_channel(nbpf, error); in nbpf_err_irq() local
1249 nbpf_error_clear(chan); in nbpf_err_irq()
1250 nbpf_chan_idle(chan); in nbpf_err_irq()
1260 struct nbpf_channel *chan = nbpf->chan + n; in nbpf_chan_probe() local
1263 chan->nbpf = nbpf; in nbpf_chan_probe()
1264 chan->base = nbpf->base + NBPF_REG_CHAN_OFFSET + NBPF_REG_CHAN_SIZE * n; in nbpf_chan_probe()
1265 INIT_LIST_HEAD(&chan->desc_page); in nbpf_chan_probe()
1266 spin_lock_init(&chan->lock); in nbpf_chan_probe()
1267 chan->dma_chan.device = dma_dev; in nbpf_chan_probe()
1268 dma_cookie_init(&chan->dma_chan); in nbpf_chan_probe()
1269 nbpf_chan_prepare_default(chan); in nbpf_chan_probe()
1271 dev_dbg(dma_dev->dev, "%s(): channel %d: -> %p\n", __func__, n, chan->base); in nbpf_chan_probe()
1273 snprintf(chan->name, sizeof(chan->name), "nbpf %d", n); in nbpf_chan_probe()
1275 tasklet_setup(&chan->tasklet, nbpf_chan_tasklet); in nbpf_chan_probe()
1276 ret = devm_request_irq(dma_dev->dev, chan->irq, in nbpf_chan_probe()
1278 chan->name, chan); in nbpf_chan_probe()
1283 list_add_tail(&chan->dma_chan.device_node, in nbpf_chan_probe()
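Per-channel setup computes the channel's MMIO window from the controller base plus a fixed stride, initializes the lock, lists, and tasklet, and requests the channel IRQ with the channel itself as dev_id (matching the devm_free_irq() call in nbpf_remove()). A sketch; the IRQ flags and the error return are assumptions:

	static int nbpf_chan_probe(struct nbpf_device *nbpf, int n)
	{
		struct dma_device *dma_dev = &nbpf->dma_dev;
		struct nbpf_channel *chan = nbpf->chan + n;
		int ret;

		chan->nbpf = nbpf;
		/* Each channel owns a fixed-stride register window */
		chan->base = nbpf->base + NBPF_REG_CHAN_OFFSET +
			     NBPF_REG_CHAN_SIZE * n;
		INIT_LIST_HEAD(&chan->desc_page);
		spin_lock_init(&chan->lock);
		chan->dma_chan.device = dma_dev;
		dma_cookie_init(&chan->dma_chan);
		nbpf_chan_prepare_default(chan);

		snprintf(chan->name, sizeof(chan->name), "nbpf %d", n);

		tasklet_setup(&chan->tasklet, nbpf_chan_tasklet);
		ret = devm_request_irq(dma_dev->dev, chan->irq, nbpf_chan_irq,
				       IRQF_SHARED, chan->name, chan); /* flags assumed */
		if (ret < 0)
			return ret;

		/* Make the channel visible to the dmaengine core */
		list_add_tail(&chan->dma_chan.device_node, &dma_dev->channels);

		return 0;
	}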
1324 nbpf = devm_kzalloc(dev, struct_size(nbpf, chan, num_channels), in nbpf_probe()
1368 nbpf->chan[i].irq = irqbuf[0]; in nbpf_probe()
1375 struct nbpf_channel *chan; in nbpf_probe() local
1377 for (i = 0, chan = nbpf->chan; i < num_channels; in nbpf_probe()
1378 i++, chan++) { in nbpf_probe()
1384 chan->irq = irqbuf[i]; in nbpf_probe()
1394 nbpf->chan[i].irq = irq; in nbpf_probe()
1477 struct nbpf_channel *chan = nbpf->chan + i; in nbpf_remove() local
1479 devm_free_irq(&pdev->dev, chan->irq, chan); in nbpf_remove()
1481 tasklet_kill(&chan->tasklet); in nbpf_remove()
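Teardown order matters here: the IRQ is freed explicitly before tasklet_kill(), so no late interrupt can re-schedule a tasklet that has already been killed. A sketch of the per-channel loop in nbpf_remove(); the channel-count source is an assumption:

	for (i = 0; i < num_channels; i++) {	/* count source assumed */
		struct nbpf_channel *chan = nbpf->chan + i;

		/* Free the IRQ first so it cannot re-schedule the tasklet */
		devm_free_irq(&pdev->dev, chan->irq, chan);

		tasklet_kill(&chan->tasklet);
	}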