Lines matching refs:mdev in drivers/dma/altera-msgdma.c (the Altera mSGDMA dmaengine driver)

Each entry reads "<source line> <code> in <function>()"; the trailing "argument" or "local" tag records whether mdev is a parameter or a local variable of that function.

210 static struct msgdma_sw_desc *msgdma_get_descriptor(struct msgdma_device *mdev)  in msgdma_get_descriptor()  argument
215 spin_lock_irqsave(&mdev->lock, flags); in msgdma_get_descriptor()
216 desc = list_first_entry(&mdev->free_list, struct msgdma_sw_desc, node); in msgdma_get_descriptor()
218 spin_unlock_irqrestore(&mdev->lock, flags); in msgdma_get_descriptor()
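Lines 210-218 show the free-list pop pattern: the head descriptor is taken off free_list while mdev->lock is held, so prep calls on other CPUs cannot grab the same node. A hedged reconstruction of the full body; the list_del() and the tx_list init between the listed lines are assumptions:

static struct msgdma_sw_desc *msgdma_get_descriptor(struct msgdma_device *mdev)
{
        struct msgdma_sw_desc *desc;
        unsigned long flags;

        spin_lock_irqsave(&mdev->lock, flags);
        desc = list_first_entry(&mdev->free_list, struct msgdma_sw_desc, node);
        list_del(&desc->node);          /* assumed: unlink before dropping the lock */
        spin_unlock_irqrestore(&mdev->lock, flags);

        INIT_LIST_HEAD(&desc->tx_list); /* assumed per-transaction child list */

        return desc;
}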
230 static void msgdma_free_descriptor(struct msgdma_device *mdev, in msgdma_free_descriptor() argument
235 mdev->desc_free_cnt++; in msgdma_free_descriptor()
236 list_add_tail(&desc->node, &mdev->free_list); in msgdma_free_descriptor()
238 mdev->desc_free_cnt++; in msgdma_free_descriptor()
239 list_move_tail(&child->node, &mdev->free_list); in msgdma_free_descriptor()
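desc_free_cnt is bumped twice here (lines 235 and 238) because a transaction owns one head descriptor plus zero or more chained children, and each node returned to free_list counts once. A sketch, assuming the children live on a tx_list inside the head descriptor:

static void msgdma_free_descriptor(struct msgdma_device *mdev,
                                   struct msgdma_sw_desc *desc)
{
        struct msgdma_sw_desc *child, *next;

        mdev->desc_free_cnt++;
        list_add_tail(&desc->node, &mdev->free_list);
        list_for_each_entry_safe(child, next, &desc->tx_list, node) {
                mdev->desc_free_cnt++;
                list_move_tail(&child->node, &mdev->free_list);
        }
}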
248 static void msgdma_free_desc_list(struct msgdma_device *mdev, in msgdma_free_desc_list() argument
254 msgdma_free_descriptor(mdev, desc); in msgdma_free_desc_list()
306 struct msgdma_device *mdev = to_mdev(tx->chan); in msgdma_tx_submit() local
312 spin_lock_irqsave(&mdev->lock, flags); in msgdma_tx_submit()
315 list_add_tail(&new->node, &mdev->pending_list); in msgdma_tx_submit()
316 spin_unlock_irqrestore(&mdev->lock, flags); in msgdma_tx_submit()
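Lines 306-316 are the standard dmaengine tx_submit shape: assign the cookie and queue the descriptor on pending_list under the channel lock, deferring all hardware writes to issue_pending. A sketch, assuming a container_of()-style tx_to_desc() helper:

static dma_cookie_t msgdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
        struct msgdma_device *mdev = to_mdev(tx->chan);
        struct msgdma_sw_desc *new = tx_to_desc(tx);    /* assumed helper */
        dma_cookie_t cookie;
        unsigned long flags;

        spin_lock_irqsave(&mdev->lock, flags);
        cookie = dma_cookie_assign(tx);
        list_add_tail(&new->node, &mdev->pending_list);
        spin_unlock_irqrestore(&mdev->lock, flags);

        return cookie;
}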
335 struct msgdma_device *mdev = to_mdev(dchan); in msgdma_prep_memcpy() local
344 spin_lock_irqsave(&mdev->lock, irqflags); in msgdma_prep_memcpy()
345 if (desc_cnt > mdev->desc_free_cnt) { in msgdma_prep_memcpy()
346 spin_unlock_irqrestore(&mdev->lock, irqflags); in msgdma_prep_memcpy()
347 dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev); in msgdma_prep_memcpy()
350 mdev->desc_free_cnt -= desc_cnt; in msgdma_prep_memcpy()
351 spin_unlock_irqrestore(&mdev->lock, irqflags); in msgdma_prep_memcpy()
355 new = msgdma_get_descriptor(mdev); in msgdma_prep_memcpy()
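Lines 344-355 reserve the whole descriptor chain up front: desc_free_cnt is checked and decremented in one critical section, so a concurrent prep call cannot leave this one stranded halfway through its msgdma_get_descriptor() calls. A sketch of that reservation step inside msgdma_prep_memcpy(); the desc_cnt computation and the NULL return are assumptions:

        /* Assumed: one descriptor per MSGDMA_MAX_TRANS_LEN chunk. */
        desc_cnt = DIV_ROUND_UP(len, MSGDMA_MAX_TRANS_LEN);

        spin_lock_irqsave(&mdev->lock, irqflags);
        if (desc_cnt > mdev->desc_free_cnt) {
                spin_unlock_irqrestore(&mdev->lock, irqflags);
                dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev);
                return NULL;
        }
        mdev->desc_free_cnt -= desc_cnt;
        spin_unlock_irqrestore(&mdev->lock, irqflags);

        /* Build the chain; the reservation above guarantees the
         * msgdma_get_descriptor() calls below cannot run dry. */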
393 struct msgdma_device *mdev = to_mdev(dchan); in msgdma_prep_slave_sg() local
394 struct dma_slave_config *cfg = &mdev->slave_cfg; in msgdma_prep_slave_sg()
407 spin_lock_irqsave(&mdev->lock, irqflags); in msgdma_prep_slave_sg()
408 if (desc_cnt > mdev->desc_free_cnt) { in msgdma_prep_slave_sg()
409 spin_unlock_irqrestore(&mdev->lock, irqflags); in msgdma_prep_slave_sg()
410 dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev); in msgdma_prep_slave_sg()
413 mdev->desc_free_cnt -= desc_cnt; in msgdma_prep_slave_sg()
414 spin_unlock_irqrestore(&mdev->lock, irqflags); in msgdma_prep_slave_sg()
421 new = msgdma_get_descriptor(mdev); in msgdma_prep_slave_sg()
464 struct msgdma_device *mdev = to_mdev(dchan); in msgdma_dma_config() local
466 memcpy(&mdev->slave_cfg, config, sizeof(*config)); in msgdma_dma_config()
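msgdma_dma_config() just snapshots the client's dma_slave_config; msgdma_prep_slave_sg() (lines 393-421) later reads the per-direction device address out of that copy while reserving descriptors exactly as the memcpy path does. The body is essentially complete in the listing; a sketch assuming the standard dmaengine device_config signature:

static int msgdma_dma_config(struct dma_chan *dchan,
                             struct dma_slave_config *config)
{
        struct msgdma_device *mdev = to_mdev(dchan);

        memcpy(&mdev->slave_cfg, config, sizeof(*config));

        return 0;
}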
471 static void msgdma_reset(struct msgdma_device *mdev) in msgdma_reset() argument
477 iowrite32(MSGDMA_CSR_STAT_MASK, mdev->csr + MSGDMA_CSR_STATUS); in msgdma_reset()
478 iowrite32(MSGDMA_CSR_CTL_RESET, mdev->csr + MSGDMA_CSR_CONTROL); in msgdma_reset()
480 ret = readl_poll_timeout(mdev->csr + MSGDMA_CSR_STATUS, val, in msgdma_reset()
484 dev_err(mdev->dev, "DMA channel did not reset\n"); in msgdma_reset()
487 iowrite32(MSGDMA_CSR_STAT_MASK, mdev->csr + MSGDMA_CSR_STATUS); in msgdma_reset()
491 MSGDMA_CSR_CTL_GLOBAL_INTR, mdev->csr + MSGDMA_CSR_CONTROL); in msgdma_reset()
493 mdev->idle = true; in msgdma_reset()
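Lines 471-493 are a classic reset-and-poll sequence: clear stale status, assert the reset bit, then poll the status register until the resetting bit drops. A sketch (readl_poll_timeout() comes from linux/iopoll.h); the RESETTING bit name, the 1 us/10 ms poll parameters, and the control flags OR'd ahead of GLOBAL_INTR are assumptions:

static void msgdma_reset(struct msgdma_device *mdev)
{
        u32 val;
        int ret;

        /* Clear all status bits, then request a reset. */
        iowrite32(MSGDMA_CSR_STAT_MASK, mdev->csr + MSGDMA_CSR_STATUS);
        iowrite32(MSGDMA_CSR_CTL_RESET, mdev->csr + MSGDMA_CSR_CONTROL);

        ret = readl_poll_timeout(mdev->csr + MSGDMA_CSR_STATUS, val,
                                 (val & MSGDMA_CSR_STAT_RESETTING) == 0,
                                 1, 10000);
        if (ret)
                dev_err(mdev->dev, "DMA channel did not reset\n");

        /* Clear anything latched during reset, re-enable interrupts. */
        iowrite32(MSGDMA_CSR_STAT_MASK, mdev->csr + MSGDMA_CSR_STATUS);
        iowrite32(MSGDMA_CSR_CTL_STOP_ON_ERR | MSGDMA_CSR_CTL_STOP_ON_EARLY |
                  MSGDMA_CSR_CTL_GLOBAL_INTR, mdev->csr + MSGDMA_CSR_CONTROL);

        mdev->idle = true;
}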
496 static void msgdma_copy_one(struct msgdma_device *mdev, in msgdma_copy_one() argument
499 void __iomem *hw_desc = mdev->desc; in msgdma_copy_one()
505 while (ioread32(mdev->csr + MSGDMA_CSR_STATUS) & in msgdma_copy_one()
522 mdev->idle = false; in msgdma_copy_one()
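Lines 496-522 push one hardware descriptor into the write-only descriptor FIFO at mdev->desc: busy-wait while the FIFO reports full, write the descriptor words, and mark the channel non-idle. A rough sketch; the FULL bit name, the hw_desc field layout and offsets, and writing the control word last are all assumptions:

static void msgdma_copy_one(struct msgdma_device *mdev,
                            struct msgdma_sw_desc *desc)
{
        void __iomem *hw_desc = mdev->desc;

        /* Wait for room in the descriptor FIFO (bit name assumed). */
        while (ioread32(mdev->csr + MSGDMA_CSR_STATUS) &
               MSGDMA_CSR_STAT_DESC_BUF_FULL)
                mdelay(1);

        /* Assumed word layout; the control word goes last because it
         * hands the descriptor over to the dispatcher. */
        iowrite32(desc->hw_desc.read_addr, hw_desc + 0x00);
        iowrite32(desc->hw_desc.write_addr, hw_desc + 0x04);
        iowrite32(desc->hw_desc.len, hw_desc + 0x08);
        iowrite32(desc->hw_desc.control, hw_desc + 0x0c);

        mdev->idle = false;
}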
534 static void msgdma_copy_desc_to_fifo(struct msgdma_device *mdev, in msgdma_copy_desc_to_fifo() argument
539 msgdma_copy_one(mdev, desc); in msgdma_copy_desc_to_fifo()
542 msgdma_copy_one(mdev, sdesc); in msgdma_copy_desc_to_fifo()
549 static void msgdma_start_transfer(struct msgdma_device *mdev) in msgdma_start_transfer() argument
553 if (!mdev->idle) in msgdma_start_transfer()
556 desc = list_first_entry_or_null(&mdev->pending_list, in msgdma_start_transfer()
561 list_splice_tail_init(&mdev->pending_list, &mdev->active_list); in msgdma_start_transfer()
562 msgdma_copy_desc_to_fifo(mdev, desc); in msgdma_start_transfer()
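Lines 549-562 only start the engine when it is idle, and they hand over everything queued so far in one go: the entire pending_list is spliced onto active_list before the first descriptor chain is copied to the FIFO. Reconstructed body; the two early returns are the only unlisted lines:

static void msgdma_start_transfer(struct msgdma_device *mdev)
{
        struct msgdma_sw_desc *desc;

        if (!mdev->idle)
                return;

        desc = list_first_entry_or_null(&mdev->pending_list,
                                        struct msgdma_sw_desc, node);
        if (!desc)
                return;

        list_splice_tail_init(&mdev->pending_list, &mdev->active_list);
        msgdma_copy_desc_to_fifo(mdev, desc);
}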
571 struct msgdma_device *mdev = to_mdev(chan); in msgdma_issue_pending() local
574 spin_lock_irqsave(&mdev->lock, flags); in msgdma_issue_pending()
575 msgdma_start_transfer(mdev); in msgdma_issue_pending()
576 spin_unlock_irqrestore(&mdev->lock, flags); in msgdma_issue_pending()
583 static void msgdma_chan_desc_cleanup(struct msgdma_device *mdev) in msgdma_chan_desc_cleanup() argument
587 list_for_each_entry_safe(desc, next, &mdev->done_list, node) { in msgdma_chan_desc_cleanup()
594 spin_unlock(&mdev->lock); in msgdma_chan_desc_cleanup()
596 spin_lock(&mdev->lock); in msgdma_chan_desc_cleanup()
600 msgdma_free_descriptor(mdev, desc); in msgdma_chan_desc_cleanup()
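Lines 583-600 retire completed descriptors, and the unlock/lock pair at lines 594-596 is the interesting part: the client callback runs with mdev->lock dropped, because the callback may re-enter the driver (e.g. to submit more work) and try to take the lock. A sketch using the dmaengine callback helpers from drivers/dma/dmaengine.h; whether this driver uses those helpers or open-codes the callback is an assumption:

static void msgdma_chan_desc_cleanup(struct msgdma_device *mdev)
{
        struct msgdma_sw_desc *desc, *next;

        list_for_each_entry_safe(desc, next, &mdev->done_list, node) {
                struct dmaengine_desc_callback cb;

                list_del(&desc->node);

                dmaengine_desc_get_callback(&desc->async_tx, &cb);
                if (dmaengine_desc_callback_valid(&cb)) {
                        /* Call back into the client without the lock held. */
                        spin_unlock(&mdev->lock);
                        dmaengine_desc_callback_invoke(&cb, NULL);
                        spin_lock(&mdev->lock);
                }

                msgdma_free_descriptor(mdev, desc);
        }
}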
608 static void msgdma_complete_descriptor(struct msgdma_device *mdev) in msgdma_complete_descriptor() argument
612 desc = list_first_entry_or_null(&mdev->active_list, in msgdma_complete_descriptor()
618 list_add_tail(&desc->node, &mdev->done_list); in msgdma_complete_descriptor()
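Lines 608-618 move exactly one descriptor from active_list to done_list and complete its cookie; the tasklet calls this once per response FIFO entry. Sketch; the list_del() and dma_cookie_complete() between the listed lines are assumptions:

static void msgdma_complete_descriptor(struct msgdma_device *mdev)
{
        struct msgdma_sw_desc *desc;

        desc = list_first_entry_or_null(&mdev->active_list,
                                        struct msgdma_sw_desc, node);
        if (!desc)
                return;
        list_del(&desc->node);
        dma_cookie_complete(&desc->async_tx);
        list_add_tail(&desc->node, &mdev->done_list);
}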
625 static void msgdma_free_descriptors(struct msgdma_device *mdev) in msgdma_free_descriptors() argument
627 msgdma_free_desc_list(mdev, &mdev->active_list); in msgdma_free_descriptors()
628 msgdma_free_desc_list(mdev, &mdev->pending_list); in msgdma_free_descriptors()
629 msgdma_free_desc_list(mdev, &mdev->done_list); in msgdma_free_descriptors()
638 struct msgdma_device *mdev = to_mdev(dchan); in msgdma_free_chan_resources() local
641 spin_lock_irqsave(&mdev->lock, flags); in msgdma_free_chan_resources()
642 msgdma_free_descriptors(mdev); in msgdma_free_chan_resources()
643 spin_unlock_irqrestore(&mdev->lock, flags); in msgdma_free_chan_resources()
644 kfree(mdev->sw_desq); in msgdma_free_chan_resources()
655 struct msgdma_device *mdev = to_mdev(dchan); in msgdma_alloc_chan_resources() local
659 mdev->sw_desq = kcalloc(MSGDMA_DESC_NUM, sizeof(*desc), GFP_NOWAIT); in msgdma_alloc_chan_resources()
660 if (!mdev->sw_desq) in msgdma_alloc_chan_resources()
663 mdev->idle = true; in msgdma_alloc_chan_resources()
664 mdev->desc_free_cnt = MSGDMA_DESC_NUM; in msgdma_alloc_chan_resources()
666 INIT_LIST_HEAD(&mdev->free_list); in msgdma_alloc_chan_resources()
669 desc = mdev->sw_desq + i; in msgdma_alloc_chan_resources()
670 dma_async_tx_descriptor_init(&desc->async_tx, &mdev->dmachan); in msgdma_alloc_chan_resources()
672 list_add_tail(&desc->node, &mdev->free_list); in msgdma_alloc_chan_resources()
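Lines 655-672 allocate the whole software descriptor pool in one kcalloc() and thread every element onto free_list; from then on desc_free_cnt tracks the pool as prep and free move nodes between lists. Sketch; hooking up tx_submit and returning the pool size are assumptions:

static int msgdma_alloc_chan_resources(struct dma_chan *dchan)
{
        struct msgdma_device *mdev = to_mdev(dchan);
        struct msgdma_sw_desc *desc;
        int i;

        mdev->sw_desq = kcalloc(MSGDMA_DESC_NUM, sizeof(*desc), GFP_NOWAIT);
        if (!mdev->sw_desq)
                return -ENOMEM;

        mdev->idle = true;
        mdev->desc_free_cnt = MSGDMA_DESC_NUM;

        INIT_LIST_HEAD(&mdev->free_list);
        for (i = 0; i < MSGDMA_DESC_NUM; i++) {
                desc = mdev->sw_desq + i;
                dma_async_tx_descriptor_init(&desc->async_tx, &mdev->dmachan);
                desc->async_tx.tx_submit = msgdma_tx_submit;    /* assumed */
                list_add_tail(&desc->node, &mdev->free_list);
        }

        return MSGDMA_DESC_NUM;
}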
684 struct msgdma_device *mdev = from_tasklet(mdev, t, irq_tasklet); in msgdma_tasklet() local
690 spin_lock_irqsave(&mdev->lock, flags); in msgdma_tasklet()
692 if (mdev->resp) { in msgdma_tasklet()
694 count = ioread32(mdev->csr + MSGDMA_CSR_RESP_FILL_LEVEL); in msgdma_tasklet()
695 dev_dbg(mdev->dev, "%s (%d): response count=%d\n", in msgdma_tasklet()
708 if (mdev->resp) { in msgdma_tasklet()
709 size = ioread32(mdev->resp + in msgdma_tasklet()
711 status = ioread32(mdev->resp + in msgdma_tasklet()
715 msgdma_complete_descriptor(mdev); in msgdma_tasklet()
716 msgdma_chan_desc_cleanup(mdev); in msgdma_tasklet()
719 spin_unlock_irqrestore(&mdev->lock, flags); in msgdma_tasklet()
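Lines 684-719 drain the response FIFO under the lock: when a response port is mapped, its fill level says how many descriptors completed, and each read of the response registers pops one entry. Sketch; the count fallback when no response port exists and the exact response register names are assumptions:

static void msgdma_tasklet(struct tasklet_struct *t)
{
        struct msgdma_device *mdev = from_tasklet(mdev, t, irq_tasklet);
        u32 count = 1, size = 0, status = 0;
        unsigned long flags;

        spin_lock_irqsave(&mdev->lock, flags);

        if (mdev->resp) {
                count = ioread32(mdev->csr + MSGDMA_CSR_RESP_FILL_LEVEL);
                dev_dbg(mdev->dev, "%s (%d): response count=%d\n",
                        __func__, __LINE__, count);
        }

        while (count--) {
                /* Reading the status word retires this response entry. */
                if (mdev->resp) {
                        size = ioread32(mdev->resp +
                                        MSGDMA_RESP_BYTES_TRANSFERRED);
                        status = ioread32(mdev->resp + MSGDMA_RESP_STATUS);
                        dev_dbg(mdev->dev, "size=%x, status=%x\n",
                                size, status);
                }

                msgdma_complete_descriptor(mdev);
                msgdma_chan_desc_cleanup(mdev);
        }

        spin_unlock_irqrestore(&mdev->lock, flags);
}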
731 struct msgdma_device *mdev = data; in msgdma_irq_handler() local
734 status = ioread32(mdev->csr + MSGDMA_CSR_STATUS); in msgdma_irq_handler()
737 spin_lock(&mdev->lock); in msgdma_irq_handler()
738 mdev->idle = true; in msgdma_irq_handler()
739 msgdma_start_transfer(mdev); in msgdma_irq_handler()
740 spin_unlock(&mdev->lock); in msgdma_irq_handler()
743 tasklet_schedule(&mdev->irq_tasklet); in msgdma_irq_handler()
746 iowrite32(MSGDMA_CSR_STAT_IRQ, mdev->csr + MSGDMA_CSR_STATUS); in msgdma_irq_handler()
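Lines 731-746 split the IRQ work: the hard handler only restarts the engine when the busy bit has dropped, defers completion processing to the tasklet, and acks by writing the IRQ bit back to the write-1-to-clear status register. Sketch; the BUSY bit name and the return value are assumptions:

static irqreturn_t msgdma_irq_handler(int irq, void *data)
{
        struct msgdma_device *mdev = data;
        u32 status;

        status = ioread32(mdev->csr + MSGDMA_CSR_STATUS);
        if ((status & MSGDMA_CSR_STAT_BUSY) == 0) {
                /* Engine went idle: launch whatever queued up meanwhile. */
                spin_lock(&mdev->lock);
                mdev->idle = true;
                msgdma_start_transfer(mdev);
                spin_unlock(&mdev->lock);
        }

        tasklet_schedule(&mdev->irq_tasklet);

        /* Ack: status bits are write-1-to-clear. */
        iowrite32(MSGDMA_CSR_STAT_IRQ, mdev->csr + MSGDMA_CSR_STATUS);

        return IRQ_HANDLED;
}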
755 static void msgdma_dev_remove(struct msgdma_device *mdev) in msgdma_dev_remove() argument
757 if (!mdev) in msgdma_dev_remove()
760 devm_free_irq(mdev->dev, mdev->irq, mdev); in msgdma_dev_remove()
761 tasklet_kill(&mdev->irq_tasklet); in msgdma_dev_remove()
762 list_del(&mdev->dmachan.device_node); in msgdma_dev_remove()
809 struct msgdma_device *mdev; in msgdma_probe() local
814 mdev = devm_kzalloc(&pdev->dev, sizeof(*mdev), GFP_NOWAIT); in msgdma_probe()
815 if (!mdev) in msgdma_probe()
818 mdev->dev = &pdev->dev; in msgdma_probe()
821 ret = request_and_map(pdev, "csr", &dma_res, &mdev->csr, false); in msgdma_probe()
826 ret = request_and_map(pdev, "desc", &dma_res, &mdev->desc, false); in msgdma_probe()
831 ret = request_and_map(pdev, "resp", &dma_res, &mdev->resp, true); in msgdma_probe()
835 platform_set_drvdata(pdev, mdev); in msgdma_probe()
838 mdev->irq = platform_get_irq(pdev, 0); in msgdma_probe()
839 if (mdev->irq < 0) in msgdma_probe()
842 ret = devm_request_irq(&pdev->dev, mdev->irq, msgdma_irq_handler, in msgdma_probe()
843 0, dev_name(&pdev->dev), mdev); in msgdma_probe()
847 tasklet_setup(&mdev->irq_tasklet, msgdma_tasklet); in msgdma_probe()
849 dma_cookie_init(&mdev->dmachan); in msgdma_probe()
851 spin_lock_init(&mdev->lock); in msgdma_probe()
853 INIT_LIST_HEAD(&mdev->active_list); in msgdma_probe()
854 INIT_LIST_HEAD(&mdev->pending_list); in msgdma_probe()
855 INIT_LIST_HEAD(&mdev->done_list); in msgdma_probe()
856 INIT_LIST_HEAD(&mdev->free_list); in msgdma_probe()
858 dma_dev = &mdev->dmadev; in msgdma_probe()
887 mdev->dmachan.device = dma_dev; in msgdma_probe()
888 list_add_tail(&mdev->dmachan.device_node, &dma_dev->channels); in msgdma_probe()
899 msgdma_reset(mdev); in msgdma_probe()
917 msgdma_dev_remove(mdev); in msgdma_probe()
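probe maps three named MMIO regions (lines 821-831), and the third request_and_map() argument evidently marks the "resp" region optional, which is why the tasklet and IRQ paths test mdev->resp for NULL. A hedged sketch of what that driver-local helper likely looks like; everything beyond the signature implied by the call sites is an assumption:

static int request_and_map(struct platform_device *pdev, const char *name,
                           struct resource **res, void __iomem **ptr,
                           bool optional)
{
        struct device *dev = &pdev->dev;

        *res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
        if (!*res) {
                if (optional) {
                        *ptr = NULL;
                        dev_info(dev, "optional resource %s not defined\n",
                                 name);
                        return 0;
                }
                dev_err(dev, "memory resource %s not defined\n", name);
                return -ENODEV;
        }

        *ptr = devm_ioremap_resource(dev, *res);

        return PTR_ERR_OR_ZERO(*ptr);
}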
930 struct msgdma_device *mdev = platform_get_drvdata(pdev); in msgdma_remove() local
934 dma_async_device_unregister(&mdev->dmadev); in msgdma_remove()
935 msgdma_dev_remove(mdev); in msgdma_remove()