/linux/drivers/dma/
mmp_pdma.c
  377  desc->async_tx.phys = pdesc;  in mmp_pdma_alloc_descriptor()
  487  new->async_tx.cookie = 0;  in mmp_pdma_prep_memcpy()
  488  async_tx_ack(&new->async_tx);  in mmp_pdma_prep_memcpy()
  515  return &first->async_tx;  in mmp_pdma_prep_memcpy()
  572  new->async_tx.cookie = 0;  in mmp_pdma_prep_slave_sg()
  573  async_tx_ack(&new->async_tx);  in mmp_pdma_prep_slave_sg()
  586  first->async_tx.flags = flags;  in mmp_pdma_prep_slave_sg()
  595  return &first->async_tx;  in mmp_pdma_prep_slave_sg()
  660  new->async_tx.cookie = 0;  in mmp_pdma_prep_dma_cyclic()
  661  async_tx_ack(&new->async_tx);  in mmp_pdma_prep_dma_cyclic()
  [all …]
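The mmp_pdma matches above show a common ->device_prep_*() convention: a transfer is built as a chain of descriptors, every link after the first gets cookie = 0 and is pre-acked, and only the head descriptor is handed back to the client. A minimal sketch of that convention, with invented my_* names rather than the driver's own code:

```c
#include <linux/dmaengine.h>
#include <linux/errno.h>
#include <linux/list.h>

/* Hypothetical software descriptor wrapping the dmaengine descriptor. */
struct my_sw_desc {
	struct dma_async_tx_descriptor async_tx;
	struct list_head node;
};

/*
 * Prep-time bookkeeping: intermediate links never carry a cookie and are
 * pre-acked so the core never waits for the client to ack them; only
 * &first->async_tx is returned as the transaction.
 */
static struct dma_async_tx_descriptor *
my_prep_chain(struct list_head *chain, unsigned long flags)
{
	struct my_sw_desc *first = NULL, *d;

	list_for_each_entry(d, chain, node) {
		if (!first) {
			first = d;
			continue;
		}
		d->async_tx.cookie = 0;		/* no cookie for inner links */
		async_tx_ack(&d->async_tx);	/* client never acks these */
	}

	if (!first)
		return NULL;

	first->async_tx.flags = flags;
	first->async_tx.cookie = -EBUSY;	/* real cookie set in tx_submit */
	return &first->async_tx;
}
```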
mv_xor_v2.c
  183  struct dma_async_tx_descriptor async_tx;  (member)
  314  __func__, sw_desc, &sw_desc->async_tx);  in mv_xor_v2_tx_submit()
  355  if (async_tx_test_ack(&sw_desc->async_tx)) {  in mv_xor_v2_prep_sw_desc()
  395  sw_desc->async_tx.flags = flags;  in mv_xor_v2_prep_dma_memcpy()
  424  return &sw_desc->async_tx;  in mv_xor_v2_prep_dma_memcpy()
  451  sw_desc->async_tx.flags = flags;  in mv_xor_v2_prep_dma_xor()
  483  return &sw_desc->async_tx;  in mv_xor_v2_prep_dma_xor()
  513  return &sw_desc->async_tx;  in mv_xor_v2_prep_dma_interrupt()
  578  if (next_pending_sw_desc->async_tx.cookie > 0) {  in mv_xor_v2_tasklet()
  588  &next_pending_sw_desc->async_tx, NULL);  in mv_xor_v2_tasklet()
  [all …]
mv_xor.c
  196  BUG_ON(desc->async_tx.cookie < 0);  in mv_desc_run_tx_complete_actions()
  198  if (desc->async_tx.cookie > 0) {  in mv_desc_run_tx_complete_actions()
  199  cookie = desc->async_tx.cookie;  in mv_desc_run_tx_complete_actions()
  201  dma_descriptor_unmap(&desc->async_tx);  in mv_desc_run_tx_complete_actions()
  209  dma_run_dependencies(&desc->async_tx);  in mv_desc_run_tx_complete_actions()
  365  async_tx_ack(&iter->async_tx);  in mv_chan_alloc_slot()
  366  iter->async_tx.cookie = -EBUSY;  in mv_chan_alloc_slot()
  392  __func__, sw_desc, &sw_desc->async_tx);  in mv_xor_tx_submit()
  408  &old_chain_tail->async_tx.phys);  in mv_xor_tx_submit()
  579  sw_desc->async_tx.flags = flags;  in mv_xor_prep_dma_xor()
  [all …]
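The mv_desc_run_tx_complete_actions() hits show the usual completion sequence for a finished descriptor: record its cookie, unmap the buffers, invoke the client callback, then kick any dependent async_tx transactions. A hedged sketch of that sequence (it assumes a file under drivers/dma/ so the private dmaengine.h helpers are available; everything except the dmaengine calls is made up):

```c
#include <linux/bug.h>
#include <linux/dmaengine.h>
#include "dmaengine.h"	/* drivers/dma/ private helpers */

struct my_desc {
	struct dma_async_tx_descriptor async_tx;
};

/* Run the dmaengine completion actions for one finished descriptor. */
static dma_cookie_t my_run_tx_complete_actions(struct my_desc *desc,
					       dma_cookie_t cookie)
{
	BUG_ON(desc->async_tx.cookie < 0);

	if (desc->async_tx.cookie > 0) {
		cookie = desc->async_tx.cookie;
		dma_descriptor_unmap(&desc->async_tx);
		dmaengine_desc_get_callback_invoke(&desc->async_tx, NULL);
	}

	/* Submit any dependent transactions queued behind this one. */
	dma_run_dependencies(&desc->async_tx);

	return cookie;
}
```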
fsldma.c
  429  cookie = dma_cookie_assign(&child->async_tx);  in fsl_dma_tx_submit()
  472  desc->async_tx.tx_submit = fsl_dma_tx_submit;  in fsl_dma_alloc_descriptor()
  473  desc->async_tx.phys = pdesc;  in fsl_dma_alloc_descriptor()
  494  if (async_tx_test_ack(&desc->async_tx))  in fsldma_clean_completed_descriptor()
  548  if (!async_tx_test_ack(&desc->async_tx)) {  in fsldma_clean_running_descriptor()
  620  set_cdar(chan, desc->async_tx.phys);  in fsl_chan_xfer_ld_queue()
  659  if (desc->async_tx.phys == curr_phys) {  in fsldma_cleanup_descriptors()
  796  new->async_tx.cookie = 0;  in fsl_dma_prep_memcpy()
  797  async_tx_ack(&new->async_tx);  in fsl_dma_prep_memcpy()
  809  new->async_tx.cookie = -EBUSY;  in fsl_dma_prep_memcpy()
  [all …]
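fsl_dma_tx_submit() (line 429 above) assigns a cookie to every child link of the chain, so the last cookie handed out is the one returned to the client. A rough sketch of that ->tx_submit() shape, with invented my_* types (dma_cookie_assign() comes from the drivers/dma private dmaengine.h):

```c
#include <linux/dmaengine.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include "dmaengine.h"	/* dma_cookie_assign() */

struct my_desc {
	struct dma_async_tx_descriptor async_tx;
	struct list_head tx_list;	/* child links of this transaction */
	struct list_head node;
};

struct my_chan {
	struct dma_chan common;
	spinlock_t lock;
	struct list_head ld_pending;
};

#define tx_to_my_desc(tx) container_of(tx, struct my_desc, async_tx)
#define to_my_chan(c)	  container_of(c, struct my_chan, common)

static dma_cookie_t my_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct my_chan *chan = to_my_chan(tx->chan);
	struct my_desc *desc = tx_to_my_desc(tx), *child;
	dma_cookie_t cookie = -EINVAL;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);

	/* Every link in the chain gets a cookie; the last one wins. */
	list_for_each_entry(child, &desc->tx_list, node)
		cookie = dma_cookie_assign(&child->async_tx);

	/* Queue the whole chain on the pending list. */
	list_splice_tail_init(&desc->tx_list, &chan->ld_pending);

	spin_unlock_irqrestore(&chan->lock, flags);
	return cookie;
}
```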
fsl_raid.c
  138  dma_cookie_complete(&desc->async_tx);  in fsl_re_desc_done()
  139  dma_descriptor_unmap(&desc->async_tx);  in fsl_re_desc_done()
  140  dmaengine_desc_get_callback_invoke(&desc->async_tx, NULL);  in fsl_re_desc_done()
  150  if (async_tx_test_ack(&desc->async_tx))  in fsl_re_cleanup_descs()
  255  desc->async_tx.tx_submit = fsl_re_tx_submit;  in fsl_re_init_desc()
  256  dma_async_tx_descriptor_init(&desc->async_tx, &re_chan->chan);  in fsl_re_init_desc()
  288  desc->async_tx.flags = flags;  in fsl_re_chan_alloc_desc()
  305  desc->async_tx.flags = flags;  in fsl_re_chan_alloc_desc()
  383  return &desc->async_tx;  in fsl_re_prep_dma_genq()
  517  return &desc->async_tx;  in fsl_re_prep_dma_pq()
  [all …]
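fsl_re_cleanup_descs() (line 150) only recycles a completed descriptor once the client has acknowledged it, which is exactly what async_tx_test_ack() reports. A small sketch of that recycling step, with hypothetical list names:

```c
#include <linux/dmaengine.h>
#include <linux/list.h>

struct my_desc {
	struct dma_async_tx_descriptor async_tx;
	struct list_head node;
};

/*
 * Move completed descriptors back to the free list, but only those the
 * client has acknowledged (DMA_CTRL_ACK set); unacked descriptors may
 * still be referenced as a dependency by the async_tx core.
 */
static void my_cleanup_descs(struct list_head *ack_q, struct list_head *free_q)
{
	struct my_desc *desc, *tmp;

	list_for_each_entry_safe(desc, tmp, ack_q, node) {
		if (async_tx_test_ack(&desc->async_tx))
			list_move_tail(&desc->node, free_q);
	}
}
```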
altera-msgdma.c
  161  struct dma_async_tx_descriptor async_tx;  (member)
  202  #define tx_to_desc(tx) container_of(tx, struct msgdma_sw_desc, async_tx)
  371  async_tx_ack(&first->async_tx);  in msgdma_prep_memcpy()
  372  first->async_tx.flags = flags;  in msgdma_prep_memcpy()
  374  return &first->async_tx;  in msgdma_prep_memcpy()
  456  first->async_tx.flags = flags;  in msgdma_prep_slave_sg()
  458  return &first->async_tx;  in msgdma_prep_slave_sg()
  593  dmaengine_desc_get_callback(&desc->async_tx, &cb);  in msgdma_chan_desc_cleanup()
  620  dma_cookie_complete(&desc->async_tx);  in msgdma_complete_descriptor()
  673  dma_async_tx_descriptor_init(&desc->async_tx, &mdev->dmachan);  in msgdma_alloc_chan_resources()
  [all …]
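The tx_to_desc() define at line 202 is the standard idiom for getting from the dma_async_tx_descriptor the core hands back to the driver's own wrapper: embed the descriptor as a member and recover the wrapper with container_of() (fsldma.h, shdma.h and ppc4xx/adma.h below do the same). Illustrated with a hypothetical wrapper:

```c
#include <linux/dmaengine.h>
#include <linux/kernel.h>	/* container_of() */

struct my_sw_desc {
	struct dma_async_tx_descriptor async_tx;	/* embedded, not a pointer */
	/* driver-private state follows ... */
};

#define tx_to_desc(tx) container_of(tx, struct my_sw_desc, async_tx)

/* Example use: recover the wrapper from the descriptor the core passes in. */
static void my_desc_complete(struct dma_async_tx_descriptor *tx)
{
	struct my_sw_desc *desc = tx_to_desc(tx);

	/* ... operate on desc's driver-private state here ... */
	(void)desc;
}
```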
nbpfaxi.c
  151  struct dma_async_tx_descriptor async_tx;  (member)
  647  if (desc->async_tx.cookie == cookie) {  in nbpf_tx_status()
  654  if (desc->async_tx.cookie == cookie) {  in nbpf_tx_status()
  723  desc->async_tx.tx_submit = nbpf_tx_submit;  in nbpf_desc_page_alloc()
  857  __func__, desc, desc->async_tx.cookie);  in nbpf_chan_idle()
  955  desc->async_tx.flags = flags;  in nbpf_prep_sg()
  956  desc->async_tx.cookie = -EBUSY;  in nbpf_prep_sg()
  985  return &desc->async_tx;  in nbpf_prep_sg()
  1131  } else if (async_tx_test_ack(&desc->async_tx)) {  in nbpf_chan_tasklet()
  1153  dma_cookie_complete(&desc->async_tx);  in nbpf_chan_tasklet()
  [all …]
fsldma.h
  104  struct dma_async_tx_descriptor async_tx;  (member)
  192  #define tx_to_fsl_desc(tx) container_of(tx, struct fsl_desc_sw, async_tx)
mv_xor.h
  147  struct dma_async_tx_descriptor async_tx;  (member)
fsl_raid.h
  294  struct dma_async_tx_descriptor async_tx;  (member)
/linux/drivers/dma/sh/
shdma-base.c
  92  chunk->async_tx.cookie > 0 ||  in shdma_tx_submit()
  98  chunk->async_tx.callback = callback;  in shdma_tx_submit()
  102  chunk->async_tx.callback = NULL;  in shdma_tx_submit()
  389  async_tx_ack(&desc->async_tx);  in __ld_cleanup()
  524  new->async_tx.cookie = -EBUSY;  in shdma_add_desc()
  528  new->async_tx.cookie = -EINVAL;  in shdma_add_desc()
  534  new->async_tx.cookie);  in shdma_add_desc()
  537  new->async_tx.flags = flags;  in shdma_add_desc()
  620  new->async_tx.cookie = -ENOSPC;  in shdma_prep_sg()
  627  return &first->async_tx;  in shdma_prep_sg()
  [all …]
rcar-dmac.c
  73  struct dma_async_tx_descriptor async_tx;  (member)
  440  else if (desc->async_tx.callback)  in rcar_dmac_chan_start_xfer()
  599  if (async_tx_test_ack(&desc->async_tx)) {  in rcar_dmac_desc_recycle_acked()
  950  desc->async_tx.flags = dma_flags;  in rcar_dmac_chan_prep_sg()
  951  desc->async_tx.cookie = -EBUSY;  in rcar_dmac_chan_prep_sg()
  1047  return &desc->async_tx;  in rcar_dmac_chan_prep_sg()
  1351  if (cookie != desc->async_tx.cookie) {  in rcar_dmac_chan_get_residue()
  1353  if (cookie == desc->async_tx.cookie)  in rcar_dmac_chan_get_residue()
  1357  if (cookie == desc->async_tx.cookie)  in rcar_dmac_chan_get_residue()
  1361  if (cookie == desc->async_tx.cookie)  in rcar_dmac_chan_get_residue()
  [all …]
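rcar_dmac_chan_get_residue() (lines 1351-1361) identifies which transfer a cookie refers to by comparing it against desc->async_tx.cookie on the channel's lists. A simplified, hypothetical version of that lookup, skipping the hardware read-back for the running descriptor:

```c
#include <linux/dmaengine.h>
#include <linux/list.h>

struct my_desc {
	struct dma_async_tx_descriptor async_tx;
	struct list_head node;
	size_t len;		/* total transfer length */
	size_t residue;		/* bytes left, maintained by the driver */
};

struct my_chan_lists {
	struct my_desc *running;	/* descriptor currently on the hardware */
	struct list_head pending;	/* submitted, not started yet */
	struct list_head done;		/* finished, not yet cleaned up */
};

/* Map a cookie back to its descriptor and report how much is left. */
static size_t my_get_residue(struct my_chan_lists *c, dma_cookie_t cookie)
{
	struct my_desc *desc;

	if (c->running && cookie == c->running->async_tx.cookie)
		return c->running->residue;	/* would be read back from hw */

	list_for_each_entry(desc, &c->pending, node)
		if (cookie == desc->async_tx.cookie)
			return desc->len;	/* not started: all outstanding */

	list_for_each_entry(desc, &c->done, node)
		if (cookie == desc->async_tx.cookie)
			return 0;		/* already complete */

	return 0;
}
```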
shdma.h
  57  #define tx_to_sh_desc(tx) container_of(tx, struct sh_desc, async_tx)
/linux/Documentation/crypto/
async-tx-api.rst
  31  The async_tx API provides methods for describing a chain of asynchronous
  106  async_tx call will implicitly set the acknowledged state.
  153  #include <linux/async_tx.h>
  191  See include/linux/async_tx.h for more information on the flags. See the
  202  accommodate assumptions made by applications using the async_tx API:
  263  include/linux/async_tx.h:
  264  core header file for the async_tx api
  265  crypto/async_tx/async_tx.c:
  266  async_tx interface to dmaengine and common code
  267  crypto/async_tx/async_memcpy.c:
  [all …]
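async-tx-api.rst documents the client-side offload API layered on top of these drivers. For orientation, a minimal (untested) client sketch along the lines the document describes: build a submit control, issue an asynchronous copy, and flush the channels. The callback, completion and page arguments here are placeholders, not taken from the document.

```c
#include <linux/async_tx.h>
#include <linux/completion.h>
#include <linux/printk.h>

static void my_copy_done(void *param)
{
	complete(param);	/* runs from the sync or async completion path */
}

static void my_offload_copy(struct page *dst, struct page *src, size_t len)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct async_submit_ctl submit;
	struct dma_async_tx_descriptor *tx;

	/* ASYNC_TX_ACK: no further dependencies will be attached to tx. */
	init_async_submit(&submit, ASYNC_TX_ACK, NULL, my_copy_done, &done, NULL);

	tx = async_memcpy(dst, src, 0, 0, len, &submit);
	if (!tx)
		pr_debug("copy was performed synchronously\n");

	async_tx_issue_pending_all();
	wait_for_completion(&done);
}
```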
/linux/drivers/dma/ppc4xx/
adma.c
  1467  if (desc->async_tx.cookie > 0) {  in ppc440spe_adma_run_tx_complete_actions()
  1468  cookie = desc->async_tx.cookie;  in ppc440spe_adma_run_tx_complete_actions()
  1469  desc->async_tx.cookie = 0;  in ppc440spe_adma_run_tx_complete_actions()
  1563  prefetch(&_iter->async_tx);  in __ppc440spe_adma_slot_cleanup()
  1709  prefetch(&_iter->async_tx);  in ppc440spe_adma_alloc_slots()
  1732  iter->async_tx.cookie = 0;  in ppc440spe_adma_alloc_slots()
  1965  sw_desc->async_tx.flags = flags;  in ppc440spe_adma_prep_dma_interrupt()
  2006  sw_desc->async_tx.flags = flags;  in ppc440spe_adma_prep_dma_memcpy()
  2051  sw_desc->async_tx.flags = flags;  in ppc440spe_adma_prep_dma_xor()
  2156  sw_desc->async_tx.flags = flags;  in ppc440spe_dma01_prep_mult()
  [all …]
adma.h
  20  container_of(tx, struct ppc440spe_adma_desc_slot, async_tx)
  147  struct dma_async_tx_descriptor async_tx;  (member)
/linux/drivers/dma/xilinx/
xilinx_dma.c
  1501  head_desc->async_tx.phys);  in xilinx_cdma_start_transfer()
  1569  head_desc->async_tx.phys);  in xilinx_dma_start_transfer()
  1653  head_desc->async_tx.phys);  in xilinx_mcdma_start_transfer()
  2069  async_tx_ack(&desc->async_tx);  in xilinx_vdma_dma_prep_interleaved()
  2107  desc->async_tx.phys = segment->phys;  in xilinx_vdma_dma_prep_interleaved()
  2109  return &desc->async_tx;  in xilinx_vdma_dma_prep_interleaved()
  2162  desc->async_tx.phys = segment->phys;  in xilinx_cdma_prep_memcpy()
  2165  return &desc->async_tx;  in xilinx_cdma_prep_memcpy()
  2267  return &desc->async_tx;  in xilinx_dma_prep_slave_sg()
  2374  return &desc->async_tx;  in xilinx_dma_prep_dma_cyclic()
  [all …]
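The start_transfer() hits show why the prep routines record the first hardware segment's bus address in desc->async_tx.phys: when the channel is started, that address is what gets programmed into the controller's current-descriptor register. A very rough sketch of the idea; the register offset, accessor and types below are invented for illustration, not the Xilinx driver's:

```c
#include <linux/dmaengine.h>
#include <linux/io.h>
#include <linux/kernel.h>

#define MY_DMA_REG_CURDESC	0x08	/* hypothetical register offset */

struct my_chan {
	void __iomem *regs;
};

struct my_desc {
	struct dma_async_tx_descriptor async_tx;	/* .phys = first hw segment */
};

/* Point the engine at the head descriptor before setting the run bit. */
static void my_start_transfer(struct my_chan *chan, struct my_desc *head_desc)
{
	writel(lower_32_bits(head_desc->async_tx.phys),
	       chan->regs + MY_DMA_REG_CURDESC);
	/* ... write the upper address bits and the control/run register here ... */
}
```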
zynqmp_dma.c
  146  async_tx)
  186  struct dma_async_tx_descriptor async_tx;  (member)
  497  dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);  in zynqmp_dma_alloc_chan_resources()
  498  desc->async_tx.tx_submit = zynqmp_dma_tx_submit;  in zynqmp_dma_alloc_chan_resources()
  625  dmaengine_desc_get_callback(&desc->async_tx, &cb);  in zynqmp_dma_chan_desc_cleanup()
  652  dma_cookie_complete(&desc->async_tx);  in zynqmp_dma_complete_descriptor()
  876  async_tx_ack(&first->async_tx);  in zynqmp_dma_prep_memcpy()
  877  first->async_tx.flags = (enum dma_ctrl_flags)flags;  in zynqmp_dma_prep_memcpy()
  878  return &first->async_tx;  in zynqmp_dma_prep_memcpy()
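zynqmp_dma_alloc_chan_resources() (lines 497-498) shows the one-time wiring each driver does when it creates its descriptor pool: initialise the embedded dma_async_tx_descriptor against the channel and install the driver's ->tx_submit() hook. Sketched with placeholder names (dma_cookie_assign() again comes from the drivers/dma private dmaengine.h):

```c
#include <linux/dmaengine.h>
#include "dmaengine.h"	/* dma_cookie_assign() */

struct my_sw_desc {
	struct dma_async_tx_descriptor async_tx;
};

static dma_cookie_t my_tx_submit(struct dma_async_tx_descriptor *tx)
{
	/* A real driver also queues the descriptor on its pending list. */
	return dma_cookie_assign(tx);
}

/* Per-descriptor setup done once, at channel resource allocation time. */
static void my_init_desc(struct my_sw_desc *desc, struct dma_chan *chan)
{
	dma_async_tx_descriptor_init(&desc->async_tx, chan);
	desc->async_tx.tx_submit = my_tx_submit;
}
```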
/linux/crypto/async_tx/
Makefile
  2  obj-$(CONFIG_ASYNC_CORE) += async_tx.o
/linux/include/linux/platform_data/
dma-iop32x.h
  92  struct dma_async_tx_descriptor async_tx;  (member)
/linux/drivers/dma/sf-pdma/
sf-pdma.h
  83  struct dma_async_tx_descriptor *async_tx;  (member)
sf-pdma.c
  103  desc->async_tx = vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);  in sf_pdma_prep_dma_memcpy()
  109  return desc->async_tx;  in sf_pdma_prep_dma_memcpy()
  332  dmaengine_desc_get_callback_invoke(desc->async_tx, NULL);  in sf_pdma_errbh_tasklet()
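sf-pdma is built on the virt-dma helpers, so instead of embedding its own dma_async_tx_descriptor it stores the pointer returned by vchan_tx_prep(), which wraps the virt_dma_desc's descriptor. A minimal sketch of that prep step (assumes a driver under drivers/dma/ with virt-dma.h available; the my_* names are placeholders):

```c
#include <linux/dmaengine.h>
#include "virt-dma.h"	/* drivers/dma/virt-dma.h */

struct my_desc {
	struct virt_dma_desc vdesc;
	struct dma_async_tx_descriptor *async_tx;	/* set by vchan_tx_prep() */
};

struct my_chan {
	struct virt_dma_chan vchan;
};

static struct dma_async_tx_descriptor *
my_prep_memcpy(struct my_chan *chan, struct my_desc *desc, unsigned long flags)
{
	/* virt-dma initialises the embedded descriptor and handles submit. */
	desc->async_tx = vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
	return desc->async_tx;
}
```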
/linux/include/linux/
shdma-base.h
  48  struct dma_async_tx_descriptor async_tx;  (member)
/linux/Documentation/driver-api/dmaengine/
client.rst
  7  .. note:: For DMA Engine usage in async_tx please see:
  148  Although the async_tx API specifies that completion callback
/linux/crypto/
Makefile
  201  obj-$(CONFIG_ASYNC_CORE) += async_tx/