Lines matching references to 'submit' in the async_tx RAID6 recovery code
20 size_t len, struct async_submit_ctl *submit) in async_sum_product() argument
22 struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ, in async_sum_product()
39 if (submit->flags & ASYNC_TX_FENCE) in async_sum_product()
58 async_tx_submit(chan, tx, submit); in async_sum_product()
70 async_tx_quiesce(&submit->depend_tx); in async_sum_product()
89 struct async_submit_ctl *submit) in async_mult() argument
91 struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ, in async_mult()
107 if (submit->flags & ASYNC_TX_FENCE) in async_mult()
128 async_tx_submit(chan, tx, submit); in async_mult()
141 async_tx_quiesce(&submit->depend_tx); in async_mult()
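
Both helpers above follow the same async_tx shape: look for a DMA channel that
can do DMA_PQ, translate ASYNC_TX_FENCE into DMA_PREP_FENCE on the hardware
descriptor, hand the result to async_tx_submit(), and otherwise quiesce the
dependency and do the Galois-field math on the CPU.  A minimal sketch of that
shape, assuming kernel context; the descriptor preparation and the hypothetical
sync_gf_op() are placeholders, not code from this file:

    #include <linux/async_tx.h>
    #include <linux/dmaengine.h>

    /* Sketch only: the offload-or-fallback pattern shared by
     * async_sum_product() and async_mult(). */
    static struct dma_async_tx_descriptor *
    async_gf_helper_sketch(struct page *dest, struct page **srcs, size_t len,
                           struct async_submit_ctl *submit)
    {
            struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ,
                                                          &dest, 1, srcs, 2, len);
            struct dma_async_tx_descriptor *tx = NULL;

            if (chan) {
                    /* Only the Q output of the PQ engine is wanted. */
                    enum dma_ctrl_flags dma_flags = DMA_PREP_PQ_DISABLE_P;

                    /* The next operation in the chain consumes this result. */
                    if (submit->flags & ASYNC_TX_FENCE)
                            dma_flags |= DMA_PREP_FENCE;

                    /* tx = chan->device->device_prep_dma_pq(..., dma_flags)
                     * would be built here; elided in this sketch. */
                    (void)dma_flags;
            }

            if (tx) {
                    /* Offloaded: chain the descriptor onto the submit ctl. */
                    async_tx_submit(chan, tx, submit);
                    return tx;
            }

            /* No channel or no descriptor: wait for the dependency and
             * compute synchronously. */
            async_tx_quiesce(&submit->depend_tx);
            /* sync_gf_op(dest, srcs, len); -- hypothetical CPU fallback */

            return NULL;
    }
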
155 struct async_submit_ctl *submit) in __2data_recov_4() argument
163 enum async_tx_flags flags = submit->flags; in __2data_recov_4()
164 dma_async_tx_callback cb_fn = submit->cb_fn; in __2data_recov_4()
165 void *cb_param = submit->cb_param; in __2data_recov_4()
166 void *scribble = submit->scribble; in __2data_recov_4()
186 init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble); in __2data_recov_4()
187 tx = async_sum_product(b, b_off, srcs, src_offs, coef, bytes, submit); in __2data_recov_4()
194 init_async_submit(submit, flags | ASYNC_TX_XOR_ZERO_DST, tx, cb_fn, in __2data_recov_4()
196 tx = async_xor_offs(a, a_off, srcs, src_offs, 2, bytes, submit); in __2data_recov_4()
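
The recovery routines reuse the caller's async_submit_ctl for every step of
the chain.  A condensed sketch of the idiom in __2data_recov_4() above (the
_5 and _n variants below repeat it): save the caller's completion context,
re-initialise the submit ctl with ASYNC_TX_FENCE and no callback for each
intermediate step, and restore the caller's flags and callback only on the
final operation.  The GF coefficient setup and the re-population of srcs[]
between steps are elided; async_sum_product() is the file-local helper
listed above:

    #include <linux/async_tx.h>

    /* Sketch only: the submit-reuse idiom of __2data_recov_4(). */
    static struct dma_async_tx_descriptor *
    recov_4_sketch(struct page *a, unsigned int a_off,
                   struct page *b, unsigned int b_off,
                   struct page **srcs, unsigned int *src_offs,
                   u8 *coef, size_t bytes, struct async_submit_ctl *submit)
    {
            /* Save the caller's completion context before reusing submit. */
            enum async_tx_flags flags = submit->flags;
            dma_async_tx_callback cb_fn = submit->cb_fn;
            void *cb_param = submit->cb_param;
            void *scribble = submit->scribble;
            struct dma_async_tx_descriptor *tx = NULL;

            /* Intermediate step: fenced, no callback, starts the chain. */
            init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble);
            tx = async_sum_product(b, b_off, srcs, src_offs, coef, bytes, submit);

            /* Final step: hand back the caller's flags and callback. */
            init_async_submit(submit, flags | ASYNC_TX_XOR_ZERO_DST, tx,
                              cb_fn, cb_param, scribble);
            return async_xor_offs(a, a_off, srcs, src_offs, 2, bytes, submit);
    }
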
205 struct async_submit_ctl *submit) in __2data_recov_5() argument
213 enum async_tx_flags flags = submit->flags; in __2data_recov_5()
214 dma_async_tx_callback cb_fn = submit->cb_fn; in __2data_recov_5()
215 void *cb_param = submit->cb_param; in __2data_recov_5()
216 void *scribble = submit->scribble; in __2data_recov_5()
247 init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble); in __2data_recov_5()
248 tx = async_memcpy(dp, g, dp_off, g_off, bytes, submit); in __2data_recov_5()
249 init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble); in __2data_recov_5()
251 raid6_gfexp[good], bytes, submit); in __2data_recov_5()
258 init_async_submit(submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx, in __2data_recov_5()
260 tx = async_xor_offs(dp, dp_off, srcs, src_offs, 2, bytes, submit); in __2data_recov_5()
267 init_async_submit(submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx, in __2data_recov_5()
269 tx = async_xor_offs(dq, dq_off, srcs, src_offs, 2, bytes, submit); in __2data_recov_5()
278 init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble); in __2data_recov_5()
279 tx = async_sum_product(dq, dq_off, srcs, src_offs, coef, bytes, submit); in __2data_recov_5()
286 init_async_submit(submit, flags | ASYNC_TX_XOR_DROP_DST, tx, cb_fn, in __2data_recov_5()
288 tx = async_xor_offs(dp, dp_off, srcs, src_offs, 2, bytes, submit); in __2data_recov_5()
296 struct async_submit_ctl *submit) in __2data_recov_n() argument
304 enum async_tx_flags flags = submit->flags; in __2data_recov_n()
305 dma_async_tx_callback cb_fn = submit->cb_fn; in __2data_recov_n()
306 void *cb_param = submit->cb_param; in __2data_recov_n()
307 void *scribble = submit->scribble; in __2data_recov_n()
329 init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble); in __2data_recov_n()
330 tx = async_gen_syndrome(blocks, offs, disks, bytes, submit); in __2data_recov_n()
347 init_async_submit(submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx, in __2data_recov_n()
349 tx = async_xor_offs(dp, dp_off, srcs, src_offs, 2, bytes, submit); in __2data_recov_n()
356 init_async_submit(submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx, in __2data_recov_n()
358 tx = async_xor_offs(dq, dq_off, srcs, src_offs, 2, bytes, submit); in __2data_recov_n()
367 init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble); in __2data_recov_n()
368 tx = async_sum_product(dq, dq_off, srcs, src_offs, coef, bytes, submit); in __2data_recov_n()
375 init_async_submit(submit, flags | ASYNC_TX_XOR_DROP_DST, tx, cb_fn, in __2data_recov_n()
377 tx = async_xor_offs(dp, dp_off, srcs, src_offs, 2, bytes, submit); in __2data_recov_n()
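
Two xor flags appear in these chains, and both only steer the synchronous
fallback inside async_xor_offs(); a DMA-offloaded xor handles either case
implicitly.  A hedged illustration using names mirroring the fragments above:

    #include <linux/async_tx.h>

    /* Sketch only: the difference between the two xor flags used above. */
    static struct dma_async_tx_descriptor *
    xor_flag_sketch(struct page *dest, unsigned int dest_off,
                    struct page **srcs, unsigned int *src_offs,
                    size_t bytes, void *scribble,
                    struct async_submit_ctl *submit)
    {
            struct dma_async_tx_descriptor *tx;

            /* ASYNC_TX_XOR_ZERO_DST: dest is pure output; the synchronous
             * path zeroes it before accumulating the sources. */
            init_async_submit(submit, ASYNC_TX_FENCE | ASYNC_TX_XOR_ZERO_DST,
                              NULL, NULL, NULL, scribble);
            tx = async_xor_offs(dest, dest_off, srcs, src_offs, 2, bytes, submit);

            /* ASYNC_TX_XOR_DROP_DST: dest is assumed to also appear in srcs[];
             * the synchronous path strips it from the source list because
             * xor_blocks() already accumulates into the destination. */
            init_async_submit(submit, ASYNC_TX_FENCE | ASYNC_TX_XOR_DROP_DST,
                              tx, NULL, NULL, scribble);
            return async_xor_offs(dest, dest_off, srcs, src_offs, 2, bytes, submit);
    }
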
395 struct async_submit_ctl *submit) in async_raid6_2data_recov() argument
397 void *scribble = submit->scribble; in async_raid6_2data_recov()
414 async_tx_quiesce(&submit->depend_tx); in async_raid6_2data_recov()
423 async_tx_sync_epilog(submit); in async_raid6_2data_recov()
445 blocks, offs, submit); in async_raid6_2data_recov()
453 blocks, offs, submit); in async_raid6_2data_recov()
456 blocks, offs, submit); in async_raid6_2data_recov()
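
async_raid6_2data_recov() is the exported entry point that dispatches to one
of the __2data_recov_* implementations above (essentially by how many
non-empty source blocks the stripe has).  A hedged caller sketch, assuming the
stripe pages, offsets, scribble buffer and completion callback are set up
elsewhere:

    #include <linux/async_tx.h>

    /* Sketch only: recover two failed data blocks of a RAID6 stripe.
     * blocks[]/offs[] describe the whole stripe (data, then P, then Q);
     * faila/failb are the failed data indices. */
    static struct dma_async_tx_descriptor *
    recover_two_blocks_sketch(struct page **blocks, unsigned int *offs,
                              int disks, size_t bytes, int faila, int failb,
                              addr_conv_t *scribble,
                              dma_async_tx_callback done, void *done_arg)
    {
            struct async_submit_ctl submit;
            struct dma_async_tx_descriptor *tx;

            /* No dependency; 'done' runs when the recovery chain completes. */
            init_async_submit(&submit, 0, NULL, done, done_arg, scribble);
            tx = async_raid6_2data_recov(disks, bytes, faila, failb,
                                         blocks, offs, &submit);

            /* Flush any descriptors queued on DMA channels. */
            async_tx_issue_pending_all();
            return tx;
    }
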
473 struct async_submit_ctl *submit) in async_raid6_datap_recov() argument
479 enum async_tx_flags flags = submit->flags; in async_raid6_datap_recov()
480 dma_async_tx_callback cb_fn = submit->cb_fn; in async_raid6_datap_recov()
481 void *cb_param = submit->cb_param; in async_raid6_datap_recov()
482 void *scribble = submit->scribble; in async_raid6_datap_recov()
497 async_tx_quiesce(&submit->depend_tx); in async_raid6_datap_recov()
506 async_tx_sync_epilog(submit); in async_raid6_datap_recov()
546 init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, in async_raid6_datap_recov()
548 tx = async_memcpy(p, g, p_off, g_off, bytes, submit); in async_raid6_datap_recov()
550 init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, in async_raid6_datap_recov()
553 raid6_gfexp[good], bytes, submit); in async_raid6_datap_recov()
555 init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, in async_raid6_datap_recov()
557 tx = async_gen_syndrome(blocks, offs, disks, bytes, submit); in async_raid6_datap_recov()
573 init_async_submit(submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx, in async_raid6_datap_recov()
575 tx = async_xor_offs(dq, dq_off, srcs, src_offs, 2, bytes, submit); in async_raid6_datap_recov()
577 init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble); in async_raid6_datap_recov()
578 tx = async_mult(dq, dq_off, dq, dq_off, coef, bytes, submit); in async_raid6_datap_recov()
584 init_async_submit(submit, flags | ASYNC_TX_XOR_DROP_DST, tx, cb_fn, in async_raid6_datap_recov()
586 tx = async_xor_offs(p, p_off, srcs, src_offs, 2, bytes, submit); in async_raid6_datap_recov()
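
When no DMA channel or no scribble buffer is available, async_raid6_datap_recov()
punts to the synchronous lib/raid6 routine; that is what the
async_tx_quiesce()/async_tx_sync_epilog() references above correspond to.  A
condensed sketch of that fallback (ptrs[] is caller-provided workspace here,
whereas the real code reuses the scribble buffer):

    #include <linux/async_tx.h>
    #include <linux/mm.h>
    #include <linux/raid/pq.h>

    /* Sketch only: the synchronous fallback of async_raid6_datap_recov(). */
    static struct dma_async_tx_descriptor *
    datap_recov_sync_sketch(int disks, size_t bytes, int faila,
                            struct page **blocks, unsigned int *offs,
                            void **ptrs, struct async_submit_ctl *submit)
    {
            int i;

            /* Wait for every prior operation in the dependency chain. */
            async_tx_quiesce(&submit->depend_tx);

            /* Map pages to kernel addresses; missing blocks read as zero. */
            for (i = 0; i < disks; i++)
                    ptrs[i] = blocks[i] ?
                            page_address(blocks[i]) + offs[i] :
                            (void *)raid6_empty_zero_page;

            /* CPU recovery of the failed data block and P. */
            raid6_datap_recov(disks, bytes, faila, ptrs);

            /* Run the caller's completion callback synchronously. */
            async_tx_sync_epilog(submit);
            return NULL;
    }
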