Lines matching refs: edesc

124 	struct ahash_edesc *edesc;  member
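
The members referenced throughout this listing, which appears to come from the CAAM ahash driver (drivers/crypto/caam/caamhash.c), are enough to sketch the extended descriptor. A minimal reconstruction, with field order, types, and the hw_desc bound assumed rather than copied from the kernel header:

    struct ahash_edesc {
        dma_addr_t sec4_sg_dma;      /* bus address of the mapped S/G table (561, 747, 908) */
        int src_nents;               /* mapped entries of req->src (557-558, 886) */
        int sec4_sg_bytes;           /* byte size of the S/G table (560-562, 746, 887) */
        bool bklog;                  /* job was backlogged (600, 651, 769) */
        u32 hw_desc[DESC_JOB_IO_LEN_MAX / sizeof(u32)];  /* CAAM job descriptor (718); bound assumed */
        struct sec4_sg_entry sec4_sg[];  /* flexible S/G array, sized at alloc (712) */
    };

The flexible array member is what makes the struct_size() allocation on line 712 work: the header and the S/G table come from a single kzalloc().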
552 struct ahash_edesc *edesc, in ahash_unmap() argument
557 if (edesc->src_nents) in ahash_unmap()
558 dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE); in ahash_unmap()
560 if (edesc->sec4_sg_bytes) in ahash_unmap()
561 dma_unmap_single(dev, edesc->sec4_sg_dma, in ahash_unmap()
562 edesc->sec4_sg_bytes, DMA_TO_DEVICE); in ahash_unmap()
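
Lines 552-562 give the unmap helper almost in full. Assembled into a sketch (the signature is inferred from the call on line 581; any cleanup at the elided lines, and the use of dst_len, are not visible here):

    static inline void ahash_unmap(struct device *dev,
                                   struct ahash_edesc *edesc,
                                   struct ahash_request *req, int dst_len)
    {
        /* Undo the dma_map_sg() of the request source, if any. */
        if (edesc->src_nents)
            dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);

        /* Undo the dma_map_single() of the sec4 S/G table, if built. */
        if (edesc->sec4_sg_bytes)
            dma_unmap_single(dev, edesc->sec4_sg_dma,
                             edesc->sec4_sg_bytes, DMA_TO_DEVICE);
    }

Both branches key off fields that the submit paths below fill in precisely so that teardown never has to guess what was mapped.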
572 struct ahash_edesc *edesc, in ahash_unmap_ctx() argument
581 ahash_unmap(dev, edesc, req, dst_len); in ahash_unmap_ctx()
589 struct ahash_edesc *edesc; in ahash_done_cpy() local
599 edesc = state->edesc; in ahash_done_cpy()
600 has_bklog = edesc->bklog; in ahash_done_cpy()
605 ahash_unmap_ctx(jrdev, edesc, req, digestsize, dir); in ahash_done_cpy()
607 kfree(edesc); in ahash_done_cpy()
640 struct ahash_edesc *edesc; in ahash_done_switch() local
650 edesc = state->edesc; in ahash_done_switch()
651 has_bklog = edesc->bklog; in ahash_done_switch()
655 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, dir); in ahash_done_switch()
656 kfree(edesc); in ahash_done_switch()
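
The two completion callbacks (589-607 and 640-656) share one teardown order: recover the edesc stashed in the request state, capture bklog before the memory goes away, unmap, free. Condensed (the elided steps, status checking and result copy-out, are assumptions about what sits between these lines):

    struct ahash_edesc *edesc = state->edesc;
    bool has_bklog = edesc->bklog;    /* must be read before the kfree() below */

    /* ... check job status, copy digest or running context out ... */

    ahash_unmap_ctx(jrdev, edesc, req, digestsize, dir);
    kfree(edesc);

    /* has_bklog then selects the normal vs. backlog completion path. */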
709 struct ahash_edesc *edesc; in ahash_edesc_alloc() local
712 edesc = kzalloc(struct_size(edesc, sec4_sg, sg_num), flags); in ahash_edesc_alloc()
713 if (!edesc) in ahash_edesc_alloc()
716 state->edesc = edesc; in ahash_edesc_alloc()
718 init_job_desc_shared(edesc->hw_desc, sh_desc_dma, desc_len(sh_desc), in ahash_edesc_alloc()
721 return edesc; in ahash_edesc_alloc()
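
Lines 709-721 outline the allocator. A sketch with the parameter list and GFP selection filled in as plausible assumptions (the truncated lines do not show them):

    static struct ahash_edesc *ahash_edesc_alloc(struct ahash_request *req,
                                                 int sg_num, u32 *sh_desc,
                                                 dma_addr_t sh_desc_dma)
    {
        struct caam_hash_state *state = ahash_request_ctx(req);
        gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
                      GFP_KERNEL : GFP_ATOMIC;   /* assumed; the usual CAAM idiom */
        struct ahash_edesc *edesc;

        /* One allocation covers the header plus sg_num trailing
         * sec4_sg entries via the flexible array member. */
        edesc = kzalloc(struct_size(edesc, sec4_sg, sg_num), flags);
        if (!edesc)
            return NULL;

        /* Stash it where the done callbacks will look for it. */
        state->edesc = edesc;

        /* Start the job descriptor as a jump to the shared descriptor
         * instead of rebuilding the whole program per request. */
        init_job_desc_shared(edesc->hw_desc, sh_desc_dma, desc_len(sh_desc),
                             HDR_SHARE_DEFER | HDR_REVERSE);  /* flags assumed */

        return edesc;
    }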
725 struct ahash_edesc *edesc, in ahash_edesc_add_src() argument
734 struct sec4_sg_entry *sg = edesc->sec4_sg; in ahash_edesc_add_src()
746 edesc->sec4_sg_bytes = sgsize; in ahash_edesc_add_src()
747 edesc->sec4_sg_dma = src_dma; in ahash_edesc_add_src()
754 append_seq_in_ptr(edesc->hw_desc, src_dma, first_bytes + to_hash, in ahash_edesc_add_src()
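
Lines 725-754 show ahash_edesc_add_src() choosing between a direct pointer and an S/G table for the source data. A hedged sketch of the two branches (the threshold, the table mapping, and the error handling are partly truncated above, so those details are assumptions):

    struct sec4_sg_entry *sg = edesc->sec4_sg;
    dma_addr_t src_dma;
    u32 options;

    if (nents > 1 || first_sg) {
        /* Multi-segment source: emit a sec4 S/G table right after the
         * edesc header and map the whole table in one shot. */
        unsigned int sgsize = sizeof(*sg) * (first_sg + nents);

        sg_to_sec4_sg_last(req->src, to_hash, sg + first_sg, 0);

        src_dma = dma_map_single(ctx->jrdev, sg, sgsize, DMA_TO_DEVICE);
        if (dma_mapping_error(ctx->jrdev, src_dma))
            return -ENOMEM;

        edesc->sec4_sg_bytes = sgsize;   /* remembered for ahash_unmap() */
        edesc->sec4_sg_dma = src_dma;
        options = LDST_SGF;              /* mark the pointer as an S/G table */
    } else {
        /* Single contiguous segment: hand CAAM the address directly. */
        src_dma = sg_dma_address(req->src);
        options = 0;
    }

    append_seq_in_ptr(edesc->hw_desc, src_dma, first_bytes + to_hash, options);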
766 u32 *desc = state->edesc->hw_desc; in ahash_do_one_req()
769 state->edesc->bklog = true; in ahash_do_one_req()
777 ahash_unmap(jrdev, state->edesc, req, 0); in ahash_do_one_req()
778 kfree(state->edesc); in ahash_do_one_req()
794 struct ahash_edesc *edesc = state->edesc; in ahash_enqueue_req() local
795 u32 *desc = edesc->hw_desc; in ahash_enqueue_req()
812 ahash_unmap_ctx(jrdev, edesc, req, dst_len, dir); in ahash_enqueue_req()
813 kfree(edesc); in ahash_enqueue_req()
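
Lines 766-778 and 794-813 are the two submit paths, and together they pin down who frees the edesc. A sketch of the enqueue tail under that reading (the exact return-code handling follows the usual caam_jr_enqueue() contract and is an assumption):

    ret = caam_jr_enqueue(jrdev, desc, cbk, &req->base);
    if (ret == -EINPROGRESS || ret == -EBUSY) {
        /* The job ring (or the backlog) now owns the edesc; the done
         * callback unmaps and frees it (lines 605-607, 655-656). */
        return ret;
    }

    /* Hard enqueue failure: unwind here instead (lines 812-813). */
    ahash_unmap_ctx(jrdev, edesc, req, dst_len, dir);
    kfree(edesc);
    return ret;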
833 struct ahash_edesc *edesc; in ahash_update_ctx() local
879 edesc = ahash_edesc_alloc(req, pad_nents, ctx->sh_desc_update, in ahash_update_ctx()
881 if (!edesc) { in ahash_update_ctx()
886 edesc->src_nents = src_nents; in ahash_update_ctx()
887 edesc->sec4_sg_bytes = sec4_sg_bytes; in ahash_update_ctx()
890 edesc->sec4_sg, DMA_BIDIRECTIONAL); in ahash_update_ctx()
894 ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state); in ahash_update_ctx()
900 edesc->sec4_sg + sec4_sg_src_index, in ahash_update_ctx()
903 sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index - in ahash_update_ctx()
906 desc = edesc->hw_desc; in ahash_update_ctx()
908 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, in ahash_update_ctx()
911 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { in ahash_update_ctx()
917 append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + in ahash_update_ctx()
940 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL); in ahash_update_ctx()
941 kfree(edesc); in ahash_update_ctx()
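
The ahash_update_ctx() cluster (833-941) shows the whole per-request lifecycle in one place. Condensed into its five steps (a reading of the listing, not a verbatim excerpt; elisions marked):

    /* 1. Allocate an edesc with room for pad_nents S/G entries. */
    edesc = ahash_edesc_alloc(req, pad_nents, ctx->sh_desc_update,
                              ctx->sh_desc_update_dma);  /* _dma name assumed */

    /* 2. Record what ahash_unmap() will need to undo. */
    edesc->src_nents = src_nents;
    edesc->sec4_sg_bytes = sec4_sg_bytes;

    /* 3. Build the table: running context, buffered bytes, then req->src
     *    (lines 890-903), closing it with sg_to_sec4_set_last(). */

    /* 4. Map the finished table and point the descriptor at it. */
    edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
                                        sec4_sg_bytes, DMA_TO_DEVICE);
    if (dma_mapping_error(jrdev, edesc->sec4_sg_dma))
        goto unmap_ctx;   /* label assumed */

    append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
                      to_hash, LDST_SGF);  /* length tail truncated at 917; assumed */

    /* 5. Enqueue; on any failure after step 1, fall through to the
     *    unmap + kfree pair on lines 940-941. */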
955 struct ahash_edesc *edesc; in ahash_final_ctx() local
962 edesc = ahash_edesc_alloc(req, 4, ctx->sh_desc_fin, in ahash_final_ctx()
964 if (!edesc) in ahash_final_ctx()
967 desc = edesc->hw_desc; in ahash_final_ctx()
969 edesc->sec4_sg_bytes = sec4_sg_bytes; in ahash_final_ctx()
972 edesc->sec4_sg, DMA_BIDIRECTIONAL); in ahash_final_ctx()
976 ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state); in ahash_final_ctx()
980 sg_to_sec4_set_last(edesc->sec4_sg + (buflen ? 1 : 0)); in ahash_final_ctx()
982 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, in ahash_final_ctx()
984 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { in ahash_final_ctx()
990 append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen, in ahash_final_ctx()
1001 ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL); in ahash_final_ctx()
1002 kfree(edesc); in ahash_final_ctx()
1017 struct ahash_edesc *edesc; in ahash_finup_ctx() local
1040 edesc = ahash_edesc_alloc(req, sec4_sg_src_index + mapped_nents, in ahash_finup_ctx()
1042 if (!edesc) { in ahash_finup_ctx()
1047 desc = edesc->hw_desc; in ahash_finup_ctx()
1049 edesc->src_nents = src_nents; in ahash_finup_ctx()
1052 edesc->sec4_sg, DMA_BIDIRECTIONAL); in ahash_finup_ctx()
1056 ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state); in ahash_finup_ctx()
1060 ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, in ahash_finup_ctx()
1075 ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL); in ahash_finup_ctx()
1076 kfree(edesc); in ahash_finup_ctx()
1089 struct ahash_edesc *edesc; in ahash_digest() local
1112 edesc = ahash_edesc_alloc(req, mapped_nents > 1 ? mapped_nents : 0, in ahash_digest()
1114 if (!edesc) { in ahash_digest()
1119 edesc->src_nents = src_nents; in ahash_digest()
1121 ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0, in ahash_digest()
1124 ahash_unmap(jrdev, edesc, req, digestsize); in ahash_digest()
1125 kfree(edesc); in ahash_digest()
1129 desc = edesc->hw_desc; in ahash_digest()
1133 ahash_unmap(jrdev, edesc, req, digestsize); in ahash_digest()
1134 kfree(edesc); in ahash_digest()
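
Line 1112 (and again at 1446-1449) passes mapped_nents > 1 ? mapped_nents : 0 as the entry count, so a source that mapped to a single contiguous segment allocates no S/G table at all. Sketch of the idiom (the _dma shared-descriptor name is assumed; control flow abbreviated):

    /* Reserve S/G entries only when the mapped source has >1 segment. */
    edesc = ahash_edesc_alloc(req, mapped_nents > 1 ? mapped_nents : 0,
                              ctx->sh_desc_digest, ctx->sh_desc_digest_dma);

    /* ahash_edesc_add_src() then emits either a mapped table plus
     * LDST_SGF, or a bare sg_dma_address() pointer (lines 725-754). */
    ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
                              req->nbytes);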
1157 struct ahash_edesc *edesc; in ahash_final_no_ctx() local
1161 edesc = ahash_edesc_alloc(req, 0, ctx->sh_desc_digest, in ahash_final_no_ctx()
1163 if (!edesc) in ahash_final_no_ctx()
1166 desc = edesc->hw_desc; in ahash_final_no_ctx()
1190 ahash_unmap(jrdev, edesc, req, digestsize); in ahash_final_no_ctx()
1191 kfree(edesc); in ahash_final_no_ctx()
1208 struct ahash_edesc *edesc; in ahash_update_no_ctx() local
1254 edesc = ahash_edesc_alloc(req, pad_nents, in ahash_update_no_ctx()
1257 if (!edesc) { in ahash_update_no_ctx()
1262 edesc->src_nents = src_nents; in ahash_update_no_ctx()
1263 edesc->sec4_sg_bytes = sec4_sg_bytes; in ahash_update_no_ctx()
1265 ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state); in ahash_update_no_ctx()
1269 sg_to_sec4_sg_last(req->src, src_len, edesc->sec4_sg + 1, 0); in ahash_update_no_ctx()
1271 desc = edesc->hw_desc; in ahash_update_no_ctx()
1273 edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, in ahash_update_no_ctx()
1276 if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { in ahash_update_no_ctx()
1282 append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF); in ahash_update_no_ctx()
1311 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE); in ahash_update_no_ctx()
1312 kfree(edesc); in ahash_update_no_ctx()
1327 struct ahash_edesc *edesc; in ahash_finup_no_ctx() local
1352 edesc = ahash_edesc_alloc(req, sec4_sg_src_index + mapped_nents, in ahash_finup_no_ctx()
1354 if (!edesc) { in ahash_finup_no_ctx()
1359 desc = edesc->hw_desc; in ahash_finup_no_ctx()
1361 edesc->src_nents = src_nents; in ahash_finup_no_ctx()
1362 edesc->sec4_sg_bytes = sec4_sg_bytes; in ahash_finup_no_ctx()
1364 ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state); in ahash_finup_no_ctx()
1368 ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 1, buflen, in ahash_finup_no_ctx()
1386 ahash_unmap(jrdev, edesc, req, digestsize); in ahash_finup_no_ctx()
1387 kfree(edesc); in ahash_finup_no_ctx()
1406 struct ahash_edesc *edesc; in ahash_update_first() local
1446 edesc = ahash_edesc_alloc(req, mapped_nents > 1 ? in ahash_update_first()
1450 if (!edesc) { in ahash_update_first()
1455 edesc->src_nents = src_nents; in ahash_update_first()
1457 ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0, in ahash_update_first()
1462 desc = edesc->hw_desc; in ahash_update_first()
1494 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE); in ahash_update_first()
1495 kfree(edesc); in ahash_update_first()