Lines matching refs: lfs

Each entry below gives the source line number, the matched code fragment, and the enclosing function as reported by the cross-referencer; a hedged sketch of the elided context follows each group of hits.

133 static void cleanup_tasklet_work(struct otx2_cptlfs_info *lfs) in cleanup_tasklet_work() argument
137 for (i = 0; i < lfs->lfs_num; i++) { in cleanup_tasklet_work()
138 if (!lfs->lf[i].wqe) in cleanup_tasklet_work()
141 tasklet_kill(&lfs->lf[i].wqe->work); in cleanup_tasklet_work()
142 kfree(lfs->lf[i].wqe); in cleanup_tasklet_work()
143 lfs->lf[i].wqe = NULL; in cleanup_tasklet_work()
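From these hits the elided control flow of cleanup_tasklet_work() is clear: LFs without a work-queue entry are skipped, and for the rest the tasklet is stopped before its memory is freed. A minimal reconstruction (the index declaration, the skip, and the braces are inferred, not shown in the listing):

static void cleanup_tasklet_work(struct otx2_cptlfs_info *lfs)
{
        int i;

        for (i = 0; i < lfs->lfs_num; i++) {
                if (!lfs->lf[i].wqe)
                        continue;
                /* Wait for a running tasklet to finish before freeing it */
                tasklet_kill(&lfs->lf[i].wqe->work);
                kfree(lfs->lf[i].wqe);
                lfs->lf[i].wqe = NULL;
        }
}

Resetting wqe to NULL after kfree() is what makes the !lfs->lf[i].wqe guard safe if cleanup is ever entered twice.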
147 static int init_tasklet_work(struct otx2_cptlfs_info *lfs) in init_tasklet_work() argument
152 for (i = 0; i < lfs->lfs_num; i++) { in init_tasklet_work()
160 wqe->lfs = lfs; in init_tasklet_work()
162 lfs->lf[i].wqe = wqe; in init_tasklet_work()
167 cleanup_tasklet_work(lfs); in init_tasklet_work()
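The allocation and tasklet setup between lines 152 and 160 are elided by the match. A plausible sketch, assuming the usual kzalloc()/tasklet_init() pattern; the wqe type name, the callback name otx2_cpt_post_process, the label, and the -ENOMEM unwind are assumptions not present in the listing:

static int init_tasklet_work(struct otx2_cptlfs_info *lfs)
{
        struct otx2_cptlf_wqe *wqe;     /* type name assumed */
        int i;

        for (i = 0; i < lfs->lfs_num; i++) {
                wqe = kzalloc(sizeof(*wqe), GFP_KERNEL);  /* assumed */
                if (!wqe)
                        goto cleanup_tasklet;

                /* Callback name is an assumption; the listing elides it */
                tasklet_init(&wqe->work, otx2_cpt_post_process,
                             (unsigned long)wqe);
                wqe->lfs = lfs;
                lfs->lf[i].wqe = wqe;
        }
        return 0;

cleanup_tasklet:
        cleanup_tasklet_work(lfs);
        return -ENOMEM;
}

On a mid-loop allocation failure, cleanup_tasklet_work() handles the partially initialized array because every untouched slot still holds a NULL wqe.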
171 static void free_pending_queues(struct otx2_cptlfs_info *lfs) in free_pending_queues() argument
175 for (i = 0; i < lfs->lfs_num; i++) { in free_pending_queues()
176 kfree(lfs->lf[i].pqueue.head); in free_pending_queues()
177 lfs->lf[i].pqueue.head = NULL; in free_pending_queues()
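free_pending_queues() is visible almost in full; only the loop braces and the index are elided. Clearing head after kfree() keeps a repeated call harmless (and kfree(NULL) is itself a no-op):

static void free_pending_queues(struct otx2_cptlfs_info *lfs)
{
        int i;

        for (i = 0; i < lfs->lfs_num; i++) {
                kfree(lfs->lf[i].pqueue.head);
                lfs->lf[i].pqueue.head = NULL;
        }
}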
181 static int alloc_pending_queues(struct otx2_cptlfs_info *lfs) in alloc_pending_queues() argument
185 if (!lfs->lfs_num) in alloc_pending_queues()
188 for (i = 0; i < lfs->lfs_num; i++) { in alloc_pending_queues()
189 lfs->lf[i].pqueue.qlen = OTX2_CPT_INST_QLEN_MSGS; in alloc_pending_queues()
190 size = lfs->lf[i].pqueue.qlen * in alloc_pending_queues()
193 lfs->lf[i].pqueue.head = kzalloc(size, GFP_KERNEL); in alloc_pending_queues()
194 if (!lfs->lf[i].pqueue.head) { in alloc_pending_queues()
200 spin_lock_init(&lfs->lf[i].pqueue.lock); in alloc_pending_queues()
205 free_pending_queues(lfs); in alloc_pending_queues()
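Lines 190-193 show the size computation split across source lines, with the multiplicand elided. A sketch, assuming each queue holds fixed-size pending-request entries; the entry type name, the -EINVAL/-ENOMEM codes, and the error label are assumptions:

static int alloc_pending_queues(struct otx2_cptlfs_info *lfs)
{
        int size, ret, i;

        if (!lfs->lfs_num)
                return -EINVAL;                 /* errno assumed */

        for (i = 0; i < lfs->lfs_num; i++) {
                lfs->lf[i].pqueue.qlen = OTX2_CPT_INST_QLEN_MSGS;
                size = lfs->lf[i].pqueue.qlen *
                       sizeof(struct otx2_cpt_pending_entry); /* type assumed */

                lfs->lf[i].pqueue.head = kzalloc(size, GFP_KERNEL);
                if (!lfs->lf[i].pqueue.head) {
                        ret = -ENOMEM;
                        goto error;
                }

                /* Per-queue lock guarding submit/post-process access */
                spin_lock_init(&lfs->lf[i].pqueue.lock);
        }
        return 0;

error:
        free_pending_queues(lfs);
        return ret;
}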
209 static void lf_sw_cleanup(struct otx2_cptlfs_info *lfs) in lf_sw_cleanup() argument
211 cleanup_tasklet_work(lfs); in lf_sw_cleanup()
212 free_pending_queues(lfs); in lf_sw_cleanup()
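lf_sw_cleanup() undoes lf_sw_init() in the safe order: tasklets are killed first, so no deferred work can still be walking the pending queues when they are freed.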
215 static int lf_sw_init(struct otx2_cptlfs_info *lfs) in lf_sw_init() argument
219 ret = alloc_pending_queues(lfs); in lf_sw_init()
221 dev_err(&lfs->pdev->dev, in lf_sw_init()
225 ret = init_tasklet_work(lfs); in lf_sw_init()
227 dev_err(&lfs->pdev->dev, in lf_sw_init()
234 free_pending_queues(lfs); in lf_sw_init()
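lf_sw_init() composes the two allocators with the usual goto unwind; the dev_err() message strings at lines 221 and 227 are elided in the listing, so the wording and the label below are assumptions:

static int lf_sw_init(struct otx2_cptlfs_info *lfs)
{
        int ret;

        ret = alloc_pending_queues(lfs);
        if (ret) {
                dev_err(&lfs->pdev->dev,
                        "Allocating pending queues failed\n"); /* wording assumed */
                return ret;
        }

        ret = init_tasklet_work(lfs);
        if (ret) {
                dev_err(&lfs->pdev->dev,
                        "Tasklet work init failed\n");         /* wording assumed */
                goto pending_queues_free;
        }
        return 0;

pending_queues_free:                                           /* label assumed */
        free_pending_queues(lfs);
        return ret;
}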
238 static void cptvf_lf_shutdown(struct otx2_cptlfs_info *lfs) in cptvf_lf_shutdown() argument
240 atomic_set(&lfs->state, OTX2_CPTLF_IN_RESET); in cptvf_lf_shutdown()
243 otx2_cptlf_free_irqs_affinity(lfs); in cptvf_lf_shutdown()
245 otx2_cptlf_disable_iqueues(lfs); in cptvf_lf_shutdown()
247 otx2_cpt_crypto_exit(lfs->pdev, THIS_MODULE); in cptvf_lf_shutdown()
249 otx2_cptlf_unregister_interrupts(lfs); in cptvf_lf_shutdown()
251 lf_sw_cleanup(lfs); in cptvf_lf_shutdown()
253 otx2_cpt_detach_rsrcs_msg(lfs); in cptvf_lf_shutdown()
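Every call in cptvf_lf_shutdown() is visible above; only blank lines and comments are elided, so the teardown order can be read directly: mark the LFs as in reset so no new work is queued, quiesce the interrupts and instruction queues, unregister the crypto algorithms, drop the interrupt handlers, free the software state, and finally ask over mailbox to detach the LF resources. Annotated:

static void cptvf_lf_shutdown(struct otx2_cptlfs_info *lfs)
{
        /* Refuse new requests from here on */
        atomic_set(&lfs->state, OTX2_CPTLF_IN_RESET);
        /* Undo CPU affinity before the IRQs go away */
        otx2_cptlf_free_irqs_affinity(lfs);
        /* Stop the instruction queues */
        otx2_cptlf_disable_iqueues(lfs);
        /* Unregister crypto algorithms */
        otx2_cpt_crypto_exit(lfs->pdev, THIS_MODULE);
        /* Release the LF interrupt handlers */
        otx2_cptlf_unregister_interrupts(lfs);
        /* Free tasklets and pending queues */
        lf_sw_cleanup(lfs);
        /* Mailbox request to detach the LF resources */
        otx2_cpt_detach_rsrcs_msg(lfs);
}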
258 struct otx2_cptlfs_info *lfs = &cptvf->lfs; in cptvf_lf_init() local
264 cptvf->lfs.kcrypto_eng_grp_num = OTX2_CPT_INVALID_CRYPTO_ENG_GRP; in cptvf_lf_init()
269 if (cptvf->lfs.kcrypto_eng_grp_num == OTX2_CPT_INVALID_CRYPTO_ENG_GRP) { in cptvf_lf_init()
274 eng_grp_msk = 1 << cptvf->lfs.kcrypto_eng_grp_num; in cptvf_lf_init()
280 lfs->reg_base = cptvf->reg_base; in cptvf_lf_init()
281 lfs->pdev = cptvf->pdev; in cptvf_lf_init()
282 lfs->mbox = &cptvf->pfvf_mbox; in cptvf_lf_init()
284 lfs_num = cptvf->lfs.kvf_limits ? cptvf->lfs.kvf_limits : in cptvf_lf_init()
286 ret = otx2_cptlf_init(lfs, eng_grp_msk, OTX2_CPT_QUEUE_HI_PRIO, in cptvf_lf_init()
292 ret = otx2_cpt_msix_offset_msg(lfs); in cptvf_lf_init()
297 ret = lf_sw_init(lfs); in cptvf_lf_init()
302 ret = otx2_cptlf_register_interrupts(lfs); in cptvf_lf_init()
307 ret = otx2_cptlf_set_irqs_affinity(lfs); in cptvf_lf_init()
311 atomic_set(&lfs->state, OTX2_CPTLF_STARTED); in cptvf_lf_init()
313 ret = otx2_cpt_crypto_init(lfs->pdev, THIS_MODULE, lfs_num, 1); in cptvf_lf_init()
315 dev_err(&lfs->pdev->dev, "algorithms registration failed\n"); in cptvf_lf_init()
321 otx2_cptlf_free_irqs_affinity(lfs); in cptvf_lf_init()
323 otx2_cptlf_unregister_interrupts(lfs); in cptvf_lf_init()
325 lf_sw_cleanup(lfs); in cptvf_lf_init()
327 otx2_cptlf_shutdown(lfs); in cptvf_lf_init()
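The matched lines trace the whole bring-up in cptvf_lf_init() plus its unwind ladder: the calls after line 315 run only on error, undoing setup in reverse order. A condensed sketch; the cptvf parameter type, the engine-group discovery between lines 264 and 269, the num_online_cpus() fallback after the kvf_limits ternary, the label names, and the -ENOENT code are assumptions not shown in the listing:

static int cptvf_lf_init(struct otx2_cptvf_dev *cptvf)   /* type assumed */
{
        struct otx2_cptlfs_info *lfs = &cptvf->lfs;
        u8 eng_grp_msk;
        int ret, lfs_num;

        /* Which engine group serves kernel crypto? Exchange elided. */
        cptvf->lfs.kcrypto_eng_grp_num = OTX2_CPT_INVALID_CRYPTO_ENG_GRP;
        /* ... mailbox exchange that fills kcrypto_eng_grp_num ... */

        if (cptvf->lfs.kcrypto_eng_grp_num == OTX2_CPT_INVALID_CRYPTO_ENG_GRP)
                return -ENOENT;                         /* errno assumed */
        eng_grp_msk = 1 << cptvf->lfs.kcrypto_eng_grp_num;

        lfs->reg_base = cptvf->reg_base;
        lfs->pdev = cptvf->pdev;
        lfs->mbox = &cptvf->pfvf_mbox;

        /* Fallback when no VF limit is configured is assumed */
        lfs_num = cptvf->lfs.kvf_limits ? cptvf->lfs.kvf_limits :
                  num_online_cpus();

        ret = otx2_cptlf_init(lfs, eng_grp_msk, OTX2_CPT_QUEUE_HI_PRIO,
                              lfs_num);
        if (ret)
                return ret;

        ret = otx2_cpt_msix_offset_msg(lfs);    /* MSI-X offsets via mbox */
        if (ret)
                goto cleanup_lf;

        ret = lf_sw_init(lfs);                  /* queues + tasklets */
        if (ret)
                goto cleanup_lf;

        ret = otx2_cptlf_register_interrupts(lfs);
        if (ret)
                goto cleanup_lf_sw;

        ret = otx2_cptlf_set_irqs_affinity(lfs);
        if (ret)
                goto unregister_intr;

        atomic_set(&lfs->state, OTX2_CPTLF_STARTED);

        ret = otx2_cpt_crypto_init(lfs->pdev, THIS_MODULE, lfs_num, 1);
        if (ret) {
                dev_err(&lfs->pdev->dev, "algorithms registration failed\n");
                goto free_irqs_affinity;
        }
        return 0;

free_irqs_affinity:                             /* labels assumed */
        otx2_cptlf_free_irqs_affinity(lfs);
unregister_intr:
        otx2_cptlf_unregister_interrupts(lfs);
cleanup_lf_sw:
        lf_sw_cleanup(lfs);
cleanup_lf:
        otx2_cptlf_shutdown(lfs);
        return ret;
}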
408 cptvf_lf_shutdown(&cptvf->lfs); in otx2_cptvf_remove()
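The final hit is the driver's remove path: otx2_cptvf_remove() tears everything down through cptvf_lf_shutdown(), undoing in one call what cptvf_lf_init() set up.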