Lines matching refs:qp (from drivers/infiniband/sw/rxe/rxe_qp.c, the Linux soft-RoCE driver)
103 static int alloc_rd_atomic_resources(struct rxe_qp *qp, unsigned int n) in alloc_rd_atomic_resources() argument
105 qp->resp.res_head = 0; in alloc_rd_atomic_resources()
106 qp->resp.res_tail = 0; in alloc_rd_atomic_resources()
107 qp->resp.resources = kcalloc(n, sizeof(struct resp_res), GFP_KERNEL); in alloc_rd_atomic_resources()
109 if (!qp->resp.resources) in alloc_rd_atomic_resources()
115 static void free_rd_atomic_resources(struct rxe_qp *qp) in free_rd_atomic_resources() argument
117 if (qp->resp.resources) { in free_rd_atomic_resources()
120 for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) { in free_rd_atomic_resources()
121 struct resp_res *res = &qp->resp.resources[i]; in free_rd_atomic_resources()
125 kfree(qp->resp.resources); in free_rd_atomic_resources()
126 qp->resp.resources = NULL; in free_rd_atomic_resources()
135 static void cleanup_rd_atomic_resources(struct rxe_qp *qp) in cleanup_rd_atomic_resources() argument
140 if (qp->resp.resources) { in cleanup_rd_atomic_resources()
141 for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) { in cleanup_rd_atomic_resources()
142 res = &qp->resp.resources[i]; in cleanup_rd_atomic_resources()
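The three helpers above manage the responder's fixed-size array of read/atomic resources: allocate it zeroed, walk it to release any per-entry state, then free the array and NULL the pointer so a repeated free is harmless. A minimal userspace sketch of the same pattern, with calloc/free standing in for kcalloc/kfree and a placeholder per-entry field:

#include <stdlib.h>

struct resource {
	void *mr;                               /* placeholder per-entry reference */
};

struct resp_ctx {
	unsigned int n;
	struct resource *resources;
};

static int alloc_resources(struct resp_ctx *ctx, unsigned int n)
{
	ctx->resources = calloc(n, sizeof(*ctx->resources)); /* zeroed, as kcalloc */
	if (!ctx->resources)
		return -1;
	ctx->n = n;
	return 0;
}

static void free_resources(struct resp_ctx *ctx)
{
	unsigned int i;

	if (!ctx->resources)                    /* idempotent, like the driver */
		return;
	for (i = 0; i < ctx->n; i++)
		free(ctx->resources[i].mr);     /* drop per-entry state first */
	free(ctx->resources);
	ctx->resources = NULL;                  /* guard against double free */
}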
148 static void rxe_qp_init_misc(struct rxe_dev *rxe, struct rxe_qp *qp, in rxe_qp_init_misc() argument
154 qp->sq_sig_type = init->sq_sig_type; in rxe_qp_init_misc()
155 qp->attr.path_mtu = 1; in rxe_qp_init_misc()
156 qp->mtu = ib_mtu_enum_to_int(qp->attr.path_mtu); in rxe_qp_init_misc()
158 qpn = qp->elem.index; in rxe_qp_init_misc()
163 qp->ibqp.qp_num = 1; in rxe_qp_init_misc()
165 qp->attr.port_num = init->port_num; in rxe_qp_init_misc()
169 qp->ibqp.qp_num = qpn; in rxe_qp_init_misc()
173 spin_lock_init(&qp->state_lock); in rxe_qp_init_misc()
175 spin_lock_init(&qp->sq.sq_lock); in rxe_qp_init_misc()
176 spin_lock_init(&qp->rq.producer_lock); in rxe_qp_init_misc()
177 spin_lock_init(&qp->rq.consumer_lock); in rxe_qp_init_misc()
179 atomic_set(&qp->ssn, 0); in rxe_qp_init_misc()
180 atomic_set(&qp->skb_out, 0); in rxe_qp_init_misc()
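rxe_qp_init_misc() forces qp_num to 1 for the GSI QP and otherwise uses the pool index; it also stores path_mtu as the IB enum (1 is IB_MTU_256) and caches the byte value via ib_mtu_enum_to_int(). A sketch of that mapping, assuming the standard IB encoding where enum value m means 128 << m bytes:

/* IB path MTU enum: 1 = 256B, 2 = 512B, 3 = 1024B, 4 = 2048B, 5 = 4096B. */
static int mtu_enum_to_int(int mtu)
{
	return (mtu >= 1 && mtu <= 5) ? 128 << mtu : -1;
}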
183 static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp, in rxe_qp_init_req() argument
191 err = sock_create_kern(&init_net, AF_INET, SOCK_DGRAM, 0, &qp->sk); in rxe_qp_init_req()
194 qp->sk->sk->sk_user_data = qp; in rxe_qp_init_req()
203 qp->src_port = RXE_ROCE_V2_SPORT + (hash_32(qp_num(qp), 14) & 0x3fff); in rxe_qp_init_req()
204 qp->sq.max_wr = init->cap.max_send_wr; in rxe_qp_init_req()
209 qp->sq.max_sge = init->cap.max_send_sge = in rxe_qp_init_req()
211 qp->sq.max_inline = init->cap.max_inline_data = wqe_size; in rxe_qp_init_req()
215 qp->sq.queue = rxe_queue_init(rxe, &qp->sq.max_wr, in rxe_qp_init_req()
217 if (!qp->sq.queue) in rxe_qp_init_req()
221 qp->sq.queue->buf, qp->sq.queue->buf_size, in rxe_qp_init_req()
222 &qp->sq.queue->ip); in rxe_qp_init_req()
225 vfree(qp->sq.queue->buf); in rxe_qp_init_req()
226 kfree(qp->sq.queue); in rxe_qp_init_req()
227 qp->sq.queue = NULL; in rxe_qp_init_req()
231 qp->req.wqe_index = queue_get_producer(qp->sq.queue, in rxe_qp_init_req()
234 qp->req.state = QP_STATE_RESET; in rxe_qp_init_req()
235 qp->comp.state = QP_STATE_RESET; in rxe_qp_init_req()
236 qp->req.opcode = -1; in rxe_qp_init_req()
237 qp->comp.opcode = -1; in rxe_qp_init_req()
239 skb_queue_head_init(&qp->req_pkts); in rxe_qp_init_req()
241 rxe_init_task(&qp->req.task, qp, rxe_requester); in rxe_qp_init_req()
242 rxe_init_task(&qp->comp.task, qp, rxe_completer); in rxe_qp_init_req()
244 qp->qp_timeout_jiffies = 0; /* Can't be set for UD/UC in modify_qp */ in rxe_qp_init_req()
246 timer_setup(&qp->rnr_nak_timer, rnr_nak_timer, 0); in rxe_qp_init_req()
247 timer_setup(&qp->retrans_timer, retransmit_timer, 0); in rxe_qp_init_req()
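rxe_qp_init_req() derives a per-QP UDP source port so traffic from different QPs can hash onto different ECMP paths: a 14-bit hash of the QPN added to the RoCEv2 source-port base. A sketch of the derivation, assuming the kernel's multiplicative hash_32() (golden-ratio constant 0x61C88647) and a base of 0xC000 for RXE_ROCE_V2_SPORT:

#include <stdint.h>

#define ROCE_V2_SPORT_BASE 0xC000u

/* hash_32(v, bits): multiply by the 32-bit golden ratio, keep the top bits. */
static uint16_t qpn_to_src_port(uint32_t qpn)
{
	uint32_t h = (qpn * 0x61C88647u) >> (32 - 14);   /* 14-bit hash */

	return ROCE_V2_SPORT_BASE + (h & 0x3FFF);
}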
252 static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp, in rxe_qp_init_resp() argument
261 if (!qp->srq) { in rxe_qp_init_resp()
262 qp->rq.max_wr = init->cap.max_recv_wr; in rxe_qp_init_resp()
263 qp->rq.max_sge = init->cap.max_recv_sge; in rxe_qp_init_resp()
265 wqe_size = rcv_wqe_size(qp->rq.max_sge); in rxe_qp_init_resp()
268 qp->rq.queue = rxe_queue_init(rxe, &qp->rq.max_wr, in rxe_qp_init_resp()
270 if (!qp->rq.queue) in rxe_qp_init_resp()
274 qp->rq.queue->buf, qp->rq.queue->buf_size, in rxe_qp_init_resp()
275 &qp->rq.queue->ip); in rxe_qp_init_resp()
277 vfree(qp->rq.queue->buf); in rxe_qp_init_resp()
278 kfree(qp->rq.queue); in rxe_qp_init_resp()
279 qp->rq.queue = NULL; in rxe_qp_init_resp()
284 skb_queue_head_init(&qp->resp_pkts); in rxe_qp_init_resp()
286 rxe_init_task(&qp->resp.task, qp, rxe_responder); in rxe_qp_init_resp()
288 qp->resp.opcode = OPCODE_NONE; in rxe_qp_init_resp()
289 qp->resp.msn = 0; in rxe_qp_init_resp()
290 qp->resp.state = QP_STATE_RESET; in rxe_qp_init_resp()
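rxe_qp_init_resp() only builds a receive queue when there is no SRQ, sizing each slot via rcv_wqe_size() for the worst-case scatter list. A sketch of that sizing, with the receive WQE header reduced to a hypothetical struct:

#include <stddef.h>
#include <stdint.h>

struct sge {
	uint64_t addr;
	uint32_t length;
	uint32_t lkey;
};

struct recv_wqe_hdr {           /* stand-in for the driver's recv WQE header */
	uint64_t wr_id;
	uint32_t num_sge;
	uint32_t padding;
};

/* One queue slot = fixed header + room for max_sge scatter/gather entries. */
static size_t rcv_wqe_size(int max_sge)
{
	return sizeof(struct recv_wqe_hdr) + max_sge * sizeof(struct sge);
}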
296 int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd, in rxe_qp_from_init() argument
313 qp->pd = pd; in rxe_qp_from_init()
314 qp->rcq = rcq; in rxe_qp_from_init()
315 qp->scq = scq; in rxe_qp_from_init()
316 qp->srq = srq; in rxe_qp_from_init()
321 rxe_qp_init_misc(rxe, qp, init); in rxe_qp_from_init()
323 err = rxe_qp_init_req(rxe, qp, init, udata, uresp); in rxe_qp_from_init()
327 err = rxe_qp_init_resp(rxe, qp, init, udata, uresp); in rxe_qp_from_init()
331 qp->attr.qp_state = IB_QPS_RESET; in rxe_qp_from_init()
332 qp->valid = 1; in rxe_qp_from_init()
337 rxe_queue_cleanup(qp->sq.queue); in rxe_qp_from_init()
338 qp->sq.queue = NULL; in rxe_qp_from_init()
343 qp->pd = NULL; in rxe_qp_from_init()
344 qp->rcq = NULL; in rxe_qp_from_init()
345 qp->scq = NULL; in rxe_qp_from_init()
346 qp->srq = NULL; in rxe_qp_from_init()
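rxe_qp_from_init() takes its PD/CQ/SRQ pointers up front and unwinds on failure: the error path frees the send queue and clears every borrowed pointer so a later destroy cannot touch stale references. The classic goto-unwind shape, sketched with hypothetical init_a/init_b stages:

struct ctx {
	int a_ready;
};

static int init_a(struct ctx *c) { c->a_ready = 1; return 0; }   /* stub stage */
static void cleanup_a(struct ctx *c) { c->a_ready = 0; }
static int init_b(struct ctx *c) { (void)c; return 0; }          /* stub stage */

static int setup(struct ctx *c)
{
	int err;

	err = init_a(c);
	if (err)
		goto err_out;

	err = init_b(c);
	if (err)
		goto err_undo_a;       /* undo only the stages that succeeded */

	return 0;

err_undo_a:
	cleanup_a(c);
err_out:
	return err;
}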
358 int rxe_qp_to_init(struct rxe_qp *qp, struct ib_qp_init_attr *init) in rxe_qp_to_init() argument
360 init->event_handler = qp->ibqp.event_handler; in rxe_qp_to_init()
361 init->qp_context = qp->ibqp.qp_context; in rxe_qp_to_init()
362 init->send_cq = qp->ibqp.send_cq; in rxe_qp_to_init()
363 init->recv_cq = qp->ibqp.recv_cq; in rxe_qp_to_init()
364 init->srq = qp->ibqp.srq; in rxe_qp_to_init()
366 init->cap.max_send_wr = qp->sq.max_wr; in rxe_qp_to_init()
367 init->cap.max_send_sge = qp->sq.max_sge; in rxe_qp_to_init()
368 init->cap.max_inline_data = qp->sq.max_inline; in rxe_qp_to_init()
370 if (!qp->srq) { in rxe_qp_to_init()
371 init->cap.max_recv_wr = qp->rq.max_wr; in rxe_qp_to_init()
372 init->cap.max_recv_sge = qp->rq.max_sge; in rxe_qp_to_init()
375 init->sq_sig_type = qp->sq_sig_type; in rxe_qp_to_init()
377 init->qp_type = qp->ibqp.qp_type; in rxe_qp_to_init()
386 int rxe_qp_chk_attr(struct rxe_dev *rxe, struct rxe_qp *qp, in rxe_qp_chk_attr() argument
390 attr->cur_qp_state : qp->attr.qp_state; in rxe_qp_chk_attr()
394 if (!ib_modify_qp_is_ok(cur_state, new_state, qp_type(qp), mask)) { in rxe_qp_chk_attr()
395 rxe_dbg_qp(qp, "invalid mask or state\n"); in rxe_qp_chk_attr()
401 if (qp->req.state == QP_STATE_DRAIN && in rxe_qp_chk_attr()
409 rxe_dbg_qp(qp, "invalid port %d\n", attr->port_num); in rxe_qp_chk_attr()
414 if (mask & IB_QP_CAP && rxe_qp_chk_cap(rxe, &attr->cap, !!qp->srq)) in rxe_qp_chk_attr()
417 if (mask & IB_QP_AV && rxe_av_chk_attr(qp, &attr->ah_attr)) in rxe_qp_chk_attr()
421 if (rxe_av_chk_attr(qp, &attr->alt_ah_attr)) in rxe_qp_chk_attr()
424 rxe_dbg_qp(qp, "invalid alt port %d\n", attr->alt_port_num); in rxe_qp_chk_attr()
428 rxe_dbg_qp(qp, "invalid alt timeout %d > 31\n", in rxe_qp_chk_attr()
441 rxe_dbg_qp(qp, "invalid mtu (%d) > (%d)\n", in rxe_qp_chk_attr()
450 rxe_dbg_qp(qp, "invalid max_rd_atomic %d > %d\n", in rxe_qp_chk_attr()
459 rxe_dbg_qp(qp, "invalid timeout %d > 31\n", in rxe_qp_chk_attr()
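rxe_qp_chk_attr() validates each attribute group independently and logs the first offender. The timeout and alt_timeout checks cap at 31 because the IB local-ACK timeout is a 5-bit exponent (roughly 4.096 us * 2^t). A sketch of that bounds check:

#include <stdio.h>

/* IB ACK timeout is a 5-bit exponent; 0 conventionally means infinite. */
static int chk_timeout(int timeout)
{
	if (timeout > 31) {
		fprintf(stderr, "invalid timeout %d > 31\n", timeout);
		return -1;
	}
	return 0;
}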
472 static void rxe_qp_reset(struct rxe_qp *qp) in rxe_qp_reset() argument
475 rxe_disable_task(&qp->resp.task); in rxe_qp_reset()
478 if (qp->sq.queue) { in rxe_qp_reset()
479 if (qp_type(qp) == IB_QPT_RC) in rxe_qp_reset()
480 rxe_disable_task(&qp->comp.task); in rxe_qp_reset()
481 rxe_disable_task(&qp->req.task); in rxe_qp_reset()
485 qp->req.state = QP_STATE_RESET; in rxe_qp_reset()
486 qp->comp.state = QP_STATE_RESET; in rxe_qp_reset()
487 qp->resp.state = QP_STATE_RESET; in rxe_qp_reset()
492 __rxe_do_task(&qp->resp.task); in rxe_qp_reset()
494 if (qp->sq.queue) { in rxe_qp_reset()
495 __rxe_do_task(&qp->comp.task); in rxe_qp_reset()
496 __rxe_do_task(&qp->req.task); in rxe_qp_reset()
497 rxe_queue_reset(qp->sq.queue); in rxe_qp_reset()
501 atomic_set(&qp->ssn, 0); in rxe_qp_reset()
502 qp->req.opcode = -1; in rxe_qp_reset()
503 qp->req.need_retry = 0; in rxe_qp_reset()
504 qp->req.wait_for_rnr_timer = 0; in rxe_qp_reset()
505 qp->req.noack_pkts = 0; in rxe_qp_reset()
506 qp->resp.msn = 0; in rxe_qp_reset()
507 qp->resp.opcode = -1; in rxe_qp_reset()
508 qp->resp.drop_msg = 0; in rxe_qp_reset()
509 qp->resp.goto_error = 0; in rxe_qp_reset()
510 qp->resp.sent_psn_nak = 0; in rxe_qp_reset()
512 if (qp->resp.mr) { in rxe_qp_reset()
513 rxe_put(qp->resp.mr); in rxe_qp_reset()
514 qp->resp.mr = NULL; in rxe_qp_reset()
517 cleanup_rd_atomic_resources(qp); in rxe_qp_reset()
520 rxe_enable_task(&qp->resp.task); in rxe_qp_reset()
522 if (qp->sq.queue) { in rxe_qp_reset()
523 if (qp_type(qp) == IB_QPT_RC) in rxe_qp_reset()
524 rxe_enable_task(&qp->comp.task); in rxe_qp_reset()
526 rxe_enable_task(&qp->req.task); in rxe_qp_reset()
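rxe_qp_reset() is strictly ordered: disable the tasks, mark all three state machines RESET, run each task once synchronously to flush queued work, zero the protocol counters and drop held references, then re-enable the tasks. A condensed sketch of that quiesce/flush/resume shape with hypothetical task helpers:

struct task {
	int enabled;
	void (*run)(void *arg);
	void *arg;
};

static void task_disable(struct task *t) { t->enabled = 0; }
static void task_enable(struct task *t)  { t->enabled = 1; }
static void task_flush(struct task *t)   { t->run(t->arg); }  /* run once, inline */

static void qp_reset(struct task *resp, struct task *req, int *psn)
{
	task_disable(resp);       /* 1. quiesce: stop async execution */
	task_disable(req);

	task_flush(resp);         /* 2. flush: drain work queued before the stop */
	task_flush(req);

	*psn = 0;                 /* 3. reset protocol state */

	task_enable(resp);        /* 4. resume */
	task_enable(req);
}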
531 static void rxe_qp_drain(struct rxe_qp *qp) in rxe_qp_drain() argument
533 if (qp->sq.queue) { in rxe_qp_drain()
534 if (qp->req.state != QP_STATE_DRAINED) { in rxe_qp_drain()
535 qp->req.state = QP_STATE_DRAIN; in rxe_qp_drain()
536 if (qp_type(qp) == IB_QPT_RC) in rxe_qp_drain()
537 rxe_sched_task(&qp->comp.task); in rxe_qp_drain()
539 __rxe_do_task(&qp->comp.task); in rxe_qp_drain()
540 rxe_sched_task(&qp->req.task); in rxe_qp_drain()
546 void rxe_qp_error(struct rxe_qp *qp) in rxe_qp_error() argument
548 qp->req.state = QP_STATE_ERROR; in rxe_qp_error()
549 qp->resp.state = QP_STATE_ERROR; in rxe_qp_error()
550 qp->comp.state = QP_STATE_ERROR; in rxe_qp_error()
551 qp->attr.qp_state = IB_QPS_ERR; in rxe_qp_error()
554 rxe_sched_task(&qp->resp.task); in rxe_qp_error()
556 if (qp_type(qp) == IB_QPT_RC) in rxe_qp_error()
557 rxe_sched_task(&qp->comp.task); in rxe_qp_error()
559 __rxe_do_task(&qp->comp.task); in rxe_qp_error()
560 rxe_sched_task(&qp->req.task); in rxe_qp_error()
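rxe_qp_drain() and rxe_qp_error() share one idiom: publish the new state first, then schedule the requester/completer/responder tasks so each one observes the state and drains or flushes accordingly. A sketch of that state-then-kick ordering:

enum qp_state { QP_READY, QP_DRAIN, QP_ERROR };

struct sm {
	enum qp_state state;
};

/* Set the state before kicking the workers, so every worker sees it. */
static void move_to_error(struct sm *req, struct sm *resp,
			  void (*sched)(struct sm *))
{
	req->state = QP_ERROR;
	resp->state = QP_ERROR;
	sched(resp);              /* responder flushes its receive side */
	sched(req);               /* requester flushes posted sends */
}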
564 int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask, in rxe_qp_from_attr() argument
573 qp->attr.max_rd_atomic = max_rd_atomic; in rxe_qp_from_attr()
574 atomic_set(&qp->req.rd_atomic, max_rd_atomic); in rxe_qp_from_attr()
581 qp->attr.max_dest_rd_atomic = max_dest_rd_atomic; in rxe_qp_from_attr()
583 free_rd_atomic_resources(qp); in rxe_qp_from_attr()
585 err = alloc_rd_atomic_resources(qp, max_dest_rd_atomic); in rxe_qp_from_attr()
591 qp->attr.cur_qp_state = attr->qp_state; in rxe_qp_from_attr()
594 qp->attr.en_sqd_async_notify = attr->en_sqd_async_notify; in rxe_qp_from_attr()
597 qp->attr.qp_access_flags = attr->qp_access_flags; in rxe_qp_from_attr()
600 qp->attr.pkey_index = attr->pkey_index; in rxe_qp_from_attr()
603 qp->attr.port_num = attr->port_num; in rxe_qp_from_attr()
606 qp->attr.qkey = attr->qkey; in rxe_qp_from_attr()
609 rxe_init_av(&attr->ah_attr, &qp->pri_av); in rxe_qp_from_attr()
612 rxe_init_av(&attr->alt_ah_attr, &qp->alt_av); in rxe_qp_from_attr()
613 qp->attr.alt_port_num = attr->alt_port_num; in rxe_qp_from_attr()
614 qp->attr.alt_pkey_index = attr->alt_pkey_index; in rxe_qp_from_attr()
615 qp->attr.alt_timeout = attr->alt_timeout; in rxe_qp_from_attr()
619 qp->attr.path_mtu = attr->path_mtu; in rxe_qp_from_attr()
620 qp->mtu = ib_mtu_enum_to_int(attr->path_mtu); in rxe_qp_from_attr()
624 qp->attr.timeout = attr->timeout; in rxe_qp_from_attr()
626 qp->qp_timeout_jiffies = 0; in rxe_qp_from_attr()
631 qp->qp_timeout_jiffies = j ? j : 1; in rxe_qp_from_attr()
636 qp->attr.retry_cnt = attr->retry_cnt; in rxe_qp_from_attr()
637 qp->comp.retry_cnt = attr->retry_cnt; in rxe_qp_from_attr()
638 rxe_dbg_qp(qp, "set retry count = %d\n", attr->retry_cnt); in rxe_qp_from_attr()
642 qp->attr.rnr_retry = attr->rnr_retry; in rxe_qp_from_attr()
643 qp->comp.rnr_retry = attr->rnr_retry; in rxe_qp_from_attr()
644 rxe_dbg_qp(qp, "set rnr retry count = %d\n", attr->rnr_retry); in rxe_qp_from_attr()
648 qp->attr.rq_psn = (attr->rq_psn & BTH_PSN_MASK); in rxe_qp_from_attr()
649 qp->resp.psn = qp->attr.rq_psn; in rxe_qp_from_attr()
650 rxe_dbg_qp(qp, "set resp psn = 0x%x\n", qp->resp.psn); in rxe_qp_from_attr()
654 qp->attr.min_rnr_timer = attr->min_rnr_timer; in rxe_qp_from_attr()
655 rxe_dbg_qp(qp, "set min rnr timer = 0x%x\n", in rxe_qp_from_attr()
660 qp->attr.sq_psn = (attr->sq_psn & BTH_PSN_MASK); in rxe_qp_from_attr()
661 qp->req.psn = qp->attr.sq_psn; in rxe_qp_from_attr()
662 qp->comp.psn = qp->attr.sq_psn; in rxe_qp_from_attr()
663 rxe_dbg_qp(qp, "set req psn = 0x%x\n", qp->req.psn); in rxe_qp_from_attr()
667 qp->attr.path_mig_state = attr->path_mig_state; in rxe_qp_from_attr()
670 qp->attr.dest_qp_num = attr->dest_qp_num; in rxe_qp_from_attr()
673 qp->attr.qp_state = attr->qp_state; in rxe_qp_from_attr()
677 rxe_dbg_qp(qp, "state -> RESET\n"); in rxe_qp_from_attr()
678 rxe_qp_reset(qp); in rxe_qp_from_attr()
682 rxe_dbg_qp(qp, "state -> INIT\n"); in rxe_qp_from_attr()
683 qp->req.state = QP_STATE_INIT; in rxe_qp_from_attr()
684 qp->resp.state = QP_STATE_INIT; in rxe_qp_from_attr()
685 qp->comp.state = QP_STATE_INIT; in rxe_qp_from_attr()
689 rxe_dbg_qp(qp, "state -> RTR\n"); in rxe_qp_from_attr()
690 qp->resp.state = QP_STATE_READY; in rxe_qp_from_attr()
694 rxe_dbg_qp(qp, "state -> RTS\n"); in rxe_qp_from_attr()
695 qp->req.state = QP_STATE_READY; in rxe_qp_from_attr()
696 qp->comp.state = QP_STATE_READY; in rxe_qp_from_attr()
700 rxe_dbg_qp(qp, "state -> SQD\n"); in rxe_qp_from_attr()
701 rxe_qp_drain(qp); in rxe_qp_from_attr()
705 rxe_dbg_qp(qp, "state -> SQE !!?\n"); in rxe_qp_from_attr()
710 rxe_dbg_qp(qp, "state -> ERR\n"); in rxe_qp_from_attr()
711 rxe_qp_error(qp); in rxe_qp_from_attr()
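Two details in rxe_qp_from_attr() are easy to miss: PSNs are masked with BTH_PSN_MASK because the BTH carries a 24-bit PSN, and the timeout exponent converts to jiffies with a floor of 1 so a small nonzero timeout never rounds down to zero (which encodes "infinite"). A sketch, assuming BTH_PSN_MASK is 0xFFFFFF and the IB formula 4.096 us * 2^t:

#include <stdint.h>

#define BTH_PSN_MASK 0xFFFFFFu     /* PSN is a 24-bit field in the BTH */

static uint32_t mask_psn(uint32_t psn)
{
	return psn & BTH_PSN_MASK;
}

/* IB timeout encoding: t = 0 means infinite, else 4096ns << t. */
static uint64_t timeout_to_ns(int t)
{
	return t ? 4096ULL << t : 0;   /* caller clamps the jiffies result to >= 1 */
}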
720 int rxe_qp_to_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask) in rxe_qp_to_attr() argument
722 *attr = qp->attr; in rxe_qp_to_attr()
724 attr->rq_psn = qp->resp.psn; in rxe_qp_to_attr()
725 attr->sq_psn = qp->req.psn; in rxe_qp_to_attr()
727 attr->cap.max_send_wr = qp->sq.max_wr; in rxe_qp_to_attr()
728 attr->cap.max_send_sge = qp->sq.max_sge; in rxe_qp_to_attr()
729 attr->cap.max_inline_data = qp->sq.max_inline; in rxe_qp_to_attr()
731 if (!qp->srq) { in rxe_qp_to_attr()
732 attr->cap.max_recv_wr = qp->rq.max_wr; in rxe_qp_to_attr()
733 attr->cap.max_recv_sge = qp->rq.max_sge; in rxe_qp_to_attr()
736 rxe_av_to_attr(&qp->pri_av, &attr->ah_attr); in rxe_qp_to_attr()
737 rxe_av_to_attr(&qp->alt_av, &attr->alt_ah_attr); in rxe_qp_to_attr()
739 if (qp->req.state == QP_STATE_DRAIN) { in rxe_qp_to_attr()
750 rxe_dbg_qp(qp, "attr->sq_draining = %d\n", attr->sq_draining); in rxe_qp_to_attr()
755 int rxe_qp_chk_destroy(struct rxe_qp *qp) in rxe_qp_chk_destroy() argument
761 if (atomic_read(&qp->mcg_num)) { in rxe_qp_chk_destroy()
762 rxe_dbg_qp(qp, "Attempt to destroy while attached to multicast group\n"); in rxe_qp_chk_destroy()
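rxe_qp_chk_destroy() refuses to tear down a QP still attached to a multicast group; attach bumps qp->mcg_num and detach drops it. A sketch of the counter guard (the -EBUSY choice is mine, not confirmed by the excerpt):

#include <errno.h>
#include <stdatomic.h>

struct qp_guard {
	atomic_int mcg_num;        /* one count per multicast attachment */
};

static int qp_chk_destroy(struct qp_guard *qp)
{
	if (atomic_load(&qp->mcg_num))
		return -EBUSY;     /* caller must detach from all groups first */
	return 0;
}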
772 struct rxe_qp *qp = container_of(work, typeof(*qp), cleanup_work.work); in rxe_qp_do_cleanup() local
774 qp->valid = 0; in rxe_qp_do_cleanup()
775 qp->qp_timeout_jiffies = 0; in rxe_qp_do_cleanup()
776 rxe_cleanup_task(&qp->resp.task); in rxe_qp_do_cleanup()
778 if (qp_type(qp) == IB_QPT_RC) { in rxe_qp_do_cleanup()
779 del_timer_sync(&qp->retrans_timer); in rxe_qp_do_cleanup()
780 del_timer_sync(&qp->rnr_nak_timer); in rxe_qp_do_cleanup()
783 rxe_cleanup_task(&qp->req.task); in rxe_qp_do_cleanup()
784 rxe_cleanup_task(&qp->comp.task); in rxe_qp_do_cleanup()
787 if (qp->req.task.func) in rxe_qp_do_cleanup()
788 __rxe_do_task(&qp->req.task); in rxe_qp_do_cleanup()
790 if (qp->sq.queue) { in rxe_qp_do_cleanup()
791 __rxe_do_task(&qp->comp.task); in rxe_qp_do_cleanup()
792 __rxe_do_task(&qp->req.task); in rxe_qp_do_cleanup()
795 if (qp->sq.queue) in rxe_qp_do_cleanup()
796 rxe_queue_cleanup(qp->sq.queue); in rxe_qp_do_cleanup()
798 if (qp->srq) in rxe_qp_do_cleanup()
799 rxe_put(qp->srq); in rxe_qp_do_cleanup()
801 if (qp->rq.queue) in rxe_qp_do_cleanup()
802 rxe_queue_cleanup(qp->rq.queue); in rxe_qp_do_cleanup()
804 if (qp->scq) { in rxe_qp_do_cleanup()
805 atomic_dec(&qp->scq->num_wq); in rxe_qp_do_cleanup()
806 rxe_put(qp->scq); in rxe_qp_do_cleanup()
809 if (qp->rcq) { in rxe_qp_do_cleanup()
810 atomic_dec(&qp->rcq->num_wq); in rxe_qp_do_cleanup()
811 rxe_put(qp->rcq); in rxe_qp_do_cleanup()
814 if (qp->pd) in rxe_qp_do_cleanup()
815 rxe_put(qp->pd); in rxe_qp_do_cleanup()
817 if (qp->resp.mr) in rxe_qp_do_cleanup()
818 rxe_put(qp->resp.mr); in rxe_qp_do_cleanup()
820 free_rd_atomic_resources(qp); in rxe_qp_do_cleanup()
822 if (qp->sk) { in rxe_qp_do_cleanup()
823 if (qp_type(qp) == IB_QPT_RC) in rxe_qp_do_cleanup()
824 sk_dst_reset(qp->sk->sk); in rxe_qp_do_cleanup()
826 kernel_sock_shutdown(qp->sk, SHUT_RDWR); in rxe_qp_do_cleanup()
827 sock_release(qp->sk); in rxe_qp_do_cleanup()
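rxe_qp_do_cleanup() tears down in dependency order: invalidate the QP, stop timers and tasks, flush outstanding work, free the queues, and only then drop references (SRQ, send/recv CQs, PD, any cached responder MR) before shutting down the socket. A sketch of the drop-references-last idiom with a simplified ref_put():

struct ref {
	int count;
};

/* Drop one reference; in the driver the final put frees the object. */
static void ref_put(struct ref *r)
{
	if (r)
		r->count--;
}

/* Safe only after all async work that could use these objects is stopped. */
static void drop_qp_refs(struct ref *srq, struct ref *scq,
			 struct ref *rcq, struct ref *pd)
{
	ref_put(srq);
	ref_put(scq);
	ref_put(rcq);
	ref_put(pd);               /* PD near-last: the others may depend on it */
}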
834 struct rxe_qp *qp = container_of(elem, typeof(*qp), elem); in rxe_qp_cleanup() local
836 execute_in_process_context(rxe_qp_do_cleanup, &qp->cleanup_work); in rxe_qp_cleanup()
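rxe_qp_cleanup() may be invoked where sleeping is not allowed, so it hands the heavy teardown to execute_in_process_context(): run the handler immediately when already in process context, otherwise defer it to the work item embedded in the QP. A sketch of that run-or-defer idiom with placeholder helpers:

#include <stdbool.h>

struct work {
	void (*fn)(struct work *w);
};

static bool may_sleep_here(void) { return true; }          /* placeholder predicate */
static void queue_deferred(struct work *w) { w->fn(w); }   /* placeholder: run later */

static void run_or_defer(struct work *w)
{
	if (may_sleep_here())
		w->fn(w);          /* safe to run (and sleep) right now */
	else
		queue_deferred(w); /* punt to a context that may sleep */
}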