Lines matching refs:t (references to the identifier t)

160 #define KSMBD_TRANS(t) ((struct ksmbd_transport *)&((t)->transport))  argument
197 struct smb_direct_transport *t; member
226 static int smb_direct_post_send_data(struct smb_direct_transport *t,
232 smb_trans_direct_transfort(struct ksmbd_transport *t) in smb_trans_direct_transfort() argument
234 return container_of(t, struct smb_direct_transport, transport); in smb_trans_direct_transfort()
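KSMBD_TRANS() and smb_trans_direct_transfort() are inverse conversions: the macro returns the address of the ksmbd_transport embedded in a smb_direct_transport, and the helper recovers the outer structure from that member with container_of(). A minimal sketch of the pair, with stand-in structure layouts (the real ones carry many more fields):

#include <linux/kernel.h>       /* container_of() */

/* Stand-in layouts; the real structures carry many more fields. */
struct ksmbd_transport {
        void *conn;
};

struct smb_direct_transport {
        struct ksmbd_transport transport;       /* embedded generic transport */
        /* ... RDMA-specific state elided ... */
};

/* Upcast: RDMA transport -> address of the embedded generic transport. */
#define KSMBD_TRANS(t) ((struct ksmbd_transport *)&((t)->transport))

/* Downcast: generic transport pointer -> the RDMA transport embedding it. */
static inline struct smb_direct_transport *
smb_trans_direct_transfort(struct ksmbd_transport *t)
{
        return container_of(t, struct smb_direct_transport, transport);
}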
251 smb_direct_recvmsg *get_free_recvmsg(struct smb_direct_transport *t) in get_free_recvmsg() argument
255 spin_lock(&t->recvmsg_queue_lock); in get_free_recvmsg()
256 if (!list_empty(&t->recvmsg_queue)) { in get_free_recvmsg()
257 recvmsg = list_first_entry(&t->recvmsg_queue, in get_free_recvmsg()
262 spin_unlock(&t->recvmsg_queue_lock); in get_free_recvmsg()
266 static void put_recvmsg(struct smb_direct_transport *t, in put_recvmsg() argument
269 ib_dma_unmap_single(t->cm_id->device, recvmsg->sge.addr, in put_recvmsg()
272 spin_lock(&t->recvmsg_queue_lock); in put_recvmsg()
273 list_add(&recvmsg->list, &t->recvmsg_queue); in put_recvmsg()
274 spin_unlock(&t->recvmsg_queue_lock); in put_recvmsg()
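get_free_recvmsg() and put_recvmsg() manage a lock-protected free list of receive descriptors: a descriptor is pulled off t->recvmsg_queue when a receive is posted and pushed back once its DMA mapping has been torn down. A minimal sketch of the same pattern, using globals and a hypothetical descriptor type instead of per-transport state:

#include <linux/list.h>
#include <linux/spinlock.h>

struct recvmsg {                         /* hypothetical receive descriptor */
        struct list_head list;
};

static LIST_HEAD(recvmsg_queue);         /* free list */
static DEFINE_SPINLOCK(recvmsg_queue_lock);

/* Take one descriptor off the free list, or NULL if it is empty. */
static struct recvmsg *get_free_recvmsg(void)
{
        struct recvmsg *msg = NULL;

        spin_lock(&recvmsg_queue_lock);
        if (!list_empty(&recvmsg_queue)) {
                msg = list_first_entry(&recvmsg_queue, struct recvmsg, list);
                list_del(&msg->list);
        }
        spin_unlock(&recvmsg_queue_lock);
        return msg;
}

/* Return a descriptor to the free list. */
static void put_recvmsg(struct recvmsg *msg)
{
        spin_lock(&recvmsg_queue_lock);
        list_add(&msg->list, &recvmsg_queue);
        spin_unlock(&recvmsg_queue_lock);
}

The listing shows the same pattern duplicated for empty_recvmsg_queue, which uses list_add_tail() on the put side.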
278 smb_direct_recvmsg *get_empty_recvmsg(struct smb_direct_transport *t) in get_empty_recvmsg() argument
282 spin_lock(&t->empty_recvmsg_queue_lock); in get_empty_recvmsg()
283 if (!list_empty(&t->empty_recvmsg_queue)) { in get_empty_recvmsg()
284 recvmsg = list_first_entry(&t->empty_recvmsg_queue, in get_empty_recvmsg()
288 spin_unlock(&t->empty_recvmsg_queue_lock); in get_empty_recvmsg()
292 static void put_empty_recvmsg(struct smb_direct_transport *t, in put_empty_recvmsg() argument
295 ib_dma_unmap_single(t->cm_id->device, recvmsg->sge.addr, in put_empty_recvmsg()
298 spin_lock(&t->empty_recvmsg_queue_lock); in put_empty_recvmsg()
299 list_add_tail(&recvmsg->list, &t->empty_recvmsg_queue); in put_empty_recvmsg()
300 spin_unlock(&t->empty_recvmsg_queue_lock); in put_empty_recvmsg()
303 static void enqueue_reassembly(struct smb_direct_transport *t, in enqueue_reassembly() argument
307 spin_lock(&t->reassembly_queue_lock); in enqueue_reassembly()
308 list_add_tail(&recvmsg->list, &t->reassembly_queue); in enqueue_reassembly()
309 t->reassembly_queue_length++; in enqueue_reassembly()
317 t->reassembly_data_length += data_length; in enqueue_reassembly()
318 spin_unlock(&t->reassembly_queue_lock); in enqueue_reassembly()
321 static struct smb_direct_recvmsg *get_first_reassembly(struct smb_direct_transport *t) in get_first_reassembly() argument
323 if (!list_empty(&t->reassembly_queue)) in get_first_reassembly()
324 return list_first_entry(&t->reassembly_queue, in get_first_reassembly()
332 struct smb_direct_transport *t = in smb_direct_disconnect_rdma_work() local
336 if (t->status == SMB_DIRECT_CS_CONNECTED) { in smb_direct_disconnect_rdma_work()
337 t->status = SMB_DIRECT_CS_DISCONNECTING; in smb_direct_disconnect_rdma_work()
338 rdma_disconnect(t->cm_id); in smb_direct_disconnect_rdma_work()
343 smb_direct_disconnect_rdma_connection(struct smb_direct_transport *t) in smb_direct_disconnect_rdma_connection() argument
345 if (t->status == SMB_DIRECT_CS_CONNECTED) in smb_direct_disconnect_rdma_connection()
346 queue_work(smb_direct_wq, &t->disconnect_work); in smb_direct_disconnect_rdma_connection()
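smb_direct_disconnect_rdma_connection() never calls rdma_disconnect() directly; since it may run from a completion handler it only queues disconnect_work, and the work item performs the state change and the actual disconnect. A minimal sketch of that deferral, with a hypothetical transport type and workqueue:

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <rdma/rdma_cm.h>

enum conn_status { CS_CONNECTED, CS_DISCONNECTING, CS_DISCONNECTED };

struct xport {                              /* hypothetical transport */
        enum conn_status status;
        struct rdma_cm_id *cm_id;
        struct work_struct disconnect_work;
};

static struct workqueue_struct *xport_wq;   /* hypothetical workqueue */

/* Runs in process context: safe to issue the CM disconnect here. */
static void disconnect_rdma_work(struct work_struct *work)
{
        struct xport *t = container_of(work, struct xport, disconnect_work);

        if (t->status == CS_CONNECTED) {
                t->status = CS_DISCONNECTING;
                rdma_disconnect(t->cm_id);
        }
}

/* May be called from a CQ completion handler: only defer the work. */
static void disconnect_rdma_connection(struct xport *t)
{
        if (t->status == CS_CONNECTED)
                queue_work(xport_wq, &t->disconnect_work);
}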
351 struct smb_direct_transport *t = container_of(work, in smb_direct_send_immediate_work() local
354 if (t->status != SMB_DIRECT_CS_CONNECTED) in smb_direct_send_immediate_work()
357 smb_direct_post_send_data(t, NULL, NULL, 0, 0); in smb_direct_send_immediate_work()
362 struct smb_direct_transport *t; in alloc_transport() local
365 t = kzalloc(sizeof(*t), GFP_KERNEL); in alloc_transport()
366 if (!t) in alloc_transport()
369 t->cm_id = cm_id; in alloc_transport()
370 cm_id->context = t; in alloc_transport()
372 t->status = SMB_DIRECT_CS_NEW; in alloc_transport()
373 init_waitqueue_head(&t->wait_status); in alloc_transport()
375 spin_lock_init(&t->reassembly_queue_lock); in alloc_transport()
376 INIT_LIST_HEAD(&t->reassembly_queue); in alloc_transport()
377 t->reassembly_data_length = 0; in alloc_transport()
378 t->reassembly_queue_length = 0; in alloc_transport()
379 init_waitqueue_head(&t->wait_reassembly_queue); in alloc_transport()
380 init_waitqueue_head(&t->wait_send_credits); in alloc_transport()
381 init_waitqueue_head(&t->wait_rw_credits); in alloc_transport()
383 spin_lock_init(&t->receive_credit_lock); in alloc_transport()
384 spin_lock_init(&t->recvmsg_queue_lock); in alloc_transport()
385 INIT_LIST_HEAD(&t->recvmsg_queue); in alloc_transport()
387 spin_lock_init(&t->empty_recvmsg_queue_lock); in alloc_transport()
388 INIT_LIST_HEAD(&t->empty_recvmsg_queue); in alloc_transport()
390 init_waitqueue_head(&t->wait_send_pending); in alloc_transport()
391 atomic_set(&t->send_pending, 0); in alloc_transport()
393 spin_lock_init(&t->lock_new_recv_credits); in alloc_transport()
395 INIT_DELAYED_WORK(&t->post_recv_credits_work, in alloc_transport()
397 INIT_WORK(&t->send_immediate_work, smb_direct_send_immediate_work); in alloc_transport()
398 INIT_WORK(&t->disconnect_work, smb_direct_disconnect_rdma_work); in alloc_transport()
403 conn->transport = KSMBD_TRANS(t); in alloc_transport()
404 KSMBD_TRANS(t)->conn = conn; in alloc_transport()
405 KSMBD_TRANS(t)->ops = &ksmbd_smb_direct_transport_ops; in alloc_transport()
406 return t; in alloc_transport()
408 kfree(t); in alloc_transport()
412 static void free_transport(struct smb_direct_transport *t) in free_transport() argument
416 wake_up_interruptible(&t->wait_send_credits); in free_transport()
419 wait_event(t->wait_send_pending, in free_transport()
420 atomic_read(&t->send_pending) == 0); in free_transport()
422 cancel_work_sync(&t->disconnect_work); in free_transport()
423 cancel_delayed_work_sync(&t->post_recv_credits_work); in free_transport()
424 cancel_work_sync(&t->send_immediate_work); in free_transport()
426 if (t->qp) { in free_transport()
427 ib_drain_qp(t->qp); in free_transport()
428 ib_mr_pool_destroy(t->qp, &t->qp->rdma_mrs); in free_transport()
429 ib_destroy_qp(t->qp); in free_transport()
434 spin_lock(&t->reassembly_queue_lock); in free_transport()
435 recvmsg = get_first_reassembly(t); in free_transport()
438 spin_unlock(&t->reassembly_queue_lock); in free_transport()
439 put_recvmsg(t, recvmsg); in free_transport()
441 spin_unlock(&t->reassembly_queue_lock); in free_transport()
444 t->reassembly_data_length = 0; in free_transport()
446 if (t->send_cq) in free_transport()
447 ib_free_cq(t->send_cq); in free_transport()
448 if (t->recv_cq) in free_transport()
449 ib_free_cq(t->recv_cq); in free_transport()
450 if (t->pd) in free_transport()
451 ib_dealloc_pd(t->pd); in free_transport()
452 if (t->cm_id) in free_transport()
453 rdma_destroy_id(t->cm_id); in free_transport()
455 smb_direct_destroy_pools(t); in free_transport()
456 ksmbd_conn_free(KSMBD_TRANS(t)->conn); in free_transport()
457 kfree(t); in free_transport()
461 *smb_direct_alloc_sendmsg(struct smb_direct_transport *t) in smb_direct_alloc_sendmsg() argument
465 msg = mempool_alloc(t->sendmsg_mempool, GFP_KERNEL); in smb_direct_alloc_sendmsg()
468 msg->transport = t; in smb_direct_alloc_sendmsg()
474 static void smb_direct_free_sendmsg(struct smb_direct_transport *t, in smb_direct_free_sendmsg() argument
480 ib_dma_unmap_single(t->cm_id->device, in smb_direct_free_sendmsg()
484 ib_dma_unmap_page(t->cm_id->device, in smb_direct_free_sendmsg()
488 mempool_free(msg, t->sendmsg_mempool); in smb_direct_free_sendmsg()
538 struct smb_direct_transport *t; in recv_done() local
541 t = recvmsg->transport; in recv_done()
548 smb_direct_disconnect_rdma_connection(t); in recv_done()
550 put_empty_recvmsg(t, recvmsg); in recv_done()
564 put_empty_recvmsg(t, recvmsg); in recv_done()
567 t->negotiation_requested = true; in recv_done()
568 t->full_packet_received = true; in recv_done()
569 t->status = SMB_DIRECT_CS_CONNECTED; in recv_done()
570 enqueue_reassembly(t, recvmsg, 0); in recv_done()
571 wake_up_interruptible(&t->wait_status); in recv_done()
581 put_empty_recvmsg(t, recvmsg); in recv_done()
589 put_empty_recvmsg(t, recvmsg); in recv_done()
593 if (t->full_packet_received) in recv_done()
597 t->full_packet_received = false; in recv_done()
599 t->full_packet_received = true; in recv_done()
601 enqueue_reassembly(t, recvmsg, (int)data_length); in recv_done()
602 wake_up_interruptible(&t->wait_reassembly_queue); in recv_done()
604 spin_lock(&t->receive_credit_lock); in recv_done()
605 receive_credits = --(t->recv_credits); in recv_done()
606 avail_recvmsg_count = t->count_avail_recvmsg; in recv_done()
607 spin_unlock(&t->receive_credit_lock); in recv_done()
609 put_empty_recvmsg(t, recvmsg); in recv_done()
611 spin_lock(&t->receive_credit_lock); in recv_done()
612 receive_credits = --(t->recv_credits); in recv_done()
613 avail_recvmsg_count = ++(t->count_avail_recvmsg); in recv_done()
614 spin_unlock(&t->receive_credit_lock); in recv_done()
617 t->recv_credit_target = in recv_done()
620 &t->send_credits); in recv_done()
624 queue_work(smb_direct_wq, &t->send_immediate_work); in recv_done()
626 if (atomic_read(&t->send_credits) > 0) in recv_done()
627 wake_up_interruptible(&t->wait_send_credits); in recv_done()
631 &t->post_recv_credits_work, 0); in recv_done()
639 static int smb_direct_post_recv(struct smb_direct_transport *t, in smb_direct_post_recv() argument
645 recvmsg->sge.addr = ib_dma_map_single(t->cm_id->device, in smb_direct_post_recv()
646 recvmsg->packet, t->max_recv_size, in smb_direct_post_recv()
648 ret = ib_dma_mapping_error(t->cm_id->device, recvmsg->sge.addr); in smb_direct_post_recv()
651 recvmsg->sge.length = t->max_recv_size; in smb_direct_post_recv()
652 recvmsg->sge.lkey = t->pd->local_dma_lkey; in smb_direct_post_recv()
660 ret = ib_post_recv(t->qp, &wr, NULL); in smb_direct_post_recv()
663 ib_dma_unmap_single(t->cm_id->device, in smb_direct_post_recv()
666 smb_direct_disconnect_rdma_connection(t); in smb_direct_post_recv()
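smb_direct_post_recv() follows the usual verbs receive path: DMA-map the buffer, describe it with an SGE using the PD's local_dma_lkey, post it with ib_post_recv(), and unmap (and tear down the connection) if the post fails. A condensed sketch of that sequence, with the ksmbd-specific types replaced by hypothetical parameters:

#include <linux/errno.h>
#include <rdma/ib_verbs.h>

/* Hypothetical helper: post one receive buffer on the given QP. */
static int post_one_recv(struct ib_device *dev, struct ib_pd *pd,
                         struct ib_qp *qp, void *buf, u32 len,
                         struct ib_cqe *cqe)
{
        struct ib_sge sge;
        struct ib_recv_wr wr;
        int ret;

        sge.addr = ib_dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
        if (ib_dma_mapping_error(dev, sge.addr))
                return -EIO;
        sge.length = len;
        sge.lkey = pd->local_dma_lkey;

        wr.next = NULL;
        wr.wr_cqe = cqe;        /* completion routed through cqe->done() */
        wr.sg_list = &sge;
        wr.num_sge = 1;

        ret = ib_post_recv(qp, &wr, NULL);
        if (ret)
                ib_dma_unmap_single(dev, sge.addr, len, DMA_FROM_DEVICE);
        return ret;
}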
672 static int smb_direct_read(struct ksmbd_transport *t, char *buf, in smb_direct_read() argument
680 struct smb_direct_transport *st = smb_trans_direct_transfort(t); in smb_direct_read()
802 struct smb_direct_transport *t = container_of(work, in smb_direct_post_recv_credits() local
809 spin_lock(&t->receive_credit_lock); in smb_direct_post_recv_credits()
810 receive_credits = t->recv_credits; in smb_direct_post_recv_credits()
811 spin_unlock(&t->receive_credit_lock); in smb_direct_post_recv_credits()
813 if (receive_credits < t->recv_credit_target) { in smb_direct_post_recv_credits()
816 recvmsg = get_free_recvmsg(t); in smb_direct_post_recv_credits()
818 recvmsg = get_empty_recvmsg(t); in smb_direct_post_recv_credits()
831 ret = smb_direct_post_recv(t, recvmsg); in smb_direct_post_recv_credits()
834 put_recvmsg(t, recvmsg); in smb_direct_post_recv_credits()
841 spin_lock(&t->receive_credit_lock); in smb_direct_post_recv_credits()
842 t->recv_credits += credits; in smb_direct_post_recv_credits()
843 t->count_avail_recvmsg -= credits; in smb_direct_post_recv_credits()
844 spin_unlock(&t->receive_credit_lock); in smb_direct_post_recv_credits()
846 spin_lock(&t->lock_new_recv_credits); in smb_direct_post_recv_credits()
847 t->new_recv_credits += credits; in smb_direct_post_recv_credits()
848 spin_unlock(&t->lock_new_recv_credits); in smb_direct_post_recv_credits()
851 queue_work(smb_direct_wq, &t->send_immediate_work); in smb_direct_post_recv_credits()
857 struct smb_direct_transport *t; in send_done() local
861 t = sendmsg->transport; in send_done()
871 smb_direct_disconnect_rdma_connection(t); in send_done()
874 if (atomic_dec_and_test(&t->send_pending)) in send_done()
875 wake_up(&t->wait_send_pending); in send_done()
883 smb_direct_free_sendmsg(t, sibling); in send_done()
887 smb_direct_free_sendmsg(t, sibling); in send_done()
890 static int manage_credits_prior_sending(struct smb_direct_transport *t) in manage_credits_prior_sending() argument
894 spin_lock(&t->lock_new_recv_credits); in manage_credits_prior_sending()
895 new_credits = t->new_recv_credits; in manage_credits_prior_sending()
896 t->new_recv_credits = 0; in manage_credits_prior_sending()
897 spin_unlock(&t->lock_new_recv_credits); in manage_credits_prior_sending()
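manage_credits_prior_sending() drains the receive credits accumulated by the post-recv path so the caller can advertise them in the outgoing packet's credits_granted field; the accumulator is read and reset under lock_new_recv_credits. A minimal sketch of that hand-off, with the per-transport fields replaced by globals:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(new_recv_credits_lock);
static int new_recv_credits;            /* granted by the post-recv path */

/* Drain the accumulator; the result goes into credits_granted. */
static int take_granted_credits(void)
{
        int credits;

        spin_lock(&new_recv_credits_lock);
        credits = new_recv_credits;
        new_recv_credits = 0;
        spin_unlock(&new_recv_credits_lock);
        return credits;
}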
902 static int smb_direct_post_send(struct smb_direct_transport *t, in smb_direct_post_send() argument
907 atomic_inc(&t->send_pending); in smb_direct_post_send()
908 ret = ib_post_send(t->qp, wr, NULL); in smb_direct_post_send()
911 if (atomic_dec_and_test(&t->send_pending)) in smb_direct_post_send()
912 wake_up(&t->wait_send_pending); in smb_direct_post_send()
913 smb_direct_disconnect_rdma_connection(t); in smb_direct_post_send()
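smb_direct_post_send() brackets ib_post_send() with the send_pending counter: the counter is incremented before posting and, on failure, decremented again with a wake-up so that waiters on wait_send_pending (such as free_transport()) are not left hanging; send_done() performs the matching decrement on completion. A minimal sketch of the counter pattern, with hypothetical global state:

#include <linux/atomic.h>
#include <linux/wait.h>
#include <rdma/ib_verbs.h>

static atomic_t send_pending = ATOMIC_INIT(0);      /* in-flight sends */
static DECLARE_WAIT_QUEUE_HEAD(wait_send_pending);

static int post_send_tracked(struct ib_qp *qp, struct ib_send_wr *wr)
{
        int ret;

        atomic_inc(&send_pending);
        ret = ib_post_send(qp, wr, NULL);
        if (ret) {
                /* Undo the accounting; no completion will arrive. */
                if (atomic_dec_and_test(&send_pending))
                        wake_up(&wait_send_pending);
        }
        return ret;
}

/* Called from the send completion handler for every finished WR. */
static void send_completed(void)
{
        if (atomic_dec_and_test(&send_pending))
                wake_up(&wait_send_pending);
}

/* Teardown path: block until every posted send has completed. */
static void drain_sends(void)
{
        wait_event(wait_send_pending, atomic_read(&send_pending) == 0);
}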
918 static void smb_direct_send_ctx_init(struct smb_direct_transport *t, in smb_direct_send_ctx_init() argument
929 static int smb_direct_flush_send_list(struct smb_direct_transport *t, in smb_direct_flush_send_list() argument
953 ret = smb_direct_post_send(t, &first->wr); in smb_direct_flush_send_list()
955 smb_direct_send_ctx_init(t, send_ctx, in smb_direct_flush_send_list()
959 atomic_add(send_ctx->wr_cnt, &t->send_credits); in smb_direct_flush_send_list()
960 wake_up(&t->wait_send_credits); in smb_direct_flush_send_list()
963 smb_direct_free_sendmsg(t, first); in smb_direct_flush_send_list()
969 static int wait_for_credits(struct smb_direct_transport *t, in wait_for_credits() argument
982 t->status != SMB_DIRECT_CS_CONNECTED); in wait_for_credits()
984 if (t->status != SMB_DIRECT_CS_CONNECTED) in wait_for_credits()
991 static int wait_for_send_credits(struct smb_direct_transport *t, in wait_for_send_credits() argument
997 (send_ctx->wr_cnt >= 16 || atomic_read(&t->send_credits) <= 1)) { in wait_for_send_credits()
998 ret = smb_direct_flush_send_list(t, send_ctx, false); in wait_for_send_credits()
1003 return wait_for_credits(t, &t->wait_send_credits, &t->send_credits, 1); in wait_for_send_credits()
1006 static int wait_for_rw_credits(struct smb_direct_transport *t, int credits) in wait_for_rw_credits() argument
1008 return wait_for_credits(t, &t->wait_rw_credits, &t->rw_credits, credits); in wait_for_rw_credits()
1011 static int calc_rw_credits(struct smb_direct_transport *t, in calc_rw_credits() argument
1015 t->pages_per_rw_credit); in calc_rw_credits()
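calc_rw_credits() converts a buffer into a number of RDMA read/write credits by dividing the number of pages the buffer spans by pages_per_rw_credit, rounding up. The page-count helper is not visible in the listing, so the sketch below computes the spanned-page count directly; the names and signature are assumptions:

#include <linux/kernel.h>   /* DIV_ROUND_UP() */
#include <linux/mm.h>       /* PAGE_SIZE */

/* Credits needed to cover @len bytes starting at @buf. */
static int calc_rw_credits(void *buf, unsigned int len,
                           unsigned int pages_per_rw_credit)
{
        unsigned int pages = DIV_ROUND_UP((unsigned long)buf % PAGE_SIZE + len,
                                          PAGE_SIZE);

        return DIV_ROUND_UP(pages, pages_per_rw_credit);
}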
1018 static int smb_direct_create_header(struct smb_direct_transport *t, in smb_direct_create_header() argument
1027 sendmsg = smb_direct_alloc_sendmsg(t); in smb_direct_create_header()
1033 packet->credits_requested = cpu_to_le16(t->send_credit_target); in smb_direct_create_header()
1034 packet->credits_granted = cpu_to_le16(manage_credits_prior_sending(t)); in smb_direct_create_header()
1061 sendmsg->sge[0].addr = ib_dma_map_single(t->cm_id->device, in smb_direct_create_header()
1065 ret = ib_dma_mapping_error(t->cm_id->device, sendmsg->sge[0].addr); in smb_direct_create_header()
1067 smb_direct_free_sendmsg(t, sendmsg); in smb_direct_create_header()
1073 sendmsg->sge[0].lkey = t->pd->local_dma_lkey; in smb_direct_create_header()
1123 static int post_sendmsg(struct smb_direct_transport *t, in post_sendmsg() argument
1130 ib_dma_sync_single_for_device(t->cm_id->device, in post_sendmsg()
1158 return smb_direct_post_send(t, &msg->wr); in post_sendmsg()
1161 static int smb_direct_post_send_data(struct smb_direct_transport *t, in smb_direct_post_send_data() argument
1171 ret = wait_for_send_credits(t, send_ctx); in smb_direct_post_send_data()
1179 ret = smb_direct_create_header(t, data_length, remaining_data_length, in smb_direct_post_send_data()
1182 atomic_inc(&t->send_credits); in smb_direct_post_send_data()
1191 sg_cnt = get_mapped_sg_list(t->cm_id->device, in smb_direct_post_send_data()
1202 ib_dma_unmap_sg(t->cm_id->device, sg, sg_cnt, in smb_direct_post_send_data()
1211 sge->lkey = t->pd->local_dma_lkey; in smb_direct_post_send_data()
1216 ret = post_sendmsg(t, send_ctx, msg); in smb_direct_post_send_data()
1221 smb_direct_free_sendmsg(t, msg); in smb_direct_post_send_data()
1222 atomic_inc(&t->send_credits); in smb_direct_post_send_data()
1226 static int smb_direct_writev(struct ksmbd_transport *t, in smb_direct_writev() argument
1230 struct smb_direct_transport *st = smb_trans_direct_transfort(t); in smb_direct_writev()
1318 static void smb_direct_free_rdma_rw_msg(struct smb_direct_transport *t, in smb_direct_free_rdma_rw_msg() argument
1322 rdma_rw_ctx_destroy(&msg->rw_ctx, t->qp, t->qp->port, in smb_direct_free_rdma_rw_msg()
1333 struct smb_direct_transport *t = msg->t; in read_write_done() local
1340 smb_direct_disconnect_rdma_connection(t); in read_write_done()
1356 static int smb_direct_rdma_xmit(struct smb_direct_transport *t, in smb_direct_rdma_xmit() argument
1372 if (t->status != SMB_DIRECT_CS_CONNECTED) in smb_direct_rdma_xmit()
1381 credits_needed += calc_rw_credits(t, desc_buf, desc_buf_len); in smb_direct_rdma_xmit()
1385 total_length > t->max_rdma_rw_size) in smb_direct_rdma_xmit()
1392 ret = wait_for_rw_credits(t, credits_needed); in smb_direct_rdma_xmit()
1408 msg->t = t; in smb_direct_rdma_xmit()
1430 ret = rdma_rw_ctx_init(&msg->rw_ctx, t->qp, t->qp->port, in smb_direct_rdma_xmit()
1451 first_wr = rdma_rw_ctx_wrs(&msg->rw_ctx, t->qp, t->qp->port, in smb_direct_rdma_xmit()
1455 ret = ib_post_send(t->qp, first_wr, NULL); in smb_direct_rdma_xmit()
1467 smb_direct_free_rdma_rw_msg(t, msg, in smb_direct_rdma_xmit()
1470 atomic_add(credits_needed, &t->rw_credits); in smb_direct_rdma_xmit()
1471 wake_up(&t->wait_rw_credits); in smb_direct_rdma_xmit()
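smb_direct_rdma_xmit() drives the kernel rdma_rw helpers: reserve rw credits, initialise an rdma_rw_ctx against each remote (offset, token) descriptor, turn it into a work-request chain with rdma_rw_ctx_wrs(), post the chain, and destroy the context (returning the credits) from the completion handler. A condensed, single-descriptor sketch of that lifecycle; the function name and parameters are hypothetical:

#include <rdma/rw.h>

/*
 * @ctx lives in a caller-owned per-I/O descriptor (the listing's msg->rw_ctx)
 * so it stays valid until the completion handler runs.
 */
static int rdma_xmit_one(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
                         u32 port_num, struct scatterlist *sgl, u32 sg_cnt,
                         u64 remote_offset, u32 remote_key, bool is_read,
                         struct ib_cqe *cqe)
{
        enum dma_data_direction dir = is_read ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
        struct ib_send_wr *first_wr;
        int ret;

        /* Map the local pages and set up the RDMA READ/WRITE context. */
        ret = rdma_rw_ctx_init(ctx, qp, port_num, sgl, sg_cnt, 0,
                               remote_offset, remote_key, dir);
        if (ret < 0)
                return ret;

        /* Build the WR chain; completion is signalled through cqe->done(). */
        first_wr = rdma_rw_ctx_wrs(ctx, qp, port_num, cqe, NULL);

        ret = ib_post_send(qp, first_wr, NULL);
        if (ret) {
                /* No completion will arrive for a failed post: clean up now. */
                rdma_rw_ctx_destroy(ctx, qp, port_num, sgl, sg_cnt, dir);
                return ret;
        }

        /*
         * On success the context is torn down from the completion handler
         * (read_write_done() -> smb_direct_free_rdma_rw_msg() in the
         * listing), which also returns the rw credits.
         */
        return 0;
}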
1475 static int smb_direct_rdma_write(struct ksmbd_transport *t, in smb_direct_rdma_write() argument
1480 return smb_direct_rdma_xmit(smb_trans_direct_transfort(t), buf, buflen, in smb_direct_rdma_write()
1484 static int smb_direct_rdma_read(struct ksmbd_transport *t, in smb_direct_rdma_read() argument
1489 return smb_direct_rdma_xmit(smb_trans_direct_transfort(t), buf, buflen, in smb_direct_rdma_read()
1493 static void smb_direct_disconnect(struct ksmbd_transport *t) in smb_direct_disconnect() argument
1495 struct smb_direct_transport *st = smb_trans_direct_transfort(t); in smb_direct_disconnect()
1505 static void smb_direct_shutdown(struct ksmbd_transport *t) in smb_direct_shutdown() argument
1507 struct smb_direct_transport *st = smb_trans_direct_transfort(t); in smb_direct_shutdown()
1517 struct smb_direct_transport *t = cm_id->context; in smb_direct_cm_handler() local
1524 t->status = SMB_DIRECT_CS_CONNECTED; in smb_direct_cm_handler()
1525 wake_up_interruptible(&t->wait_status); in smb_direct_cm_handler()
1530 ib_drain_qp(t->qp); in smb_direct_cm_handler()
1532 t->status = SMB_DIRECT_CS_DISCONNECTED; in smb_direct_cm_handler()
1533 wake_up_interruptible(&t->wait_status); in smb_direct_cm_handler()
1534 wake_up_interruptible(&t->wait_reassembly_queue); in smb_direct_cm_handler()
1535 wake_up(&t->wait_send_credits); in smb_direct_cm_handler()
1539 t->status = SMB_DIRECT_CS_DISCONNECTED; in smb_direct_cm_handler()
1540 wake_up_interruptible(&t->wait_status); in smb_direct_cm_handler()
1554 struct smb_direct_transport *t = context; in smb_direct_qpair_handler() local
1557 t->cm_id, ib_event_msg(event->event), event->event); in smb_direct_qpair_handler()
1562 smb_direct_disconnect_rdma_connection(t); in smb_direct_qpair_handler()
1569 static int smb_direct_send_negotiate_response(struct smb_direct_transport *t, in smb_direct_send_negotiate_response() argument
1576 sendmsg = smb_direct_alloc_sendmsg(t); in smb_direct_send_negotiate_response()
1593 cpu_to_le16(t->send_credit_target); in smb_direct_send_negotiate_response()
1594 resp->credits_granted = cpu_to_le16(manage_credits_prior_sending(t)); in smb_direct_send_negotiate_response()
1595 resp->max_readwrite_size = cpu_to_le32(t->max_rdma_rw_size); in smb_direct_send_negotiate_response()
1596 resp->preferred_send_size = cpu_to_le32(t->max_send_size); in smb_direct_send_negotiate_response()
1597 resp->max_receive_size = cpu_to_le32(t->max_recv_size); in smb_direct_send_negotiate_response()
1599 cpu_to_le32(t->max_fragmented_recv_size); in smb_direct_send_negotiate_response()
1602 sendmsg->sge[0].addr = ib_dma_map_single(t->cm_id->device, in smb_direct_send_negotiate_response()
1605 ret = ib_dma_mapping_error(t->cm_id->device, sendmsg->sge[0].addr); in smb_direct_send_negotiate_response()
1607 smb_direct_free_sendmsg(t, sendmsg); in smb_direct_send_negotiate_response()
1613 sendmsg->sge[0].lkey = t->pd->local_dma_lkey; in smb_direct_send_negotiate_response()
1615 ret = post_sendmsg(t, NULL, sendmsg); in smb_direct_send_negotiate_response()
1617 smb_direct_free_sendmsg(t, sendmsg); in smb_direct_send_negotiate_response()
1621 wait_event(t->wait_send_pending, in smb_direct_send_negotiate_response()
1622 atomic_read(&t->send_pending) == 0); in smb_direct_send_negotiate_response()
1626 static int smb_direct_accept_client(struct smb_direct_transport *t) in smb_direct_accept_client() argument
1634 conn_param.initiator_depth = min_t(u8, t->cm_id->device->attrs.max_qp_rd_atom, in smb_direct_accept_client()
1638 t->cm_id->device->ops.get_port_immutable(t->cm_id->device, in smb_direct_accept_client()
1639 t->cm_id->port_num, in smb_direct_accept_client()
1654 ret = rdma_accept(t->cm_id, &conn_param); in smb_direct_accept_client()
1662 static int smb_direct_prepare_negotiation(struct smb_direct_transport *t) in smb_direct_prepare_negotiation() argument
1667 recvmsg = get_free_recvmsg(t); in smb_direct_prepare_negotiation()
1672 ret = smb_direct_post_recv(t, recvmsg); in smb_direct_prepare_negotiation()
1678 t->negotiation_requested = false; in smb_direct_prepare_negotiation()
1679 ret = smb_direct_accept_client(t); in smb_direct_prepare_negotiation()
1685 smb_direct_post_recv_credits(&t->post_recv_credits_work.work); in smb_direct_prepare_negotiation()
1688 put_recvmsg(t, recvmsg); in smb_direct_prepare_negotiation()
1692 static unsigned int smb_direct_get_max_fr_pages(struct smb_direct_transport *t) in smb_direct_get_max_fr_pages() argument
1695 t->cm_id->device->attrs.max_fast_reg_page_list_len, in smb_direct_get_max_fr_pages()
1699 static int smb_direct_init_params(struct smb_direct_transport *t, in smb_direct_init_params() argument
1702 struct ib_device *device = t->cm_id->device; in smb_direct_init_params()
1709 t->max_send_size = smb_direct_max_send_size; in smb_direct_init_params()
1710 max_send_sges = DIV_ROUND_UP(t->max_send_size, PAGE_SIZE) + 3; in smb_direct_init_params()
1712 pr_err("max_send_size %d is too large\n", t->max_send_size); in smb_direct_init_params()
1723 t->max_rdma_rw_size = smb_direct_max_read_write_size; in smb_direct_init_params()
1724 t->pages_per_rw_credit = smb_direct_get_max_fr_pages(t); in smb_direct_init_params()
1725 t->max_rw_credits = DIV_ROUND_UP(t->max_rdma_rw_size, in smb_direct_init_params()
1726 (t->pages_per_rw_credit - 1) * in smb_direct_init_params()
1734 DIV_ROUND_UP(t->pages_per_rw_credit, in smb_direct_init_params()
1736 max_rw_wrs = t->max_rw_credits * wrs_per_credit; in smb_direct_init_params()
1763 t->recv_credits = 0; in smb_direct_init_params()
1764 t->count_avail_recvmsg = 0; in smb_direct_init_params()
1766 t->recv_credit_max = smb_direct_receive_credit_max; in smb_direct_init_params()
1767 t->recv_credit_target = 10; in smb_direct_init_params()
1768 t->new_recv_credits = 0; in smb_direct_init_params()
1770 t->send_credit_target = smb_direct_send_credit_target; in smb_direct_init_params()
1771 atomic_set(&t->send_credits, 0); in smb_direct_init_params()
1772 atomic_set(&t->rw_credits, t->max_rw_credits); in smb_direct_init_params()
1774 t->max_send_size = smb_direct_max_send_size; in smb_direct_init_params()
1775 t->max_recv_size = smb_direct_max_receive_size; in smb_direct_init_params()
1776 t->max_fragmented_recv_size = smb_direct_max_fragmented_recv_size; in smb_direct_init_params()
1779 cap->max_recv_wr = t->recv_credit_max; in smb_direct_init_params()
1783 cap->max_rdma_ctxs = t->max_rw_credits; in smb_direct_init_params()
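The rw-credit sizing in smb_direct_init_params() ties max_rdma_rw_size to the fast-registration MR geometry: each credit covers (pages_per_rw_credit - 1) pages of payload, one page being reserved for an unaligned start, and max_rw_credits is the rounded-up quotient. The listing truncates the divisor, so the PAGE_SIZE multiplier below is an assumption; the example numbers are purely illustrative:

#include <linux/kernel.h>
#include <linux/mm.h>

/*
 * Example with 4 KiB pages: a 1 MiB max R/W size and 256 pages per MR give
 * DIV_ROUND_UP(1048576, 255 * 4096) = DIV_ROUND_UP(1048576, 1044480) = 2.
 */
static unsigned int max_rw_credits(unsigned int max_rdma_rw_size,
                                   unsigned int pages_per_rw_credit)
{
        return DIV_ROUND_UP(max_rdma_rw_size,
                            (pages_per_rw_credit - 1) * PAGE_SIZE);
}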
1787 static void smb_direct_destroy_pools(struct smb_direct_transport *t) in smb_direct_destroy_pools() argument
1791 while ((recvmsg = get_free_recvmsg(t))) in smb_direct_destroy_pools()
1792 mempool_free(recvmsg, t->recvmsg_mempool); in smb_direct_destroy_pools()
1793 while ((recvmsg = get_empty_recvmsg(t))) in smb_direct_destroy_pools()
1794 mempool_free(recvmsg, t->recvmsg_mempool); in smb_direct_destroy_pools()
1796 mempool_destroy(t->recvmsg_mempool); in smb_direct_destroy_pools()
1797 t->recvmsg_mempool = NULL; in smb_direct_destroy_pools()
1799 kmem_cache_destroy(t->recvmsg_cache); in smb_direct_destroy_pools()
1800 t->recvmsg_cache = NULL; in smb_direct_destroy_pools()
1802 mempool_destroy(t->sendmsg_mempool); in smb_direct_destroy_pools()
1803 t->sendmsg_mempool = NULL; in smb_direct_destroy_pools()
1805 kmem_cache_destroy(t->sendmsg_cache); in smb_direct_destroy_pools()
1806 t->sendmsg_cache = NULL; in smb_direct_destroy_pools()
1809 static int smb_direct_create_pools(struct smb_direct_transport *t) in smb_direct_create_pools() argument
1815 snprintf(name, sizeof(name), "smb_direct_rqst_pool_%p", t); in smb_direct_create_pools()
1816 t->sendmsg_cache = kmem_cache_create(name, in smb_direct_create_pools()
1820 if (!t->sendmsg_cache) in smb_direct_create_pools()
1823 t->sendmsg_mempool = mempool_create(t->send_credit_target, in smb_direct_create_pools()
1825 t->sendmsg_cache); in smb_direct_create_pools()
1826 if (!t->sendmsg_mempool) in smb_direct_create_pools()
1829 snprintf(name, sizeof(name), "smb_direct_resp_%p", t); in smb_direct_create_pools()
1830 t->recvmsg_cache = kmem_cache_create(name, in smb_direct_create_pools()
1832 t->max_recv_size, in smb_direct_create_pools()
1834 if (!t->recvmsg_cache) in smb_direct_create_pools()
1837 t->recvmsg_mempool = in smb_direct_create_pools()
1838 mempool_create(t->recv_credit_max, mempool_alloc_slab, in smb_direct_create_pools()
1839 mempool_free_slab, t->recvmsg_cache); in smb_direct_create_pools()
1840 if (!t->recvmsg_mempool) in smb_direct_create_pools()
1843 INIT_LIST_HEAD(&t->recvmsg_queue); in smb_direct_create_pools()
1845 for (i = 0; i < t->recv_credit_max; i++) { in smb_direct_create_pools()
1846 recvmsg = mempool_alloc(t->recvmsg_mempool, GFP_KERNEL); in smb_direct_create_pools()
1849 recvmsg->transport = t; in smb_direct_create_pools()
1850 list_add(&recvmsg->list, &t->recvmsg_queue); in smb_direct_create_pools()
1852 t->count_avail_recvmsg = t->recv_credit_max; in smb_direct_create_pools()
1856 smb_direct_destroy_pools(t); in smb_direct_create_pools()
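smb_direct_create_pools() backs each message type with a kmem_cache plus a mempool sized to the corresponding credit limit, then pre-populates the receive free list. A minimal sketch of the cache-and-mempool pairing, with hypothetical object size and pool depth:

#include <linux/mempool.h>
#include <linux/slab.h>

static struct kmem_cache *msg_cache;
static mempool_t *msg_pool;

/* Hypothetical sizes: 128-byte descriptors, at least 16 preallocated. */
static int create_msg_pool(void)
{
        msg_cache = kmem_cache_create("example_msg_pool", 128, 0,
                                      SLAB_HWCACHE_ALIGN, NULL);
        if (!msg_cache)
                return -ENOMEM;

        msg_pool = mempool_create(16, mempool_alloc_slab,
                                  mempool_free_slab, msg_cache);
        if (!msg_pool) {
                kmem_cache_destroy(msg_cache);
                msg_cache = NULL;
                return -ENOMEM;
        }
        return 0;
}

static void destroy_msg_pool(void)
{
        mempool_destroy(msg_pool);
        msg_pool = NULL;
        kmem_cache_destroy(msg_cache);
        msg_cache = NULL;
}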
1860 static int smb_direct_create_qpair(struct smb_direct_transport *t, in smb_direct_create_qpair() argument
1867 t->pd = ib_alloc_pd(t->cm_id->device, 0); in smb_direct_create_qpair()
1868 if (IS_ERR(t->pd)) { in smb_direct_create_qpair()
1870 ret = PTR_ERR(t->pd); in smb_direct_create_qpair()
1871 t->pd = NULL; in smb_direct_create_qpair()
1875 t->send_cq = ib_alloc_cq(t->cm_id->device, t, in smb_direct_create_qpair()
1878 if (IS_ERR(t->send_cq)) { in smb_direct_create_qpair()
1880 ret = PTR_ERR(t->send_cq); in smb_direct_create_qpair()
1881 t->send_cq = NULL; in smb_direct_create_qpair()
1885 t->recv_cq = ib_alloc_cq(t->cm_id->device, t, in smb_direct_create_qpair()
1886 t->recv_credit_max, 0, IB_POLL_WORKQUEUE); in smb_direct_create_qpair()
1887 if (IS_ERR(t->recv_cq)) { in smb_direct_create_qpair()
1889 ret = PTR_ERR(t->recv_cq); in smb_direct_create_qpair()
1890 t->recv_cq = NULL; in smb_direct_create_qpair()
1896 qp_attr.qp_context = t; in smb_direct_create_qpair()
1900 qp_attr.send_cq = t->send_cq; in smb_direct_create_qpair()
1901 qp_attr.recv_cq = t->recv_cq; in smb_direct_create_qpair()
1904 ret = rdma_create_qp(t->cm_id, t->pd, &qp_attr); in smb_direct_create_qpair()
1910 t->qp = t->cm_id->qp; in smb_direct_create_qpair()
1911 t->cm_id->event_handler = smb_direct_cm_handler; in smb_direct_create_qpair()
1913 pages_per_rw = DIV_ROUND_UP(t->max_rdma_rw_size, PAGE_SIZE) + 1; in smb_direct_create_qpair()
1914 if (pages_per_rw > t->cm_id->device->attrs.max_sgl_rd) { in smb_direct_create_qpair()
1915 ret = ib_mr_pool_init(t->qp, &t->qp->rdma_mrs, in smb_direct_create_qpair()
1916 t->max_rw_credits, IB_MR_TYPE_MEM_REG, in smb_direct_create_qpair()
1917 t->pages_per_rw_credit, 0); in smb_direct_create_qpair()
1920 t->max_rw_credits, t->pages_per_rw_credit); in smb_direct_create_qpair()
1927 if (t->qp) { in smb_direct_create_qpair()
1928 ib_destroy_qp(t->qp); in smb_direct_create_qpair()
1929 t->qp = NULL; in smb_direct_create_qpair()
1931 if (t->recv_cq) { in smb_direct_create_qpair()
1932 ib_destroy_cq(t->recv_cq); in smb_direct_create_qpair()
1933 t->recv_cq = NULL; in smb_direct_create_qpair()
1935 if (t->send_cq) { in smb_direct_create_qpair()
1936 ib_destroy_cq(t->send_cq); in smb_direct_create_qpair()
1937 t->send_cq = NULL; in smb_direct_create_qpair()
1939 if (t->pd) { in smb_direct_create_qpair()
1940 ib_dealloc_pd(t->pd); in smb_direct_create_qpair()
1941 t->pd = NULL; in smb_direct_create_qpair()
1946 static int smb_direct_prepare(struct ksmbd_transport *t) in smb_direct_prepare() argument
1948 struct smb_direct_transport *st = smb_trans_direct_transfort(t); in smb_direct_prepare()
2032 struct smb_direct_transport *t; in smb_direct_handle_connect_request() local
2042 t = alloc_transport(new_cm_id); in smb_direct_handle_connect_request()
2043 if (!t) in smb_direct_handle_connect_request()
2046 ret = smb_direct_connect(t); in smb_direct_handle_connect_request()
2050 KSMBD_TRANS(t)->handler = kthread_run(ksmbd_conn_handler_loop, in smb_direct_handle_connect_request()
2051 KSMBD_TRANS(t)->conn, "ksmbd:r%u", in smb_direct_handle_connect_request()
2053 if (IS_ERR(KSMBD_TRANS(t)->handler)) { in smb_direct_handle_connect_request()
2054 ret = PTR_ERR(KSMBD_TRANS(t)->handler); in smb_direct_handle_connect_request()
2061 free_transport(t); in smb_direct_handle_connect_request()