Lines Matching refs:csk

98 	struct cxgbi_sock *csk;  in cxgbi_device_portmap_cleanup()  local
103 csk = pmap->port_csk[i]; in cxgbi_device_portmap_cleanup()
107 csk, cdev); in cxgbi_device_portmap_cleanup()
108 spin_lock_bh(&csk->lock); in cxgbi_device_portmap_cleanup()
109 cxgbi_sock_set_flag(csk, CTPF_OFFLOAD_DOWN); in cxgbi_device_portmap_cleanup()
110 cxgbi_sock_closed(csk); in cxgbi_device_portmap_cleanup()
111 spin_unlock_bh(&csk->lock); in cxgbi_device_portmap_cleanup()
112 cxgbi_sock_put(csk); in cxgbi_device_portmap_cleanup()
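
The references above appear to come from cxgbi_device_portmap_cleanup() in the Linux cxgbi iSCSI driver (libcxgbi.c). They trace the port-map teardown: every connection still registered in the map is flagged offload-down, closed under its own lock, and then released. A minimal sketch of that loop, reconstructed from the matched lines (the loop bound and the NULL check are assumed from context):

        for (i = 0; i < pmap->max_connect; i++) {
                struct cxgbi_sock *csk = pmap->port_csk[i];

                if (csk) {
                        pmap->port_csk[i] = NULL;
                        spin_lock_bh(&csk->lock);
                        cxgbi_sock_set_flag(csk, CTPF_OFFLOAD_DOWN);
                        cxgbi_sock_closed(csk);         /* drive csk toward CTP_CLOSED */
                        spin_unlock_bh(&csk->lock);
                        cxgbi_sock_put(csk);            /* drop the port map's reference */
                }
        }
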
424 struct cxgbi_sock *csk = pmap->port_csk[i]; in find_sock_on_port() local
426 if (csk) { in find_sock_on_port()
427 if (csk->port_id == port_id) { in find_sock_on_port()
429 return csk; in find_sock_on_port()
439 static int sock_get_port(struct cxgbi_sock *csk) in sock_get_port() argument
441 struct cxgbi_device *cdev = csk->cdev; in sock_get_port()
449 cdev, csk->port_id, cdev->ports[csk->port_id]->name); in sock_get_port()
453 if (csk->csk_family == AF_INET) in sock_get_port()
454 port = &csk->saddr.sin_port; in sock_get_port()
456 port = &csk->saddr6.sin6_port; in sock_get_port()
468 cdev, csk->port_id, cdev->ports[csk->port_id]->name); in sock_get_port()
480 pmap->port_csk[idx] = csk; in sock_get_port()
482 cxgbi_sock_get(csk); in sock_get_port()
485 cdev, csk->port_id, in sock_get_port()
486 cdev->ports[csk->port_id]->name, in sock_get_port()
495 cdev, csk->port_id, cdev->ports[csk->port_id]->name, in sock_get_port()
500 static void sock_put_port(struct cxgbi_sock *csk) in sock_put_port() argument
502 struct cxgbi_device *cdev = csk->cdev; in sock_put_port()
506 if (csk->csk_family == AF_INET) in sock_put_port()
507 port = &csk->saddr.sin_port; in sock_put_port()
509 port = &csk->saddr6.sin6_port; in sock_put_port()
517 cdev, csk->port_id, in sock_put_port()
518 cdev->ports[csk->port_id]->name, in sock_put_port()
530 cdev, csk->port_id, cdev->ports[csk->port_id]->name, in sock_put_port()
533 cxgbi_sock_put(csk); in sock_put_port()
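
sock_get_port() and sock_put_port() manage the driver's private source-port map. Both start by selecting the port field that matches the socket's address family; get then claims a free slot, stores the csk there and takes a reference, while put clears the slot and drops it. A sketch of the shared family dispatch and the publish step, pieced together from the lines above:

        __be16 *port;

        if (csk->csk_family == AF_INET)
                port = &csk->saddr.sin_port;    /* IPv4 source port */
        else
                port = &csk->saddr6.sin6_port;  /* IPv6 source port */

        /* sock_get_port() then publishes the csk in the map: */
        pmap->port_csk[idx] = csk;
        cxgbi_sock_get(csk);                    /* the map now holds a reference */
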
540 void cxgbi_sock_free_cpl_skbs(struct cxgbi_sock *csk) in cxgbi_sock_free_cpl_skbs() argument
542 if (csk->cpl_close) { in cxgbi_sock_free_cpl_skbs()
543 kfree_skb(csk->cpl_close); in cxgbi_sock_free_cpl_skbs()
544 csk->cpl_close = NULL; in cxgbi_sock_free_cpl_skbs()
546 if (csk->cpl_abort_req) { in cxgbi_sock_free_cpl_skbs()
547 kfree_skb(csk->cpl_abort_req); in cxgbi_sock_free_cpl_skbs()
548 csk->cpl_abort_req = NULL; in cxgbi_sock_free_cpl_skbs()
550 if (csk->cpl_abort_rpl) { in cxgbi_sock_free_cpl_skbs()
551 kfree_skb(csk->cpl_abort_rpl); in cxgbi_sock_free_cpl_skbs()
552 csk->cpl_abort_rpl = NULL; in cxgbi_sock_free_cpl_skbs()
559 struct cxgbi_sock *csk = kzalloc(sizeof(*csk), GFP_NOIO); in cxgbi_sock_create() local
561 if (!csk) { in cxgbi_sock_create()
562 pr_info("alloc csk %zu failed.\n", sizeof(*csk)); in cxgbi_sock_create()
566 if (cdev->csk_alloc_cpls(csk) < 0) { in cxgbi_sock_create()
567 pr_info("csk 0x%p, alloc cpls failed.\n", csk); in cxgbi_sock_create()
568 kfree(csk); in cxgbi_sock_create()
572 spin_lock_init(&csk->lock); in cxgbi_sock_create()
573 kref_init(&csk->refcnt); in cxgbi_sock_create()
574 skb_queue_head_init(&csk->receive_queue); in cxgbi_sock_create()
575 skb_queue_head_init(&csk->write_queue); in cxgbi_sock_create()
576 timer_setup(&csk->retry_timer, NULL, 0); in cxgbi_sock_create()
577 init_completion(&csk->cmpl); in cxgbi_sock_create()
578 rwlock_init(&csk->callback_lock); in cxgbi_sock_create()
579 csk->cdev = cdev; in cxgbi_sock_create()
580 csk->flags = 0; in cxgbi_sock_create()
581 cxgbi_sock_set_state(csk, CTP_CLOSED); in cxgbi_sock_create()
583 log_debug(1 << CXGBI_DBG_SOCK, "cdev 0x%p, new csk 0x%p.\n", cdev, csk); in cxgbi_sock_create()
585 return csk; in cxgbi_sock_create()
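
cxgbi_sock_create() initializes every synchronization object up front: the state lock, a kref starting at 1, the receive and write queues, a retry timer, a completion, and the callback rwlock, then parks the socket in CTP_CLOSED. The get/put helpers seen throughout this listing are thin kref wrappers; a sketch of what they are assumed to look like (the actual inlines live in the driver header, and the release callback name is a guess):

        static inline void cxgbi_sock_get(struct cxgbi_sock *csk)
        {
                kref_get(&csk->refcnt);
        }

        static inline void cxgbi_sock_put(struct cxgbi_sock *csk)
        {
                /* frees the csk when the last reference goes away;
                 * release callback name assumed */
                kref_put(&csk->refcnt, cxgbi_sock_free);
        }
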
613 struct cxgbi_sock *csk = NULL; in cxgbi_check_route() local
674 csk = cxgbi_sock_create(cdev); in cxgbi_check_route()
675 if (!csk) { in cxgbi_check_route()
679 csk->cdev = cdev; in cxgbi_check_route()
680 csk->port_id = port; in cxgbi_check_route()
681 csk->mtu = mtu; in cxgbi_check_route()
682 csk->dst = dst; in cxgbi_check_route()
684 csk->csk_family = AF_INET; in cxgbi_check_route()
685 csk->daddr.sin_addr.s_addr = daddr->sin_addr.s_addr; in cxgbi_check_route()
686 csk->daddr.sin_port = daddr->sin_port; in cxgbi_check_route()
687 csk->daddr.sin_family = daddr->sin_family; in cxgbi_check_route()
688 csk->saddr.sin_family = daddr->sin_family; in cxgbi_check_route()
689 csk->saddr.sin_addr.s_addr = fl4.saddr; in cxgbi_check_route()
692 return csk; in cxgbi_check_route()
729 struct cxgbi_sock *csk = NULL; in cxgbi_check_route6() local
785 csk = cxgbi_sock_create(cdev); in cxgbi_check_route6()
786 if (!csk) { in cxgbi_check_route6()
790 csk->cdev = cdev; in cxgbi_check_route6()
791 csk->port_id = port; in cxgbi_check_route6()
792 csk->mtu = mtu; in cxgbi_check_route6()
793 csk->dst = dst; in cxgbi_check_route6()
808 csk->csk_family = AF_INET6; in cxgbi_check_route6()
809 csk->daddr6.sin6_addr = daddr6->sin6_addr; in cxgbi_check_route6()
810 csk->daddr6.sin6_port = daddr6->sin6_port; in cxgbi_check_route6()
811 csk->daddr6.sin6_family = daddr6->sin6_family; in cxgbi_check_route6()
812 csk->saddr6.sin6_family = daddr6->sin6_family; in cxgbi_check_route6()
813 csk->saddr6.sin6_addr = pref_saddr; in cxgbi_check_route6()
816 return csk; in cxgbi_check_route6()
823 if (csk) in cxgbi_check_route6()
824 cxgbi_sock_closed(csk); in cxgbi_check_route6()
830 void cxgbi_sock_established(struct cxgbi_sock *csk, unsigned int snd_isn, in cxgbi_sock_established() argument
833 csk->write_seq = csk->snd_nxt = csk->snd_una = snd_isn; in cxgbi_sock_established()
834 dst_confirm(csk->dst); in cxgbi_sock_established()
836 cxgbi_sock_set_state(csk, CTP_ESTABLISHED); in cxgbi_sock_established()
840 static void cxgbi_inform_iscsi_conn_closing(struct cxgbi_sock *csk) in cxgbi_inform_iscsi_conn_closing() argument
844 csk, csk->state, csk->flags, csk->user_data); in cxgbi_inform_iscsi_conn_closing()
846 if (csk->state != CTP_ESTABLISHED) { in cxgbi_inform_iscsi_conn_closing()
847 read_lock_bh(&csk->callback_lock); in cxgbi_inform_iscsi_conn_closing()
848 if (csk->user_data) in cxgbi_inform_iscsi_conn_closing()
849 iscsi_conn_failure(csk->user_data, in cxgbi_inform_iscsi_conn_closing()
851 read_unlock_bh(&csk->callback_lock); in cxgbi_inform_iscsi_conn_closing()
855 void cxgbi_sock_closed(struct cxgbi_sock *csk) in cxgbi_sock_closed() argument
858 csk, (csk)->state, (csk)->flags, (csk)->tid); in cxgbi_sock_closed()
859 cxgbi_sock_set_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED); in cxgbi_sock_closed()
860 if (csk->state == CTP_ACTIVE_OPEN || csk->state == CTP_CLOSED) in cxgbi_sock_closed()
862 if (csk->saddr.sin_port) in cxgbi_sock_closed()
863 sock_put_port(csk); in cxgbi_sock_closed()
864 if (csk->dst) in cxgbi_sock_closed()
865 dst_release(csk->dst); in cxgbi_sock_closed()
866 csk->cdev->csk_release_offload_resources(csk); in cxgbi_sock_closed()
867 cxgbi_sock_set_state(csk, CTP_CLOSED); in cxgbi_sock_closed()
868 cxgbi_inform_iscsi_conn_closing(csk); in cxgbi_sock_closed()
869 cxgbi_sock_put(csk); in cxgbi_sock_closed()
873 static void need_active_close(struct cxgbi_sock *csk) in need_active_close() argument
879 csk, (csk)->state, (csk)->flags, (csk)->tid); in need_active_close()
880 spin_lock_bh(&csk->lock); in need_active_close()
881 if (csk->dst) in need_active_close()
882 dst_confirm(csk->dst); in need_active_close()
883 data_lost = skb_queue_len(&csk->receive_queue); in need_active_close()
884 __skb_queue_purge(&csk->receive_queue); in need_active_close()
886 if (csk->state == CTP_ACTIVE_OPEN) in need_active_close()
887 cxgbi_sock_set_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED); in need_active_close()
888 else if (csk->state == CTP_ESTABLISHED) { in need_active_close()
890 cxgbi_sock_set_state(csk, CTP_ACTIVE_CLOSE); in need_active_close()
891 } else if (csk->state == CTP_PASSIVE_CLOSE) { in need_active_close()
893 cxgbi_sock_set_state(csk, CTP_CLOSE_WAIT_2); in need_active_close()
897 if (!cxgbi_sock_flag(csk, CTPF_LOGOUT_RSP_RCVD) || in need_active_close()
899 csk->cdev->csk_send_abort_req(csk); in need_active_close()
901 csk->cdev->csk_send_close_req(csk); in need_active_close()
904 spin_unlock_bh(&csk->lock); in need_active_close()
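
need_active_close() is where the driver decides between an abortive and a graceful close. It purges undelivered receive data first; if anything was lost, or the iSCSI logout response never arrived, it escalates to an abort. A sketch of that decision, rebuilt from the fragments above:

        data_lost = skb_queue_len(&csk->receive_queue);
        __skb_queue_purge(&csk->receive_queue);

        if (!cxgbi_sock_flag(csk, CTPF_LOGOUT_RSP_RCVD) || data_lost)
                csk->cdev->csk_send_abort_req(csk);     /* abortive (RST-style) close */
        else
                csk->cdev->csk_send_close_req(csk);     /* graceful (FIN-style) close */
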
907 void cxgbi_sock_fail_act_open(struct cxgbi_sock *csk, int errno) in cxgbi_sock_fail_act_open() argument
910 csk, csk->state, csk->flags, in cxgbi_sock_fail_act_open()
911 &csk->saddr.sin_addr.s_addr, csk->saddr.sin_port, in cxgbi_sock_fail_act_open()
912 &csk->daddr.sin_addr.s_addr, csk->daddr.sin_port, in cxgbi_sock_fail_act_open()
915 cxgbi_sock_set_state(csk, CTP_CONNECTING); in cxgbi_sock_fail_act_open()
916 csk->err = errno; in cxgbi_sock_fail_act_open()
917 cxgbi_sock_closed(csk); in cxgbi_sock_fail_act_open()
923 struct cxgbi_sock *csk = (struct cxgbi_sock *)skb->sk; in cxgbi_sock_act_open_req_arp_failure() local
924 struct module *owner = csk->cdev->owner; in cxgbi_sock_act_open_req_arp_failure()
927 csk, (csk)->state, (csk)->flags, (csk)->tid); in cxgbi_sock_act_open_req_arp_failure()
928 cxgbi_sock_get(csk); in cxgbi_sock_act_open_req_arp_failure()
929 spin_lock_bh(&csk->lock); in cxgbi_sock_act_open_req_arp_failure()
930 if (csk->state == CTP_ACTIVE_OPEN) in cxgbi_sock_act_open_req_arp_failure()
931 cxgbi_sock_fail_act_open(csk, -EHOSTUNREACH); in cxgbi_sock_act_open_req_arp_failure()
932 spin_unlock_bh(&csk->lock); in cxgbi_sock_act_open_req_arp_failure()
933 cxgbi_sock_put(csk); in cxgbi_sock_act_open_req_arp_failure()
940 void cxgbi_sock_rcv_abort_rpl(struct cxgbi_sock *csk) in cxgbi_sock_rcv_abort_rpl() argument
942 cxgbi_sock_get(csk); in cxgbi_sock_rcv_abort_rpl()
943 spin_lock_bh(&csk->lock); in cxgbi_sock_rcv_abort_rpl()
945 cxgbi_sock_set_flag(csk, CTPF_ABORT_RPL_RCVD); in cxgbi_sock_rcv_abort_rpl()
946 if (cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) { in cxgbi_sock_rcv_abort_rpl()
947 cxgbi_sock_clear_flag(csk, CTPF_ABORT_RPL_PENDING); in cxgbi_sock_rcv_abort_rpl()
948 if (cxgbi_sock_flag(csk, CTPF_ABORT_REQ_RCVD)) in cxgbi_sock_rcv_abort_rpl()
950 csk, csk->state, csk->flags, csk->tid); in cxgbi_sock_rcv_abort_rpl()
951 cxgbi_sock_closed(csk); in cxgbi_sock_rcv_abort_rpl()
954 spin_unlock_bh(&csk->lock); in cxgbi_sock_rcv_abort_rpl()
955 cxgbi_sock_put(csk); in cxgbi_sock_rcv_abort_rpl()
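
cxgbi_sock_rcv_abort_rpl(), like the other CPL event handlers below, wraps its work in the same guard pattern: take a reference so the csk cannot be freed mid-handler, do the state transition under the bottom-half lock, then drop the reference. The skeleton all of these handlers share:

        cxgbi_sock_get(csk);            /* pin the csk for the handler's lifetime */
        spin_lock_bh(&csk->lock);
        /* ... flag and state updates, possibly cxgbi_sock_closed() ... */
        spin_unlock_bh(&csk->lock);
        cxgbi_sock_put(csk);            /* may free the csk if this was the last ref */
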
959 void cxgbi_sock_rcv_peer_close(struct cxgbi_sock *csk) in cxgbi_sock_rcv_peer_close() argument
962 csk, (csk)->state, (csk)->flags, (csk)->tid); in cxgbi_sock_rcv_peer_close()
963 cxgbi_sock_get(csk); in cxgbi_sock_rcv_peer_close()
964 spin_lock_bh(&csk->lock); in cxgbi_sock_rcv_peer_close()
966 if (cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) in cxgbi_sock_rcv_peer_close()
969 switch (csk->state) { in cxgbi_sock_rcv_peer_close()
971 cxgbi_sock_set_state(csk, CTP_PASSIVE_CLOSE); in cxgbi_sock_rcv_peer_close()
974 cxgbi_sock_set_state(csk, CTP_CLOSE_WAIT_2); in cxgbi_sock_rcv_peer_close()
977 cxgbi_sock_closed(csk); in cxgbi_sock_rcv_peer_close()
983 csk, csk->state, csk->flags, csk->tid); in cxgbi_sock_rcv_peer_close()
985 cxgbi_inform_iscsi_conn_closing(csk); in cxgbi_sock_rcv_peer_close()
987 spin_unlock_bh(&csk->lock); in cxgbi_sock_rcv_peer_close()
988 cxgbi_sock_put(csk); in cxgbi_sock_rcv_peer_close()
992 void cxgbi_sock_rcv_close_conn_rpl(struct cxgbi_sock *csk, u32 snd_nxt) in cxgbi_sock_rcv_close_conn_rpl() argument
995 csk, (csk)->state, (csk)->flags, (csk)->tid); in cxgbi_sock_rcv_close_conn_rpl()
996 cxgbi_sock_get(csk); in cxgbi_sock_rcv_close_conn_rpl()
997 spin_lock_bh(&csk->lock); in cxgbi_sock_rcv_close_conn_rpl()
999 csk->snd_una = snd_nxt - 1; in cxgbi_sock_rcv_close_conn_rpl()
1000 if (cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) in cxgbi_sock_rcv_close_conn_rpl()
1003 switch (csk->state) { in cxgbi_sock_rcv_close_conn_rpl()
1005 cxgbi_sock_set_state(csk, CTP_CLOSE_WAIT_1); in cxgbi_sock_rcv_close_conn_rpl()
1009 cxgbi_sock_closed(csk); in cxgbi_sock_rcv_close_conn_rpl()
1015 csk, csk->state, csk->flags, csk->tid); in cxgbi_sock_rcv_close_conn_rpl()
1018 spin_unlock_bh(&csk->lock); in cxgbi_sock_rcv_close_conn_rpl()
1019 cxgbi_sock_put(csk); in cxgbi_sock_rcv_close_conn_rpl()
1023 void cxgbi_sock_rcv_wr_ack(struct cxgbi_sock *csk, unsigned int credits, in cxgbi_sock_rcv_wr_ack() argument
1028 csk, csk->state, csk->flags, csk->tid, credits, in cxgbi_sock_rcv_wr_ack()
1029 csk->wr_cred, csk->wr_una_cred, snd_una, seq_chk); in cxgbi_sock_rcv_wr_ack()
1031 spin_lock_bh(&csk->lock); in cxgbi_sock_rcv_wr_ack()
1033 csk->wr_cred += credits; in cxgbi_sock_rcv_wr_ack()
1034 if (csk->wr_una_cred > csk->wr_max_cred - csk->wr_cred) in cxgbi_sock_rcv_wr_ack()
1035 csk->wr_una_cred = csk->wr_max_cred - csk->wr_cred; in cxgbi_sock_rcv_wr_ack()
1038 struct sk_buff *p = cxgbi_sock_peek_wr(csk); in cxgbi_sock_rcv_wr_ack()
1042 csk, csk->state, csk->flags, csk->tid, credits, in cxgbi_sock_rcv_wr_ack()
1043 csk->wr_cred, csk->wr_una_cred); in cxgbi_sock_rcv_wr_ack()
1049 csk, csk->state, csk->flags, csk->tid, in cxgbi_sock_rcv_wr_ack()
1050 credits, csk->wr_cred, csk->wr_una_cred, in cxgbi_sock_rcv_wr_ack()
1055 cxgbi_sock_dequeue_wr(csk); in cxgbi_sock_rcv_wr_ack()
1061 cxgbi_sock_check_wr_invariants(csk); in cxgbi_sock_rcv_wr_ack()
1064 if (unlikely(before(snd_una, csk->snd_una))) { in cxgbi_sock_rcv_wr_ack()
1066 csk, csk->state, csk->flags, csk->tid, snd_una, in cxgbi_sock_rcv_wr_ack()
1067 csk->snd_una); in cxgbi_sock_rcv_wr_ack()
1071 if (csk->snd_una != snd_una) { in cxgbi_sock_rcv_wr_ack()
1072 csk->snd_una = snd_una; in cxgbi_sock_rcv_wr_ack()
1073 dst_confirm(csk->dst); in cxgbi_sock_rcv_wr_ack()
1077 if (skb_queue_len(&csk->write_queue)) { in cxgbi_sock_rcv_wr_ack()
1078 if (csk->cdev->csk_push_tx_frames(csk, 0)) in cxgbi_sock_rcv_wr_ack()
1079 cxgbi_conn_tx_open(csk); in cxgbi_sock_rcv_wr_ack()
1081 cxgbi_conn_tx_open(csk); in cxgbi_sock_rcv_wr_ack()
1083 spin_unlock_bh(&csk->lock); in cxgbi_sock_rcv_wr_ack()
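
cxgbi_sock_rcv_wr_ack() returns hardware TX credits. It adds the acked credits back to wr_cred, clamps wr_una_cred so that used plus free never exceeds the queue depth, then walks the pending work-request queue retiring fully-acked WRs. A sketch of the retire loop, reconstructed from these lines (the per-WR credit count is assumed to be stashed in skb->csum, a common trick in this driver):

        csk->wr_cred += credits;
        if (csk->wr_una_cred > csk->wr_max_cred - csk->wr_cred)
                csk->wr_una_cred = csk->wr_max_cred - csk->wr_cred;

        while (credits) {
                struct sk_buff *p = cxgbi_sock_peek_wr(csk);

                if (unlikely(!p))
                        break;                  /* acked more than was pending */
                if (unlikely(credits < p->csum)) {
                        p->csum -= credits;     /* partially acked WR */
                        break;
                }
                credits -= p->csum;
                cxgbi_sock_dequeue_wr(csk);
                kfree_skb(p);
        }
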
1087 static unsigned int cxgbi_sock_find_best_mtu(struct cxgbi_sock *csk, in cxgbi_sock_find_best_mtu() argument
1092 while (i < csk->cdev->nmtus - 1 && csk->cdev->mtus[i + 1] <= mtu) in cxgbi_sock_find_best_mtu()
1098 unsigned int cxgbi_sock_select_mss(struct cxgbi_sock *csk, unsigned int pmtu) in cxgbi_sock_select_mss() argument
1101 struct dst_entry *dst = csk->dst; in cxgbi_sock_select_mss()
1103 csk->advmss = dst_metric_advmss(dst); in cxgbi_sock_select_mss()
1105 if (csk->advmss > pmtu - 40) in cxgbi_sock_select_mss()
1106 csk->advmss = pmtu - 40; in cxgbi_sock_select_mss()
1107 if (csk->advmss < csk->cdev->mtus[0] - 40) in cxgbi_sock_select_mss()
1108 csk->advmss = csk->cdev->mtus[0] - 40; in cxgbi_sock_select_mss()
1109 idx = cxgbi_sock_find_best_mtu(csk, csk->advmss + 40); in cxgbi_sock_select_mss()
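
cxgbi_sock_select_mss() clamps the advertised MSS between the path MTU and the smallest hardware MTU table entry; the constant 40 is the 20-byte IPv4 header plus the 20-byte TCP header. The table scan it relies on is a simple linear walk over the sorted hardware MTU list (second parameter's exact type assumed):

        /* Largest hardware MTU entry that does not exceed the requested mtu;
         * the table is small, so a linear scan is fine. */
        static unsigned int cxgbi_sock_find_best_mtu(struct cxgbi_sock *csk,
                                                     unsigned short mtu)
        {
                int i = 0;

                while (i < csk->cdev->nmtus - 1 && csk->cdev->mtus[i + 1] <= mtu)
                        ++i;
                return i;
        }
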
1115 void cxgbi_sock_skb_entail(struct cxgbi_sock *csk, struct sk_buff *skb) in cxgbi_sock_skb_entail() argument
1117 cxgbi_skcb_tcp_seq(skb) = csk->write_seq; in cxgbi_sock_skb_entail()
1118 __skb_queue_tail(&csk->write_queue, skb); in cxgbi_sock_skb_entail()
1122 void cxgbi_sock_purge_wr_queue(struct cxgbi_sock *csk) in cxgbi_sock_purge_wr_queue() argument
1126 while ((skb = cxgbi_sock_dequeue_wr(csk)) != NULL) in cxgbi_sock_purge_wr_queue()
1131 void cxgbi_sock_check_wr_invariants(const struct cxgbi_sock *csk) in cxgbi_sock_check_wr_invariants() argument
1133 int pending = cxgbi_sock_count_pending_wrs(csk); in cxgbi_sock_check_wr_invariants()
1135 if (unlikely(csk->wr_cred + pending != csk->wr_max_cred)) in cxgbi_sock_check_wr_invariants()
1137 csk, csk->tid, csk->wr_cred, pending, csk->wr_max_cred); in cxgbi_sock_check_wr_invariants()
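
The invariant asserted here is the bookkeeping contract behind the credit sketch above: credits currently free (wr_cred) plus credits tied up in not-yet-acked work requests must always equal the hardware queue depth (wr_max_cred). In short:

        /* wr_cred (free) + pending (in flight) == wr_max_cred (queue depth) */
        int pending = cxgbi_sock_count_pending_wrs(csk);

        if (unlikely(csk->wr_cred + pending != csk->wr_max_cred))
                pr_err("csk 0x%p, tid %u, credit %u + %u != %u.\n",
                       csk, csk->tid, csk->wr_cred, pending, csk->wr_max_cred);
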
1264 struct cxgbi_sock *csk = cconn->cep->csk; in cxgbi_ddp_reserve() local
1265 struct cxgbi_device *cdev = csk->cdev; in cxgbi_ddp_reserve()
1324 ttinfo->cid = csk->port_id; in cxgbi_ddp_reserve()
1326 cxgbi_ppm_make_ppod_hdr(ppm, ttinfo->tag, csk->tid, sgl->offset, in cxgbi_ddp_reserve()
1334 err = cdev->csk_ddp_set_map(ppm, csk, ttinfo); in cxgbi_ddp_reserve()
1407 cconn->cep->csk, task, tdata->dlen, in task_reserve_itt()
1451 void cxgbi_conn_tx_open(struct cxgbi_sock *csk) in cxgbi_conn_tx_open() argument
1453 struct iscsi_conn *conn = csk->user_data; in cxgbi_conn_tx_open()
1457 "csk 0x%p, cid %d.\n", csk, conn->id); in cxgbi_conn_tx_open()
1508 skb_read_pdu_bhs(struct cxgbi_sock *csk, struct iscsi_conn *conn, in skb_read_pdu_bhs() argument
1556 cxgbi_sock_set_flag(csk, CTPF_LOGOUT_RSP_RCVD); in skb_read_pdu_bhs()
1600 static void csk_return_rx_credits(struct cxgbi_sock *csk, int copied) in csk_return_rx_credits() argument
1602 struct cxgbi_device *cdev = csk->cdev; in csk_return_rx_credits()
1608 csk, csk->state, csk->flags, csk->tid, csk->copied_seq, in csk_return_rx_credits()
1609 csk->rcv_wup, cdev->rx_credit_thres, in csk_return_rx_credits()
1610 csk->rcv_win); in csk_return_rx_credits()
1615 if (csk->state != CTP_ESTABLISHED) in csk_return_rx_credits()
1618 credits = csk->copied_seq - csk->rcv_wup; in csk_return_rx_credits()
1621 must_send = credits + 16384 >= csk->rcv_win; in csk_return_rx_credits()
1623 csk->rcv_wup += cdev->csk_send_rx_credits(csk, credits); in csk_return_rx_credits()
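
csk_return_rx_credits() opens the receive window back up once the iSCSI layer has consumed data. The credit count is the bytes copied since the last window update (copied_seq - rcv_wup); an update is forced when the window is within 16 KB of closing, and otherwise sent only after the configured threshold accumulates. A condensed sketch:

        if (csk->state != CTP_ESTABLISHED)
                return;

        credits = csk->copied_seq - csk->rcv_wup;       /* consumed, unannounced */
        if (unlikely(!credits))
                return;

        must_send = credits + 16384 >= csk->rcv_win;    /* window nearly closed */
        if (must_send || credits >= cdev->rx_credit_thres)
                csk->rcv_wup += cdev->csk_send_rx_credits(csk, credits);
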
1626 void cxgbi_conn_pdu_ready(struct cxgbi_sock *csk) in cxgbi_conn_pdu_ready() argument
1628 struct cxgbi_device *cdev = csk->cdev; in cxgbi_conn_pdu_ready()
1629 struct iscsi_conn *conn = csk->user_data; in cxgbi_conn_pdu_ready()
1635 "csk 0x%p, conn 0x%p.\n", csk, conn); in cxgbi_conn_pdu_ready()
1640 csk, conn, conn ? conn->id : 0xFF, in cxgbi_conn_pdu_ready()
1646 skb = skb_peek(&csk->receive_queue); in cxgbi_conn_pdu_ready()
1655 __skb_unlink(skb, &csk->receive_queue); in cxgbi_conn_pdu_ready()
1660 csk, skb, skb->len, cxgbi_skcb_flags(skb), in cxgbi_conn_pdu_ready()
1664 err = skb_read_pdu_bhs(csk, conn, skb); in cxgbi_conn_pdu_ready()
1668 csk, skb, skb->len, in cxgbi_conn_pdu_ready()
1678 csk, skb, skb->len, in cxgbi_conn_pdu_ready()
1682 err = skb_read_pdu_bhs(csk, conn, skb); in cxgbi_conn_pdu_ready()
1686 csk, skb, skb->len, in cxgbi_conn_pdu_ready()
1695 dskb = skb_peek(&csk->receive_queue); in cxgbi_conn_pdu_ready()
1699 csk, skb, skb->len, in cxgbi_conn_pdu_ready()
1705 __skb_unlink(dskb, &csk->receive_queue); in cxgbi_conn_pdu_ready()
1712 csk, skb, skb->len, in cxgbi_conn_pdu_ready()
1727 log_debug(1 << CXGBI_DBG_PDU_RX, "csk 0x%p, read %u.\n", csk, read); in cxgbi_conn_pdu_ready()
1729 csk->copied_seq += read; in cxgbi_conn_pdu_ready()
1730 csk_return_rx_credits(csk, read); in cxgbi_conn_pdu_ready()
1736 csk, conn, err, read); in cxgbi_conn_pdu_ready()
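
cxgbi_conn_pdu_ready() is the RX dispatch loop: it peeks at the head of the receive queue, parses the PDU's basic header segment, pulls in the matching data skb when the payload arrived separately (coalesced vs. split delivery), and accounts everything it consumed so the credits above can be returned. A condensed sketch of the loop shape, with error handling and the split-data branch trimmed to comments:

        while (!err) {
                skb = skb_peek(&csk->receive_queue);
                if (!skb)
                        break;                          /* nothing (more) complete */

                read += cxgbi_skcb_rx_pdulen(skb);
                __skb_unlink(skb, &csk->receive_queue);

                err = skb_read_pdu_bhs(csk, conn, skb); /* parse the BHS */
                /* ... read the data segment, from this skb or from the next
                 * skb on the queue when header and data were split ... */
                __kfree_skb(skb);
        }

        csk->copied_seq += read;
        csk_return_rx_credits(csk, read);               /* reopen the RX window */
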
1892 struct cxgbi_sock *csk = cconn->cep ? cconn->cep->csk : NULL; in cxgbi_conn_alloc_pdu() local
1907 if (!csk) { in cxgbi_conn_alloc_pdu()
1948 if (cdev->skb_iso_txhdr && !csk->disable_iso && in cxgbi_conn_alloc_pdu()
1960 csk->advmss); in cxgbi_conn_alloc_pdu()
2273 static int cxgbi_sock_tx_queue_up(struct cxgbi_sock *csk, struct sk_buff *skb) in cxgbi_sock_tx_queue_up() argument
2275 struct cxgbi_device *cdev = csk->cdev; in cxgbi_sock_tx_queue_up()
2281 if (csk->state != CTP_ESTABLISHED) { in cxgbi_sock_tx_queue_up()
2284 csk, csk->state, csk->flags, csk->tid); in cxgbi_sock_tx_queue_up()
2288 if (csk->err) { in cxgbi_sock_tx_queue_up()
2291 csk, csk->state, csk->flags, csk->tid, csk->err); in cxgbi_sock_tx_queue_up()
2296 before((csk->snd_win + csk->snd_una), csk->write_seq)) { in cxgbi_sock_tx_queue_up()
2299 csk, csk->state, csk->flags, csk->tid, csk->write_seq, in cxgbi_sock_tx_queue_up()
2300 csk->snd_una, csk->snd_win); in cxgbi_sock_tx_queue_up()
2309 csk, skb_headroom(skb), cdev->skb_tx_rsvd); in cxgbi_sock_tx_queue_up()
2318 csk, skb_shinfo(skb)->nr_frags, skb->len, in cxgbi_sock_tx_queue_up()
2325 cxgbi_sock_skb_entail(csk, skb); in cxgbi_sock_tx_queue_up()
2337 csk->write_seq += (skb->len + extra_len); in cxgbi_sock_tx_queue_up()
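
cxgbi_sock_tx_queue_up() gates queuing on connection state, pending error, and the TCP send window. The window test uses before() from <net/tcp.h>: if write_seq has run past snd_una + snd_win, every in-window byte is already queued and the caller must back off. A sketch of the checks (the exact error codes are assumptions):

        if (csk->state != CTP_ESTABLISHED)
                return -EAGAIN;                 /* not (yet/anymore) connected */

        if (csk->err)
                return -EPIPE;                  /* connection already failed */

        if (before(csk->snd_win + csk->snd_una, csk->write_seq))
                return -ENOBUFS;                /* send window exhausted */

        cxgbi_sock_skb_entail(csk, skb);        /* stamp write_seq, queue skb */
        csk->write_seq += skb->len + extra_len;
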
2342 static int cxgbi_sock_send_skb(struct cxgbi_sock *csk, struct sk_buff *skb) in cxgbi_sock_send_skb() argument
2344 struct cxgbi_device *cdev = csk->cdev; in cxgbi_sock_send_skb()
2348 spin_lock_bh(&csk->lock); in cxgbi_sock_send_skb()
2349 err = cxgbi_sock_tx_queue_up(csk, skb); in cxgbi_sock_send_skb()
2351 spin_unlock_bh(&csk->lock); in cxgbi_sock_send_skb()
2355 if (likely(skb_queue_len(&csk->write_queue))) in cxgbi_sock_send_skb()
2356 cdev->csk_push_tx_frames(csk, 0); in cxgbi_sock_send_skb()
2357 spin_unlock_bh(&csk->lock); in cxgbi_sock_send_skb()
2369 struct cxgbi_sock *csk = NULL; in cxgbi_conn_xmit_pdu() local
2389 csk = cconn->cep->csk; in cxgbi_conn_xmit_pdu()
2391 if (!csk) { in cxgbi_conn_xmit_pdu()
2402 struct cxgbi_ppm *ppm = csk->cdev->cdev2ppm(csk->cdev); in cxgbi_conn_xmit_pdu()
2405 if (csk->cdev->csk_ddp_set_map(ppm, csk, ttinfo) < 0) in cxgbi_conn_xmit_pdu()
2414 err = cxgbi_sock_send_skb(csk, skb); in cxgbi_conn_xmit_pdu()
2429 if (unlikely(cxgbi_is_iso_config(csk) && cxgbi_is_iso_disabled(csk))) { in cxgbi_conn_xmit_pdu()
2430 if (time_after(jiffies, csk->prev_iso_ts + HZ)) { in cxgbi_conn_xmit_pdu()
2431 csk->disable_iso = false; in cxgbi_conn_xmit_pdu()
2432 csk->prev_iso_ts = 0; in cxgbi_conn_xmit_pdu()
2434 "enable iso: csk 0x%p\n", csk); in cxgbi_conn_xmit_pdu()
2448 if (cxgbi_is_iso_config(csk) && !cxgbi_is_iso_disabled(csk) && in cxgbi_conn_xmit_pdu()
2449 (csk->no_tx_credits++ >= 2)) { in cxgbi_conn_xmit_pdu()
2450 csk->disable_iso = true; in cxgbi_conn_xmit_pdu()
2451 csk->prev_iso_ts = jiffies; in cxgbi_conn_xmit_pdu()
2454 csk, csk->prev_iso_ts); in cxgbi_conn_xmit_pdu()
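
cxgbi_conn_xmit_pdu() adds hysteresis around ISO (large-PDU segmentation offload): after the transmit path stalls for credits a few times in a row, ISO is disabled and timestamped; once a second has passed, it is re-enabled. A sketch of the two halves, taken from the fragments above:

        /* On repeated credit stalls, fall back to non-ISO transmission. */
        if (cxgbi_is_iso_config(csk) && !cxgbi_is_iso_disabled(csk) &&
            (csk->no_tx_credits++ >= 2)) {
                csk->disable_iso = true;
                csk->prev_iso_ts = jiffies;
        }

        /* On a later transmit attempt, re-arm ISO after one second. */
        if (cxgbi_is_iso_config(csk) && cxgbi_is_iso_disabled(csk) &&
            time_after(jiffies, csk->prev_iso_ts + HZ)) {
                csk->disable_iso = false;
                csk->prev_iso_ts = 0;
        }
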
2574 struct cxgbi_sock *csk = cconn->cep->csk; in cxgbi_set_conn_param() local
2585 err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid, in cxgbi_set_conn_param()
2592 err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid, in cxgbi_set_conn_param()
2619 struct cxgbi_sock *csk; in cxgbi_get_ep_param() local
2630 csk = cep->csk; in cxgbi_get_ep_param()
2631 if (!csk) in cxgbi_get_ep_param()
2635 &csk->daddr, param, buf); in cxgbi_get_ep_param()
2678 struct cxgbi_sock *csk; in cxgbi_bind_conn() local
2687 csk = cep->csk; in cxgbi_bind_conn()
2689 ppm = csk->cdev->cdev2ppm(csk->cdev); in cxgbi_bind_conn()
2690 err = csk->cdev->csk_ddp_setup_pgidx(csk, csk->tid, in cxgbi_bind_conn()
2704 write_lock_bh(&csk->callback_lock); in cxgbi_bind_conn()
2705 csk->user_data = conn; in cxgbi_bind_conn()
2709 write_unlock_bh(&csk->callback_lock); in cxgbi_bind_conn()
2716 cls_session, cls_conn, ep, cconn, csk); in cxgbi_bind_conn()
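
cxgbi_bind_conn() is where the offloaded socket is handed to the iSCSI layer: digest and page-index setup go through the cdev callbacks, then the iSCSI connection pointer is published in csk->user_data under callback_lock, the same rwlock the closing path takes before calling iscsi_conn_failure(). The publish step:

        write_lock_bh(&csk->callback_lock);
        csk->user_data = conn;          /* RX/close paths read this under the lock */
        write_unlock_bh(&csk->callback_lock);
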
2837 struct cxgbi_sock *csk = find_sock_on_port(chba->cdev, in cxgbi_get_host_param() local
2839 if (csk) { in cxgbi_get_host_param()
2841 (struct sockaddr *)&csk->saddr); in cxgbi_get_host_param()
2862 struct cxgbi_sock *csk; in cxgbi_ep_connect() local
2880 csk = cxgbi_check_route(dst_addr, ifindex); in cxgbi_ep_connect()
2883 csk = cxgbi_check_route6(dst_addr, ifindex); in cxgbi_ep_connect()
2892 if (IS_ERR(csk)) in cxgbi_ep_connect()
2893 return (struct iscsi_endpoint *)csk; in cxgbi_ep_connect()
2894 cxgbi_sock_get(csk); in cxgbi_ep_connect()
2897 hba = csk->cdev->hbas[csk->port_id]; in cxgbi_ep_connect()
2898 else if (hba != csk->cdev->hbas[csk->port_id]) { in cxgbi_ep_connect()
2900 cxgbi_sock_put(csk); in cxgbi_ep_connect()
2901 cxgbi_sock_closed(csk); in cxgbi_ep_connect()
2909 csk->cdev->hbas[csk->port_id], csk->port_id); in cxgbi_ep_connect()
2914 err = sock_get_port(csk); in cxgbi_ep_connect()
2918 cxgbi_sock_set_state(csk, CTP_CONNECTING); in cxgbi_ep_connect()
2919 err = csk->cdev->csk_init_act_open(csk); in cxgbi_ep_connect()
2923 if (cxgbi_sock_is_closing(csk)) { in cxgbi_ep_connect()
2925 pr_info("csk 0x%p is closing.\n", csk); in cxgbi_ep_connect()
2937 cep->csk = csk; in cxgbi_ep_connect()
2942 ep, cep, csk, hba, hba->ndev->name); in cxgbi_ep_connect()
2946 cxgbi_sock_put(csk); in cxgbi_ep_connect()
2947 cxgbi_sock_closed(csk); in cxgbi_ep_connect()
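
cxgbi_ep_connect() strings the earlier pieces together for an active open: route lookup builds and fills the csk, a source port is reserved, the state machine moves to CTP_CONNECTING, and the hardware connection open is fired; any failure funnels into the sock_put/sock_closed error path seen at the end. A condensed sketch of the happy path (IPv4 case shown; the goto label and error code are illustrative):

        csk = cxgbi_check_route(dst_addr, ifindex);     /* alloc + fill csk */
        if (IS_ERR(csk))
                return (struct iscsi_endpoint *)csk;
        cxgbi_sock_get(csk);

        err = sock_get_port(csk);                       /* reserve a source port */
        if (err)
                goto release_conn;

        cxgbi_sock_set_state(csk, CTP_CONNECTING);
        err = csk->cdev->csk_init_act_open(csk);        /* start the TCP handshake */
        if (err)
                goto release_conn;

        if (cxgbi_sock_is_closing(csk)) {               /* open already failed */
                err = -ENOSPC;                          /* error code assumed */
                goto release_conn;
        }
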
2956 struct cxgbi_sock *csk = cep->csk; in cxgbi_ep_poll() local
2958 if (!cxgbi_sock_is_established(csk)) in cxgbi_ep_poll()
2968 struct cxgbi_sock *csk = cep->csk; in cxgbi_ep_disconnect() local
2972 ep, cep, cconn, csk, csk->state, csk->flags); in cxgbi_ep_disconnect()
2975 write_lock_bh(&csk->callback_lock); in cxgbi_ep_disconnect()
2976 cep->csk->user_data = NULL; in cxgbi_ep_disconnect()
2978 write_unlock_bh(&csk->callback_lock); in cxgbi_ep_disconnect()
2982 if (likely(csk->state >= CTP_ESTABLISHED)) in cxgbi_ep_disconnect()
2983 need_active_close(csk); in cxgbi_ep_disconnect()
2985 cxgbi_sock_closed(csk); in cxgbi_ep_disconnect()
2987 cxgbi_sock_put(csk); in cxgbi_ep_disconnect()