Lines Matching refs:csk (cross-reference hits for the cxgbi_sock connection handle in the Chelsio cxgb4i iSCSI initiator; each entry shows the source line number, the matching code, and the enclosing function)
208 static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb, in send_act_open_req() argument
211 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev); in send_act_open_req()
212 int wscale = cxgbi_sock_compute_wscale(csk->mss_idx); in send_act_open_req()
215 unsigned int qid_atid = ((unsigned int)csk->atid) | in send_act_open_req()
216 (((unsigned int)csk->rss_qid) << 14); in send_act_open_req()
220 MSS_IDX_V(csk->mss_idx) | in send_act_open_req()
221 L2T_IDX_V(((struct l2t_entry *)csk->l2t)->idx) | in send_act_open_req()
222 TX_CHAN_V(csk->tx_chan) | in send_act_open_req()
223 SMAC_SEL_V(csk->smac_idx) | in send_act_open_req()
225 RCV_BUFSIZ_V(csk->rcv_win >> 10); in send_act_open_req()
229 RSS_QUEUE_V(csk->rss_qid); in send_act_open_req()
238 req->local_port = csk->saddr.sin_port; in send_act_open_req()
239 req->peer_port = csk->daddr.sin_port; in send_act_open_req()
240 req->local_ip = csk->saddr.sin_addr.s_addr; in send_act_open_req()
241 req->peer_ip = csk->daddr.sin_addr.s_addr; in send_act_open_req()
244 csk->cdev->ports[csk->port_id], in send_act_open_req()
245 csk->l2t)); in send_act_open_req()
251 csk, &req->local_ip, ntohs(req->local_port), in send_act_open_req()
253 csk->atid, csk->rss_qid); in send_act_open_req()
262 req->local_port = csk->saddr.sin_port; in send_act_open_req()
263 req->peer_port = csk->daddr.sin_port; in send_act_open_req()
264 req->local_ip = csk->saddr.sin_addr.s_addr; in send_act_open_req()
265 req->peer_ip = csk->daddr.sin_addr.s_addr; in send_act_open_req()
269 csk->cdev->ports[csk->port_id], in send_act_open_req()
270 csk->l2t))); in send_act_open_req()
279 csk, &req->local_ip, ntohs(req->local_port), in send_act_open_req()
281 csk->atid, csk->rss_qid); in send_act_open_req()
290 req->local_port = csk->saddr.sin_port; in send_act_open_req()
291 req->peer_port = csk->daddr.sin_port; in send_act_open_req()
292 req->local_ip = csk->saddr.sin_addr.s_addr; in send_act_open_req()
293 req->peer_ip = csk->daddr.sin_addr.s_addr; in send_act_open_req()
297 csk->cdev->ports[csk->port_id], in send_act_open_req()
298 csk->l2t))); in send_act_open_req()
311 csk, &req->local_ip, ntohs(req->local_port), in send_act_open_req()
313 csk->atid, csk->rss_qid); in send_act_open_req()
316 set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id); in send_act_open_req()
319 (&csk->saddr), (&csk->daddr), in send_act_open_req()
320 CHELSIO_CHIP_VERSION(lldi->adapter_type), csk, in send_act_open_req()
321 csk->state, csk->flags, csk->atid, csk->rss_qid); in send_act_open_req()
323 cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t); in send_act_open_req()
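
Two encodings are visible in the request above: qid_atid packs the ATID into the low 14 bits and the RSS queue id above it (per the << 14 shift), and RCV_BUFSIZ carries the receive window in 1 KB units (rcv_win >> 10). A minimal standalone sketch of both; the helper names are illustrative, not the driver's.

#include <stdint.h>
#include <stdio.h>

/* qid_atid as built above: atid in the low 14 bits, rss_qid from
 * bit 14 up, so one 32-bit cookie names both the pending connection
 * and the queue its replies should land on. */
static uint32_t pack_qid_atid(uint32_t atid, uint32_t rss_qid)
{
	return atid | (rss_qid << 14);
}

/* RCV_BUFSIZ is specified in 1 KB units, hence rcv_win >> 10. */
static uint32_t rcv_bufsiz_units(uint32_t rcv_win_bytes)
{
	return rcv_win_bytes >> 10;
}

int main(void)
{
	uint32_t cookie = pack_qid_atid(0x123, 5);

	printf("qid_atid=0x%x atid=0x%x rss_qid=%u rcv_bufsiz=%uKB\n",
	       (unsigned)cookie, (unsigned)(cookie & 0x3fff),
	       (unsigned)(cookie >> 14),
	       (unsigned)rcv_bufsiz_units(256 * 1024));
	return 0;
}
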
327 static void send_act_open_req6(struct cxgbi_sock *csk, struct sk_buff *skb, in send_act_open_req6() argument
330 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev); in send_act_open_req6()
331 int wscale = cxgbi_sock_compute_wscale(csk->mss_idx); in send_act_open_req6()
334 unsigned int qid_atid = ((unsigned int)csk->atid) | in send_act_open_req6()
335 (((unsigned int)csk->rss_qid) << 14); in send_act_open_req6()
339 MSS_IDX_V(csk->mss_idx) | in send_act_open_req6()
340 L2T_IDX_V(((struct l2t_entry *)csk->l2t)->idx) | in send_act_open_req6()
341 TX_CHAN_V(csk->tx_chan) | in send_act_open_req6()
342 SMAC_SEL_V(csk->smac_idx) | in send_act_open_req6()
344 RCV_BUFSIZ_V(csk->rcv_win >> 10); in send_act_open_req6()
348 RSS_QUEUE_V(csk->rss_qid); in send_act_open_req6()
357 req->local_port = csk->saddr6.sin6_port; in send_act_open_req6()
358 req->peer_port = csk->daddr6.sin6_port; in send_act_open_req6()
360 req->local_ip_hi = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr); in send_act_open_req6()
361 req->local_ip_lo = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr + in send_act_open_req6()
363 req->peer_ip_hi = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr); in send_act_open_req6()
364 req->peer_ip_lo = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr + in send_act_open_req6()
373 csk->cdev->ports[csk->port_id], in send_act_open_req6()
374 csk->l2t)); in send_act_open_req6()
382 req->local_port = csk->saddr6.sin6_port; in send_act_open_req6()
383 req->peer_port = csk->daddr6.sin6_port; in send_act_open_req6()
384 req->local_ip_hi = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr); in send_act_open_req6()
385 req->local_ip_lo = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr + in send_act_open_req6()
387 req->peer_ip_hi = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr); in send_act_open_req6()
388 req->peer_ip_lo = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr + in send_act_open_req6()
396 csk->cdev->ports[csk->port_id], in send_act_open_req6()
397 csk->l2t))); in send_act_open_req6()
405 req->local_port = csk->saddr6.sin6_port; in send_act_open_req6()
406 req->peer_port = csk->daddr6.sin6_port; in send_act_open_req6()
407 req->local_ip_hi = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr); in send_act_open_req6()
408 req->local_ip_lo = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr + in send_act_open_req6()
410 req->peer_ip_hi = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr); in send_act_open_req6()
411 req->peer_ip_lo = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr + in send_act_open_req6()
421 csk->cdev->ports[csk->port_id], in send_act_open_req6()
422 csk->l2t))); in send_act_open_req6()
428 set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id); in send_act_open_req6()
431 CHELSIO_CHIP_VERSION(lldi->adapter_type), csk, csk->state, in send_act_open_req6()
432 csk->flags, csk->atid, in send_act_open_req6()
433 &csk->saddr6.sin6_addr, ntohs(csk->saddr.sin_port), in send_act_open_req6()
434 &csk->daddr6.sin6_addr, ntohs(csk->daddr.sin_port), in send_act_open_req6()
435 csk->rss_qid); in send_act_open_req6()
437 cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t); in send_act_open_req6()
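
The IPv6 variant loads each address as two big-endian 64-bit halves (local_ip_hi/lo, peer_ip_hi/lo). The driver casts s6_addr directly to __be64 *; below is an alignment-safe sketch of the same split, with illustrative names.

#include <stdint.h>
#include <string.h>

struct v6_halves {
	uint64_t hi;	/* s6_addr bytes 0..7, already in network order */
	uint64_t lo;	/* s6_addr bytes 8..15 */
};

/* Split a 16-byte IPv6 address into the hi/lo halves the open
 * request wants; memcpy sidesteps the alignment assumption the
 * driver's direct (__be64 *) cast makes. */
static struct v6_halves split_v6(const uint8_t s6_addr[16])
{
	struct v6_halves h;

	memcpy(&h.hi, s6_addr, 8);
	memcpy(&h.lo, s6_addr + 8, 8);
	return h;
}
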
441 static void send_close_req(struct cxgbi_sock *csk) in send_close_req() argument
443 struct sk_buff *skb = csk->cpl_close; in send_close_req()
445 unsigned int tid = csk->tid; in send_close_req()
449 csk, csk->state, csk->flags, csk->tid); in send_close_req()
450 csk->cpl_close = NULL; in send_close_req()
451 set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id); in send_close_req()
456 cxgbi_sock_skb_entail(csk, skb); in send_close_req()
457 if (csk->state >= CTP_ESTABLISHED) in send_close_req()
458 push_tx_frames(csk, 1); in send_close_req()
463 struct cxgbi_sock *csk = (struct cxgbi_sock *)handle; in abort_arp_failure() local
468 csk, csk->state, csk->flags, csk->tid); in abort_arp_failure()
471 cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb); in abort_arp_failure()
474 static void send_abort_req(struct cxgbi_sock *csk) in send_abort_req() argument
477 struct sk_buff *skb = csk->cpl_abort_req; in send_abort_req()
479 if (unlikely(csk->state == CTP_ABORTING) || !skb || !csk->cdev) in send_abort_req()
482 if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) { in send_abort_req()
483 send_tx_flowc_wr(csk); in send_abort_req()
484 cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT); in send_abort_req()
487 cxgbi_sock_set_state(csk, CTP_ABORTING); in send_abort_req()
488 cxgbi_sock_set_flag(csk, CTPF_ABORT_RPL_PENDING); in send_abort_req()
489 cxgbi_sock_purge_write_queue(csk); in send_abort_req()
491 csk->cpl_abort_req = NULL; in send_abort_req()
493 set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id); in send_abort_req()
495 t4_set_arp_err_handler(skb, csk, abort_arp_failure); in send_abort_req()
496 INIT_TP_WR(req, csk->tid); in send_abort_req()
497 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, csk->tid)); in send_abort_req()
498 req->rsvd0 = htonl(csk->snd_nxt); in send_abort_req()
499 req->rsvd1 = !cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT); in send_abort_req()
503 csk, csk->state, csk->flags, csk->tid, csk->snd_nxt, in send_abort_req()
506 cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t); in send_abort_req()
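
The ordering in send_abort_req() matters: a FLOWC work request must be the first WR ever posted on a connection, the socket must be marked ABORTING with the reply pending before the request goes out, and anything still queued for transmit is purged since it can never be sent. A standalone model of that sequence, with stand-in types and stub helpers.

#include <stdbool.h>

enum conn_state { ST_ESTABLISHED, ST_ABORTING };

struct conn {
	enum conn_state state;
	bool tx_data_sent;	/* mirrors CTPF_TX_DATA_SENT */
	bool abort_rpl_pending;	/* mirrors CTPF_ABORT_RPL_PENDING */
};

/* Stubs standing in for send_tx_flowc_wr(),
 * cxgbi_sock_purge_write_queue() and the CPL_ABORT_REQ emission. */
static void send_flowc(struct conn *c)        { c->tx_data_sent = true; }
static void purge_write_queue(struct conn *c) { (void)c; }
static void emit_abort_req(struct conn *c)    { (void)c; }

static void abort_conn(struct conn *c)
{
	if (c->state == ST_ABORTING)
		return;			/* abort already in flight */
	if (!c->tx_data_sent)
		send_flowc(c);		/* FLOWC must precede any other WR */
	c->state = ST_ABORTING;
	c->abort_rpl_pending = true;	/* reply handler finishes teardown */
	purge_write_queue(c);		/* queued tx data can never go out */
	emit_abort_req(c);		/* carries snd_nxt in rsvd0 */
}
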
509 static void send_abort_rpl(struct cxgbi_sock *csk, int rst_status) in send_abort_rpl() argument
511 struct sk_buff *skb = csk->cpl_abort_rpl; in send_abort_rpl()
516 csk, csk->state, csk->flags, csk->tid, rst_status); in send_abort_rpl()
518 csk->cpl_abort_rpl = NULL; in send_abort_rpl()
519 set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id); in send_abort_rpl()
520 INIT_TP_WR(rpl, csk->tid); in send_abort_rpl()
521 OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, csk->tid)); in send_abort_rpl()
523 cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb); in send_abort_rpl()
531 static u32 send_rx_credits(struct cxgbi_sock *csk, u32 credits) in send_rx_credits() argument
538 csk, csk->state, csk->flags, csk->tid, credits); in send_rx_credits()
542 pr_info("csk 0x%p, credit %u, OOM.\n", csk, credits); in send_rx_credits()
547 set_wr_txq(skb, CPL_PRIORITY_ACK, csk->port_id); in send_rx_credits()
548 INIT_TP_WR(req, csk->tid); in send_rx_credits()
550 csk->tid)); in send_rx_credits()
553 cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb); in send_rx_credits()
614 static inline int send_tx_flowc_wr(struct cxgbi_sock *csk) in send_tx_flowc_wr() argument
621 u16 vlan = ((struct l2t_entry *)csk->l2t)->vlan; in send_tx_flowc_wr()
629 htonl(FW_WR_LEN16_V(flowclen16) | FW_WR_FLOWID_V(csk->tid)); in send_tx_flowc_wr()
631 flowc->mnemval[0].val = htonl(csk->cdev->pfvf); in send_tx_flowc_wr()
633 flowc->mnemval[1].val = htonl(csk->tx_chan); in send_tx_flowc_wr()
635 flowc->mnemval[2].val = htonl(csk->tx_chan); in send_tx_flowc_wr()
637 flowc->mnemval[3].val = htonl(csk->rss_qid); in send_tx_flowc_wr()
639 flowc->mnemval[4].val = htonl(csk->snd_nxt); in send_tx_flowc_wr()
641 flowc->mnemval[5].val = htonl(csk->rcv_nxt); in send_tx_flowc_wr()
643 flowc->mnemval[6].val = htonl(csk->snd_win); in send_tx_flowc_wr()
645 flowc->mnemval[7].val = htonl(csk->advmss); in send_tx_flowc_wr()
649 if (csk->cdev->skb_iso_txhdr) in send_tx_flowc_wr()
657 csk->tid); in send_tx_flowc_wr()
665 set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id); in send_tx_flowc_wr()
669 csk, csk->tid, 0, csk->tx_chan, csk->rss_qid, in send_tx_flowc_wr()
670 csk->snd_nxt, csk->rcv_nxt, csk->snd_win, in send_tx_flowc_wr()
671 csk->advmss); in send_tx_flowc_wr()
673 cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb); in send_tx_flowc_wr()
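
The FLOWC WR above is a fixed table of (mnemonic, value) slots describing the flow to firmware: PF/VF, channel/port, RSS queue, current snd_nxt/rcv_nxt, send buffer, and MSS, in that order per the mnemval indices. Below, a sketch of the table plus the 16-byte credit sizing that flowclen16 hands back to the WR credit pool; the mnemonic codes are placeholders, not the FW_FLOWC_MNEM_* values.

#include <stdint.h>

struct mnemval {
	uint8_t mnemonic;	/* which flow parameter this slot carries */
	uint32_t val;
};

/* WR lengths are charged in 16-byte units; this mirrors what
 * FW_WR_LEN16_V() encodes above. */
static unsigned int len16(unsigned int bytes)
{
	return (bytes + 15) / 16;
}

static unsigned int build_flowc(struct mnemval mv[8], uint32_t chan,
				uint32_t qid, uint32_t snd_nxt,
				uint32_t rcv_nxt, uint32_t sndbuf,
				uint32_t mss)
{
	mv[0] = (struct mnemval){ 0, 0 };	/* PF/VF */
	mv[1] = (struct mnemval){ 1, chan };	/* tx channel */
	mv[2] = (struct mnemval){ 2, chan };	/* port */
	mv[3] = (struct mnemval){ 3, qid };	/* rss/ingress queue */
	mv[4] = (struct mnemval){ 4, snd_nxt };
	mv[5] = (struct mnemval){ 5, rcv_nxt };
	mv[6] = (struct mnemval){ 6, sndbuf };	/* snd_win */
	mv[7] = (struct mnemval){ 7, mss };	/* advmss */
	return len16((unsigned int)(8 + 8 * sizeof(*mv)));
}
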
714 cxgb4i_make_tx_data_wr(struct cxgbi_sock *csk, struct sk_buff *skb, int dlen, in cxgb4i_make_tx_data_wr() argument
717 struct cxgbi_device *cdev = csk->cdev; in cxgb4i_make_tx_data_wr()
743 req->flowid_len16 = cpu_to_be32(FW_WR_FLOWID_V(csk->tid) | in cxgb4i_make_tx_data_wr()
758 if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) in cxgb4i_make_tx_data_wr()
759 cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT); in cxgb4i_make_tx_data_wr()
767 static int push_tx_frames(struct cxgbi_sock *csk, int req_completion) in push_tx_frames() argument
772 if (unlikely(csk->state < CTP_ESTABLISHED || in push_tx_frames()
773 csk->state == CTP_CLOSE_WAIT_1 || csk->state >= CTP_ABORTING)) { in push_tx_frames()
777 csk, csk->state, csk->flags, csk->tid); in push_tx_frames()
781 while (csk->wr_cred && ((skb = skb_peek(&csk->write_queue)) != NULL)) { in push_tx_frames()
808 if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) { in push_tx_frames()
809 flowclen16 = send_tx_flowc_wr(csk); in push_tx_frames()
810 csk->wr_cred -= flowclen16; in push_tx_frames()
811 csk->wr_una_cred += flowclen16; in push_tx_frames()
812 cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT); in push_tx_frames()
815 if (csk->wr_cred < credits_needed) { in push_tx_frames()
818 csk, skb->len, skb->data_len, in push_tx_frames()
819 credits_needed, csk->wr_cred); in push_tx_frames()
821 csk->no_tx_credits++; in push_tx_frames()
825 csk->no_tx_credits = 0; in push_tx_frames()
827 __skb_unlink(skb, &csk->write_queue); in push_tx_frames()
828 set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id); in push_tx_frames()
830 csk->wr_cred -= credits_needed; in push_tx_frames()
831 csk->wr_una_cred += credits_needed; in push_tx_frames()
832 cxgbi_sock_enqueue_wr(csk, skb); in push_tx_frames()
836 csk, skb->len, skb->data_len, credits_needed, in push_tx_frames()
837 csk->wr_cred, csk->wr_una_cred); in push_tx_frames()
840 ((csk->wr_una_cred >= (csk->wr_max_cred / 2)) || in push_tx_frames()
841 after(csk->write_seq, (csk->snd_una + csk->snd_win / 2)))) in push_tx_frames()
857 cxgb4i_make_tx_data_wr(csk, skb, dlen, len, in push_tx_frames()
859 csk->snd_nxt += len; in push_tx_frames()
862 (csk->wr_una_cred >= (csk->wr_max_cred / 2))) { in push_tx_frames()
870 t4_set_arp_err_handler(skb, csk, arp_failure_skb_discard); in push_tx_frames()
874 csk, csk->state, csk->flags, csk->tid, skb, len); in push_tx_frames()
875 cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t); in push_tx_frames()
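
push_tx_frames() is a credit pump: each skb costs some number of 16-byte credits, sending moves them from wr_cred (available) to wr_una_cred (outstanding), the loop stops when a frame no longer fits, and a completion is requested once at least half of wr_max_cred is in flight so the FW4_ACK handler can hand credits back. A runnable model of just that arithmetic.

#include <stdio.h>

struct txq {
	unsigned int wr_cred;		/* credits available */
	unsigned int wr_una_cred;	/* credits awaiting a FW4_ACK */
	unsigned int wr_max_cred;
};

static int push_one(struct txq *q, unsigned int credits_needed,
		    int *request_completion)
{
	if (q->wr_cred < credits_needed)
		return 0;	/* out of credits: wait for the ack path */
	q->wr_cred -= credits_needed;
	q->wr_una_cred += credits_needed;
	/* ask for a completion once half the pool is outstanding so
	 * wr_una_cred gets drained back into wr_cred */
	*request_completion = q->wr_una_cred >= q->wr_max_cred / 2;
	return 1;
}

int main(void)
{
	struct txq q = { .wr_cred = 16, .wr_una_cred = 0, .wr_max_cred = 16 };
	int comp;

	while (push_one(&q, 3, &comp))
		printf("sent 3 credits, %u left, %u in flight, comp=%d\n",
		       q.wr_cred, q.wr_una_cred, comp);
	return 0;
}
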
880 static inline void free_atid(struct cxgbi_sock *csk) in free_atid() argument
882 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev); in free_atid()
884 if (cxgbi_sock_flag(csk, CTPF_HAS_ATID)) { in free_atid()
885 cxgb4_free_atid(lldi->tids, csk->atid); in free_atid()
886 cxgbi_sock_clear_flag(csk, CTPF_HAS_ATID); in free_atid()
887 cxgbi_sock_put(csk); in free_atid()
893 struct cxgbi_sock *csk; in do_act_establish() local
902 csk = lookup_atid(t, atid); in do_act_establish()
903 if (unlikely(!csk)) { in do_act_establish()
908 if (csk->atid != atid) { in do_act_establish()
910 atid, csk, csk->state, csk->flags, csk->tid, csk->atid); in do_act_establish()
915 (&csk->saddr), (&csk->daddr), in do_act_establish()
916 atid, tid, csk, csk->state, csk->flags, rcv_isn); in do_act_establish()
920 cxgbi_sock_get(csk); in do_act_establish()
921 csk->tid = tid; in do_act_establish()
922 cxgb4_insert_tid(lldi->tids, csk, tid, csk->csk_family); in do_act_establish()
923 cxgbi_sock_set_flag(csk, CTPF_HAS_TID); in do_act_establish()
925 free_atid(csk); in do_act_establish()
927 spin_lock_bh(&csk->lock); in do_act_establish()
928 if (unlikely(csk->state != CTP_ACTIVE_OPEN)) in do_act_establish()
930 csk, csk->state, csk->flags, csk->tid); in do_act_establish()
932 if (csk->retry_timer.function) { in do_act_establish()
933 del_timer(&csk->retry_timer); in do_act_establish()
934 csk->retry_timer.function = NULL; in do_act_establish()
937 csk->copied_seq = csk->rcv_wup = csk->rcv_nxt = rcv_isn; in do_act_establish()
942 if (csk->rcv_win > (RCV_BUFSIZ_MASK << 10)) in do_act_establish()
943 csk->rcv_wup -= csk->rcv_win - (RCV_BUFSIZ_MASK << 10); in do_act_establish()
945 csk->advmss = lldi->mtus[TCPOPT_MSS_G(tcp_opt)] - 40; in do_act_establish()
947 csk->advmss -= 12; in do_act_establish()
948 if (csk->advmss < 128) in do_act_establish()
949 csk->advmss = 128; in do_act_establish()
953 csk, TCPOPT_MSS_G(tcp_opt), csk->advmss); in do_act_establish()
955 cxgbi_sock_established(csk, ntohl(req->snd_isn), ntohs(req->tcp_opt)); in do_act_establish()
957 if (unlikely(cxgbi_sock_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED))) in do_act_establish()
958 send_abort_req(csk); in do_act_establish()
960 if (skb_queue_len(&csk->write_queue)) in do_act_establish()
961 push_tx_frames(csk, 0); in do_act_establish()
962 cxgbi_conn_tx_open(csk); in do_act_establish()
964 spin_unlock_bh(&csk->lock); in do_act_establish()
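
Two pieces of arithmetic from do_act_establish(), restated standalone: the advertised MSS starts from the MTU index the chip negotiated, drops 40 bytes of IPv4+TCP headers, drops 12 more when TCP timestamps are on, and is floored at 128; and rcv_wup is pulled back whenever the software receive window exceeds what the RCV_BUFSIZ field could encode. The tstamp flag and mask parameter stand in for TCPOPT_TSTAMP_G(tcp_opt) and RCV_BUFSIZ_MASK.

#include <stdint.h>

/* MSS handed to the upper layer, as derived above. */
static unsigned int derive_advmss(unsigned int mtu, int tstamp)
{
	unsigned int advmss = mtu - 40;	/* IPv4 + TCP headers */

	if (tstamp)
		advmss -= 12;		/* timestamp option */
	if (advmss < 128)
		advmss = 128;
	return advmss;
}

/* If the software window exceeds what RCV_BUFSIZ could express
 * (mask << 10 bytes), pull rcv_wup back by the excess so the credit
 * arithmetic matches what the chip was actually told. */
static uint32_t clamp_rcv_wup(uint32_t rcv_wup, uint32_t rcv_win,
			      uint32_t rcv_bufsiz_mask)
{
	uint32_t hw_max = rcv_bufsiz_mask << 10;

	if (rcv_win > hw_max)
		rcv_wup -= rcv_win - hw_max;
	return rcv_wup;
}
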
991 struct cxgbi_sock *csk = from_timer(csk, t, retry_timer); in csk_act_open_retry_timer() local
992 struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev); in csk_act_open_retry_timer()
999 csk, csk->state, csk->flags, csk->tid); in csk_act_open_retry_timer()
1001 cxgbi_sock_get(csk); in csk_act_open_retry_timer()
1002 spin_lock_bh(&csk->lock); in csk_act_open_retry_timer()
1012 if (csk->csk_family == AF_INET) { in csk_act_open_retry_timer()
1023 cxgbi_sock_fail_act_open(csk, -ENOMEM); in csk_act_open_retry_timer()
1025 skb->sk = (struct sock *)csk; in csk_act_open_retry_timer()
1026 t4_set_arp_err_handler(skb, csk, in csk_act_open_retry_timer()
1028 send_act_open_func(csk, skb, csk->l2t); in csk_act_open_retry_timer()
1031 spin_unlock_bh(&csk->lock); in csk_act_open_retry_timer()
1032 cxgbi_sock_put(csk); in csk_act_open_retry_timer()
1045 struct cxgbi_sock *csk; in do_act_open_rpl() local
1054 csk = lookup_atid(t, atid); in do_act_open_rpl()
1055 if (unlikely(!csk)) { in do_act_open_rpl()
1061 "csk 0x%p,%u,0x%lx. ", (&csk->saddr), (&csk->daddr), in do_act_open_rpl()
1062 atid, tid, status, csk, csk->state, csk->flags); in do_act_open_rpl()
1072 cxgb4_remove_tid(lldi->tids, csk->port_id, GET_TID(rpl), in do_act_open_rpl()
1073 csk->csk_family); in do_act_open_rpl()
1075 cxgbi_sock_get(csk); in do_act_open_rpl()
1076 spin_lock_bh(&csk->lock); in do_act_open_rpl()
1079 csk->retry_timer.function != csk_act_open_retry_timer) { in do_act_open_rpl()
1080 csk->retry_timer.function = csk_act_open_retry_timer; in do_act_open_rpl()
1081 mod_timer(&csk->retry_timer, jiffies + HZ / 2); in do_act_open_rpl()
1083 cxgbi_sock_fail_act_open(csk, in do_act_open_rpl()
1086 spin_unlock_bh(&csk->lock); in do_act_open_rpl()
1087 cxgbi_sock_put(csk); in do_act_open_rpl()
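
do_act_open_rpl() splits replies into two classes: a retryable status arms a half-second one-shot (mod_timer at jiffies + HZ/2) that re-issues the open from csk_act_open_retry_timer(), while anything else fails the connect with a status-derived errno. A sketch of that dispatch; the status test and errno mapping are stand-ins for the driver's own helpers, which are not visible in the listing.

#include <stdbool.h>

struct open_sock {
	bool retry_armed;	/* retry_timer.function already set */
	int err;
};

static bool status_is_retryable(int status) { return status == 1; }
static int status_to_errno(int status)      { (void)status; return -111; }

static void handle_act_open_rpl(struct open_sock *s, int status)
{
	if (status_is_retryable(status) && !s->retry_armed)
		s->retry_armed = true;	/* mod_timer(..., jiffies + HZ / 2) */
	else
		s->err = status_to_errno(status);	/* open failed */
}
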
1094 struct cxgbi_sock *csk; in do_peer_close() local
1100 csk = lookup_tid(t, tid); in do_peer_close()
1101 if (unlikely(!csk)) { in do_peer_close()
1106 (&csk->saddr), (&csk->daddr), in do_peer_close()
1107 csk, csk->state, csk->flags, csk->tid); in do_peer_close()
1108 cxgbi_sock_rcv_peer_close(csk); in do_peer_close()
1115 struct cxgbi_sock *csk; in do_close_con_rpl() local
1121 csk = lookup_tid(t, tid); in do_close_con_rpl()
1122 if (unlikely(!csk)) { in do_close_con_rpl()
1127 (&csk->saddr), (&csk->daddr), in do_close_con_rpl()
1128 csk, csk->state, csk->flags, csk->tid); in do_close_con_rpl()
1129 cxgbi_sock_rcv_close_conn_rpl(csk, ntohl(rpl->snd_nxt)); in do_close_con_rpl()
1134 static int abort_status_to_errno(struct cxgbi_sock *csk, int abort_reason, in abort_status_to_errno() argument
1140 return csk->state > CTP_ESTABLISHED ? in abort_status_to_errno()
1154 struct cxgbi_sock *csk; in do_abort_req_rss() local
1161 csk = lookup_tid(t, tid); in do_abort_req_rss()
1162 if (unlikely(!csk)) { in do_abort_req_rss()
1168 (&csk->saddr), (&csk->daddr), in do_abort_req_rss()
1169 csk, csk->state, csk->flags, csk->tid, req->status); in do_abort_req_rss()
1174 cxgbi_sock_get(csk); in do_abort_req_rss()
1175 spin_lock_bh(&csk->lock); in do_abort_req_rss()
1177 cxgbi_sock_clear_flag(csk, CTPF_ABORT_REQ_RCVD); in do_abort_req_rss()
1179 if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) { in do_abort_req_rss()
1180 send_tx_flowc_wr(csk); in do_abort_req_rss()
1181 cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT); in do_abort_req_rss()
1184 cxgbi_sock_set_flag(csk, CTPF_ABORT_REQ_RCVD); in do_abort_req_rss()
1185 cxgbi_sock_set_state(csk, CTP_ABORTING); in do_abort_req_rss()
1187 send_abort_rpl(csk, rst_status); in do_abort_req_rss()
1189 if (!cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) { in do_abort_req_rss()
1190 csk->err = abort_status_to_errno(csk, req->status, &rst_status); in do_abort_req_rss()
1191 cxgbi_sock_closed(csk); in do_abort_req_rss()
1194 spin_unlock_bh(&csk->lock); in do_abort_req_rss()
1195 cxgbi_sock_put(csk); in do_abort_req_rss()
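
The peer-abort handler follows the same FLOWC-first rule as the local abort path, then acknowledges with CPL_ABORT_RPL; crucially, it only completes the close itself when no local abort is outstanding (CTPF_ABORT_RPL_PENDING clear), otherwise the local abort's reply path owns the teardown. A standalone model with illustrative types.

#include <stdbool.h>

struct abrt_sock {
	bool tx_data_sent;	/* CTPF_TX_DATA_SENT */
	bool abort_req_rcvd;	/* CTPF_ABORT_REQ_RCVD */
	bool abort_rpl_pending;	/* CTPF_ABORT_RPL_PENDING */
	int state, err;
};

static void send_flowc(struct abrt_sock *s)     { s->tx_data_sent = true; }
static void send_abort_rpl(struct abrt_sock *s) { (void)s; }
static void sock_closed(struct abrt_sock *s)    { s->state = 0; }

static void handle_peer_abort(struct abrt_sock *s, int status_errno)
{
	s->abort_req_rcvd = false;	/* clear, then re-mark below */
	if (!s->tx_data_sent)
		send_flowc(s);		/* FLOWC-first, as on the local path */
	s->abort_req_rcvd = true;
	s->state = 1;			/* CTP_ABORTING stand-in */
	send_abort_rpl(s);
	if (!s->abort_rpl_pending) {	/* no local abort in flight */
		s->err = status_errno;
		sock_closed(s);
	}
}
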
1202 struct cxgbi_sock *csk; in do_abort_rpl_rss() local
1208 csk = lookup_tid(t, tid); in do_abort_rpl_rss()
1209 if (!csk) in do_abort_rpl_rss()
1213 (&csk->saddr), (&csk->daddr), csk, in do_abort_rpl_rss()
1214 csk->state, csk->flags, csk->tid, rpl->status); in do_abort_rpl_rss()
1219 cxgbi_sock_rcv_abort_rpl(csk); in do_abort_rpl_rss()
1226 struct cxgbi_sock *csk; in do_rx_data() local
1232 csk = lookup_tid(t, tid); in do_rx_data()
1233 if (!csk) { in do_rx_data()
1237 pr_err("csk 0x%p, tid %u, rcv cpl_rx_data.\n", csk, tid); in do_rx_data()
1238 spin_lock_bh(&csk->lock); in do_rx_data()
1239 send_abort_req(csk); in do_rx_data()
1240 spin_unlock_bh(&csk->lock); in do_rx_data()
1247 struct cxgbi_sock *csk; in do_rx_iscsi_hdr() local
1254 csk = lookup_tid(t, tid); in do_rx_iscsi_hdr()
1255 if (unlikely(!csk)) { in do_rx_iscsi_hdr()
1262 csk, csk->state, csk->flags, csk->tid, skb, skb->len, in do_rx_iscsi_hdr()
1265 spin_lock_bh(&csk->lock); in do_rx_iscsi_hdr()
1267 if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) { in do_rx_iscsi_hdr()
1270 csk, csk->state, csk->flags, csk->tid); in do_rx_iscsi_hdr()
1271 if (csk->state != CTP_ABORTING) in do_rx_iscsi_hdr()
1284 if (!csk->skb_ulp_lhdr) { in do_rx_iscsi_hdr()
1290 csk, csk->state, csk->flags, csk->tid, skb); in do_rx_iscsi_hdr()
1291 csk->skb_ulp_lhdr = skb; in do_rx_iscsi_hdr()
1295 (cxgbi_skcb_tcp_seq(skb) != csk->rcv_nxt)) { in do_rx_iscsi_hdr()
1297 csk->tid, cxgbi_skcb_tcp_seq(skb), in do_rx_iscsi_hdr()
1298 csk->rcv_nxt); in do_rx_iscsi_hdr()
1313 csk->tid, plen, hlen, dlen, in do_rx_iscsi_hdr()
1320 cxgbi_skcb_rx_pdulen(skb) += csk->dcrc_len; in do_rx_iscsi_hdr()
1321 csk->rcv_nxt += cxgbi_skcb_rx_pdulen(skb); in do_rx_iscsi_hdr()
1325 csk, skb, *bhs, hlen, dlen, in do_rx_iscsi_hdr()
1330 struct sk_buff *lskb = csk->skb_ulp_lhdr; in do_rx_iscsi_hdr()
1335 csk, csk->state, csk->flags, skb, lskb); in do_rx_iscsi_hdr()
1338 __skb_queue_tail(&csk->receive_queue, skb); in do_rx_iscsi_hdr()
1339 spin_unlock_bh(&csk->lock); in do_rx_iscsi_hdr()
1343 send_abort_req(csk); in do_rx_iscsi_hdr()
1345 spin_unlock_bh(&csk->lock); in do_rx_iscsi_hdr()
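
The header path keeps per-PDU state on the socket: the first CPL of a PDU becomes skb_ulp_lhdr, its TCP sequence must match rcv_nxt exactly (the chip delivers in order; a mismatch aborts the connection), and rcv_nxt then advances by the PDU length plus the data-digest bytes the chip strips (dcrc_len). A minimal model of that check and advance.

#include <stdint.h>
#include <stdio.h>

struct rx_state {
	uint32_t rcv_nxt;	/* next expected TCP sequence */
	unsigned int dcrc_len;	/* 4 if data digest negotiated, else 0 */
};

static int accept_pdu_hdr(struct rx_state *r, uint32_t tcp_seq,
			  unsigned int pdulen)
{
	if (tcp_seq != r->rcv_nxt) {
		fprintf(stderr, "seq 0x%x != rcv_nxt 0x%x\n",
			(unsigned)tcp_seq, (unsigned)r->rcv_nxt);
		return -1;	/* caller sends an abort */
	}
	r->rcv_nxt += pdulen + r->dcrc_len;
	return 0;
}
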
1352 struct cxgbi_sock *csk; in do_rx_iscsi_data() local
1360 csk = lookup_tid(t, tid); in do_rx_iscsi_data()
1361 if (unlikely(!csk)) { in do_rx_iscsi_data()
1368 csk, csk->state, csk->flags, csk->tid, skb, in do_rx_iscsi_data()
1371 spin_lock_bh(&csk->lock); in do_rx_iscsi_data()
1373 if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) { in do_rx_iscsi_data()
1376 csk, csk->state, csk->flags, csk->tid); in do_rx_iscsi_data()
1378 if (csk->state != CTP_ABORTING) in do_rx_iscsi_data()
1391 if (!csk->skb_ulp_lhdr) in do_rx_iscsi_data()
1392 csk->skb_ulp_lhdr = skb; in do_rx_iscsi_data()
1394 lskb = csk->skb_ulp_lhdr; in do_rx_iscsi_data()
1399 csk, csk->state, csk->flags, skb, lskb); in do_rx_iscsi_data()
1401 __skb_queue_tail(&csk->receive_queue, skb); in do_rx_iscsi_data()
1402 spin_unlock_bh(&csk->lock); in do_rx_iscsi_data()
1406 send_abort_req(csk); in do_rx_iscsi_data()
1408 spin_unlock_bh(&csk->lock); in do_rx_iscsi_data()
1414 cxgb4i_process_ddpvld(struct cxgbi_sock *csk, in cxgb4i_process_ddpvld() argument
1419 csk, skb, ddpvld, cxgbi_skcb_flags(skb)); in cxgb4i_process_ddpvld()
1425 csk, skb, ddpvld, cxgbi_skcb_flags(skb)); in cxgb4i_process_ddpvld()
1432 csk, skb, ddpvld); in cxgb4i_process_ddpvld()
1440 csk, skb, ddpvld); in cxgb4i_process_ddpvld()
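
cxgb4i_process_ddpvld() decodes status bits that ride with the PDU: header-CRC error, data-CRC error, pad error, and a bit saying the payload was placed (DDP'ed) directly into the user buffer so no data skb follows. The bit positions below are placeholders, not the hardware's CPL_RX_DDP_STATUS_* layout.

#include <stdint.h>

enum { HCRC_BIT = 0, DCRC_BIT = 1, PAD_BIT = 2, DDPD_BIT = 3 };

struct pdu_status {
	int hcrc_err, dcrc_err, pad_err, data_ddped;
};

static struct pdu_status decode_ddpvld(uint32_t ddpvld)
{
	struct pdu_status st = {
		.hcrc_err   = !!(ddpvld & (1u << HCRC_BIT)),
		.dcrc_err   = !!(ddpvld & (1u << DCRC_BIT)),
		.pad_err    = !!(ddpvld & (1u << PAD_BIT)),
		.data_ddped = !!(ddpvld & (1u << DDPD_BIT)),
	};
	return st;
}
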
1448 struct cxgbi_sock *csk; in do_rx_data_ddp() local
1456 csk = lookup_tid(t, tid); in do_rx_data_ddp()
1457 if (unlikely(!csk)) { in do_rx_data_ddp()
1464 csk, csk->state, csk->flags, skb, ddpvld, csk->skb_ulp_lhdr); in do_rx_data_ddp()
1466 spin_lock_bh(&csk->lock); in do_rx_data_ddp()
1468 if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) { in do_rx_data_ddp()
1471 csk, csk->state, csk->flags, csk->tid); in do_rx_data_ddp()
1472 if (csk->state != CTP_ABORTING) in do_rx_data_ddp()
1478 if (!csk->skb_ulp_lhdr) { in do_rx_data_ddp()
1479 pr_err("tid 0x%x, rcv RX_DATA_DDP w/o pdu bhs.\n", csk->tid); in do_rx_data_ddp()
1483 lskb = csk->skb_ulp_lhdr; in do_rx_data_ddp()
1484 csk->skb_ulp_lhdr = NULL; in do_rx_data_ddp()
1490 csk->tid, ntohs(rpl->len), cxgbi_skcb_rx_pdulen(lskb)); in do_rx_data_ddp()
1492 cxgb4i_process_ddpvld(csk, lskb, ddpvld); in do_rx_data_ddp()
1496 csk, lskb, cxgbi_skcb_flags(lskb)); in do_rx_data_ddp()
1499 cxgbi_conn_pdu_ready(csk); in do_rx_data_ddp()
1500 spin_unlock_bh(&csk->lock); in do_rx_data_ddp()
1504 send_abort_req(csk); in do_rx_data_ddp()
1506 spin_unlock_bh(&csk->lock); in do_rx_data_ddp()
1514 struct cxgbi_sock *csk; in do_rx_iscsi_cmp() local
1524 csk = lookup_tid(t, tid); in do_rx_iscsi_cmp()
1525 if (unlikely(!csk)) { in do_rx_iscsi_cmp()
1533 csk, csk->state, csk->flags, skb, ddpvld, csk->skb_ulp_lhdr, in do_rx_iscsi_cmp()
1536 spin_lock_bh(&csk->lock); in do_rx_iscsi_cmp()
1538 if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) { in do_rx_iscsi_cmp()
1541 csk, csk->state, csk->flags, csk->tid); in do_rx_iscsi_cmp()
1543 if (csk->state != CTP_ABORTING) in do_rx_iscsi_cmp()
1557 csk->rcv_nxt = seq + pdu_len_ddp; in do_rx_iscsi_cmp()
1559 if (csk->skb_ulp_lhdr) { in do_rx_iscsi_cmp()
1560 data_skb = skb_peek(&csk->receive_queue); in do_rx_iscsi_cmp()
1568 __skb_unlink(data_skb, &csk->receive_queue); in do_rx_iscsi_cmp()
1572 __skb_queue_tail(&csk->receive_queue, skb); in do_rx_iscsi_cmp()
1573 __skb_queue_tail(&csk->receive_queue, data_skb); in do_rx_iscsi_cmp()
1575 __skb_queue_tail(&csk->receive_queue, skb); in do_rx_iscsi_cmp()
1578 csk->skb_ulp_lhdr = NULL; in do_rx_iscsi_cmp()
1585 cxgb4i_process_ddpvld(csk, skb, ddpvld); in do_rx_iscsi_cmp()
1588 csk, skb, cxgbi_skcb_flags(skb)); in do_rx_iscsi_cmp()
1590 cxgbi_conn_pdu_ready(csk); in do_rx_iscsi_cmp()
1591 spin_unlock_bh(&csk->lock); in do_rx_iscsi_cmp()
1596 send_abort_req(csk); in do_rx_iscsi_cmp()
1598 spin_unlock_bh(&csk->lock); in do_rx_iscsi_cmp()
1605 struct cxgbi_sock *csk; in do_fw4_ack() local
1611 csk = lookup_tid(t, tid); in do_fw4_ack()
1612 if (unlikely(!csk)) in do_fw4_ack()
1617 csk, csk->state, csk->flags, csk->tid); in do_fw4_ack()
1618 cxgbi_sock_rcv_wr_ack(csk, rpl->credits, ntohl(rpl->snd_una), in do_fw4_ack()
1630 struct cxgbi_sock *csk; in do_set_tcb_rpl() local
1632 csk = lookup_tid(t, tid); in do_set_tcb_rpl()
1633 if (!csk) { in do_set_tcb_rpl()
1640 csk, csk->state, csk->flags, csk->tid, rpl->status); in do_set_tcb_rpl()
1644 csk, tid, rpl->status); in do_set_tcb_rpl()
1645 csk->err = -EINVAL; in do_set_tcb_rpl()
1648 complete(&csk->cmpl); in do_set_tcb_rpl()
1653 static int alloc_cpls(struct cxgbi_sock *csk) in alloc_cpls() argument
1655 csk->cpl_close = alloc_wr(sizeof(struct cpl_close_con_req), in alloc_cpls()
1657 if (!csk->cpl_close) in alloc_cpls()
1660 csk->cpl_abort_req = alloc_wr(sizeof(struct cpl_abort_req), in alloc_cpls()
1662 if (!csk->cpl_abort_req) in alloc_cpls()
1665 csk->cpl_abort_rpl = alloc_wr(sizeof(struct cpl_abort_rpl), in alloc_cpls()
1667 if (!csk->cpl_abort_rpl) in alloc_cpls()
1672 cxgbi_sock_free_cpl_skbs(csk); in alloc_cpls()
1676 static inline void l2t_put(struct cxgbi_sock *csk) in l2t_put() argument
1678 if (csk->l2t) { in l2t_put()
1679 cxgb4_l2t_release(csk->l2t); in l2t_put()
1680 csk->l2t = NULL; in l2t_put()
1681 cxgbi_sock_put(csk); in l2t_put()
1685 static void release_offload_resources(struct cxgbi_sock *csk) in release_offload_resources() argument
1689 struct net_device *ndev = csk->cdev->ports[csk->port_id]; in release_offload_resources()
1694 csk, csk->state, csk->flags, csk->tid); in release_offload_resources()
1696 cxgbi_sock_free_cpl_skbs(csk); in release_offload_resources()
1697 cxgbi_sock_purge_write_queue(csk); in release_offload_resources()
1698 if (csk->wr_cred != csk->wr_max_cred) { in release_offload_resources()
1699 cxgbi_sock_purge_wr_queue(csk); in release_offload_resources()
1700 cxgbi_sock_reset_wr_list(csk); in release_offload_resources()
1703 l2t_put(csk); in release_offload_resources()
1705 if (csk->csk_family == AF_INET6) in release_offload_resources()
1707 (const u32 *)&csk->saddr6.sin6_addr, 1); in release_offload_resources()
1710 if (cxgbi_sock_flag(csk, CTPF_HAS_ATID)) in release_offload_resources()
1711 free_atid(csk); in release_offload_resources()
1712 else if (cxgbi_sock_flag(csk, CTPF_HAS_TID)) { in release_offload_resources()
1713 lldi = cxgbi_cdev_priv(csk->cdev); in release_offload_resources()
1714 cxgb4_remove_tid(lldi->tids, 0, csk->tid, in release_offload_resources()
1715 csk->csk_family); in release_offload_resources()
1716 cxgbi_sock_clear_flag(csk, CTPF_HAS_TID); in release_offload_resources()
1717 cxgbi_sock_put(csk); in release_offload_resources()
1719 csk->dst = NULL; in release_offload_resources()
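
release_offload_resources() unwinds in the order the resources were taken, dropping one cxgbi_sock reference per resource: the CPL skbs and queued data are plain memory, the L2T entry and (for IPv6) the CLIP entry go back to the LLD, and finally whichever connection id is still held is freed, the ATID if the open never completed, else the TID. A compact sketch of that reference discipline, with stand-in types.

struct offload_res {
	int has_atid;	/* open still pending (CTPF_HAS_ATID) */
	int has_tid;	/* connection was established (CTPF_HAS_TID) */
	int is_v6;	/* a CLIP entry was taken for the source address */
	int refcnt;	/* cxgbi_sock reference count */
};

static void put_ref(struct offload_res *r) { r->refcnt--; }

static void release_resources(struct offload_res *r)
{
	/* CPL skbs, write queue, WR list: plain memory, no refs held */
	if (r->is_v6) {
		/* cxgb4_clip_release(ndev, saddr6, 1) in the driver */
	}
	/* l2t_put() also drops one ref when an L2T entry is held */
	if (r->has_atid) {
		r->has_atid = 0;
		put_ref(r);	/* ref taken when the ATID was allocated */
	} else if (r->has_tid) {
		r->has_tid = 0;
		put_ref(r);	/* ref taken at CPL_ACT_ESTABLISH time */
	}
}
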
1766 static int init_act_open(struct cxgbi_sock *csk) in init_act_open() argument
1768 struct cxgbi_device *cdev = csk->cdev; in init_act_open()
1770 struct net_device *ndev = cdev->ports[csk->port_id]; in init_act_open()
1784 csk, csk->state, csk->flags, csk->tid); in init_act_open()
1786 if (csk->csk_family == AF_INET) in init_act_open()
1787 daddr = &csk->daddr.sin_addr.s_addr; in init_act_open()
1789 else if (csk->csk_family == AF_INET6) in init_act_open()
1790 daddr = &csk->daddr6.sin6_addr; in init_act_open()
1793 pr_err("address family 0x%x not supported\n", csk->csk_family); in init_act_open()
1797 n = dst_neigh_lookup(csk->dst, daddr); in init_act_open()
1807 csk->atid = cxgb4_alloc_atid(lldi->tids, csk); in init_act_open()
1808 if (csk->atid < 0) { in init_act_open()
1812 cxgbi_sock_set_flag(csk, CTPF_HAS_ATID); in init_act_open()
1813 cxgbi_sock_get(csk); in init_act_open()
1819 csk->dcb_priority = priority; in init_act_open()
1820 csk->l2t = cxgb4_l2t_get(lldi->l2t, n, ndev, priority); in init_act_open()
1822 csk->l2t = cxgb4_l2t_get(lldi->l2t, n, ndev, 0); in init_act_open()
1824 if (!csk->l2t) { in init_act_open()
1828 cxgbi_sock_get(csk); in init_act_open()
1831 if (csk->csk_family == AF_INET6) in init_act_open()
1832 cxgb4_clip_get(ndev, (const u32 *)&csk->saddr6.sin6_addr, 1); in init_act_open()
1846 if (csk->csk_family == AF_INET) in init_act_open()
1855 skb->sk = (struct sock *)csk; in init_act_open()
1856 t4_set_arp_err_handler(skb, csk, cxgbi_sock_act_open_req_arp_failure); in init_act_open()
1858 if (!csk->mtu) in init_act_open()
1859 csk->mtu = dst_mtu(csk->dst); in init_act_open()
1860 cxgb4_best_mtu(lldi->mtus, csk->mtu, &csk->mss_idx); in init_act_open()
1861 csk->tx_chan = cxgb4_port_chan(ndev); in init_act_open()
1862 csk->smac_idx = ((struct port_info *)netdev_priv(ndev))->smt_idx; in init_act_open()
1864 csk->txq_idx = cxgb4_port_idx(ndev) * step; in init_act_open()
1868 csk->rss_qid = lldi->rxq_ids[rxq_idx]; in init_act_open()
1870 csk->snd_win = cxgb4i_snd_win; in init_act_open()
1871 csk->rcv_win = cxgb4i_rcv_win; in init_act_open()
1873 csk->rcv_win = CXGB4I_DEFAULT_10G_RCV_WIN; in init_act_open()
1876 csk->rcv_win *= rcv_winf; in init_act_open()
1879 csk->snd_win = CXGB4I_DEFAULT_10G_SND_WIN; in init_act_open()
1882 csk->snd_win *= snd_winf; in init_act_open()
1884 csk->wr_cred = lldi->wr_cred - in init_act_open()
1886 csk->wr_max_cred = csk->wr_cred; in init_act_open()
1887 csk->wr_una_cred = 0; in init_act_open()
1888 cxgbi_sock_reset_wr_list(csk); in init_act_open()
1889 csk->err = 0; in init_act_open()
1892 (&csk->saddr), (&csk->daddr), csk, csk->state, in init_act_open()
1893 csk->flags, csk->tx_chan, csk->txq_idx, csk->rss_qid, in init_act_open()
1894 csk->mtu, csk->mss_idx, csk->smac_idx); in init_act_open()
1902 cxgbi_sock_set_state(csk, CTP_ACTIVE_OPEN); in init_act_open()
1903 if (csk->csk_family == AF_INET) in init_act_open()
1904 send_act_open_req(csk, skb, csk->l2t); in init_act_open()
1907 send_act_open_req6(csk, skb, csk->l2t); in init_act_open()
1915 if (csk->csk_family == AF_INET6) in init_act_open()
1917 (const u32 *)&csk->saddr6.sin6_addr, 1); in init_act_open()
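
The window sizing near the end of init_act_open(): an explicit module parameter (cxgb4i_snd_win / cxgb4i_rcv_win) wins when set; otherwise a 10 Gb baseline is scaled linearly with port speed, which is what the rcv_winf/snd_winf multiplies above do. In this sketch base_10g stands in for the CXGB4I_DEFAULT_10G_RCV_WIN / CXGB4I_DEFAULT_10G_SND_WIN constants.

static unsigned int pick_window(int module_param, unsigned int base_10g,
				unsigned int linkspeed_mbps)
{
	if (module_param > 0)
		return (unsigned int)module_param;
	unsigned int factor = linkspeed_mbps / 10000;	/* multiples of 10G */
	return factor ? base_10g * factor : base_10g;
}
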
2016 static int ddp_ppod_write_idata(struct cxgbi_ppm *ppm, struct cxgbi_sock *csk, in ddp_ppod_write_idata() argument
2022 struct cxgbi_device *cdev = csk->cdev; in ddp_ppod_write_idata()
2024 csk->tid); in ddp_ppod_write_idata()
2042 set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id); in ddp_ppod_write_idata()
2044 spin_lock_bh(&csk->lock); in ddp_ppod_write_idata()
2045 cxgbi_sock_skb_entail(csk, skb); in ddp_ppod_write_idata()
2046 spin_unlock_bh(&csk->lock); in ddp_ppod_write_idata()
2051 static int ddp_set_map(struct cxgbi_ppm *ppm, struct cxgbi_sock *csk, in ddp_set_map() argument
2061 ttinfo->cid = csk->port_id; in ddp_set_map()
2068 err = ddp_ppod_write_idata(ppm, csk, ttinfo, pidx, cnt, in ddp_set_map()
2077 static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid, in ddp_setup_conn_pgidx() argument
2092 INIT_TP_WR(req, csk->tid); in ddp_setup_conn_pgidx()
2093 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid)); in ddp_setup_conn_pgidx()
2094 req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid)); in ddp_setup_conn_pgidx()
2098 set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->port_id); in ddp_setup_conn_pgidx()
2101 "csk 0x%p, tid 0x%x, pg_idx %u.\n", csk, csk->tid, pg_idx); in ddp_setup_conn_pgidx()
2103 reinit_completion(&csk->cmpl); in ddp_setup_conn_pgidx()
2104 cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb); in ddp_setup_conn_pgidx()
2105 wait_for_completion(&csk->cmpl); in ddp_setup_conn_pgidx()
2107 return csk->err; in ddp_setup_conn_pgidx()
2110 static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid, in ddp_setup_conn_digest() argument
2123 csk->hcrc_len = (hcrc ? 4 : 0); in ddp_setup_conn_digest()
2124 csk->dcrc_len = (dcrc ? 4 : 0); in ddp_setup_conn_digest()
2129 req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid)); in ddp_setup_conn_digest()
2134 set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->port_id); in ddp_setup_conn_digest()
2137 "csk 0x%p, tid 0x%x, crc %d,%d.\n", csk, csk->tid, hcrc, dcrc); in ddp_setup_conn_digest()
2139 reinit_completion(&csk->cmpl); in ddp_setup_conn_digest()
2140 cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb); in ddp_setup_conn_digest()
2141 wait_for_completion(&csk->cmpl); in ddp_setup_conn_digest()
2143 return csk->err; in ddp_setup_conn_digest()
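
Both SET_TCB_FIELD helpers above share one synchronous pattern: reinit the socket's completion, post the control WR on the connection's port, block in wait_for_completion() until do_set_tcb_rpl() fires complete(), and return csk->err (0, or -EINVAL set by the reply handler). Header and data digests simply cost 4 bytes each when enabled. The shape of the pattern, with the kernel primitives left as comments since this sketch is not kernel code.

struct tcb_conn {
	int err;		/* set by the SET_TCB_RPL handler */
	unsigned int hcrc_len;	/* header-digest bytes on RX */
	unsigned int dcrc_len;	/* data-digest bytes on RX */
};

static int setup_digest(struct tcb_conn *c, int hcrc, int dcrc)
{
	c->hcrc_len = hcrc ? 4 : 0;	/* CRC32C digest is 4 bytes */
	c->dcrc_len = dcrc ? 4 : 0;
	/* reinit_completion(&csk->cmpl);
	 * cxgb4_ofld_send(port, skb);       -- CPL_SET_TCB_FIELD
	 * wait_for_completion(&csk->cmpl);  -- do_set_tcb_rpl() completes */
	return c->err;			/* 0, or -EINVAL from the reply */
}
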
2429 struct cxgbi_sock *csk = pmap->port_csk[i]; in cxgb4_dcb_change_notify() local
2431 if (csk->dcb_priority != priority) { in cxgb4_dcb_change_notify()
2432 iscsi_conn_failure(csk->user_data, in cxgb4_dcb_change_notify()
2435 "priority %u->%u.\n", csk, in cxgb4_dcb_change_notify()
2436 csk->dcb_priority, priority); in cxgb4_dcb_change_notify()