Lines matching refs: csk — struct cxgbit_sock references in the Chelsio cxgbit iSCSI target driver (drivers/target/iscsi/cxgbit/cxgbit_target.c). Each entry gives the source line number, the matching line, and the enclosing function; "argument" and "local" mark where csk enters scope.

27 __cxgbit_alloc_skb(struct cxgbit_sock *csk, u32 len, bool iso)  in __cxgbit_alloc_skb()  argument
46 submode |= (csk->submode & CXGBIT_SUBMODE_DCRC); in __cxgbit_alloc_skb()
60 submode |= (csk->submode & CXGBIT_SUBMODE_HCRC); in __cxgbit_alloc_skb()
67 static struct sk_buff *cxgbit_alloc_skb(struct cxgbit_sock *csk, u32 len) in cxgbit_alloc_skb() argument
69 return __cxgbit_alloc_skb(csk, len, false); in cxgbit_alloc_skb()
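The allocator picks the hardware CRC "submode" for each tx skb: the data-digest bit (DCRC) is carried only when the skb has a payload, while the header-digest bit (HCRC) applies to every PDU. A minimal sketch of that selection, assembled from the matched lines; the skb sizing and the ISO reservation are assumptions:

    static struct sk_buff *
    __cxgbit_alloc_skb(struct cxgbit_sock *csk, u32 len, bool iso)
    {
            u32 iso_len = iso ? sizeof(struct cpl_tx_data_iso) : 0; /* assumed */
            struct sk_buff *skb;
            u8 submode = 0;

            /* sizing is an assumption; the real code also reserves WR headroom */
            skb = alloc_skb(LL_MAX_HEADER + ISCSI_HDR_LEN + iso_len + len,
                            GFP_KERNEL);
            if (unlikely(!skb))
                    return NULL;

            if (len)        /* payload present: honour the negotiated data digest */
                    submode |= (csk->submode & CXGBIT_SUBMODE_DCRC);

            /* the header digest applies whether or not there is a payload */
            submode |= (csk->submode & CXGBIT_SUBMODE_HCRC);

            cxgbit_skcb_submode(skb) = submode;
            return skb;
    }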
159 cxgbit_tx_data_wr(struct cxgbit_sock *csk, struct sk_buff *skb, u32 dlen, in cxgbit_tx_data_wr() argument
163 const struct cxgb4_lld_info *lldi = &csk->com.cdev->lldi; in cxgbit_tx_data_wr()
186 req->flowid_len16 = cpu_to_be32(FW_WR_FLOWID_V(csk->tid) | in cxgbit_tx_data_wr()
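cxgbit_tx_data_wr() prepends a FW_OFLD_TX_DATA_WR work request to the PDU and routes it by the connection's hardware TID. A hedged sketch of the header setup around line 186; every field other than flowid_len16 is an assumption based on the generic fw_ofld_tx_data_wr layout:

    struct fw_ofld_tx_data_wr *req;
    const struct cxgb4_lld_info *lldi = &csk->com.cdev->lldi; /* chip options, elided */

    req = __skb_push(skb, sizeof(*req));
    req->op_to_immdlen = cpu_to_be32(FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
                                     FW_WR_COMPL_V(compl) |    /* completion request */
                                     FW_WR_IMMDLEN_V(len));    /* simplified */
    /* steer the WR to this flow: TID plus the WR size in 16-byte units */
    req->flowid_len16 = cpu_to_be32(FW_WR_FLOWID_V(csk->tid) |
                                    FW_WR_LEN16_V(credits));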
201 void cxgbit_push_tx_frames(struct cxgbit_sock *csk) in cxgbit_push_tx_frames() argument
205 while (csk->wr_cred && ((skb = skb_peek(&csk->txq)) != NULL)) { in cxgbit_push_tx_frames()
231 if (!test_and_set_bit(CSK_TX_DATA_SENT, &csk->com.flags)) { in cxgbit_push_tx_frames()
232 flowclen16 = cxgbit_send_tx_flowc_wr(csk); in cxgbit_push_tx_frames()
233 csk->wr_cred -= flowclen16; in cxgbit_push_tx_frames()
234 csk->wr_una_cred += flowclen16; in cxgbit_push_tx_frames()
237 if (csk->wr_cred < credits_needed) { in cxgbit_push_tx_frames()
239 csk, skb->len, skb->data_len, in cxgbit_push_tx_frames()
240 credits_needed, csk->wr_cred); in cxgbit_push_tx_frames()
243 __skb_unlink(skb, &csk->txq); in cxgbit_push_tx_frames()
244 set_wr_txq(skb, CPL_PRIORITY_DATA, csk->txq_idx); in cxgbit_push_tx_frames()
246 csk->wr_cred -= credits_needed; in cxgbit_push_tx_frames()
247 csk->wr_una_cred += credits_needed; in cxgbit_push_tx_frames()
250 csk, skb->len, skb->data_len, credits_needed, in cxgbit_push_tx_frames()
251 csk->wr_cred, csk->wr_una_cred); in cxgbit_push_tx_frames()
256 if ((csk->wr_una_cred >= (csk->wr_max_cred / 2)) || in cxgbit_push_tx_frames()
257 (!before(csk->write_seq, in cxgbit_push_tx_frames()
258 csk->snd_una + csk->snd_win))) { in cxgbit_push_tx_frames()
260 csk->wr_una_cred = 0; in cxgbit_push_tx_frames()
263 cxgbit_tx_data_wr(csk, skb, dlen, len, credits_needed, in cxgbit_push_tx_frames()
265 csk->snd_nxt += len; in cxgbit_push_tx_frames()
268 (csk->wr_una_cred >= (csk->wr_max_cred / 2))) { in cxgbit_push_tx_frames()
272 csk->wr_una_cred = 0; in cxgbit_push_tx_frames()
275 cxgbit_sock_enqueue_wr(csk, skb); in cxgbit_push_tx_frames()
276 t4_set_arp_err_handler(skb, csk, in cxgbit_push_tx_frames()
280 csk, csk->tid, skb, len); in cxgbit_push_tx_frames()
282 cxgbit_l2t_send(csk->com.cdev, skb, csk->l2t); in cxgbit_push_tx_frames()
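cxgbit_push_tx_frames() is a classic WR-credit loop: open the flow with a FLOWC work request on first transmit, stop when credits run out, and request a completion once half the credits are outstanding (or the send window fills) so credits flow back. A sketch assembled from the matched lines; the control-skb branch and the exact length/credit math are simplified assumptions:

    void cxgbit_push_tx_frames(struct cxgbit_sock *csk)
    {
            struct sk_buff *skb;

            while (csk->wr_cred && ((skb = skb_peek(&csk->txq)) != NULL)) {
                    u32 dlen = skb->len;
                    u32 len = skb->len;    /* wire length; WR overhead elided */
                    u32 credits_needed =
                            DIV_ROUND_UP(len + sizeof(struct fw_ofld_tx_data_wr), 16);
                    u32 compl = 0;
                    u32 flowclen16;

                    /* first transmit on this connection: open the flow */
                    if (!test_and_set_bit(CSK_TX_DATA_SENT, &csk->com.flags)) {
                            flowclen16 = cxgbit_send_tx_flowc_wr(csk);
                            csk->wr_cred -= flowclen16;
                            csk->wr_una_cred += flowclen16;
                    }

                    if (csk->wr_cred < credits_needed)
                            break;  /* out of credits; resume on tx completion */

                    __skb_unlink(skb, &csk->txq);
                    set_wr_txq(skb, CPL_PRIORITY_DATA, csk->txq_idx);
                    csk->wr_cred -= credits_needed;
                    csk->wr_una_cred += credits_needed;

                    /* ask for a completion when half the credits are unacked
                     * or the send window is exhausted */
                    if ((csk->wr_una_cred >= (csk->wr_max_cred / 2)) ||
                        (!before(csk->write_seq, csk->snd_una + csk->snd_win))) {
                            compl = 1;
                            csk->wr_una_cred = 0;
                    }

                    cxgbit_tx_data_wr(csk, skb, dlen, len, credits_needed, compl);
                    csk->snd_nxt += len;

                    /* hold the skb until completion; discard on ARP failure */
                    cxgbit_sock_enqueue_wr(csk, skb);
                    t4_set_arp_err_handler(skb, csk,
                                           cxgbit_arp_failure_skb_discard); /* handler name assumed */
                    cxgbit_l2t_send(csk->com.cdev, skb, csk->l2t);
            }
    }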
286 static void cxgbit_unlock_sock(struct cxgbit_sock *csk) in cxgbit_unlock_sock() argument
294 spin_lock_bh(&csk->lock); in cxgbit_unlock_sock()
295 while (skb_queue_len(&csk->backlogq)) { in cxgbit_unlock_sock()
296 skb_queue_splice_init(&csk->backlogq, &backlogq); in cxgbit_unlock_sock()
297 spin_unlock_bh(&csk->lock); in cxgbit_unlock_sock()
301 fn(csk, skb); in cxgbit_unlock_sock()
304 spin_lock_bh(&csk->lock); in cxgbit_unlock_sock()
307 csk->lock_owner = false; in cxgbit_unlock_sock()
308 spin_unlock_bh(&csk->lock); in cxgbit_unlock_sock()
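The lock_owner flag plus a backlog queue implement a lock hand-off: while a thread owns the sock, rx handlers append skbs to backlogq instead of running, and the owner drains that queue before releasing. A sketch assembled from the matched lines; the per-skb handler accessor name is an assumption:

    static void cxgbit_unlock_sock(struct cxgbit_sock *csk)
    {
            struct sk_buff_head backlogq;
            struct sk_buff *skb;
            void (*fn)(struct cxgbit_sock *, struct sk_buff *);

            skb_queue_head_init(&backlogq);

            spin_lock_bh(&csk->lock);
            while (skb_queue_len(&csk->backlogq)) {
                    skb_queue_splice_init(&csk->backlogq, &backlogq);
                    spin_unlock_bh(&csk->lock);

                    /* run the deferred rx handlers outside the lock */
                    while ((skb = __skb_dequeue(&backlogq))) {
                            fn = cxgbit_skcb_rx_backlog_fn(skb); /* accessor assumed */
                            fn(csk, skb);
                    }

                    spin_lock_bh(&csk->lock);
            }

            csk->lock_owner = false;
            spin_unlock_bh(&csk->lock);
    }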
311 static int cxgbit_queue_skb(struct cxgbit_sock *csk, struct sk_buff *skb) in cxgbit_queue_skb() argument
315 spin_lock_bh(&csk->lock); in cxgbit_queue_skb()
316 csk->lock_owner = true; in cxgbit_queue_skb()
317 spin_unlock_bh(&csk->lock); in cxgbit_queue_skb()
319 if (unlikely((csk->com.state != CSK_STATE_ESTABLISHED) || in cxgbit_queue_skb()
322 __skb_queue_purge(&csk->ppodq); in cxgbit_queue_skb()
327 csk->write_seq += skb->len + in cxgbit_queue_skb()
330 skb_queue_splice_tail_init(&csk->ppodq, &csk->txq); in cxgbit_queue_skb()
331 __skb_queue_tail(&csk->txq, skb); in cxgbit_queue_skb()
332 cxgbit_push_tx_frames(csk); in cxgbit_queue_skb()
335 cxgbit_unlock_sock(csk); in cxgbit_queue_skb()
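cxgbit_queue_skb() is the single tx entry point: claim ownership, refuse to queue once the connection has left ESTABLISHED, account the sequence space, and make sure pending page-pod WRs (for DDP) go out ahead of the PDU. A sketch from the matched lines; the error-path details are assumptions:

    static int cxgbit_queue_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
    {
            int ret = 0;

            /* claim ownership; concurrent rx now lands on the backlogq */
            spin_lock_bh(&csk->lock);
            csk->lock_owner = true;
            spin_unlock_bh(&csk->lock);

            if (unlikely((csk->com.state != CSK_STATE_ESTABLISHED) ||
                         signal_pending(current))) {
                    __kfree_skb(skb);
                    __skb_queue_purge(&csk->ppodq);
                    ret = -1;
                    goto unlock;
            }

            csk->write_seq += skb->len +
                              cxgbit_digest_len[cxgbit_skcb_submode(skb)];

            /* DDP page-pod WRs must reach the hardware before the PDU */
            skb_queue_splice_tail_init(&csk->ppodq, &csk->txq);
            __skb_queue_tail(&csk->txq, skb);
            cxgbit_push_tx_frames(csk);

    unlock:
            cxgbit_unlock_sock(csk);
            return ret;
    }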
393 cxgbit_tx_datain_iso(struct cxgbit_sock *csk, struct iscsit_cmd *cmd, in cxgbit_tx_datain_iso() argument
396 struct iscsit_conn *conn = csk->conn; in cxgbit_tx_datain_iso()
410 if (num_pdu > csk->max_iso_npdu) in cxgbit_tx_datain_iso()
411 num_pdu = csk->max_iso_npdu; in cxgbit_tx_datain_iso()
417 skb = __cxgbit_alloc_skb(csk, 0, true); in cxgbit_tx_datain_iso()
423 cxgbit_skcb_submode(skb) |= (csk->submode & in cxgbit_tx_datain_iso()
464 ret = cxgbit_queue_skb(csk, skb); in cxgbit_tx_datain_iso()
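For ISO (segmentation offload) the driver builds one work request that the adapter fans out into up to max_iso_npdu DataIN PDUs. A trimmed sketch of the setup above; the PDU-count derivation and the elided ISO control-block fields are assumptions:

    u32 mrdsl = conn->conn_ops->MaxRecvDataSegmentLength;
    u32 num_pdu = DIV_ROUND_UP(data_length, mrdsl);  /* derivation assumed */

    if (num_pdu > csk->max_iso_npdu)
            num_pdu = csk->max_iso_npdu;

    skb = __cxgbit_alloc_skb(csk, 0, true);  /* true reserves the ISO CPL */
    if (unlikely(!skb))
            return -ENOMEM;

    /* payload follows, so the data digest submode must be set as well */
    cxgbit_skcb_submode(skb) |= (csk->submode & CXGBIT_SUBMODE_DCRC);

    /* ISO cb fields and sg mapping elided */
    ret = cxgbit_queue_skb(csk, skb);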
484 cxgbit_tx_datain(struct cxgbit_sock *csk, struct iscsit_cmd *cmd, in cxgbit_tx_datain() argument
490 skb = cxgbit_alloc_skb(csk, 0); in cxgbit_tx_datain()
497 cxgbit_skcb_submode(skb) |= (csk->submode & in cxgbit_tx_datain()
509 return cxgbit_queue_skb(csk, skb); in cxgbit_tx_datain()
517 struct cxgbit_sock *csk = conn->context; in cxgbit_xmit_datain_pdu() local
523 (!padding) && (!datain->offset) && csk->max_iso_npdu) { in cxgbit_xmit_datain_pdu()
526 return cxgbit_tx_datain_iso(csk, cmd, dr); in cxgbit_xmit_datain_pdu()
529 return cxgbit_tx_datain(csk, cmd, datain); in cxgbit_xmit_datain_pdu()
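The DataIN transmit path picks between the two variants: ISO only pays off for aligned, offset-zero transfers on connections where max_iso_npdu was negotiated; otherwise each PDU is sent individually. A sketch of the decision; the extra recovery/completeness checks in the real condition are elided (assumption):

    static int
    cxgbit_xmit_datain_pdu(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
                           struct iscsi_datain_req *dr,
                           struct iscsi_datain *datain)
    {
            struct cxgbit_sock *csk = conn->context;
            u32 padding = ((-datain->length) & 3);

            if (likely(!padding && !datain->offset && csk->max_iso_npdu))
                    return cxgbit_tx_datain_iso(csk, cmd, dr);

            return cxgbit_tx_datain(csk, cmd, datain);
    }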
536 struct cxgbit_sock *csk = conn->context; in cxgbit_xmit_nondatain_pdu() local
540 skb = cxgbit_alloc_skb(csk, data_buf_len + padding); in cxgbit_xmit_nondatain_pdu()
559 return cxgbit_queue_skb(csk, skb); in cxgbit_xmit_nondatain_pdu()
574 struct cxgbit_sock *csk = conn->context; in cxgbit_validate_params() local
575 struct cxgbit_device *cdev = csk->com.cdev; in cxgbit_validate_params()
596 static int cxgbit_set_digest(struct cxgbit_sock *csk) in cxgbit_set_digest() argument
598 struct iscsit_conn *conn = csk->conn; in cxgbit_set_digest()
608 csk->submode |= CXGBIT_SUBMODE_HCRC; in cxgbit_set_digest()
612 csk->submode = 0; in cxgbit_set_digest()
618 csk->submode |= CXGBIT_SUBMODE_DCRC; in cxgbit_set_digest()
620 if (cxgbit_setup_conn_digest(csk)) { in cxgbit_set_digest()
621 csk->submode = 0; in cxgbit_set_digest()
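Lines 596-621 negotiate the offloaded digests: each digest key that resolved to CRC32C sets the matching submode bit, and the result is programmed into the hardware connection; any failure clears submode so the connection falls back to no offloaded digests. A sketch, assuming the standard iscsit parameter accessors:

    static int cxgbit_set_digest(struct cxgbit_sock *csk)
    {
            struct iscsit_conn *conn = csk->conn;
            struct iscsi_param *param;

            param = iscsi_find_param_from_key(HEADERDIGEST, conn->param_list);
            if (!param)
                    return -1;
            if (!strcmp(param->value, CRC32C))
                    csk->submode |= CXGBIT_SUBMODE_HCRC;

            param = iscsi_find_param_from_key(DATADIGEST, conn->param_list);
            if (!param) {
                    csk->submode = 0;
                    return -1;
            }
            if (!strcmp(param->value, CRC32C))
                    csk->submode |= CXGBIT_SUBMODE_DCRC;

            /* program the negotiated digest mode into the offloaded connection */
            if (cxgbit_setup_conn_digest(csk)) {
                    csk->submode = 0;
                    return -1;
            }

            return 0;
    }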
628 static int cxgbit_set_iso_npdu(struct cxgbit_sock *csk) in cxgbit_set_iso_npdu() argument
630 struct iscsit_conn *conn = csk->conn; in cxgbit_set_iso_npdu()
654 max_iso_payload = rounddown(CXGBIT_MAX_ISO_PAYLOAD, csk->emss); in cxgbit_set_iso_npdu()
658 cxgbit_digest_len[csk->submode]); in cxgbit_set_iso_npdu()
660 csk->max_iso_npdu = min(max_npdu, max_iso_npdu); in cxgbit_set_iso_npdu()
662 if (csk->max_iso_npdu <= 1) in cxgbit_set_iso_npdu()
663 csk->max_iso_npdu = 0; in cxgbit_set_iso_npdu()
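The ISO sizing at lines 654-663 is straight arithmetic: align the maximum ISO payload down to the connection's emss, divide by the size of one full PDU (BHS + data segment + negotiated digests), and clamp by the PDU count implied by MaxBurstLength; a budget of a single PDU disables ISO entirely. A sketch with assumed names for the negotiated values:

    u32 mrdsl = conn->conn_ops->MaxRecvDataSegmentLength;  /* name assumed */
    u32 mbl = conn->sess->sess_ops->MaxBurstLength;        /* name assumed */
    u32 max_npdu = mbl / mrdsl;
    u32 max_iso_payload = rounddown(CXGBIT_MAX_ISO_PAYLOAD, csk->emss);
    u32 max_iso_npdu = max_iso_payload /
                       (ISCSI_HDR_LEN + mrdsl +
                        cxgbit_digest_len[csk->submode]);

    csk->max_iso_npdu = min(max_npdu, max_iso_npdu);
    if (csk->max_iso_npdu <= 1)
            csk->max_iso_npdu = 0;  /* one PDU gains nothing from ISO */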
679 static int cxgbit_seq_pdu_inorder(struct cxgbit_sock *csk) in cxgbit_seq_pdu_inorder() argument
681 struct iscsit_conn *conn = csk->conn; in cxgbit_seq_pdu_inorder()
717 struct cxgbit_sock *csk = conn->context; in cxgbit_set_params() local
718 struct cxgbit_device *cdev = csk->com.cdev; in cxgbit_set_params()
719 struct cxgbi_ppm *ppm = *csk->com.cdev->lldi.iscsi_ppm; in cxgbit_set_params()
727 if (cxgbit_set_digest(csk)) in cxgbit_set_params()
746 ret = cxgbit_seq_pdu_inorder(csk); in cxgbit_set_params()
757 if (cxgbit_set_iso_npdu(csk)) in cxgbit_set_params()
763 if (cxgbit_setup_conn_pgidx(csk, in cxgbit_set_params()
766 set_bit(CSK_DDP_ENABLE, &csk->com.flags); in cxgbit_set_params()
777 struct cxgbit_sock *csk = conn->context; in cxgbit_put_login_tx() local
782 skb = cxgbit_alloc_skb(csk, length + padding); in cxgbit_put_login_tx()
798 set_bit(CSK_LOGIN_DONE, &csk->com.flags); in cxgbit_put_login_tx()
801 if (cxgbit_queue_skb(csk, skb)) in cxgbit_put_login_tx()
835 static struct iscsit_cmd *cxgbit_allocate_cmd(struct cxgbit_sock *csk) in cxgbit_allocate_cmd() argument
837 struct iscsit_conn *conn = csk->conn; in cxgbit_allocate_cmd()
838 struct cxgbi_ppm *ppm = cdev2ppm(csk->com.cdev); in cxgbit_allocate_cmd()
860 struct cxgbit_sock *csk = conn->context; in cxgbit_handle_immediate_data() local
861 struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb); in cxgbit_handle_immediate_data()
881 struct skb_shared_info *ssi = skb_shinfo(csk->skb); in cxgbit_handle_immediate_data()
897 cxgbit_skb_copy_to_sg(csk->skb, sg, sg_nents, 0); in cxgbit_handle_immediate_data()
967 cxgbit_handle_scsi_cmd(struct cxgbit_sock *csk, struct iscsit_cmd *cmd) in cxgbit_handle_scsi_cmd() argument
969 struct iscsit_conn *conn = csk->conn; in cxgbit_handle_scsi_cmd()
970 struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb); in cxgbit_handle_scsi_cmd()
995 static int cxgbit_handle_iscsi_dataout(struct cxgbit_sock *csk) in cxgbit_handle_iscsi_dataout() argument
998 struct iscsit_conn *conn = csk->conn; in cxgbit_handle_iscsi_dataout()
1002 struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb); in cxgbit_handle_iscsi_dataout()
1061 cxgbit_skb_copy_to_sg(csk->skb, sg_start, sg_nents, skip); in cxgbit_handle_iscsi_dataout()
1069 struct cxgbit_device *cdev = csk->com.cdev; in cxgbit_handle_iscsi_dataout()
1087 static int cxgbit_handle_nop_out(struct cxgbit_sock *csk, struct iscsit_cmd *cmd) in cxgbit_handle_nop_out() argument
1089 struct iscsit_conn *conn = csk->conn; in cxgbit_handle_nop_out()
1090 struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb); in cxgbit_handle_nop_out()
1132 skb_copy_bits(csk->skb, pdu_cb->doffset, in cxgbit_handle_nop_out()
1155 cxgbit_handle_text_cmd(struct cxgbit_sock *csk, struct iscsit_cmd *cmd) in cxgbit_handle_text_cmd() argument
1157 struct iscsit_conn *conn = csk->conn; in cxgbit_handle_text_cmd()
1158 struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb); in cxgbit_handle_text_cmd()
1193 skb_copy_bits(csk->skb, pdu_cb->doffset, in cxgbit_handle_text_cmd()
1208 static int cxgbit_target_rx_opcode(struct cxgbit_sock *csk) in cxgbit_target_rx_opcode() argument
1210 struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb); in cxgbit_target_rx_opcode()
1212 struct iscsit_conn *conn = csk->conn; in cxgbit_target_rx_opcode()
1219 cmd = cxgbit_allocate_cmd(csk); in cxgbit_target_rx_opcode()
1223 ret = cxgbit_handle_scsi_cmd(csk, cmd); in cxgbit_target_rx_opcode()
1226 ret = cxgbit_handle_iscsi_dataout(csk); in cxgbit_target_rx_opcode()
1230 cmd = cxgbit_allocate_cmd(csk); in cxgbit_target_rx_opcode()
1235 ret = cxgbit_handle_nop_out(csk, cmd); in cxgbit_target_rx_opcode()
1238 cmd = cxgbit_allocate_cmd(csk); in cxgbit_target_rx_opcode()
1251 cmd = cxgbit_allocate_cmd(csk); in cxgbit_target_rx_opcode()
1256 ret = cxgbit_handle_text_cmd(csk, cmd); in cxgbit_target_rx_opcode()
1259 cmd = cxgbit_allocate_cmd(csk); in cxgbit_target_rx_opcode()
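cxgbit_target_rx_opcode() is the receive-side dispatcher: it decodes the BHS opcode and, for PDUs that create new work, allocates an iscsit_cmd before calling the specific handler. A condensed sketch of the switch; the NOP-OUT allocation condition and the reject path are assumptions:

    struct iscsi_hdr *hdr = pdu_cb->hdr;
    struct iscsit_cmd *cmd = NULL;
    u8 opcode = hdr->opcode & ISCSI_OPCODE_MASK;
    int ret = -EINVAL;

    switch (opcode) {
    case ISCSI_OP_SCSI_CMD:
            cmd = cxgbit_allocate_cmd(csk);
            if (unlikely(!cmd))
                    goto reject;
            ret = cxgbit_handle_scsi_cmd(csk, cmd);
            break;
    case ISCSI_OP_SCSI_DATA_OUT:
            ret = cxgbit_handle_iscsi_dataout(csk);
            break;
    case ISCSI_OP_NOOP_OUT:
            if (hdr->ttt == cpu_to_be32(0xFFFFFFFF)) { /* initiator NOP, assumed */
                    cmd = cxgbit_allocate_cmd(csk);
                    if (unlikely(!cmd))
                            goto reject;
            }
            ret = cxgbit_handle_nop_out(csk, cmd);
            break;
    case ISCSI_OP_TEXT:
            cmd = cxgbit_allocate_cmd(csk);
            if (unlikely(!cmd))
                    goto reject;
            ret = cxgbit_handle_text_cmd(csk, cmd);
            break;
    /* SCSI_TMFUNC, LOGOUT, ... handled analogously (elided) */
    }

    return ret;

    reject:
            return iscsit_add_reject(conn, ISCSI_REASON_BOOKMARK_NO_RESOURCES,
                                     (unsigned char *)hdr); /* reject path assumed */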
1286 static int cxgbit_rx_opcode(struct cxgbit_sock *csk) in cxgbit_rx_opcode() argument
1288 struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb); in cxgbit_rx_opcode()
1289 struct iscsit_conn *conn = csk->conn; in cxgbit_rx_opcode()
1313 if (cxgbit_target_rx_opcode(csk) < 0) in cxgbit_rx_opcode()
1322 static int cxgbit_rx_login_pdu(struct cxgbit_sock *csk) in cxgbit_rx_login_pdu() argument
1324 struct iscsit_conn *conn = csk->conn; in cxgbit_rx_login_pdu()
1326 struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb); in cxgbit_rx_login_pdu()
1359 skb_copy_bits(csk->skb, pdu_cb->doffset, login->req_buf, pdu_cb->dlen); in cxgbit_rx_login_pdu()
1365 cxgbit_process_iscsi_pdu(struct cxgbit_sock *csk, struct sk_buff *skb, int idx) in cxgbit_process_iscsi_pdu() argument
1372 csk->skb = skb; in cxgbit_process_iscsi_pdu()
1374 if (!test_bit(CSK_LOGIN_DONE, &csk->com.flags)) { in cxgbit_process_iscsi_pdu()
1375 ret = cxgbit_rx_login_pdu(csk); in cxgbit_process_iscsi_pdu()
1376 set_bit(CSK_LOGIN_PDU_DONE, &csk->com.flags); in cxgbit_process_iscsi_pdu()
1378 ret = cxgbit_rx_opcode(csk); in cxgbit_process_iscsi_pdu()
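cxgbit_process_iscsi_pdu() shows how a single flag gates the two receive paths: until login completes, every PDU is routed to the login handler and CSK_LOGIN_PDU_DONE wakes the thread blocked in cxgbit_get_login_rx(); afterwards PDUs go to the opcode dispatcher. Condensed from the matched lines:

    csk->skb = skb;  /* rx helpers read the current PDU through csk->skb */

    if (!test_bit(CSK_LOGIN_DONE, &csk->com.flags)) {
            ret = cxgbit_rx_login_pdu(csk);
            set_bit(CSK_LOGIN_PDU_DONE, &csk->com.flags);
    } else {
            ret = cxgbit_rx_opcode(csk);
    }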
1395 skb, lro_cb->csk, lro_cb->pdu_idx, lro_cb->pdu_totallen); in cxgbit_lro_skb_dump()
1408 static void cxgbit_lro_hskb_reset(struct cxgbit_sock *csk) in cxgbit_lro_hskb_reset() argument
1410 struct sk_buff *skb = csk->lro_hskb; in cxgbit_lro_hskb_reset()
1424 cxgbit_lro_skb_merge(struct cxgbit_sock *csk, struct sk_buff *skb, u8 pdu_idx) in cxgbit_lro_skb_merge() argument
1426 struct sk_buff *hskb = csk->lro_hskb; in cxgbit_lro_skb_merge()
1494 static int cxgbit_process_lro_skb(struct cxgbit_sock *csk, struct sk_buff *skb) in cxgbit_process_lro_skb() argument
1502 cxgbit_lro_skb_merge(csk, skb, 0); in cxgbit_process_lro_skb()
1505 struct sk_buff *hskb = csk->lro_hskb; in cxgbit_process_lro_skb()
1507 ret = cxgbit_process_iscsi_pdu(csk, hskb, 0); in cxgbit_process_lro_skb()
1509 cxgbit_lro_hskb_reset(csk); in cxgbit_process_lro_skb()
1522 ret = cxgbit_process_iscsi_pdu(csk, skb, pdu_idx); in cxgbit_process_lro_skb()
1528 cxgbit_lro_skb_merge(csk, skb, lro_cb->pdu_idx); in cxgbit_process_lro_skb()
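An LRO skb can begin with the tail of a PDU whose head arrived in the previous skb; cxgbit_process_lro_skb() therefore merges such a fragment into the per-connection header skb (lro_hskb), processes it once complete, walks the fully contained PDUs, and finally parks any trailing fragment for the next round. A sketch with assumed control-block accessors and flag names:

    struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb);            /* assumed */
    struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb, 0); /* assumed */
    u8 pdu_idx = 0, last_idx = 0;
    int ret = 0;

    if (!pdu_cb->complete) {
            /* head of this skb completes the PDU parked in lro_hskb */
            cxgbit_lro_skb_merge(csk, skb, 0);
            ret = cxgbit_process_iscsi_pdu(csk, csk->lro_hskb, 0);
            cxgbit_lro_hskb_reset(csk);
            if (ret < 0)
                    goto out;
            pdu_idx = 1;
    }

    if (lro_cb->pdu_idx)
            last_idx = lro_cb->pdu_idx - 1;

    /* PDUs wholly contained in this skb */
    for (; pdu_idx <= last_idx; pdu_idx++) {
            ret = cxgbit_process_iscsi_pdu(csk, skb, pdu_idx);
            if (ret < 0)
                    goto out;
    }

    /* a trailing partial PDU waits in lro_hskb for the next skb */
    if (!lro_cb->complete && lro_cb->pdu_idx)
            cxgbit_lro_skb_merge(csk, skb, lro_cb->pdu_idx);
    out:
            return ret;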
1534 static int cxgbit_t5_rx_lro_skb(struct cxgbit_sock *csk, struct sk_buff *skb) in cxgbit_t5_rx_lro_skb() argument
1541 (pdu_cb->seq != csk->rcv_nxt)) { in cxgbit_t5_rx_lro_skb()
1543 csk, csk->tid, pdu_cb->seq, csk->rcv_nxt); in cxgbit_t5_rx_lro_skb()
1548 csk->rcv_nxt += lro_cb->pdu_totallen; in cxgbit_t5_rx_lro_skb()
1550 ret = cxgbit_process_lro_skb(csk, skb); in cxgbit_t5_rx_lro_skb()
1552 csk->rx_credits += lro_cb->pdu_totallen; in cxgbit_t5_rx_lro_skb()
1554 if (csk->rx_credits >= (csk->rcv_win / 4)) in cxgbit_t5_rx_lro_skb()
1555 cxgbit_rx_data_ack(csk); in cxgbit_t5_rx_lro_skb()
1560 static int cxgbit_rx_lro_skb(struct cxgbit_sock *csk, struct sk_buff *skb) in cxgbit_rx_lro_skb() argument
1565 ret = cxgbit_process_lro_skb(csk, skb); in cxgbit_rx_lro_skb()
1569 csk->rx_credits += lro_cb->pdu_totallen; in cxgbit_rx_lro_skb()
1570 if (csk->rx_credits >= csk->rcv_win) { in cxgbit_rx_lro_skb()
1571 csk->rx_credits = 0; in cxgbit_rx_lro_skb()
1572 cxgbit_rx_data_ack(csk); in cxgbit_rx_lro_skb()
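The two rx entry points differ only in bookkeeping: on T5 the driver tracks rcv_nxt itself, flags skbs whose sequence number does not match, and returns credits eagerly at a quarter of the receive window; on T6 (cxgbit_rx_lro_skb) the sequence check is unnecessary and credits are only refilled once a full window has been consumed. cxgbit_rx_skb() below picks between them by adapter type. Side by side, condensed from the matched lines:

    /* T5 */
    if (pdu_cb->seq != csk->rcv_nxt) {
            pr_info("csk 0x%p, tid 0x%x, seq 0x%x, rcv_nxt 0x%x\n",
                    csk, csk->tid, pdu_cb->seq, csk->rcv_nxt);
            /* drop the skb; error handling assumed */
    }
    csk->rcv_nxt += lro_cb->pdu_totallen;
    ret = cxgbit_process_lro_skb(csk, skb);
    csk->rx_credits += lro_cb->pdu_totallen;
    if (csk->rx_credits >= (csk->rcv_win / 4))
            cxgbit_rx_data_ack(csk);

    /* T6 */
    ret = cxgbit_process_lro_skb(csk, skb);
    csk->rx_credits += lro_cb->pdu_totallen;
    if (csk->rx_credits >= csk->rcv_win) {
            csk->rx_credits = 0;
            cxgbit_rx_data_ack(csk);
    }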
1578 static int cxgbit_rx_skb(struct cxgbit_sock *csk, struct sk_buff *skb) in cxgbit_rx_skb() argument
1580 struct cxgb4_lld_info *lldi = &csk->com.cdev->lldi; in cxgbit_rx_skb()
1585 ret = cxgbit_t5_rx_lro_skb(csk, skb); in cxgbit_rx_skb()
1587 ret = cxgbit_rx_lro_skb(csk, skb); in cxgbit_rx_skb()
1594 static bool cxgbit_rxq_len(struct cxgbit_sock *csk, struct sk_buff_head *rxq) in cxgbit_rxq_len() argument
1596 spin_lock_bh(&csk->rxq.lock); in cxgbit_rxq_len()
1597 if (skb_queue_len(&csk->rxq)) { in cxgbit_rxq_len()
1598 skb_queue_splice_init(&csk->rxq, rxq); in cxgbit_rxq_len()
1599 spin_unlock_bh(&csk->rxq.lock); in cxgbit_rxq_len()
1602 spin_unlock_bh(&csk->rxq.lock); in cxgbit_rxq_len()
1606 static int cxgbit_wait_rxq(struct cxgbit_sock *csk) in cxgbit_wait_rxq() argument
1613 wait_event_interruptible(csk->waitq, cxgbit_rxq_len(csk, &rxq)); in cxgbit_wait_rxq()
1619 if (cxgbit_rx_skb(csk, skb)) in cxgbit_wait_rxq()
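The receive side hands skbs to the application threads through csk->rxq: cxgbit_rxq_len() splices the queue out under its lock, and cxgbit_wait_rxq() sleeps until that splice yields something, then feeds each skb to cxgbit_rx_skb(). A sketch from the matched lines; the signal and error handling are assumptions:

    static int cxgbit_wait_rxq(struct cxgbit_sock *csk)
    {
            struct sk_buff *skb;
            struct sk_buff_head rxq;

            skb_queue_head_init(&rxq);

            /* sleep until the rx path has queued at least one skb */
            wait_event_interruptible(csk->waitq, cxgbit_rxq_len(csk, &rxq));

            if (signal_pending(current))
                    goto out;

            while ((skb = __skb_dequeue(&rxq))) {
                    if (cxgbit_rx_skb(csk, skb))
                            goto out;
            }

            return 0;
    out:
            __skb_queue_purge(&rxq);
            return -1;
    }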
1631 struct cxgbit_sock *csk = conn->context; in cxgbit_get_login_rx() local
1634 while (!test_and_clear_bit(CSK_LOGIN_PDU_DONE, &csk->com.flags)) { in cxgbit_get_login_rx()
1635 ret = cxgbit_wait_rxq(csk); in cxgbit_get_login_rx()
1637 clear_bit(CSK_LOGIN_PDU_DONE, &csk->com.flags); in cxgbit_get_login_rx()
1647 struct cxgbit_sock *csk = conn->context; in cxgbit_get_rx_pdu() local
1651 if (cxgbit_wait_rxq(csk)) in cxgbit_get_rx_pdu()