Lines Matching refs:csk

98 struct cxgbit_sock *csk; in cxgbit_close_conn() local
103 list_for_each_entry(csk, &cdev->cskq.list, list) { in cxgbit_close_conn()
108 spin_lock_bh(&csk->rxq.lock); in cxgbit_close_conn()
109 __skb_queue_tail(&csk->rxq, skb); in cxgbit_close_conn()
110 if (skb_queue_len(&csk->rxq) == 1) in cxgbit_close_conn()
112 spin_unlock_bh(&csk->rxq.lock); in cxgbit_close_conn()
115 wake_up(&csk->waitq); in cxgbit_close_conn()
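
The matches at lines 98-115 come from cxgbit_close_conn() in the cxgbit iSCSI target driver (drivers/target/iscsi/cxgbit) and show its standard "queue an skb, wake the rx thread" pattern; the same shape recurs in cxgbit_queue_lro_skb() (lines 344-351) and cxgbit_update_dcb_priority() (lines 573-580) below. A minimal sketch of the pattern, reconstructed from the matched lines; the zero-length alloc_skb() and the wakeup_thread local are assumptions about the lines the search elides:

	struct sk_buff *skb;
	bool wakeup_thread = false;	/* assumed local */

	list_for_each_entry(csk, &cdev->cskq.list, list) {
		skb = alloc_skb(0, GFP_ATOMIC);	/* assumed: empty skb as a "close" signal */
		if (!skb)
			continue;

		spin_lock_bh(&csk->rxq.lock);
		__skb_queue_tail(&csk->rxq, skb);
		/* only the empty -> non-empty transition needs a wakeup;
		 * the rx thread drains the whole queue once it runs */
		if (skb_queue_len(&csk->rxq) == 1)
			wakeup_thread = true;
		spin_unlock_bh(&csk->rxq.lock);

		if (wakeup_thread) {
			wake_up(&csk->waitq);
			wakeup_thread = false;
		}
	}
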
173 cxgbit_process_ddpvld(struct cxgbit_sock *csk, struct cxgbit_lro_pdu_cb *pdu_cb, in cxgbit_process_ddpvld() argument
178 pr_info("tid 0x%x, status 0x%x, hcrc bad.\n", csk->tid, ddpvld); in cxgbit_process_ddpvld()
183 pr_info("tid 0x%x, status 0x%x, dcrc bad.\n", csk->tid, ddpvld); in cxgbit_process_ddpvld()
188 pr_info("tid 0x%x, status 0x%x, pad bad.\n", csk->tid, ddpvld); in cxgbit_process_ddpvld()
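
cxgbit_process_ddpvld() (lines 173-188) decodes the DDP-valid status word the hardware returns with each received iSCSI PDU and logs header-CRC, data-CRC and padding errors against the connection's tid. A sketch of one such check, assuming the cxgb4 CPL_RX_ISCSI_DDP_STATUS_HCRC_SHIFT bit name and a PDUCBF_RX_HCRC_ERR flag in pdu_cb; only the pr_info() lines appear in the matches above:

	if (ddpvld & (1 << CPL_RX_ISCSI_DDP_STATUS_HCRC_SHIFT)) {	/* assumed bit name */
		pr_info("tid 0x%x, status 0x%x, hcrc bad.\n", csk->tid, ddpvld);
		pdu_cb->flags |= PDUCBF_RX_HCRC_ERR;	/* assumed flag update */
	}
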
204 cxgbit_process_ddpvld(lro_cb->csk, pdu_cb, be32_to_cpu(cpl->ddpvld)); in cxgbit_lro_add_packet_rsp()
291 cxgbit_process_ddpvld(lro_cb->csk, pdu_cb, in cxgbit_lro_add_packet_gl()
316 cxgbit_lro_init_skb(struct cxgbit_sock *csk, u8 op, const struct pkt_gl *gl, in cxgbit_lro_init_skb() argument
333 cxgbit_get_csk(csk); in cxgbit_lro_init_skb()
335 lro_cb->csk = csk; in cxgbit_lro_init_skb()
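
cxgbit_lro_init_skb() (lines 316-335) allocates the aggregation skb for a new LRO session and, which is why it shows up in a refs:csk search, takes a reference on the socket before caching it in the skb's LRO control block; the matching cxgbit_put_csk() is in cxgbit_lro_flush() at line 364. A sketch of the reference handoff; the napi_alloc_skb() call, the headroom constant and the cxgbit_skb_lro_cb() accessor are assumptions about the elided lines:

	skb = napi_alloc_skb(napi, LRO_SKB_MAX_HEADROOM);	/* assumed allocation */
	if (unlikely(!skb))
		return NULL;

	lro_cb = cxgbit_skb_lro_cb(skb);	/* assumed accessor for skb->cb */

	cxgbit_get_csk(csk);	/* pin the socket for the lifetime of the LRO skb */
	lro_cb->csk = csk;
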
340 static void cxgbit_queue_lro_skb(struct cxgbit_sock *csk, struct sk_buff *skb) in cxgbit_queue_lro_skb() argument
344 spin_lock(&csk->rxq.lock); in cxgbit_queue_lro_skb()
345 __skb_queue_tail(&csk->rxq, skb); in cxgbit_queue_lro_skb()
346 if (skb_queue_len(&csk->rxq) == 1) in cxgbit_queue_lro_skb()
348 spin_unlock(&csk->rxq.lock); in cxgbit_queue_lro_skb()
351 wake_up(&csk->waitq); in cxgbit_queue_lro_skb()
357 struct cxgbit_sock *csk = lro_cb->csk; in cxgbit_lro_flush() local
359 csk->lro_skb = NULL; in cxgbit_lro_flush()
362 cxgbit_queue_lro_skb(csk, skb); in cxgbit_lro_flush()
364 cxgbit_put_csk(csk); in cxgbit_lro_flush()
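
cxgbit_lro_flush() (lines 357-364) ends an LRO session: it detaches the aggregation skb from the socket, hands it to the rx thread through cxgbit_queue_lro_skb(), and drops the reference taken in cxgbit_lro_init_skb(). A sketch; the cxgbit_skb_lro_cb() accessor and the __skb_unlink() from the LRO manager's queue are assumptions about the elided lines:

	struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb);	/* assumed accessor */
	struct cxgbit_sock *csk = lro_cb->csk;

	csk->lro_skb = NULL;	/* no session in flight on this connection any more */

	__skb_unlink(skb, &lro_mgr->lroq);	/* assumed: leave the LRO manager's queue */
	cxgbit_queue_lro_skb(csk, skb);	/* deliver the aggregated PDUs */

	cxgbit_put_csk(csk);	/* pairs with cxgbit_get_csk() in cxgbit_lro_init_skb() */
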
379 cxgbit_lro_receive(struct cxgbit_sock *csk, u8 op, const __be64 *rsp, in cxgbit_lro_receive() argument
386 if (!csk) { in cxgbit_lro_receive()
391 if (csk->lro_skb) in cxgbit_lro_receive()
400 skb = cxgbit_lro_init_skb(csk, op, gl, rsp, napi); in cxgbit_lro_receive()
404 csk->lro_skb = skb; in cxgbit_lro_receive()
410 skb = csk->lro_skb; in cxgbit_lro_receive()
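
cxgbit_lro_receive() (lines 379-410) is the per-message entry point: a NULL csk means the message cannot be matched to a connection, an existing csk->lro_skb means the message joins the running session, and otherwise a fresh session skb is created. A control-flow sketch; the error message and the labels are assumptions about the elided lines:

	if (!csk) {
		pr_err("%s: csk NULL, op 0x%x.\n", __func__, op);	/* assumed message */
		goto out;
	}

	if (csk->lro_skb)
		goto add_packet;	/* session already open: aggregate into it */

	skb = cxgbit_lro_init_skb(csk, op, gl, rsp, napi);
	if (unlikely(!skb))
		goto out;
	csk->lro_skb = skb;	/* new session: remember the aggregation skb */

add_packet:
	skb = csk->lro_skb;
	/* ... append this CPL (and its gather list, if any) to skb ... */
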
441 struct cxgbit_sock *csk = NULL; in cxgbit_uld_lro_rx_handler() local
465 csk = lookup_tid(lldi->tids, tid); in cxgbit_uld_lro_rx_handler()
471 if (csk && csk->lro_skb && lro_flush) in cxgbit_uld_lro_rx_handler()
472 cxgbit_lro_flush(lro_mgr, csk->lro_skb); in cxgbit_uld_lro_rx_handler()
478 if (!cxgbit_lro_receive(csk, op, rsp, NULL, lro_mgr, in cxgbit_uld_lro_rx_handler()
500 if (!cxgbit_lro_receive(csk, op, rsp, gl, lro_mgr, in cxgbit_uld_lro_rx_handler()
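
cxgbit_uld_lro_rx_handler() (lines 441-500) is the cxgb4 ULD rx hook. It recovers the connection from the hardware tid, flushes an open LRO session whenever the incoming CPL must not be aggregated into it, and then feeds the message to cxgbit_lro_receive() with (line 500) or without (line 478) a gather list. A sketch of the lookup-and-flush step; extracting the tid with GET_TID() from the CPL header is an assumption about the elided lines:

	tid = GET_TID(rpl);	/* assumed: rpl points at the CPL header */
	csk = lookup_tid(lldi->tids, tid);	/* may be NULL for an unknown tid */

	/* CPLs that cannot join the running session force it out first */
	if (csk && csk->lro_skb && lro_flush)
		cxgbit_lro_flush(lro_mgr, csk->lro_skb);

	if (!cxgbit_lro_receive(csk, op, rsp, gl, lro_mgr, napi))
		return 0;	/* consumed by the LRO path */
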
541 struct cxgbit_sock *csk; in cxgbit_update_dcb_priority() local
547 list_for_each_entry(csk, &cdev->cskq.list, list) { in cxgbit_update_dcb_priority()
548 if (csk->port_id != port_id) in cxgbit_update_dcb_priority()
551 if (csk->com.local_addr.ss_family == AF_INET6) { in cxgbit_update_dcb_priority()
554 sock_in6 = (struct sockaddr_in6 *)&csk->com.local_addr; in cxgbit_update_dcb_priority()
559 sock_in = (struct sockaddr_in *)&csk->com.local_addr; in cxgbit_update_dcb_priority()
566 if (csk->dcb_priority == dcb_priority) in cxgbit_update_dcb_priority()
573 spin_lock(&csk->rxq.lock); in cxgbit_update_dcb_priority()
574 __skb_queue_tail(&csk->rxq, skb); in cxgbit_update_dcb_priority()
575 if (skb_queue_len(&csk->rxq) == 1) in cxgbit_update_dcb_priority()
577 spin_unlock(&csk->rxq.lock); in cxgbit_update_dcb_priority()
580 wake_up(&csk->waitq); in cxgbit_update_dcb_priority()
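
cxgbit_update_dcb_priority() (lines 541-580) walks the device's connection list and notifies every csk whose listening endpoint sits on the reconfigured port with a changed DCB priority; the notification itself is the queue-and-wake pattern shown after line 115. A sketch of the per-connection filter; the local_port local and the ntohs() conversions are assumptions about the elided lines:

	list_for_each_entry(csk, &cdev->cskq.list, list) {
		if (csk->port_id != port_id)
			continue;	/* connection lives on a different port */

		if (csk->com.local_addr.ss_family == AF_INET6) {
			struct sockaddr_in6 *sock_in6 =
				(struct sockaddr_in6 *)&csk->com.local_addr;
			local_port = ntohs(sock_in6->sin6_port);	/* assumed */
		} else {
			struct sockaddr_in *sock_in =
				(struct sockaddr_in *)&csk->com.local_addr;
			local_port = ntohs(sock_in->sin_port);	/* assumed */
		}

		if (local_port != port_num || csk->dcb_priority == dcb_priority)
			continue;	/* not this listener, or priority unchanged */

		/* ... queue a zero-length skb and wake the rx thread,
		 * as in cxgbit_close_conn() above ... */
	}
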