Lines matching refs: cp (struct rds_conn_path *) in net/rds/send.c

65 void rds_send_path_reset(struct rds_conn_path *cp)  in rds_send_path_reset()  argument
70 if (cp->cp_xmit_rm) { in rds_send_path_reset()
71 rm = cp->cp_xmit_rm; in rds_send_path_reset()
72 cp->cp_xmit_rm = NULL; in rds_send_path_reset()
81 cp->cp_xmit_sg = 0; in rds_send_path_reset()
82 cp->cp_xmit_hdr_off = 0; in rds_send_path_reset()
83 cp->cp_xmit_data_off = 0; in rds_send_path_reset()
84 cp->cp_xmit_atomic_sent = 0; in rds_send_path_reset()
85 cp->cp_xmit_rdma_sent = 0; in rds_send_path_reset()
86 cp->cp_xmit_data_sent = 0; in rds_send_path_reset()
88 cp->cp_conn->c_map_queued = 0; in rds_send_path_reset()
90 cp->cp_unacked_packets = rds_sysctl_max_unacked_packets; in rds_send_path_reset()
91 cp->cp_unacked_bytes = rds_sysctl_max_unacked_bytes; in rds_send_path_reset()
94 spin_lock_irqsave(&cp->cp_lock, flags); in rds_send_path_reset()
95 list_for_each_entry_safe(rm, tmp, &cp->cp_retrans, m_conn_item) { in rds_send_path_reset()
99 list_splice_init(&cp->cp_retrans, &cp->cp_send_queue); in rds_send_path_reset()
100 spin_unlock_irqrestore(&cp->cp_lock, flags); in rds_send_path_reset()
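
The rds_send_path_reset() lines above clear the per-path transmit cursors and then, under cp_lock, splice everything still on cp_retrans back onto the front of cp_send_queue. Below is a minimal userspace sketch of that splice-under-lock step; struct path, struct msgq and msgq_splice_front() are hypothetical stand-ins for the RDS structures and the kernel's list_splice_init(), not the real API.

#include <pthread.h>
#include <stddef.h>

struct msg {
        struct msg *next;
        /* payload omitted */
};

struct msgq {
        struct msg *head;
        struct msg *tail;
};

struct path {
        pthread_mutex_t lock;
        struct msgq send_queue;
        struct msgq retrans;
        size_t xmit_sg, xmit_hdr_off, xmit_data_off;    /* transmit cursors */
};

/* Move every entry of @src to the front of @dst and leave @src empty. */
static void msgq_splice_front(struct msgq *dst, struct msgq *src)
{
        if (!src->head)
                return;
        src->tail->next = dst->head;
        if (!dst->head)
                dst->tail = src->tail;
        dst->head = src->head;
        src->head = src->tail = NULL;
}

void path_reset(struct path *p)
{
        /* Forget partial-transmission progress on the current message. */
        p->xmit_sg = p->xmit_hdr_off = p->xmit_data_off = 0;

        /* Re-send anything that was in flight: retrans goes back first. */
        pthread_mutex_lock(&p->lock);
        msgq_splice_front(&p->send_queue, &p->retrans);
        pthread_mutex_unlock(&p->lock);
}

Splicing the retransmit entries at the head preserves wire order: they carry older sequence numbers than anything still waiting on the send queue.
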
104 static int acquire_in_xmit(struct rds_conn_path *cp) in acquire_in_xmit() argument
106 return test_and_set_bit(RDS_IN_XMIT, &cp->cp_flags) == 0; in acquire_in_xmit()
109 static void release_in_xmit(struct rds_conn_path *cp) in release_in_xmit() argument
111 clear_bit(RDS_IN_XMIT, &cp->cp_flags); in release_in_xmit()
119 if (waitqueue_active(&cp->cp_waitq)) in release_in_xmit()
120 wake_up_all(&cp->cp_waitq); in release_in_xmit()
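
acquire_in_xmit() and release_in_xmit() use a single bit in cp_flags as a try-lock on the transmit path: test_and_set_bit() either claims RDS_IN_XMIT or reports that another caller already holds it, and clear_bit() releases it and wakes anyone sleeping on cp_waitq. Here is a userspace analogue of the same idea, using C11 atomics and a pthread condition variable in place of the kernel bit ops and waitqueue; the names are illustrative, not the RDS API.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

#define IN_XMIT 0x1UL                   /* stand-in for RDS_IN_XMIT */

struct path {
        atomic_ulong flags;
        pthread_mutex_t wait_lock;
        pthread_cond_t waitq;
};

/* Try-lock: true means we now own the transmit path. */
bool acquire_in_xmit(struct path *p)
{
        return (atomic_fetch_or(&p->flags, IN_XMIT) & IN_XMIT) == 0;
}

void release_in_xmit(struct path *p)
{
        atomic_fetch_and(&p->flags, ~IN_XMIT);

        /*
         * Wake everyone waiting for the transmit path.  The kernel version
         * first checks waitqueue_active() so an uncontended release stays
         * cheap; a condition-variable broadcast with no waiters is already
         * cheap enough for this sketch.
         */
        pthread_mutex_lock(&p->wait_lock);
        pthread_cond_broadcast(&p->waitq);
        pthread_mutex_unlock(&p->wait_lock);
}
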
137 int rds_send_xmit(struct rds_conn_path *cp) in rds_send_xmit() argument
139 struct rds_connection *conn = cp->cp_conn; in rds_send_xmit()
160 if (!acquire_in_xmit(cp)) { in rds_send_xmit()
166 if (rds_destroy_pending(cp->cp_conn)) { in rds_send_xmit()
167 release_in_xmit(cp); in rds_send_xmit()
180 send_gen = READ_ONCE(cp->cp_send_gen) + 1; in rds_send_xmit()
181 WRITE_ONCE(cp->cp_send_gen, send_gen); in rds_send_xmit()
187 if (!rds_conn_path_up(cp)) { in rds_send_xmit()
188 release_in_xmit(cp); in rds_send_xmit()
194 conn->c_trans->xmit_path_prepare(cp); in rds_send_xmit()
202 rm = cp->cp_xmit_rm; in rds_send_xmit()
226 rm->m_inc.i_conn_path = cp; in rds_send_xmit()
227 rm->m_inc.i_conn = cp->cp_conn; in rds_send_xmit()
229 cp->cp_xmit_rm = rm; in rds_send_xmit()
252 spin_lock_irqsave(&cp->cp_lock, flags); in rds_send_xmit()
254 if (!list_empty(&cp->cp_send_queue)) { in rds_send_xmit()
255 rm = list_entry(cp->cp_send_queue.next, in rds_send_xmit()
265 &cp->cp_retrans); in rds_send_xmit()
268 spin_unlock_irqrestore(&cp->cp_lock, flags); in rds_send_xmit()
283 spin_lock_irqsave(&cp->cp_lock, flags); in rds_send_xmit()
286 spin_unlock_irqrestore(&cp->cp_lock, flags); in rds_send_xmit()
292 if (cp->cp_unacked_packets == 0 || in rds_send_xmit()
293 cp->cp_unacked_bytes < len) { in rds_send_xmit()
296 cp->cp_unacked_packets = in rds_send_xmit()
298 cp->cp_unacked_bytes = in rds_send_xmit()
302 cp->cp_unacked_bytes -= len; in rds_send_xmit()
303 cp->cp_unacked_packets--; in rds_send_xmit()
306 cp->cp_xmit_rm = rm; in rds_send_xmit()
310 if (rm->rdma.op_active && !cp->cp_xmit_rdma_sent) { in rds_send_xmit()
322 cp->cp_xmit_rdma_sent = 1; in rds_send_xmit()
326 if (rm->atomic.op_active && !cp->cp_xmit_atomic_sent) { in rds_send_xmit()
338 cp->cp_xmit_atomic_sent = 1; in rds_send_xmit()
364 if (rm->data.op_active && !cp->cp_xmit_data_sent) { in rds_send_xmit()
368 cp->cp_xmit_hdr_off, in rds_send_xmit()
369 cp->cp_xmit_sg, in rds_send_xmit()
370 cp->cp_xmit_data_off); in rds_send_xmit()
374 if (cp->cp_xmit_hdr_off < sizeof(struct rds_header)) { in rds_send_xmit()
377 cp->cp_xmit_hdr_off); in rds_send_xmit()
378 cp->cp_xmit_hdr_off += tmp; in rds_send_xmit()
382 sg = &rm->data.op_sg[cp->cp_xmit_sg]; in rds_send_xmit()
385 cp->cp_xmit_data_off); in rds_send_xmit()
386 cp->cp_xmit_data_off += tmp; in rds_send_xmit()
388 if (cp->cp_xmit_data_off == sg->length) { in rds_send_xmit()
389 cp->cp_xmit_data_off = 0; in rds_send_xmit()
391 cp->cp_xmit_sg++; in rds_send_xmit()
392 BUG_ON(ret != 0 && cp->cp_xmit_sg == in rds_send_xmit()
397 if (cp->cp_xmit_hdr_off == sizeof(struct rds_header) && in rds_send_xmit()
398 (cp->cp_xmit_sg == rm->data.op_nents)) in rds_send_xmit()
399 cp->cp_xmit_data_sent = 1; in rds_send_xmit()
407 if (!rm->data.op_active || cp->cp_xmit_data_sent) { in rds_send_xmit()
408 cp->cp_xmit_rm = NULL; in rds_send_xmit()
409 cp->cp_xmit_sg = 0; in rds_send_xmit()
410 cp->cp_xmit_hdr_off = 0; in rds_send_xmit()
411 cp->cp_xmit_data_off = 0; in rds_send_xmit()
412 cp->cp_xmit_rdma_sent = 0; in rds_send_xmit()
413 cp->cp_xmit_atomic_sent = 0; in rds_send_xmit()
414 cp->cp_xmit_data_sent = 0; in rds_send_xmit()
422 conn->c_trans->xmit_path_complete(cp); in rds_send_xmit()
423 release_in_xmit(cp); in rds_send_xmit()
452 raced = send_gen != READ_ONCE(cp->cp_send_gen); in rds_send_xmit()
455 !list_empty(&cp->cp_send_queue)) && !raced) { in rds_send_xmit()
459 if (rds_destroy_pending(cp->cp_conn)) in rds_send_xmit()
462 queue_delayed_work(rds_wq, &cp->cp_send_w, 1); in rds_send_xmit()
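
Much of rds_send_xmit() above is cursor bookkeeping, but the cp_send_gen lines (180-181 and 452-462 in the listing) implement a small race check: the transmitter bumps the generation while it owns RDS_IN_XMIT, and after releasing the bit it re-reads the counter; if the value is still its own and the queues are not empty, no later transmitter ran, so it must requeue the send work itself. A compact userspace sketch of that pattern follows, with hypothetical types and a stub in place of queue_delayed_work().

#include <stdatomic.h>
#include <stdbool.h>

struct path {
        atomic_ulong flags;             /* bit 0 = "in transmit" */
        atomic_uint send_gen;
        atomic_int queued;              /* messages waiting (stands in for the queues) */
};

static bool acquire_in_xmit(struct path *p)
{
        return (atomic_fetch_or(&p->flags, 1UL) & 1UL) == 0;
}

static void release_in_xmit(struct path *p)
{
        atomic_fetch_and(&p->flags, ~1UL);
}

/* Stand-in for queue_delayed_work(rds_wq, &cp->cp_send_w, 1). */
static void requeue_send_work(struct path *p)
{
        (void)p;
}

void send_xmit(struct path *p)
{
        if (!acquire_in_xmit(p))
                return;                 /* another CPU is already transmitting */

        unsigned int my_gen = atomic_load(&p->send_gen) + 1;
        atomic_store(&p->send_gen, my_gen);

        /* ... drain the send queue here ... */

        release_in_xmit(p);

        /*
         * A message queued after our last pass over the queue but before
         * the release above would otherwise sit there until the next send.
         * If the generation is still ours, no later transmitter ran, so
         * kicking the worker again is on us.
         */
        bool raced = my_gen != atomic_load(&p->send_gen);
        if (atomic_load(&p->queued) > 0 && !raced)
                requeue_send_work(p);
}
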
686 void rds_send_path_drop_acked(struct rds_conn_path *cp, u64 ack, in rds_send_path_drop_acked() argument
693 spin_lock_irqsave(&cp->cp_lock, flags); in rds_send_path_drop_acked()
695 list_for_each_entry_safe(rm, tmp, &cp->cp_retrans, m_conn_item) { in rds_send_path_drop_acked()
707 spin_unlock_irqrestore(&cp->cp_lock, flags); in rds_send_path_drop_acked()
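
rds_send_path_drop_acked() walks cp_retrans with the _safe iterator under cp_lock so entries can be unlinked mid-walk. The sketch below shows a simplified userspace version of that walk; unlike the real function it frees acked messages directly instead of collecting them and completing them after the lock is dropped, and it uses a plain sequence comparison where the kernel accepts an is_acked callback. All names are hypothetical.

#include <pthread.h>
#include <stdint.h>
#include <stdlib.h>

struct msg {
        struct msg *next;
        uint64_t seq;
};

struct path {
        pthread_mutex_t lock;
        struct msg *retrans;            /* oldest first */
};

void drop_acked(struct path *p, uint64_t ack)
{
        pthread_mutex_lock(&p->lock);

        struct msg **pp = &p->retrans;
        while (*pp) {
                struct msg *m = *pp;
                if (m->seq <= ack) {
                        *pp = m->next;  /* unlink the acked entry, then free it */
                        free(m);
                } else {
                        pp = &m->next;
                }
        }

        pthread_mutex_unlock(&p->lock);
}
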
726 struct rds_conn_path *cp; in rds_send_drop_to() local
757 cp = rm->m_inc.i_conn_path; in rds_send_drop_to()
759 cp = &conn->c_path[0]; in rds_send_drop_to()
761 spin_lock_irqsave(&cp->cp_lock, flags); in rds_send_drop_to()
768 spin_unlock_irqrestore(&cp->cp_lock, flags); in rds_send_drop_to()
772 spin_unlock_irqrestore(&cp->cp_lock, flags); in rds_send_drop_to()
819 struct rds_conn_path *cp, in rds_send_queue_rm() argument
864 rm->m_inc.i_conn_path = cp; in rds_send_queue_rm()
867 spin_lock(&cp->cp_lock); in rds_send_queue_rm()
868 rm->m_inc.i_hdr.h_sequence = cpu_to_be64(cp->cp_next_tx_seq++); in rds_send_queue_rm()
869 list_add_tail(&rm->m_conn_item, &cp->cp_send_queue); in rds_send_queue_rm()
871 spin_unlock(&cp->cp_lock); in rds_send_queue_rm()
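
rds_send_queue_rm() stamps the header with cp_next_tx_seq and appends the message to cp_send_queue inside the same cp_lock critical section, so sequence numbers can never disagree with queue order. A small sketch of that invariant, with hypothetical userspace types rather than the RDS structures:

#include <pthread.h>
#include <stdint.h>

struct msg {
        struct msg *next;
        uint64_t seq;
};

struct path {
        pthread_mutex_t lock;
        uint64_t next_tx_seq;
        struct msg *q_head, *q_tail;
};

void queue_msg(struct path *p, struct msg *m)
{
        pthread_mutex_lock(&p->lock);

        m->seq = p->next_tx_seq++;      /* stamp and advance under the same lock */
        m->next = NULL;
        if (p->q_tail)
                p->q_tail->next = m;
        else
                p->q_head = m;
        p->q_tail = m;

        pthread_mutex_unlock(&p->lock);
}
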
1426 rds_send_probe(struct rds_conn_path *cp, __be16 sport, in rds_send_probe() argument
1439 rm->m_daddr = cp->cp_conn->c_faddr; in rds_send_probe()
1442 rds_conn_path_connect_if_down(cp); in rds_send_probe()
1444 ret = rds_cong_wait(cp->cp_conn->c_fcong, dport, 1, NULL); in rds_send_probe()
1448 spin_lock_irqsave(&cp->cp_lock, flags); in rds_send_probe()
1449 list_add_tail(&rm->m_conn_item, &cp->cp_send_queue); in rds_send_probe()
1452 rm->m_inc.i_conn = cp->cp_conn; in rds_send_probe()
1453 rm->m_inc.i_conn_path = cp; in rds_send_probe()
1456 cp->cp_next_tx_seq); in rds_send_probe()
1458 cp->cp_next_tx_seq++; in rds_send_probe()
1461 cp->cp_conn->c_trans->t_mp_capable) { in rds_send_probe()
1463 u32 my_gen_num = cpu_to_be32(cp->cp_conn->c_my_gen_num); in rds_send_probe()
1473 spin_unlock_irqrestore(&cp->cp_lock, flags); in rds_send_probe()
1480 if (!rds_destroy_pending(cp->cp_conn)) in rds_send_probe()
1481 queue_delayed_work(rds_wq, &cp->cp_send_w, 1); in rds_send_probe()
1494 rds_send_pong(struct rds_conn_path *cp, __be16 dport) in rds_send_pong() argument
1496 return rds_send_probe(cp, 0, dport, 0); in rds_send_pong()
1503 struct rds_conn_path *cp = &conn->c_path[cp_index]; in rds_send_ping() local
1505 spin_lock_irqsave(&cp->cp_lock, flags); in rds_send_ping()
1507 spin_unlock_irqrestore(&cp->cp_lock, flags); in rds_send_ping()
1511 spin_unlock_irqrestore(&cp->cp_lock, flags); in rds_send_ping()
1512 rds_send_probe(cp, cpu_to_be16(RDS_FLAG_PROBE_PORT), 0, 0); in rds_send_ping()