| /net/ipv4/ |
| tcp_veno.c |
| 119 | static void tcp_veno_cong_avoid(struct sock *sk, u32 ack, u32 acked) | in tcp_veno_cong_avoid() argument |
| 125 | tcp_reno_cong_avoid(sk, ack, acked); | in tcp_veno_cong_avoid() |
| 138 | tcp_reno_cong_avoid(sk, ack, acked); | in tcp_veno_cong_avoid() |
| 157 | acked = tcp_slow_start(tp, acked); | in tcp_veno_cong_avoid() |
| 158 | if (!acked) | in tcp_veno_cong_avoid() |
| 167 | tcp_cong_avoid_ai(tp, tcp_snd_cwnd(tp), acked); | in tcp_veno_cong_avoid() |
| 181 | tp->snd_cwnd_cnt += acked; | in tcp_veno_cong_avoid() |
|
| tcp_scalable.c |
| 18 | static void tcp_scalable_cong_avoid(struct sock *sk, u32 ack, u32 acked) | in tcp_scalable_cong_avoid() argument |
| 26 | acked = tcp_slow_start(tp, acked); | in tcp_scalable_cong_avoid() |
| 27 | if (!acked) | in tcp_scalable_cong_avoid() |
| 31 | acked); | in tcp_scalable_cong_avoid() |
|
| tcp_cong.c |
| 455 | __bpf_kfunc u32 tcp_slow_start(struct tcp_sock *tp, u32 acked) | in tcp_slow_start() argument |
| 457 | u32 cwnd = min(tcp_snd_cwnd(tp) + acked, tp->snd_ssthresh); | in tcp_slow_start() |
| 459 | acked -= cwnd - tcp_snd_cwnd(tp); | in tcp_slow_start() |
| 462 | return acked; | in tcp_slow_start() |
| 469 | __bpf_kfunc void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked) | in tcp_cong_avoid_ai() argument |
| 477 | tp->snd_cwnd_cnt += acked; | in tcp_cong_avoid_ai() |
| 495 | __bpf_kfunc void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked) | in tcp_reno_cong_avoid() argument |
| 504 | acked = tcp_slow_start(tp, acked); | in tcp_reno_cong_avoid() |
| 505 | if (!acked) | in tcp_reno_cong_avoid() |
| 509 | tcp_cong_avoid_ai(tp, tcp_snd_cwnd(tp), acked); | in tcp_reno_cong_avoid() |
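The tcp_cong.c fragments above are the helpers most congestion-avoidance modules in this directory build on: tcp_slow_start() grows the congestion window by up to `acked` packets until snd_ssthresh is reached and returns the leftover ACK credit, and tcp_cong_avoid_ai() accumulates that credit in snd_cwnd_cnt, adding one segment to the window once a full window's worth of packets has been acknowledged. The following is a minimal, self-contained user-space sketch of that accounting; the struct and simulation loop are illustrative and simplified (the kernel versions also handle cwnd clamping and batched increments), not the kernel code itself.

```c
/* Minimal user-space model of the slow-start + additive-increase
 * accounting seen in net/ipv4/tcp_cong.c. Field names mirror the
 * kernel's tcp_sock, but this is an illustrative sketch only. */
#include <stdint.h>
#include <stdio.h>

struct model_tp {
	uint32_t snd_cwnd;      /* congestion window, in packets */
	uint32_t snd_ssthresh;  /* slow-start threshold */
	uint32_t snd_cwnd_cnt;  /* ACK credit since the last cwnd bump */
};

/* Grow cwnd by acked, capped at ssthresh; return the unused ACK credit. */
static uint32_t model_slow_start(struct model_tp *tp, uint32_t acked)
{
	uint32_t cwnd = tp->snd_cwnd + acked;

	if (cwnd > tp->snd_ssthresh)
		cwnd = tp->snd_ssthresh;
	acked -= cwnd - tp->snd_cwnd;   /* credit not consumed by slow start */
	tp->snd_cwnd = cwnd;
	return acked;
}

/* Accumulate ACK credit; once a full window w has been acked, cwnd += 1. */
static void model_cong_avoid_ai(struct model_tp *tp, uint32_t w, uint32_t acked)
{
	tp->snd_cwnd_cnt += acked;
	if (tp->snd_cwnd_cnt >= w) {
		tp->snd_cwnd_cnt -= w;
		tp->snd_cwnd++;
	}
}

/* Roughly mirrors the shape of tcp_reno_cong_avoid(): slow start below
 * ssthresh, then additive increase driven by the leftover credit. */
static void model_reno_cong_avoid(struct model_tp *tp, uint32_t acked)
{
	if (tp->snd_cwnd < tp->snd_ssthresh) {
		acked = model_slow_start(tp, acked);
		if (!acked)
			return;         /* all credit consumed by slow start */
	}
	model_cong_avoid_ai(tp, tp->snd_cwnd, acked);
}

int main(void)
{
	struct model_tp tp = { .snd_cwnd = 10, .snd_ssthresh = 16 };

	for (int rtt = 0; rtt < 6; rtt++) {
		model_reno_cong_avoid(&tp, tp.snd_cwnd); /* assume a full window acked */
		printf("rtt %d: cwnd=%u cwnd_cnt=%u\n", rtt, tp.snd_cwnd, tp.snd_cwnd_cnt);
	}
	return 0;
}
```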
|
| tcp_yeah.c |
| 58 | static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack, u32 acked) | in tcp_yeah_cong_avoid() argument |
| 67 | acked = tcp_slow_start(tp, acked); | in tcp_yeah_cong_avoid() |
| 68 | if (!acked) | in tcp_yeah_cong_avoid() |
| 75 | acked); | in tcp_yeah_cong_avoid() |
| 78 | tcp_cong_avoid_ai(tp, tcp_snd_cwnd(tp), acked); | in tcp_yeah_cong_avoid() |
|
| tcp_illinois.c |
| 52 | u16 acked; /* # packets acked by current ACK */ | member |
| 78 | ca->acked = 0; | in tcp_illinois_init() |
| 91 | ca->acked = sample->pkts_acked; | in tcp_illinois_acked() |
| 260 | static void tcp_illinois_cong_avoid(struct sock *sk, u32 ack, u32 acked) | in tcp_illinois_cong_avoid() argument |
| 274 | tcp_slow_start(tp, acked); | in tcp_illinois_cong_avoid() |
| 280 | tp->snd_cwnd_cnt += ca->acked; | in tcp_illinois_cong_avoid() |
| 281 | ca->acked = 1; | in tcp_illinois_cong_avoid() |
|
| tcp_cubic.c |
| 214 | static inline void bictcp_update(struct bictcp *ca, u32 cwnd, u32 acked) | in bictcp_update() argument |
| 219 | ca->ack_cnt += acked; /* count the number of ACKed packets */ | in bictcp_update() |
| 237 | ca->ack_cnt = acked; /* start counting */ | in bictcp_update() |
| 324 | __bpf_kfunc static void cubictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked) | in cubictcp_cong_avoid() argument |
| 333 | acked = tcp_slow_start(tp, acked); | in cubictcp_cong_avoid() |
| 334 | if (!acked) | in cubictcp_cong_avoid() |
| 337 | bictcp_update(ca, tcp_snd_cwnd(tp), acked); | in cubictcp_cong_avoid() |
| 338 | tcp_cong_avoid_ai(tp, ca->cnt, acked); | in cubictcp_cong_avoid() |
|
| tcp_bic.c |
| 140 | static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked) | in bictcp_cong_avoid() argument |
| 149 | acked = tcp_slow_start(tp, acked); | in bictcp_cong_avoid() |
| 150 | if (!acked) | in bictcp_cong_avoid() |
| 154 | tcp_cong_avoid_ai(tp, ca->cnt, acked); | in bictcp_cong_avoid() |
|
| tcp_vegas.c |
| 165 | static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 acked) | in tcp_vegas_cong_avoid() argument |
| 171 | tcp_reno_cong_avoid(sk, ack, acked); | in tcp_vegas_cong_avoid() |
| 196 | tcp_reno_cong_avoid(sk, ack, acked); | in tcp_vegas_cong_avoid() |
| 247 | tcp_slow_start(tp, acked); | in tcp_vegas_cong_avoid() |
| 287 | tcp_slow_start(tp, acked); | in tcp_vegas_cong_avoid() |
|
| tcp_nv.c |
| 180 | static void tcpnv_cong_avoid(struct sock *sk, u32 ack, u32 acked) | in tcpnv_cong_avoid() argument |
| 194 | acked = tcp_slow_start(tp, acked); | in tcpnv_cong_avoid() |
| 195 | if (!acked) | in tcpnv_cong_avoid() |
| 201 | tcp_cong_avoid_ai(tp, cnt, acked); | in tcpnv_cong_avoid() |
| 204 | tcp_cong_avoid_ai(tp, cnt, acked); | in tcpnv_cong_avoid() |
|
| tcp_highspeed.c |
| 112 | static void hstcp_cong_avoid(struct sock *sk, u32 ack, u32 acked) | in hstcp_cong_avoid() argument |
| 121 | tcp_slow_start(tp, acked); | in hstcp_cong_avoid() |
|
| tcp_bbr.c |
| 481 | struct sock *sk, const struct rate_sample *rs, u32 acked, u32 *new_cwnd) | in bbr_set_cwnd_to_recover_or_restore() argument |
| 500 | cwnd = tcp_packets_in_flight(tp) + acked; | in bbr_set_cwnd_to_recover_or_restore() |
| 509 | *new_cwnd = max(cwnd, tcp_packets_in_flight(tp) + acked); | in bbr_set_cwnd_to_recover_or_restore() |
| 520 | u32 acked, u32 bw, int gain) | in bbr_set_cwnd() argument |
| 526 | if (!acked) | in bbr_set_cwnd() |
| 529 | if (bbr_set_cwnd_to_recover_or_restore(sk, rs, acked, &cwnd)) | in bbr_set_cwnd() |
| 542 | cwnd = min(cwnd + acked, target_cwnd); | in bbr_set_cwnd() |
| 544 | cwnd = cwnd + acked; | in bbr_set_cwnd() |
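Unlike the Reno-style modules above, the tcp_bbr.c fragments show `acked` feeding cwnd directly: bbr_set_cwnd() adds the number of newly acked packets to cwnd, capping at a bandwidth-and-gain-derived target, while bbr_set_cwnd_to_recover_or_restore() pins cwnd to packets-in-flight plus `acked` during loss recovery. A hedged sketch of that update rule is below; the function name, parameters, and the full_pipe flag are placeholders for illustration, not the kernel's bbr state.

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t umin(uint32_t a, uint32_t b) { return a < b ? a : b; }

/* Illustrative sketch of the bbr_set_cwnd() update rule suggested by the
 * fragments above; simplified, not the kernel implementation. */
static uint32_t model_bbr_set_cwnd(uint32_t cwnd, uint32_t acked,
				   uint32_t target_cwnd, bool full_pipe)
{
	if (!acked)
		return cwnd;                            /* no new ACK credit: keep cwnd */
	if (full_pipe)
		cwnd = umin(cwnd + acked, target_cwnd); /* cap once the pipe is full */
	else if (cwnd < target_cwnd)
		cwnd = cwnd + acked;                    /* still probing: grow by acked */
	return cwnd;
}

int main(void)
{
	printf("%u\n", model_bbr_set_cwnd(40, 10, 45, true));   /* capped at 45 */
	printf("%u\n", model_bbr_set_cwnd(40, 10, 100, false)); /* grows to 50 */
	return 0;
}
```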
|
| tcp_hybla.c |
| 90 | static void hybla_cong_avoid(struct sock *sk, u32 ack, u32 acked) | in hybla_cong_avoid() argument |
| 107 | tcp_reno_cong_avoid(sk, ack, acked); | in hybla_cong_avoid() |
|
| tcp_lp.c |
| 121 | static void tcp_lp_cong_avoid(struct sock *sk, u32 ack, u32 acked) | in tcp_lp_cong_avoid() argument |
| 126 | tcp_reno_cong_avoid(sk, ack, acked); | in tcp_lp_cong_avoid() |
|
| tcp_htcp.c |
| 231 | static void htcp_cong_avoid(struct sock *sk, u32 ack, u32 acked) | in htcp_cong_avoid() argument |
| 240 | tcp_slow_start(tp, acked); | in htcp_cong_avoid() |
|
| tcp_cdg.c |
| 262 | static void tcp_cdg_cong_avoid(struct sock *sk, u32 ack, u32 acked) | in tcp_cdg_cong_avoid() argument |
| 293 | tcp_reno_cong_avoid(sk, ack, acked); | in tcp_cdg_cong_avoid() |
|
| bpf_tcp_ca.c |
| 263 | static void bpf_tcp_ca_cong_avoid(struct sock *sk, u32 ack, u32 acked) | in bpf_tcp_ca_cong_avoid() argument |
|
| tcp_minisocks.c |
| 863 | inet_rsk(req)->acked = 1; | in tcp_check_req() |
| 900 | inet_rsk(req)->acked = 1; | in tcp_check_req() |
|
| inet_connection_sock.c |
| 878 | (!inet_rsk(req)->acked || req->num_timeout >= rskq_defer_accept); | in syn_ack_recalc() |
| 883 | *resend = !inet_rsk(req)->acked || | in syn_ack_recalc() |
| 1127 | inet_rsk(req)->acked)) { | in reqsk_timer_handler() |
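In these last two files `acked` is the request-socket flag set in tcp_check_req() (tcp_minisocks.c above) once the handshake-completing ACK has been seen; syn_ack_recalc() and reqsk_timer_handler() then consult it to decide whether to expire the pending request or resend the SYN-ACK under TCP_DEFER_ACCEPT. Below is a self-contained sketch of that decision, reconstructed from the fragments above; the parameter list and the non-defer-accept branch are simplified assumptions, not the kernel function.

```c
#include <stdbool.h>
#include <stdio.h>

/* Sketch of the defer-accept decision seen in syn_ack_recalc();
 * a simplified user-space model rather than the kernel code. */
static void model_syn_ack_recalc(bool acked, int num_timeout,
				 int max_syn_ack_retries, int rskq_defer_accept,
				 bool *expire, bool *resend)
{
	if (!rskq_defer_accept) {
		*expire = num_timeout >= max_syn_ack_retries;
		*resend = true;
		return;
	}
	/* With defer-accept: expire only if the final ACK never arrived
	 * or the deferring period has run out, and hold off resending the
	 * SYN-ACK while still waiting for data after the ACK. */
	*expire = num_timeout >= max_syn_ack_retries &&
		  (!acked || num_timeout >= rskq_defer_accept);
	*resend = !acked || num_timeout >= rskq_defer_accept - 1;
}

int main(void)
{
	bool expire, resend;

	model_syn_ack_recalc(true, 2, 5, 4, &expire, &resend);
	printf("expire=%d resend=%d\n", expire, resend);
	return 0;
}
```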
|
| /net/mac80211/ |
| status.c |
| 617 | .ack = acked, | in ieee80211_report_ack_skb() |
| 636 | acked, | in ieee80211_report_ack_skb() |
| 640 | cookie, acked, | in ieee80211_report_ack_skb() |
| 674 | if (!acked) | in ieee80211_handle_smps_status() |
| 701 | bool acked) | in ieee80211_handle_teardown_ttlm_status() argument |
| 706 | if (!acked) | in ieee80211_handle_teardown_ttlm_status() |
| 726 | acked = false; | in ieee80211_report_used_skb() |
| 758 | if (!acked) { | in ieee80211_report_used_skb() |
| 772 | acked); | in ieee80211_report_used_skb() |
| 962 | bool acked; | in __ieee80211_tx_status() local |
| [all …] |
|
| mesh_ps.c |
| 544 | bool tx, bool acked) | in ieee80211_mpsp_trigger_process() argument |
| 550 | if (rspi && acked) | in ieee80211_mpsp_trigger_process() |
| 555 | else if (acked && | in ieee80211_mpsp_trigger_process() |
|
| mesh.h |
| 298 | bool tx, bool acked); |
|
| /net/tipc/ |
| link.c |
| 203 | u16 acked; | member |
| 259 | u16 acked, u16 gap, |
| 439 | return l->acked; | in tipc_link_acked() |
| 993 | l->acked = 0; | in tipc_link_reset() |
| 1536 | u16 acked, u16 gap, | in tipc_link_advance_transmq() argument |
| 1555 | trace_tipc_link_retrans(r, acked + 1, acked + gap, &l->transmq); | in tipc_link_advance_transmq() |
| 1665 | acked = end; | in tipc_link_advance_transmq() |
| 1678 | r->acked = acked; | in tipc_link_advance_transmq() |
| 2481 | if (less(acked, r->acked) || (acked == r->acked && !gap && !ga)) | in tipc_link_bc_ack_rcv() |
| 2505 | u16 from = acked + 1; | in tipc_link_bc_nack_rcv() |
| [all …] |
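In TIPC, `acked` is a 16-bit link sequence number rather than a packet count: tipc_link_advance_transmq() releases everything up to `acked` from the transmit queue and retransmits the reported gap (`acked + 1` through `acked + gap`), and tipc_link_bc_ack_rcv() ignores acknowledgments that are behind the state already recorded in `r->acked`. The sketch below models the wrap-around comparison and the stale-ACK check; the `seq_less()` definition is an assumed conventional serial-number comparison, not copied from TIPC's headers.

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed wrap-aware "older than" test on 16-bit sequence numbers,
 * in the spirit of TIPC's less(); illustrative only. */
static bool seq_less(uint16_t left, uint16_t right)
{
	return (int16_t)(left - right) < 0;
}

/* Sketch of the stale-ACK check visible in tipc_link_bc_ack_rcv():
 * drop the ACK if it is behind what is already recorded, or if it
 * repeats the recorded state without reporting any gap. */
static bool model_bc_ack_is_stale(uint16_t acked, uint16_t recorded_acked,
				  uint16_t gap, bool has_gap_blocks)
{
	return seq_less(acked, recorded_acked) ||
	       (acked == recorded_acked && !gap && !has_gap_blocks);
}

int main(void)
{
	printf("%d\n", model_bc_ack_is_stale(0xfffe, 2, 0, false)); /* stale: behind 2 after wrap */
	printf("%d\n", model_bc_ack_is_stale(5, 2, 0, false));      /* fresh */
	return 0;
}
```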
|
| link.h |
| 147 | int tipc_link_bc_ack_rcv(struct tipc_link *l, u16 acked, u16 gap, |
|
| bcast.c |
| 474 | u16 acked = msg_bcast_ack(hdr); | in tipc_bcast_ack_rcv() local |
| 484 | tipc_link_bc_ack_rcv(l, acked, 0, NULL, &xmitq, NULL); | in tipc_bcast_ack_rcv() |
|
| /net/rxrpc/ |
| recvmsg.c |
| 117 | int acked; | in rxrpc_rotate_rx_window() local |
| 142 | acked = atomic_add_return(call->rx_consumed - old_consumed, | in rxrpc_rotate_rx_window() |
| 144 | if (acked > 8 && | in rxrpc_rotate_rx_window() |
|