| /net/rxrpc/ |
| conn_event.c |
    rxrpc_conn_retransmit_call():
          97  struct rxrpc_ackpacket ack;  (member)
         109  &pkt.ack, sizeof(pkt.ack)) < 0)
         111  if (pkt.ack.reason == RXRPC_ACK_PING_RESPONSE)
         168  pkt.ack.bufferSpace = 0;
         171  pkt.ack.previousPacket = htonl(chan->last_seq);
         174  pkt.ack.nAcks = 0;
         181  iov[0].iov_len += sizeof(pkt.ack);
         182  len += sizeof(pkt.ack) + 3 + sizeof(trailer);
         186  ntohl(pkt.ack.firstPacket),
         187  ntohl(pkt.ack.serial),
    [all …]
|
| io_thread.c |
    rxrpc_extract_header():
         147  struct rxrpc_ackpacket ack;  (local)
         167  if (skb_copy_bits(skb, sizeof(whdr), &ack, sizeof(ack)) < 0)
         169  sp->ack.first_ack = ntohl(ack.firstPacket);
         170  sp->ack.prev_ack = ntohl(ack.previousPacket);
         171  sp->ack.acked_serial = ntohl(ack.serial);
         172  sp->ack.reason = ack.reason;
         173  sp->ack.nr_acks = ack.nAcks;
    rxrpc_input_packet_on_conn():
         383  after_eq(sp->ack.acked_serial, conn->pmtud_probe))
         384  rxrpc_input_probe_for_pmtud(conn, sp->ack.acked_serial, false);
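The io_thread.c hits show the receive side of the rxrpc ACK trailer: the wire structure is copied out of the skb and its big-endian fields (firstPacket, previousPacket, serial) are converted with ntohl() into host-order per-packet state. A minimal userspace sketch of that step follows; the struct layouts, field widths and helper names are simplified stand-ins, not the kernel's exact definition of struct rxrpc_ackpacket.

```c
/* Sketch only: a simplified stand-in for the rxrpc ACK trailer, using the
 * field names visible in the listing above. */
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct ack_wire {
	uint16_t bufferSpace;            /* all multi-byte fields big-endian */
	uint16_t maxSkew;
	uint32_t firstPacket;
	uint32_t previousPacket;
	uint32_t serial;
	uint8_t  reason;
	uint8_t  nAcks;
} __attribute__((packed));

struct ack_host {                        /* host-order copy, like sp->ack */
	uint32_t first_ack;
	uint32_t prev_ack;
	uint32_t acked_serial;
	uint8_t  reason;
	uint8_t  nr_acks;
};

/* Mirror of the io_thread.c pattern: copy the trailer out of the packet,
 * then byte-swap each multi-byte field exactly once. */
static int extract_ack(const void *pkt, size_t len, struct ack_host *out)
{
	struct ack_wire wire;

	if (len < sizeof(wire))
		return -1;                       /* truncated packet */
	memcpy(&wire, pkt, sizeof(wire));        /* skb_copy_bits() stand-in */

	out->first_ack    = ntohl(wire.firstPacket);
	out->prev_ack     = ntohl(wire.previousPacket);
	out->acked_serial = ntohl(wire.serial);
	out->reason       = wire.reason;         /* single bytes: no swap */
	out->nr_acks      = wire.nAcks;
	return 0;
}

int main(void)
{
	struct ack_wire wire = {
		.firstPacket    = htonl(100),
		.previousPacket = htonl(99),
		.serial         = htonl(42),
		.reason         = 1,
		.nAcks          = 4,
	};
	struct ack_host host;

	if (extract_ack(&wire, sizeof(wire), &host) == 0)
		printf("first=%u serial=%u nr_acks=%u\n",
		       (unsigned)host.first_ack,
		       (unsigned)host.acked_serial,
		       (unsigned)host.nr_acks);
	return 0;
}
```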
|
| output.c |
    rxrpc_alloc_ack():
          81  struct rxrpc_ackpacket *ack;  (local)
         101  ack = buf + sizeof(*whdr);
    rxrpc_fill_out_ack():
         186  ack->bufferSpace = 0;
         187  ack->maxSkew = 0;
         188  ack->firstPacket = htonl(window);
         191  ack->reason = ack_reason;
         192  ack->nAcks = wtop - window;
         201  kv[1].iov_len = ack->nAcks;
         215  ack->reason = RXRPC_ACK_IDLE;
    rxrpc_send_ack_packet():
         268  ntohl(ack->firstPacket),
    [all …]
|
| input.c |
    rxrpc_input_soft_acks():
         968  rxrpc_seq_t lowest_nak = seq + sp->ack.nr_acks;
         971  _enter("%x,%x,%u", tq->qbase, seq, sp->ack.nr_acks);
         976  for (unsigned int i = 0; i < sp->ack.nr_acks; i++) {
    rxrpc_input_ack():
        1069  first_soft_ack = sp->ack.first_ack;
        1070  prev_pkt = sp->ack.prev_ack;
        1071  nr_acks = sp->ack.nr_acks;
        1073  summary.acked_serial = sp->ack.acked_serial;
        1074  summary.ack_reason = (sp->ack.reason < RXRPC_ACK__INVALID ?
        1075  sp->ack.reason : RXRPC_ACK__INVALID);
|
| /net/netfilter/ |
| nf_conntrack_proto_tcp.c |
    tcp_in_window():
         537  ack -= receiver_offset;
         548  if (!tcph->ack)
         596  if (!(tcph->ack)) {
         603  && (ack == 0)) {
         674  swin = win + (sack - ack);
         681  if (tcph->ack) {
         683  sender->td_maxack = ack;
         686  sender->td_maxack = ack;
         698  if (ack == receiver->td_end)
         705  state->last_ack == ack &&
    [all …]
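tcp_in_window() validates sequence and acknowledgment numbers against per-direction state, including the highest ACK seen so far (td_maxack in the fragments above), and the comparisons are done in 32-bit sequence space so wraparound does not break the bookkeeping. Below is a hedged sketch of that update step; the struct and helper names are illustrative, not the conntrack data structures.

```c
#include <stdbool.h>
#include <stdint.h>

/* Wraparound-safe "a is after b" in 32-bit sequence space, the same trick
 * the kernel's after()/before() macros rely on. */
static bool seq_after(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) > 0;
}

struct dir_state {
	uint32_t td_maxack;      /* highest ACK value seen from this sender */
	bool     seen_ack;
};

/* Sketch of the td_maxack bookkeeping: take the first ACK as-is, then only
 * ever move the mark forward, even across a 32-bit wrap. */
static void note_ack(struct dir_state *s, uint32_t ack)
{
	if (!s->seen_ack) {
		s->td_maxack = ack;
		s->seen_ack = true;
	} else if (seq_after(ack, s->td_maxack)) {
		s->td_maxack = ack;
	}
}

int main(void)
{
	struct dir_state s = { 0 };

	note_ack(&s, 4000000000u);   /* near the top of the sequence space */
	note_ack(&s, 5);             /* wrapped around, still counts as newer */
	return s.td_maxack == 5 ? 0 : 1;
}
```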
|
| nfnetlink.c |
    nfnetlink_rcv_batch():
         460  goto ack;
         474  goto ack;
         482  goto ack;
         488  goto ack;
         493  goto ack;
         514  goto ack;
         522  goto ack;
         535  ack:
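nfnetlink_rcv_batch() processes a batch of netlink messages and funnels every outcome that must be reported back to the sender through a single ack: label, which is why the identifier shows up here only as a goto target. A small illustrative sketch of that control-flow shape; the function, error value and printf stand in for the real batch processing and netlink ACK emission.

```c
#include <stdbool.h>
#include <stdio.h>

/* Sketch of the batch control flow: each message is processed and every
 * outcome that must be reported jumps to one shared "ack" label. */
static int process_batch_msg(int msg, bool ack_requested)
{
	int err = 0;

	if (msg < 0) {
		err = -22;              /* malformed message, EINVAL-style */
		goto ack;
	}
	if (msg == 0)
		goto ack;               /* nothing to do, still acknowledged */

	/* ... normal message processing would happen here ... */

ack:
	if (err || ack_requested)
		printf("ack msg=%d err=%d\n", msg, err);
	return err;
}

int main(void)
{
	process_batch_msg(-1, false);   /* error path: always reported */
	process_batch_msg(7, true);     /* success, ACK explicitly requested */
	return 0;
}
```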
|
| nf_synproxy_core.c |
    ipv4_synproxy_hook():
         678  if (!th->syn || th->ack ||
         694  if (!th->syn && th->ack &&
         717  if (!th->syn || !th->ack)
    ipv6_synproxy_hook():
        1101  if (!th->syn || th->ack ||
        1117  if (!th->syn && th->ack &&
        1140  if (!th->syn || !th->ack)
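Both synproxy hooks classify TCP segments purely by flag combination: a bare SYN opens the proxied handshake, SYN+ACK is the backend's reply, and an ACK with SYN clear completes it. A compact sketch of the same three-way classification on a raw flags byte; the flag constants, enum and helper are illustrative rather than the kernel's struct tcphdr bitfields.

```c
#include <stdint.h>
#include <stdio.h>

#define TH_FIN 0x01
#define TH_SYN 0x02
#define TH_RST 0x04
#define TH_ACK 0x10

enum seg_kind { SEG_SYN, SEG_SYNACK, SEG_ACK, SEG_OTHER };

/* Mirror of the flag tests in the hooks above: the three interesting cases
 * are a bare SYN, a SYN+ACK, and an ACK with SYN clear. */
static enum seg_kind classify(uint8_t flags)
{
	if ((flags & TH_SYN) && !(flags & (TH_ACK | TH_RST | TH_FIN)))
		return SEG_SYN;
	if ((flags & TH_SYN) && (flags & TH_ACK))
		return SEG_SYNACK;
	if (!(flags & TH_SYN) && (flags & TH_ACK))
		return SEG_ACK;
	return SEG_OTHER;
}

int main(void)
{
	printf("%d %d %d\n", classify(TH_SYN),
	       classify(TH_SYN | TH_ACK), classify(TH_ACK));
	return 0;
}
```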
|
| /net/ipv4/ |
| tcp_vegas.c |
    tcp_vegas_cong_avoid():
         165  static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 acked)  (argument)
         171  tcp_reno_cong_avoid(sk, ack, acked);
         175  if (after(ack, vegas->beg_snd_nxt)) {
         196  tcp_reno_cong_avoid(sk, ack, acked);
|
| tcp_veno.c |
    tcp_veno_cong_avoid():
         119  static void tcp_veno_cong_avoid(struct sock *sk, u32 ack, u32 acked)  (argument)
         125  tcp_reno_cong_avoid(sk, ack, acked);
         138  tcp_reno_cong_avoid(sk, ack, acked);
|
| tcp_input.c |
    tcp_snd_sne_update():
        3517  if (ao && ack < tp->snd_una) {
    tcp_snd_una_update():
        3527  u32 delta = ack - tp->snd_una;
        3531  tcp_snd_sne_update(tp, ack);
        3532  tp->snd_una = ack;
    tcp_ack_update_window():
        3601  tcp_snd_una_update(tp, ack);
    tcp_ack():
        3849  if (after(ack, tp->snd_nxt))
        3852  if (after(ack, prior_snd_una)) {
        3879  tcp_snd_una_update(tp, ack);
    tcp_disordered_ack_check():
        4365  if (ack != tp->snd_una)
    tcp_rcv_synsent_state_process():
        6394  if (th->ack) {
    [all …]
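The tcp_input.c hits centre on cumulative ACK handling: after() comparisons decide whether an ACK is inside the window, and tcp_snd_una_update() only moves snd_una forward for genuinely new acknowledgments. A simplified sketch of that guard logic follows; the state struct and function names are stand-ins, not the kernel's tcp_sock or tcp_ack().

```c
#include <stdbool.h>
#include <stdint.h>

/* Same idea as the kernel's after(): valid across 32-bit wraparound. */
static bool seq_after(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) > 0;
}

struct snd_state {
	uint32_t snd_una;    /* oldest unacknowledged sequence number */
	uint32_t snd_nxt;    /* next sequence number to be sent */
};

/* Sketch of the cumulative-ACK guards: reject an ACK for data beyond
 * snd_nxt, ignore old or duplicate ACKs, otherwise advance snd_una. */
static bool process_ack(struct snd_state *s, uint32_t ack)
{
	if (seq_after(ack, s->snd_nxt))
		return false;               /* acknowledges unsent data */
	if (!seq_after(ack, s->snd_una))
		return false;               /* old or duplicate ACK */
	s->snd_una = ack;                   /* new data acknowledged */
	return true;
}

int main(void)
{
	struct snd_state s = { .snd_una = 1000, .snd_nxt = 2000 };

	return process_ack(&s, 1500) && s.snd_una == 1500 ? 0 : 1;
}
```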
|
| tcp_hybla.c |
    hybla_cong_avoid():
          90  static void hybla_cong_avoid(struct sock *sk, u32 ack, u32 acked)  (argument)
         107  tcp_reno_cong_avoid(sk, ack, acked);
|
| tcp_yeah.c |
    tcp_yeah_cong_avoid():
          58  static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack, u32 acked)  (argument)
         103  if (after(ack, yeah->vegas.beg_snd_nxt)) {
|
| tcp_cdg.c |
    tcp_cdg_cong_avoid():
         262  static void tcp_cdg_cong_avoid(struct sock *sk, u32 ack, u32 acked)  (argument)
         272  if (after(ack, ca->rtt_seq) && ca->rtt.v64) {
         293  tcp_reno_cong_avoid(sk, ack, acked);
|
| tcp_lp.c |
    tcp_lp_cong_avoid():
         121  static void tcp_lp_cong_avoid(struct sock *sk, u32 ack, u32 acked)  (argument)
         126  tcp_reno_cong_avoid(sk, ack, acked);
|
| tcp_illinois.c |
    tcp_illinois_cong_avoid():
         260  static void tcp_illinois_cong_avoid(struct sock *sk, u32 ack, u32 acked)  (argument)
         265  if (after(ack, ca->end_seq))
|
| bpf_tcp_ca.c |
    bpf_tcp_ca_cong_avoid():
         263  static void bpf_tcp_ca_cong_avoid(struct sock *sk, u32 ack, u32 acked)  (argument)
    bpf_tcp_ca_cong_control():
         288  static void bpf_tcp_ca_cong_control(struct sock *sk, u32 ack, int flag,  (argument)
|
| tcp_scalable.c |
    tcp_scalable_cong_avoid():
          18  static void tcp_scalable_cong_avoid(struct sock *sk, u32 ack, u32 acked)  (argument)
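Every congestion-control module listed above implements the same congestion-avoidance hook, taking the socket, the cumulative ACK value and the number of newly acked segments, and several of them (Vegas, Veno, Hybla, CDG, LP) delegate to the Reno routine when their own heuristics cannot run. A userspace-style sketch of that callback-plus-fallback pattern; the types and growth rules are illustrative, not the kernel's tcp_congestion_ops interface.

```c
#include <stdint.h>
#include <stdio.h>

struct fake_sock { uint32_t cwnd; int rtt_samples; };   /* toy socket state */

/* Shape of the hook shared by the modules above: (socket, ack, acked). */
typedef void (*cong_avoid_fn)(struct fake_sock *sk, uint32_t ack, uint32_t acked);

/* Stand-in for tcp_reno_cong_avoid(): plain additive increase. */
static void reno_cong_avoid(struct fake_sock *sk, uint32_t ack, uint32_t acked)
{
	(void)ack;
	sk->cwnd += acked;
}

/* Sketch of the Vegas/Veno-style pattern: delegate to the Reno routine
 * whenever the module's own measurements are not usable yet. */
static void fancy_cong_avoid(struct fake_sock *sk, uint32_t ack, uint32_t acked)
{
	if (sk->rtt_samples < 3) {            /* not enough RTT data yet */
		reno_cong_avoid(sk, ack, acked);
		return;
	}
	sk->cwnd += acked / 2;                /* placeholder "smarter" growth */
}

int main(void)
{
	struct fake_sock sk = { .cwnd = 10, .rtt_samples = 0 };
	cong_avoid_fn hook = fancy_cong_avoid;

	hook(&sk, 1000, 2);                   /* falls back to the Reno rule */
	sk.rtt_samples = 5;
	hook(&sk, 2000, 2);                   /* uses the module's own rule */
	printf("cwnd=%u\n", (unsigned)sk.cwnd);   /* prints cwnd=13 */
	return 0;
}
```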
|
| /net/sctp/ |
| associola.c |
    sctp_assoc_free_asconf_acks():
        1648  struct sctp_chunk *ack;  (local)
        1653  list_del_init(&ack->transmitted_list);
        1654  sctp_chunk_free(ack);
    sctp_assoc_clean_asconf_ack_cache():
        1661  struct sctp_chunk *ack;  (local)
        1669  if (ack->subh.addip_hdr->serial ==
        1673  list_del_init(&ack->transmitted_list);
        1674  sctp_chunk_free(ack);
    sctp_assoc_lookup_asconf_ack():
        1683  struct sctp_chunk *ack;  (local)
        1689  if (sctp_chunk_pending(ack))
        1692  sctp_chunk_hold(ack);
    [all …]
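The associola.c references cover the association's cache of ASCONF-ACK chunks: one helper frees the whole cache, one trims entries the peer has already processed, and one looks an entry up for retransmission. Below is a hedged sketch of the trimming walk over a plain singly linked list; the node type, serial comparison and free() are stand-ins for the kernel's sctp_chunk handling, which uses list_del_init() and sctp_chunk_free() as the fragments show.

```c
#include <stdint.h>
#include <stdlib.h>

/* Stand-in for the cached ASCONF-ACK chunks kept on the association. */
struct cached_ack {
	uint32_t serial;               /* serial number this ACK answers */
	struct cached_ack *next;
};

/* Sketch of the intent of sctp_assoc_clean_asconf_ack_cache(): once the
 * peer has caught up to a given serial, cached ACKs for earlier serials
 * will never be asked for again, so they are unlinked and freed; later
 * ones stay cached in case the peer repeats a request. */
static void clean_ack_cache(struct cached_ack **head, uint32_t peer_serial)
{
	struct cached_ack **pp = head;

	while (*pp) {
		struct cached_ack *cur = *pp;

		if ((int32_t)(cur->serial - peer_serial) < 0) {
			*pp = cur->next;   /* unlink, like list_del_init() */
			free(cur);         /* drop the cached chunk */
		} else {
			pp = &cur->next;
		}
	}
}

int main(void)
{
	struct cached_ack *head = NULL;
	uint32_t serials[] = { 3, 2, 1 };      /* push newest first */

	for (int i = 0; i < 3; i++) {
		struct cached_ack *n = malloc(sizeof(*n));

		n->serial = serials[i];
		n->next = head;
		head = n;                      /* list ends up as 1, 2, 3 */
	}
	clean_ack_cache(&head, 2);             /* serial 1 can be dropped */
	return (head && head->serial == 2) ? 0 : 1;
}
```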
|
| /net/ipv4/netfilter/ |
| ipt_SYNPROXY.c |
    synproxy_tg4():
          31  if (th->syn && !(th->ack || th->fin || th->rst)) {
          51  } else if (th->ack && !(th->fin || th->rst || th->syn)) {
|
| /net/ipv6/netfilter/ |
| ip6t_SYNPROXY.c |
    synproxy_tg6():
          31  if (th->syn && !(th->ack || th->fin || th->rst)) {
          52  } else if (th->ack && !(th->fin || th->rst || th->syn)) {
|
| /net/tipc/ |
| link.c |
    tipc_link_remove_bc_peer():
         379  u16 ack = snd_l->snd_nxt - 1;  (local)
    tipc_link_xmit():
        1021  u16 ack = l->rcv_nxt - 1;  (local)
        1065  msg_set_ack(hdr, ack);
    tipc_link_advance_backlog():
        1155  u16 ack = l->rcv_nxt - 1;  (local)
        1180  msg_set_ack(hdr, ack);
    __tipc_build_gap_ack_blks():
        1479  gacks[n].ack = htons(seqno);
    tipc_link_advance_transmq():
        1548  u16 ack = l->rcv_nxt - 1;  (local)
        1632  msg_set_ack(hdr, ack);
    tipc_link_proto_rcv():
        2188  u16 ack = msg_ack(hdr);  (local)
    tipc_link_build_bc_proto_msg():
        2342  u16 ack = l->rcv_nxt - 1;  (local)
    [all …]
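Almost every link.c hit is the same idiom: the acknowledgment carried on an outgoing message is the last packet received in sequence, i.e. rcv_nxt - 1, written into the header with msg_set_ack(). A minimal sketch of that piggy-backing step; the header struct and setter below are illustrative stand-ins for the TIPC message helpers.

```c
#include <stdint.h>
#include <stdio.h>

struct link_state { uint16_t rcv_nxt; };     /* next sequence we expect */
struct msg_header { uint16_t ack; uint16_t seqno; };

/* Stand-in for msg_set_ack(): record the ack value in the header. */
static void msg_set_ack(struct msg_header *hdr, uint16_t ack)
{
	hdr->ack = ack;
}

/* Sketch of the link.c idiom: every outgoing message piggy-backs an
 * acknowledgment of the last packet received in sequence. */
static void stamp_outgoing(const struct link_state *l, struct msg_header *hdr)
{
	uint16_t ack = l->rcv_nxt - 1;           /* last in-order packet seen */

	msg_set_ack(hdr, ack);
}

int main(void)
{
	struct link_state l = { .rcv_nxt = 42 };
	struct msg_header hdr = { 0 };

	stamp_outgoing(&l, &hdr);
	printf("ack=%u\n", (unsigned)hdr.ack);   /* prints 41 */
	return 0;
}
```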
|
| group.c |
    tipc_group_update_bc_members():
         395  void tipc_group_update_bc_members(struct tipc_group *grp, int len, bool ack)  (argument)
         412  if (ack)
    tipc_group_filter_msg():
         500  bool ack, deliver, update, leave = false;  (local)
         533  ack = false;
         549  ack = msg_grp_bc_ack_req(hdr);
         570  if (ack)
|
| group.h |
          68  void tipc_group_update_bc_members(struct tipc_group *grp, int len, bool ack);
|
| /net/rds/ |
| tcp_send.c |
    rds_tcp_is_acked():
         172  static int rds_tcp_is_acked(struct rds_message *rm, uint64_t ack)  (argument)
         176  return (__s32)((u32)rm->m_ack_seq - (u32)ack) < 0;
|
| send.c |
    rds_send_is_acked():
         485  static inline int rds_send_is_acked(struct rds_message *rm, u64 ack,  (argument)
         489  return is_acked(rm, ack);
         490  return be64_to_cpu(rm->m_inc.i_hdr.h_sequence) <= ack;
    rds_send_path_drop_acked():
         685  void rds_send_path_drop_acked(struct rds_conn_path *cp, u64 ack,  (argument)
         695  if (!rds_send_is_acked(rm, ack, is_acked))
    rds_send_drop_acked():
         713  void rds_send_drop_acked(struct rds_connection *conn, u64 ack,  (argument)
         717  rds_send_path_drop_acked(&conn->c_path[0], ack, is_acked);
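rds_send_is_acked() leaves the definition of "acknowledged" to the transport: if the caller supplied an is_acked callback it is used, otherwise the message's header sequence number is compared against the acknowledged value; rds_tcp_is_acked() above is exactly such a callback, doing a wraparound-safe 32-bit comparison. A sketch of that callback-or-default shape; the message struct and function names are simplified stand-ins for the rds types.

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct fake_msg { uint64_t seq; };     /* stand-in for rds_message */

/* Optional transport-specific test, like the is_acked hook in rds. */
typedef bool (*is_acked_fn)(const struct fake_msg *msg, uint64_t ack);

/* Sketch of rds_send_is_acked(): prefer the transport's own notion of
 * "acknowledged", otherwise fall back to a plain sequence comparison. */
static bool msg_is_acked(const struct fake_msg *msg, uint64_t ack,
			 is_acked_fn is_acked)
{
	if (is_acked)
		return is_acked(msg, ack);
	return msg->seq <= ack;
}

/* Example hook in the spirit of rds_tcp_is_acked(): a wraparound-safe
 * comparison on the low 32 bits of the two values. */
static bool tcp_style_is_acked(const struct fake_msg *msg, uint64_t ack)
{
	return (int32_t)((uint32_t)msg->seq - (uint32_t)ack) < 0;
}

int main(void)
{
	struct fake_msg m = { .seq = 10 };

	return msg_is_acked(&m, 12, NULL) &&
	       msg_is_acked(&m, 12, tcp_style_is_acked) ? 0 : 1;
}
```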
|