Lines Matching refs:seq (references to the identifier seq in net/ipv4/tcp_ipv4.c)

190 u32 seq = tcptw->tw_snd_nxt + 65535 + 2; in tcp_twsk_unique() local
192 if (!seq) in tcp_twsk_unique()
193 seq = 1; in tcp_twsk_unique()
194 WRITE_ONCE(tp->write_seq, seq); in tcp_twsk_unique()
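
The hits above are the TIME-WAIT reuse path: tcp_twsk_unique() seeds the new connection's write_seq from the old connection's tw_snd_nxt plus 65535 + 2, remapping a wrapped 0 to 1 so a zero sequence is never written. A minimal userspace sketch of that arithmetic follows; the helper name and main() are illustrative, not kernel code.

#include <stdint.h>
#include <stdio.h>

/* Sketch of the write_seq choice seen in tcp_twsk_unique(): step past
 * the old connection's maximum window (65535) plus 2, and map a
 * wrapped 0 to 1.
 */
static uint32_t pick_reuse_write_seq(uint32_t tw_snd_nxt)
{
	uint32_t seq = tw_snd_nxt + 65535 + 2;	/* u32 wraps naturally */

	if (!seq)
		seq = 1;
	return seq;
}

int main(void)
{
	printf("%u\n", pick_reuse_write_seq(1000u));		/* 66537 */
	printf("%u\n", pick_reuse_write_seq(0xFFFEFFFFu));	/* wraps to 0 -> 1 */
	return 0;
}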
417 void tcp_req_err(struct sock *sk, u32 seq, bool abort) in tcp_req_err() argument
425 if (seq != tcp_rsk(req)->snt_isn) { in tcp_req_err()
442 void tcp_ld_RTO_revert(struct sock *sk, u32 seq) in tcp_ld_RTO_revert() argument
453 if (seq != tp->snd_una || !icsk->icsk_retransmits || in tcp_ld_RTO_revert()
505 u32 seq, snd_una; in tcp_v4_err() local
522 seq = ntohl(th->seq); in tcp_v4_err()
524 tcp_req_err(sk, seq, type == ICMP_PARAMETERPROB || in tcp_v4_err()
563 !between(seq, snd_una, tp->snd_nxt)) { in tcp_v4_err()
607 tcp_ld_RTO_revert(sk, seq); in tcp_v4_err()
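
These are the ICMP error handlers: tcp_v4_err() recovers the sequence number from the TCP header quoted in the ICMP payload (ntohl(th->seq)) and ignores the report unless that value lies between snd_una and snd_nxt, i.e. it refers to data still in flight; tcp_req_err() applies the same idea to a request socket by comparing against snt_isn. The window test is the kernel's wrap-safe between() macro from include/net/tcp.h; below is a userspace mirror of its arithmetic, with function and variable names local to the sketch.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Wrap-safe "is seq within [lo, hi]" on the 32-bit sequence circle,
 * mirroring the kernel's between(seq, snd_una, snd_nxt) check.
 */
static bool seq_between(uint32_t seq, uint32_t lo, uint32_t hi)
{
	return hi - lo >= seq - lo;
}

int main(void)
{
	uint32_t snd_una = 0xFFFFFF00u;	/* window straddles the wrap point */
	uint32_t snd_nxt = 0x00000100u;

	printf("%d\n", seq_between(0x00000010u, snd_una, snd_nxt));	/* 1: in flight */
	printf("%d\n", seq_between(0x00000200u, snd_una, snd_nxt));	/* 0: stale */
	return 0;
}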
701 if (tcp_ao_prepare_reset(sk, skb, aoh, l3index, ntohl(reply->seq), in tcp_v4_ao_sign_reset()
781 rep.th.seq = th->ack_seq; in tcp_v4_send_reset()
784 rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin + in tcp_v4_send_reset()
938 struct sk_buff *skb, u32 seq, u32 ack, in tcp_v4_send_ack() argument
971 rep.th.seq = htonl(seq); in tcp_v4_send_ack()
1112 u32 seq = (sk->sk_state == TCP_LISTEN) ? tcp_rsk(req)->snt_isn + 1 : local
1169 tcp_v4_send_ack(sk, skb, seq,
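
Lines 701 to 1169 are the reply builders. tcp_v4_send_reset() mirrors the offending segment: the RST's seq is taken from the incoming ack_seq, and its ack_seq advances the incoming seq by one for SYN, one for FIN, plus the payload length. tcp_v4_send_ack() takes the seq to emit as an argument, and its request-socket caller picks snt_isn + 1 while the listener still owns the request. A small sketch of the RST ack_seq arithmetic follows; the kernel derives the payload length from skb->len minus the header length (th->doff << 2), which the sketch takes as a plain parameter.

#include <stdint.h>
#include <stdio.h>

/* ACK number carried by a RST answering an incoming segment: the
 * peer's seq advanced by SYN, FIN, and payload bytes, as in
 * tcp_v4_send_reset().
 */
static uint32_t rst_ack_seq(uint32_t peer_seq, unsigned int syn,
			    unsigned int fin, uint32_t payload_len)
{
	return peer_seq + syn + fin + payload_len;
}

int main(void)
{
	printf("%u\n", rst_ack_seq(1000u, 1, 0, 0));	/* bare SYN: 1001 */
	printf("%u\n", rst_ack_seq(5000u, 0, 1, 100));	/* 100B + FIN: 5101 */
	return 0;
}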
2062 if (TCP_SKB_CB(tail)->end_seq != TCP_SKB_CB(skb)->seq ||
2180 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
2181 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
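
Lines 2062 to 2181 are receive-path bookkeeping: each skb's control block records the segment's span, with seq read off the wire and end_seq adding SYN, FIN, and the payload length, so the backlog coalescing test at line 2062 can simply compare the tail's end_seq with the newcomer's seq. A self-contained sketch of that bookkeeping; the struct and function names are invented for the example.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Per-segment span in the spirit of TCP_SKB_CB(skb)->seq/end_seq:
 * end_seq counts SYN, FIN, and payload, so two segments are contiguous
 * exactly when the tail's end_seq equals the newcomer's seq.
 */
struct seg {
	uint32_t seq;
	uint32_t end_seq;
};

static struct seg fill_seg(uint32_t wire_seq, unsigned int syn,
			   unsigned int fin, uint32_t payload_len)
{
	struct seg s = { .seq = wire_seq,
			 .end_seq = wire_seq + syn + fin + payload_len };
	return s;
}

static bool contiguous(const struct seg *tail, const struct seg *next)
{
	return tail->end_seq == next->seq;
}

int main(void)
{
	struct seg a = fill_seg(1000, 0, 0, 100);	/* covers [1000, 1100) */
	struct seg b = fill_seg(1100, 0, 0, 50);	/* starts where a ends */

	printf("%d\n", contiguous(&a, &b));	/* 1 */
	return 0;
}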
2603 static unsigned short seq_file_family(const struct seq_file *seq);
2605 static bool seq_sk_match(struct seq_file *seq, const struct sock *sk) argument
2607 unsigned short family = seq_file_family(seq);
2611 net_eq(sock_net(sk), seq_file_net(seq)));
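
From line 2603 on, the hits change meaning: seq is now a struct seq_file pointer, the /proc iterator context. seq_sk_match() keeps only sockets whose family matches the seq_file's family (seq_file_family() at the bottom of the file returns AF_UNSPEC for the BPF iterator, which matches everything) and whose network namespace matches seq_file_net(seq). A simplified userspace analogue of that filter; the enum and struct names are illustrative.

#include <stdbool.h>
#include <stdio.h>

/* Simplified analogue of seq_sk_match(): report only sockets whose
 * family matches the seq_file's family (UNSPEC matches all, as used by
 * the BPF iterator) and whose netns matches.
 */
enum fam { FAM_UNSPEC, FAM_INET, FAM_INET6 };

struct fake_sock {
	enum fam family;
	int netns_id;
};

static bool sk_matches(enum fam seq_family, int seq_netns,
		       const struct fake_sock *sk)
{
	return (seq_family == FAM_UNSPEC || seq_family == sk->family) &&
	       seq_netns == sk->netns_id;
}

int main(void)
{
	struct fake_sock v4 = { FAM_INET, 1 }, v6 = { FAM_INET6, 1 };

	printf("%d %d\n", sk_matches(FAM_INET, 1, &v4),		/* 1 */
	       sk_matches(FAM_INET, 1, &v6));			/* 0 */
	printf("%d\n", sk_matches(FAM_UNSPEC, 1, &v6));		/* 1 */
	return 0;
}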
2617 static void *listening_get_first(struct seq_file *seq) argument
2619 struct inet_hashinfo *hinfo = seq_file_net(seq)->ipv4.tcp_death_row.hashinfo;
2620 struct tcp_iter_state *st = seq->private;
2634 if (seq_sk_match(seq, sk))
2648 static void *listening_get_next(struct seq_file *seq, void *cur) argument
2650 struct tcp_iter_state *st = seq->private;
2661 if (seq_sk_match(seq, sk))
2665 hinfo = seq_file_net(seq)->ipv4.tcp_death_row.hashinfo;
2669 return listening_get_first(seq);
2672 static void *listening_get_idx(struct seq_file *seq, loff_t *pos) argument
2674 struct tcp_iter_state *st = seq->private;
2679 rc = listening_get_first(seq);
2682 rc = listening_get_next(seq, rc);
2698 static void *established_get_first(struct seq_file *seq) argument
2700 struct inet_hashinfo *hinfo = seq_file_net(seq)->ipv4.tcp_death_row.hashinfo;
2701 struct tcp_iter_state *st = seq->private;
2717 if (seq_sk_match(seq, sk))
2726 static void *established_get_next(struct seq_file *seq, void *cur) argument
2728 struct inet_hashinfo *hinfo = seq_file_net(seq)->ipv4.tcp_death_row.hashinfo;
2729 struct tcp_iter_state *st = seq->private;
2739 if (seq_sk_match(seq, sk))
2745 return established_get_first(seq);
2748 static void *established_get_idx(struct seq_file *seq, loff_t pos) argument
2750 struct tcp_iter_state *st = seq->private;
2754 rc = established_get_first(seq);
2757 rc = established_get_next(seq, rc);
2763 static void *tcp_get_idx(struct seq_file *seq, loff_t pos) argument
2766 struct tcp_iter_state *st = seq->private;
2769 rc = listening_get_idx(seq, &pos);
2773 rc = established_get_idx(seq, pos);
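
Lines 2617 to 2773 are the classic seq_file traversal: listening_get_first/next walk the listening hash, established_get_first/next walk the established hash, and tcp_get_idx() stitches them together by consuming the requested position from the listening table first and handing the remainder to the established table. A userspace sketch of that two-phase indexing, with plain arrays standing in for the two hash tables.

#include <stddef.h>
#include <stdio.h>

/* Two-phase lookup in the spirit of tcp_get_idx(): walk the listening
 * table first; if the requested position falls past its end, continue
 * into the established table with the remaining offset.
 */
static const char *get_idx(const char **listening, size_t n_listen,
			   const char **established, size_t n_estab,
			   size_t pos)
{
	if (pos < n_listen)
		return listening[pos];
	pos -= n_listen;
	if (pos < n_estab)
		return established[pos];
	return NULL;	/* past the end of both tables */
}

int main(void)
{
	const char *listening[] = { "L0", "L1" };
	const char *established[] = { "E0", "E1", "E2" };

	printf("%s %s %s\n",
	       get_idx(listening, 2, established, 3, 1),	/* L1 */
	       get_idx(listening, 2, established, 3, 2),	/* E0 */
	       get_idx(listening, 2, established, 3, 4));	/* E2 */
	return 0;
}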
2779 static void *tcp_seek_last_pos(struct seq_file *seq) argument
2781 struct inet_hashinfo *hinfo = seq_file_net(seq)->ipv4.tcp_death_row.hashinfo;
2782 struct tcp_iter_state *st = seq->private;
2792 rc = listening_get_first(seq);
2794 rc = listening_get_next(seq, rc);
2803 rc = established_get_first(seq);
2805 rc = established_get_next(seq, rc);
2813 void *tcp_seq_start(struct seq_file *seq, loff_t *pos) argument
2815 struct tcp_iter_state *st = seq->private;
2819 rc = tcp_seek_last_pos(seq);
2828 rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2836 void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos) argument
2838 struct tcp_iter_state *st = seq->private;
2842 rc = tcp_get_idx(seq, 0);
2848 rc = listening_get_next(seq, v);
2853 rc = established_get_first(seq);
2857 rc = established_get_next(seq, v);
2867 void tcp_seq_stop(struct seq_file *seq, void *v) argument
2869 struct inet_hashinfo *hinfo = seq_file_net(seq)->ipv4.tcp_death_row.hashinfo;
2870 struct tcp_iter_state *st = seq->private;
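
tcp_seq_start/next/stop (lines 2813 to 2870) implement the generic seq_file protocol: start() re-finds the current position (using tcp_seek_last_pos() to avoid rescanning from zero), next() advances the cursor and bumps *pos, and stop() drops whatever bucket lock is still held. Below is a minimal kernel-module sketch of the same start/next/stop/show shape over a static array, exposed as a hypothetical /proc/seq_demo entry; it illustrates the pattern, not the TCP code.

// SPDX-License-Identifier: GPL-2.0
/* Minimal seq_file iterator over a fixed array, showing the
 * start/next/stop/show protocol that tcp_seq_start() and friends
 * implement over the listening and established hash tables.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static const char * const items[] = { "alpha", "beta", "gamma" };

static void *seq_demo_start(struct seq_file *seq, loff_t *pos)
{
	return *pos < ARRAY_SIZE(items) ? (void *)&items[*pos] : NULL;
}

static void *seq_demo_next(struct seq_file *seq, void *v, loff_t *pos)
{
	(*pos)++;
	return *pos < ARRAY_SIZE(items) ? (void *)&items[*pos] : NULL;
}

static void seq_demo_stop(struct seq_file *seq, void *v)
{
	/* tcp_seq_stop() releases the bucket lock here; nothing to do. */
}

static int seq_demo_show(struct seq_file *seq, void *v)
{
	seq_printf(seq, "%s\n", *(const char * const *)v);
	return 0;
}

static const struct seq_operations seq_demo_ops = {
	.start	= seq_demo_start,
	.next	= seq_demo_next,
	.stop	= seq_demo_stop,
	.show	= seq_demo_show,
};

static int __init seq_demo_init(void)
{
	proc_create_seq("seq_demo", 0444, NULL, &seq_demo_ops);
	return 0;
}

static void __exit seq_demo_exit(void)
{
	remove_proc_entry("seq_demo", NULL);
}

module_init(seq_demo_init);
module_exit(seq_demo_exit);
MODULE_LICENSE("GPL");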
2996 static int tcp4_seq_show(struct seq_file *seq, void *v) argument
3001 seq_setwidth(seq, TMPSZ - 1);
3003 seq_puts(seq, " sl local_address rem_address st tx_queue "
3008 st = seq->private;
3011 get_timewait4_sock(v, seq, st->num);
3013 get_openreq4(v, seq, st->num);
3015 get_tcp4_sock(v, seq, st->num);
3017 seq_pad(seq, '\n');
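
tcp4_seq_show() (lines 2996 to 3017) emits one fixed-width record per socket: seq_setwidth() reserves TMPSZ - 1 columns, the per-state helpers print the fields, and seq_pad() fills the rest of the line and appends the newline. A kernel-module sketch of that padding pattern using a hypothetical /proc/padded_demo entry; it is a sketch of the API usage, not part of the kernel tree.

// SPDX-License-Identifier: GPL-2.0
/* Fixed-width record pattern as used by tcp4_seq_show(): reserve a
 * line width with seq_setwidth(), print the fields, then let
 * seq_pad() fill the remainder and append the terminator.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#define DEMO_WIDTH 40

static int padded_demo_show(struct seq_file *seq, void *v)
{
	seq_setwidth(seq, DEMO_WIDTH - 1);
	seq_printf(seq, "%4d: %-15s %08X", 0, "example", 0xDEADBEEF);
	seq_pad(seq, '\n');
	return 0;
}

static int __init padded_demo_init(void)
{
	proc_create_single("padded_demo", 0444, NULL, padded_demo_show);
	return 0;
}

static void __exit padded_demo_exit(void)
{
	remove_proc_entry("padded_demo", NULL);
}

module_init(padded_demo_init);
module_exit(padded_demo_exit);
MODULE_LICENSE("GPL");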
3106 static struct sock *bpf_iter_tcp_resume_listening(struct seq_file *seq) argument
3108 struct inet_hashinfo *hinfo = seq_file_net(seq)->ipv4.tcp_death_row.hashinfo;
3109 struct bpf_tcp_iter_state *iter = seq->private;
3119 sk = listening_get_first(seq);
3129 sk = listening_get_first(seq);
3136 static struct sock *bpf_iter_tcp_resume_established(struct seq_file *seq) argument
3138 struct inet_hashinfo *hinfo = seq_file_net(seq)->ipv4.tcp_death_row.hashinfo;
3139 struct bpf_tcp_iter_state *iter = seq->private;
3149 sk = established_get_first(seq);
3159 sk = established_get_first(seq);
3166 static struct sock *bpf_iter_tcp_resume(struct seq_file *seq) argument
3168 struct bpf_tcp_iter_state *iter = seq->private;
3174 sk = bpf_iter_tcp_resume_listening(seq);
3181 sk = bpf_iter_tcp_resume_established(seq);
3188 static unsigned int bpf_iter_tcp_listening_batch(struct seq_file *seq, argument
3191 struct bpf_tcp_iter_state *iter = seq->private;
3202 if (seq_sk_match(seq, sk)) {
3217 static unsigned int bpf_iter_tcp_established_batch(struct seq_file *seq, argument
3220 struct bpf_tcp_iter_state *iter = seq->private;
3231 if (seq_sk_match(seq, sk)) {
3246 static unsigned int bpf_iter_fill_batch(struct seq_file *seq, argument
3249 struct bpf_tcp_iter_state *iter = seq->private;
3253 return bpf_iter_tcp_listening_batch(seq, start_sk);
3255 return bpf_iter_tcp_established_batch(seq, start_sk);
3258 static void bpf_iter_tcp_unlock_bucket(struct seq_file *seq) argument
3260 struct inet_hashinfo *hinfo = seq_file_net(seq)->ipv4.tcp_death_row.hashinfo;
3261 struct bpf_tcp_iter_state *iter = seq->private;
3270 static struct sock *bpf_iter_tcp_batch(struct seq_file *seq) argument
3272 struct bpf_tcp_iter_state *iter = seq->private;
3277 sk = bpf_iter_tcp_resume(seq);
3281 expected = bpf_iter_fill_batch(seq, &sk);
3286 bpf_iter_tcp_unlock_bucket(seq);
3293 sk = bpf_iter_tcp_resume(seq);
3297 expected = bpf_iter_fill_batch(seq, &sk);
3307 bpf_iter_tcp_unlock_bucket(seq);
3311 expected = bpf_iter_fill_batch(seq, &sk);
3314 bpf_iter_tcp_unlock_bucket(seq);
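
Lines 3106 to 3314 are the BPF iterator's batching machinery: bpf_iter_tcp_resume*() re-finds the bucket and offset the iterator stopped at, bpf_iter_fill_batch() copies every matching socket in the current bucket into a preallocated array while the bucket lock is held, and bpf_iter_tcp_batch() unlocks, regrows the array, and retries when the bucket holds more sockets than expected, so the BPF program later runs without the lock. A userspace sketch of that grab-under-lock, grow-and-retry pattern; the bucket array and struct are stand-ins for the hash chain and iterator state.

#include <stdio.h>
#include <stdlib.h>

/* Sketch of the batching strategy in bpf_iter_tcp_batch(): copy the
 * current bucket into a bounded batch; if the bucket turned out to be
 * bigger than the batch, "unlock", grow, and retry.
 */
struct batch {
	int *items;
	size_t cap;
	size_t len;
};

/* Pretend bucket; in the kernel this is a hash chain read under lock. */
static const int bucket[] = { 10, 11, 12, 13, 14 };
static const size_t bucket_len = sizeof(bucket) / sizeof(bucket[0]);

static size_t fill_batch(struct batch *b)
{
	size_t i;

	b->len = 0;
	for (i = 0; i < bucket_len; i++) {
		if (b->len < b->cap)
			b->items[b->len++] = bucket[i];
	}
	return bucket_len;	/* how many the bucket actually holds */
}

int main(void)
{
	struct batch b = { .items = malloc(2 * sizeof(int)), .cap = 2 };
	size_t expected = fill_batch(&b);

	if (expected > b.cap) {
		/* "unlock", grow to the size just observed, retry */
		int *bigger = realloc(b.items, expected * sizeof(int));

		if (!bigger)
			return 1;
		b.items = bigger;
		b.cap = expected;
		expected = fill_batch(&b);
	}
	printf("batched %zu of %zu\n", b.len, expected);
	free(b.items);
	return 0;
}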
3318 static void *bpf_iter_tcp_seq_start(struct seq_file *seq, loff_t *pos) argument
3324 return bpf_iter_tcp_batch(seq);
3329 static void *bpf_iter_tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos) argument
3331 struct bpf_tcp_iter_state *iter = seq->private;
3351 sk = bpf_iter_tcp_batch(seq);
3361 static int bpf_iter_tcp_seq_show(struct seq_file *seq, void *v) argument
3385 uid = from_kuid_munged(seq_user_ns(seq),
3388 uid = from_kuid_munged(seq_user_ns(seq), sk_uid(sk));
3391 meta.seq = seq;
3402 static void bpf_iter_tcp_seq_stop(struct seq_file *seq, void *v) argument
3404 struct bpf_tcp_iter_state *iter = seq->private;
3409 meta.seq = seq;
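
bpf_iter_tcp_seq_show()/stop() (lines 3361 to 3409) hand each batched socket to the attached BPF program along with a bpf_iter_meta whose seq field is the seq_file, so the program can stream its own text through the same file; the UID is translated with from_kuid_munged() against the seq_file's user namespace. A sketch of the consumer side, a minimal iter/tcp BPF program; it assumes a libbpf plus vmlinux.h build environment, and field access relies on BTF/CO-RE.

// SPDX-License-Identifier: GPL-2.0
/* Minimal sketch of a BPF program attached to the tcp iterator; it
 * writes one line per socket into the seq_file supplied via ctx->meta.
 */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char LICENSE[] SEC("license") = "GPL";

SEC("iter/tcp")
int dump_tcp(struct bpf_iter__tcp *ctx)
{
	struct seq_file *seq = ctx->meta->seq;
	struct sock_common *skc = ctx->sk_common;

	if (!skc)
		return 0;
	BPF_SEQ_PRINTF(seq, "family=%u state=%u\n",
		       skc->skc_family, skc->skc_state);
	return 0;
}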
3426 static unsigned short seq_file_family(const struct seq_file *seq) argument
3432 if (seq->op == &bpf_iter_tcp_seq_ops)
3437 afinfo = pde_data(file_inode(seq->file));