| /net/sctp/ |
| A D | ulpqueue.c |
|   94 | event->ssn = ntohs(chunk->subh.data_hdr->ssn); | in sctp_ulpq_tail_data() |
|  757 | cssn = cevent->ssn; | in sctp_ulpq_retrieve_ordered() |
|  787 | __u16 ssn, cssn; | in sctp_ulpq_store_ordered() local |
|  796 | ssn = event->ssn; | in sctp_ulpq_store_ordered() |
|  800 | cssn = cevent->ssn; | in sctp_ulpq_store_ordered() |
|  817 | cssn = cevent->ssn; | in sctp_ulpq_store_ordered() |
|  833 | __u16 sid, ssn; | in sctp_ulpq_order() local |
|  842 | ssn = event->ssn; | in sctp_ulpq_order() |
|  886 | cssn = cevent->ssn; | in sctp_ulpq_reap_ordered() |
|  915 | cssn = cevent->ssn; | in sctp_ulpq_reap_ordered() |
| [all …] |
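The ulpqueue.c hits above are the receive-side ordering logic: each incoming DATA event carries the stream sequence number copied out of the chunk header (line 94), and sctp_ulpq_order()/sctp_ulpq_store_ordered() either deliver the event or park it until the earlier SSNs arrive. A minimal user-space sketch of that gate, assuming a single inbound stream; deliver_or_queue() and struct stream_in are illustrative names, not kernel symbols:

```c
#include <stdint.h>
#include <stdio.h>

/* Per-inbound-stream state: the SSN the ULP queue will deliver next,
 * standing in for what sctp_ssn_peek(stream, in, sid) returns. */
struct stream_in {
	uint16_t next_ssn;
};

/* Deliver an event only if its SSN is exactly the one the stream is
 * waiting for; otherwise it stays parked in the per-stream ordering
 * queue until the missing SSNs show up (roughly what sctp_ulpq_order()
 * and sctp_ulpq_store_ordered() decide between). */
static int deliver_or_queue(struct stream_in *in, uint16_t ssn)
{
	if (ssn != in->next_ssn)
		return 0;	/* hold: an earlier SSN is still missing */
	in->next_ssn++;		/* slot consumed, move the expectation forward */
	return 1;
}

int main(void)
{
	struct stream_in in = { .next_ssn = 0 };
	uint16_t arrivals[] = { 0, 2, 1 };

	for (unsigned i = 0; i < 3; i++)
		printf("ssn %u: %s\n", arrivals[i],
		       deliver_or_queue(&in, arrivals[i]) ? "deliver" : "queue");
	return 0;
}
```

After each delivery the real code rescans the per-stream queue (the sctp_ulpq_retrieve_ordered() hits) to release any parked events that have just become in-order.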
|
| A D | stream_interleave.c |
|   90 | __u16 sid, ssn; | in sctp_validate_data() local |
|  100 | ssn = ntohs(chunk->subh.data_hdr->ssn); | in sctp_validate_data() |
|  102 | return !SSN_lt(ssn, sctp_ssn_peek(stream, in, sid)); | in sctp_validate_data() |
| 1248 | sctp_ulpq_skip(ulpq, ntohs(skip->stream), ntohs(skip->ssn)); | in sctp_handle_fwdtsn() |
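The sctp_validate_data() hits show the acceptance test for an ordered DATA chunk: its SSN must not be behind the SSN the receiver next expects on that stream (line 102). A stand-alone sketch of the same predicate, with ssn_lt() standing in for the kernel's wraparound-aware SSN_lt(); this is an illustration, not the kernel function:

```c
#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

/* 16-bit serial compare: a < b when (a - b) wraps into the upper half. */
static bool ssn_lt(uint16_t a, uint16_t b)
{
	return (uint16_t)(a - b) & 0x8000;
}

/* Same shape as the check at stream_interleave.c:102 - the chunk's SSN
 * must not be older than the next SSN expected on its stream. */
static bool data_ssn_valid(uint16_t chunk_ssn, uint16_t expected_ssn)
{
	return !ssn_lt(chunk_ssn, expected_ssn);
}

int main(void)
{
	printf("%d %d %d\n",
	       data_ssn_valid(5, 5),	/* exactly the expected SSN: ok */
	       data_ssn_valid(9, 5),	/* ahead of expectation: ok     */
	       data_ssn_valid(3, 5));	/* behind: already consumed      */
	return 0;
}
```

The sctp_handle_fwdtsn() hit on line 1248 is the complementary path: a received FORWARD-TSN tells the receiver to move those per-stream expectations forward past skipped messages.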
|
| A D | sm_make_chunk.c |
| 1576 | __u16 ssn, sid; | in sctp_chunk_assign_ssn() local |
| 1591 | ssn = 0; | in sctp_chunk_assign_ssn() |
| 1594 | ssn = sctp_ssn_next(stream, out, sid); | in sctp_chunk_assign_ssn() |
| 1596 | ssn = sctp_ssn_peek(stream, out, sid); | in sctp_chunk_assign_ssn() |
| 1599 | lchunk->subh.data_hdr->ssn = htons(ssn); | in sctp_chunk_assign_ssn() |
| 3573 | skip.ssn = skiplist[i].ssn; | in sctp_make_fwdtsn() |
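sctp_chunk_assign_ssn() stamps outgoing DATA chunks: unordered chunks carry SSN 0 (line 1591), and all fragments of one ordered message share a single SSN, which is consumed only when the last fragment is stamped (sctp_ssn_next() on line 1594 versus sctp_ssn_peek() on line 1596). A sketch of that policy under those assumptions; ssn_peek()/ssn_consume() are illustrative stand-ins for the kernel helpers:

```c
#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

/* Per-outbound-stream counter: "peek" reads the next SSN without
 * consuming it, "consume" hands it out and advances. */
struct stream_out {
	uint16_t next_ssn;
};

static uint16_t ssn_peek(struct stream_out *out)    { return out->next_ssn; }
static uint16_t ssn_consume(struct stream_out *out) { return out->next_ssn++; }

/* Stamp one DATA fragment: ordered fragments of the same user message
 * all get the same SSN, and only the last fragment advances the counter. */
static uint16_t assign_ssn(struct stream_out *out, bool unordered, bool last_frag)
{
	if (unordered)
		return 0;	/* the SSN field is unused for unordered data */
	return last_frag ? ssn_consume(out) : ssn_peek(out);
}

int main(void)
{
	struct stream_out out = { 0 };

	/* A message split into three fragments: all share SSN 0. */
	printf("%u %u %u\n",
	       assign_ssn(&out, false, false),
	       assign_ssn(&out, false, false),
	       assign_ssn(&out, false, true));
	/* The next ordered message gets SSN 1. */
	printf("%u\n", assign_ssn(&out, false, true));
	return 0;
}
```

The value goes onto the wire big-endian, hence the htons() on line 1599.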
|
| A D | ulpevent.c |
|  997 | sinfo.sinfo_ssn = event->ssn; | in sctp_ulpevent_read_sndrcvinfo() |
| 1025 | rinfo.rcv_ssn = event->ssn; | in sctp_ulpevent_read_rcvinfo() |
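These two hits are where the kernel exposes the SSN to applications: sctp_ulpevent_read_sndrcvinfo() and sctp_ulpevent_read_rcvinfo() copy event->ssn into the ancillary data returned by recvmsg(). A sketch of the user-space side, assuming the lksctp-tools <netinet/sctp.h> header and the RFC 6458 SCTP_RECVRCVINFO/SCTP_RCVINFO interface; socket setup and error handling are trimmed to the minimum:

```c
#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <netinet/in.h>
#include <netinet/sctp.h>

/* Pull rcv_sid/rcv_ssn out of the SCTP_RCVINFO ancillary data that the
 * kernel attaches to each received message. */
static void print_stream_info(int sd)
{
	char data[2048];
	char cbuf[CMSG_SPACE(sizeof(struct sctp_rcvinfo))];
	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
	};
	struct cmsghdr *cmsg;
	ssize_t n = recvmsg(sd, &msg, 0);

	if (n <= 0)
		return;

	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
		if (cmsg->cmsg_level == IPPROTO_SCTP &&
		    cmsg->cmsg_type == SCTP_RCVINFO) {
			struct sctp_rcvinfo ri;

			memcpy(&ri, CMSG_DATA(cmsg), sizeof(ri));
			printf("%zd bytes on stream %u, ssn %u\n", n,
			       (unsigned)ri.rcv_sid, (unsigned)ri.rcv_ssn);
		}
	}
}

int main(void)
{
	int on = 1;
	int sd = socket(AF_INET, SOCK_STREAM, IPPROTO_SCTP);

	/* Ask the kernel to attach SCTP_RCVINFO to every message. */
	setsockopt(sd, IPPROTO_SCTP, SCTP_RECVRCVINFO, &on, sizeof(on));
	/* bind/connect elided; then: */
	print_stream_info(sd);
	return 0;
}
```

With the older SCTP_SNDRCV ancillary data the same value arrives as sinfo_ssn, which is what the sctp_ulpevent_read_sndrcvinfo() hit fills in.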
|
| A D | outqueue.c |
| 1887 | ftsn_skip_arr[skip_pos].ssn = | in sctp_generate_fwdtsn() |
| 1888 | chunk->subh.data_hdr->ssn; | in sctp_generate_fwdtsn() |
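sctp_generate_fwdtsn() builds the FORWARD-TSN skip list: for each stream that had ordered chunks abandoned, it records the newest affected SSN so the peer can treat those messages as delivered. A simplified sketch of that bookkeeping; struct fwdtsn_skip mirrors the on-wire (stream, SSN) pair and add_skip() is an illustrative helper, not the kernel's ftsn_skip_arr handling:

```c
#include <stdint.h>
#include <stdio.h>

/* One FORWARD-TSN skip entry: "in stream sid, consider everything up to
 * and including ssn delivered". */
struct fwdtsn_skip {
	uint16_t sid;
	uint16_t ssn;
};

/* Record an abandoned ordered chunk: keep at most one entry per stream,
 * always holding the newest SSN seen for that stream.  (Capacity checks
 * are omitted in this sketch.) */
static size_t add_skip(struct fwdtsn_skip *arr, size_t n,
		       uint16_t sid, uint16_t ssn)
{
	for (size_t i = 0; i < n; i++) {
		if (arr[i].sid == sid) {
			if ((uint16_t)(arr[i].ssn - ssn) & 0x8000)
				arr[i].ssn = ssn;	/* newer SSN wins */
			return n;
		}
	}
	arr[n].sid = sid;
	arr[n].ssn = ssn;
	return n + 1;
}

int main(void)
{
	struct fwdtsn_skip skips[8];
	size_t n = 0;

	n = add_skip(skips, n, 1, 4);
	n = add_skip(skips, n, 1, 7);	/* replaces ssn 4 for stream 1 */
	n = add_skip(skips, n, 3, 2);

	for (size_t i = 0; i < n; i++)
		printf("stream %u -> skip through ssn %u\n",
		       skips[i].sid, skips[i].ssn);
	return 0;
}
```

On the receive side, sctp_handle_fwdtsn() feeds each pair to sctp_ulpq_skip(), as the stream_interleave.c hit above shows.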
|
| /net/mac80211/ |
| A D | agg-tx.c |
|  103 | void ieee80211_send_bar(struct ieee80211_vif *vif, u8 *ra, u16 tid, u16 ssn) | in ieee80211_send_bar() argument |
|  125 | bar->start_seq_num = cpu_to_le16(ssn); | in ieee80211_send_bar() |
|  301 | .ssn = 0, | in __ieee80211_stop_tx_ba_session() |
|  487 | tid_tx->ssn, buf_size, tid_tx->timeout); | in ieee80211_send_addba_with_timeout() |
|  524 | params.ssn = sta->tid_seq[tid] >> 4; | in ieee80211_tx_ba_session_handle_start() |
|  526 | tid_tx->ssn = params.ssn; | in ieee80211_tx_ba_session_handle_start() |
|  761 | .ssn = 0, | in ieee80211_agg_tx_operational() |
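On the mac80211 TX side, the ssn handed to the driver when an aggregation session starts is the 802.11 sequence number the next MPDU on that TID will use: sta->tid_seq[] caches the sequence-control value, and shifting it right by four drops the fragment bits (line 524). A small sketch of that extraction; the SCTL_* masks mirror IEEE80211_SCTL_FRAG/IEEE80211_SCTL_SEQ and tx_agg_start_ssn() is an illustrative name:

```c
#include <stdint.h>
#include <stdio.h>

/* 802.11 Sequence Control: bits 0-3 fragment number, bits 4-15 sequence
 * number. */
#define SCTL_FRAG 0x000F
#define SCTL_SEQ  0xFFF0

/* Starting sequence number advertised when a TX aggregation session is
 * opened: the upper 12 bits of the cached sequence-control value, the
 * same thing "params.ssn = sta->tid_seq[tid] >> 4" computes. */
static uint16_t tx_agg_start_ssn(uint16_t tid_seq_ctrl)
{
	return (tid_seq_ctrl & SCTL_SEQ) >> 4;
}

int main(void)
{
	uint16_t tid_seq = 1234 << 4;	/* next MPDU: SN 1234, fragment 0 */

	printf("ADDBA starting SSN: %u\n", tx_agg_start_ssn(tid_seq));
	return 0;
}
```

The same value is advertised to the peer in the ADDBA request (the ieee80211_send_addba_with_timeout() hit) and reaches drivers through params.ssn.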
|
| A D | status.c |
|  197 | static void ieee80211_set_bar_pending(struct sta_info *sta, u8 tid, u16 ssn) | in ieee80211_set_bar_pending() argument |
|  205 | tid_tx->failed_bar_ssn = ssn; | in ieee80211_set_bar_pending() |
|  993 | u16 ssn; | in __ieee80211_tx_status() local |
|  998 | ssn = ((le16_to_cpu(hdr->seq_ctrl) + 0x10) | in __ieee80211_tx_status() |
| 1001 | tid, ssn); | in __ieee80211_tx_status() |
| 1019 | u16 ssn = le16_to_cpu(bar->start_seq_num); | in __ieee80211_tx_status() local |
| 1025 | ieee80211_set_bar_pending(sta, tid, ssn); | in __ieee80211_tx_status() |
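When an aggregated frame is dropped for good, status.c remembers a BAR to send later; its SSN must point one past the lost frame so the receiver releases everything up to it. A sketch of the computation that starts at line 998: adding 0x10 advances the 12-bit sequence number (it sits in bits 4-15 of Sequence Control) and the mask clears the fragment bits. The helper name is illustrative:

```c
#include <stdint.h>
#include <stdio.h>

#define SCTL_SEQ 0xFFF0	/* sequence-number bits of Sequence Control */

/* The pending BAR must name the sequence number *after* the lost frame:
 * adding 0x10 steps the 12-bit sequence number (bits 4-15) by one, and
 * masking clears the fragment bits, as in the expression that begins at
 * status.c:998. */
static uint16_t bar_ssn_after(uint16_t seq_ctrl)
{
	return (uint16_t)(seq_ctrl + 0x10) & SCTL_SEQ;
}

int main(void)
{
	/* Lost MPDU with sequence number 4095: the BAR SSN wraps to 0. */
	uint16_t seq_ctrl = 4095 << 4;

	printf("0x%04x -> BAR starting sequence control 0x%04x (SN %u)\n",
	       seq_ctrl, bar_ssn_after(seq_ctrl), bar_ssn_after(seq_ctrl) >> 4);
	return 0;
}
```

That value is what ieee80211_set_bar_pending() stores in failed_bar_ssn (line 205) until the BAR can actually be transmitted.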
|
| A D | agg-rx.c |
|   69 | .ssn = 0, | in __ieee80211_stop_rx_ba_session() |
|  285 | .ssn = start_seq_num, | in __ieee80211_start_rx_ba_session() |
|  435 | tid_agg_rx->ssn = start_seq_num; | in __ieee80211_start_rx_ba_session() |
|
| A D | sta_info.h |
|  197 | u16 ssn; | member |
|  250 | u16 ssn; | member |
|
| A D | rx.c |
| 1285 | tid_agg_rx->ssn = mpdu_seq_num; | in ieee80211_sta_manage_reorder_buf() |
| 3292 | event.u.ba.ssn = start_seq_num; | in ieee80211_rx_h_ctrl() |
| 4277 | u16 ssn, u64 filtered, | in ieee80211_mark_rx_ba_filtered_frames() argument |
| 4322 | tid_agg_rx->head_seq_num = ssn; | in ieee80211_mark_rx_ba_filtered_frames() |
| 4324 | ieee80211_release_reorder_frames(sta->sdata, tid_agg_rx, ssn, | in ieee80211_mark_rx_ba_filtered_frames() |
| 4330 | diff = (tid_agg_rx->head_seq_num - ssn) & IEEE80211_SN_MASK; | in ieee80211_mark_rx_ba_filtered_frames() |
| 4336 | ssn += diff; | in ieee80211_mark_rx_ba_filtered_frames() |
| 4340 | int index = (ssn + i) % tid_agg_rx->buf_size; | in ieee80211_mark_rx_ba_filtered_frames() |
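ieee80211_mark_rx_ba_filtered_frames() reconciles a driver-reported filtered-frame bitmap with the RX reorder buffer: sequence-number distances are taken modulo the 12-bit 802.11 sequence space (line 4330) and each covered frame maps to a reorder-buffer slot via (ssn + i) % buf_size (line 4340). A user-space sketch of that arithmetic; SN_MASK mirrors IEEE80211_SN_MASK and the rest is illustrative:

```c
#include <stdint.h>
#include <stdio.h>

#define SN_MASK 0x0FFF	/* 12-bit 802.11 sequence-number space */

/* Forward distance between two sequence numbers modulo 4096, the same
 * pattern as "(tid_agg_rx->head_seq_num - ssn) & IEEE80211_SN_MASK". */
static uint16_t sn_sub(uint16_t to, uint16_t from)
{
	return (to - from) & SN_MASK;
}

int main(void)
{
	uint16_t head_seq_num = 10;	/* reorder window head, wrapped past 4095 */
	uint16_t ssn = 4090;		/* SSN reported with the filtered bitmap */
	unsigned buf_size = 64;		/* reorder-buffer slots for this TID */

	/* If the reported SSN lags behind the current head, only the frames
	 * from the head onward can still sit in the buffer, so advance the
	 * SSN (mirrors the diff/ssn adjustment at rx.c:4330 and 4336). */
	uint16_t diff = sn_sub(head_seq_num, ssn);
	ssn = (ssn + diff) & SN_MASK;

	/* Frames covered by the bitmap land at (ssn + i) % buf_size, the
	 * indexing used at rx.c:4340. */
	for (unsigned i = 0; i < 4; i++)
		printf("SN %u -> slot %u\n",
		       (ssn + i) & SN_MASK, (ssn + i) % buf_size);
	return 0;
}
```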
|
| A D | trace.h |
|  128 | __field(u16, ssn) \ |
|  135 | __entry->ssn = params->ssn; \ |
|  141 | #define AMPDU_ACTION_PR_ARG STA_PR_ARG, __entry->tid, __entry->ssn, \ |
|
| A D | debugfs_sta.c |
|  335 | tid_rx ? tid_rx->ssn : 0); | in sta_agg_status_do_read() |
|
| /net/mptcp/ |
| A D | subflow.c |
|  981 | static void dbg_bad_map(struct mptcp_subflow_context *subflow, u32 ssn) | in dbg_bad_map() argument |
|  984 | ssn, subflow->map_subflow_seq, subflow->map_data_len); | in dbg_bad_map() |
| 1005 | u32 ssn = tcp_sk(ssk)->copied_seq - subflow->ssn_offset; | in validate_mapping() local |
| 1007 | if (unlikely(before(ssn, subflow->map_subflow_seq))) { | in validate_mapping() |
| 1011 | dbg_bad_map(subflow, ssn); | in validate_mapping() |
| 1014 | if (unlikely(!before(ssn, subflow->map_subflow_seq + | in validate_mapping() |
| 1017 | dbg_bad_map(subflow, ssn); | in validate_mapping() |
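In MPTCP, validate_mapping() checks that the next byte the subflow will deliver is covered by the current DSS mapping: ssn is the relative subflow sequence number (copied_seq minus the offset recorded at subflow creation, line 1005), and it must lie in [map_subflow_seq, map_subflow_seq + map_data_len) under TCP-style wraparound comparison (the before() calls). A stand-alone sketch of that window test; seq_before(), subflow_ssn() and mapping_covers() are illustrative helpers, not kernel functions:

```c
#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

/* 32-bit serial comparison, the same idea as before()/after() in tcp.h. */
static bool seq_before(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) < 0;
}

/* Relative subflow sequence number of the next byte the subflow hands to
 * MPTCP: the TCP-level sequence minus the offset recorded when the
 * subflow was created (what validate_mapping() computes into ssn). */
static uint32_t subflow_ssn(uint32_t copied_seq, uint32_t ssn_offset)
{
	return copied_seq - ssn_offset;
}

/* The next byte must fall inside the half-open window described by the
 * current DSS mapping: [map_subflow_seq, map_subflow_seq + map_data_len). */
static bool mapping_covers(uint32_t ssn, uint32_t map_subflow_seq,
			   uint32_t map_data_len)
{
	if (seq_before(ssn, map_subflow_seq))
		return false;	/* mapping starts after this data */
	if (!seq_before(ssn, map_subflow_seq + map_data_len))
		return false;	/* mapping already fully consumed */
	return true;
}

int main(void)
{
	uint32_t ssn = subflow_ssn(/* copied_seq */ 1000100,
				   /* ssn_offset */ 1000000);

	printf("ssn %u inside [100, 100+500): %d\n",
	       ssn, mapping_covers(ssn, 100, 500));
	printf("ssn %u inside [700, 700+100): %d\n",
	       ssn, mapping_covers(ssn, 700, 100));
	return 0;
}
```

When either bound is violated, the kernel logs the offending values through dbg_bad_map(), as the hits at lines 1011 and 1017 show.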
|