Lines matching refs: ulpq
32 static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
36 static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq);
41 void sctp_ulpq_init(struct sctp_ulpq *ulpq, struct sctp_association *asoc) in sctp_ulpq_init() argument
43 memset(ulpq, 0, sizeof(struct sctp_ulpq)); in sctp_ulpq_init()
45 ulpq->asoc = asoc; in sctp_ulpq_init()
46 skb_queue_head_init(&ulpq->reasm); in sctp_ulpq_init()
47 skb_queue_head_init(&ulpq->reasm_uo); in sctp_ulpq_init()
48 skb_queue_head_init(&ulpq->lobby); in sctp_ulpq_init()
49 ulpq->pd_mode = 0; in sctp_ulpq_init()
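
The sctp_ulpq_init() lines above zero the structure, link it to its association, initialise the three event queues (reasm for fragments awaiting reassembly, reasm_uo for data sent unordered, and the lobby used for stream ordering) and clear the partial-delivery flag. Below is a minimal userspace sketch of that shape, assuming hypothetical stand-in types (struct evq, struct ulpq_sketch) rather than the kernel's sk_buff_head and struct sctp_ulpq:

    #include <string.h>
    #include <stddef.h>

    /* Hypothetical stand-ins for the kernel's sk_buff_head and association. */
    struct evq { struct evq *next, *prev; };   /* an empty circular list head */
    struct assoc;                              /* opaque for this sketch      */

    struct ulpq_sketch {
        struct assoc *asoc;    /* owning association                          */
        struct evq reasm;      /* fragments awaiting reassembly               */
        struct evq reasm_uo;   /* same, for data sent unordered               */
        struct evq lobby;      /* complete events waiting on stream ordering  */
        int pd_mode;           /* set while partial delivery is in progress   */
    };

    static void evq_init(struct evq *q)
    {
        q->next = q->prev = q;
    }

    /* Mirrors the visible steps of sctp_ulpq_init(): zero, link, init, clear. */
    static void ulpq_sketch_init(struct ulpq_sketch *q, struct assoc *asoc)
    {
        memset(q, 0, sizeof(*q));
        q->asoc = asoc;
        evq_init(&q->reasm);
        evq_init(&q->reasm_uo);
        evq_init(&q->lobby);
        q->pd_mode = 0;
    }

    int main(void)
    {
        struct ulpq_sketch q;

        ulpq_sketch_init(&q, NULL);    /* no real association in this demo */
        return q.pd_mode;              /* 0 after init                     */
    }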
54 void sctp_ulpq_flush(struct sctp_ulpq *ulpq) in sctp_ulpq_flush() argument
59 while ((skb = __skb_dequeue(&ulpq->lobby)) != NULL) { in sctp_ulpq_flush()
64 while ((skb = __skb_dequeue(&ulpq->reasm)) != NULL) { in sctp_ulpq_flush()
69 while ((skb = __skb_dequeue(&ulpq->reasm_uo)) != NULL) { in sctp_ulpq_flush()
76 void sctp_ulpq_free(struct sctp_ulpq *ulpq) in sctp_ulpq_free() argument
78 sctp_ulpq_flush(ulpq); in sctp_ulpq_free()
82 int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk, in sctp_ulpq_tail_data() argument
98 event = sctp_ulpq_reasm(ulpq, event); in sctp_ulpq_tail_data()
107 event = sctp_ulpq_order(ulpq, event); in sctp_ulpq_tail_data()
115 sctp_ulpq_tail_event(ulpq, &temp); in sctp_ulpq_tail_data()
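
sctp_ulpq_tail_data() is the entry point for a received DATA chunk: the chunk becomes a ulpevent, is pushed through reassembly (sctp_ulpq_reasm), then for ordered data through stream ordering (sctp_ulpq_order), and whatever emerges complete and in order is queued to the socket with sctp_ulpq_tail_event(). The sketch below shows only that control flow; the ev_* helpers and types are hypothetical stubs, not the kernel functions:

    #include <stdio.h>

    /* Hypothetical, much-simplified stand-ins for the kernel objects involved. */
    struct ev { const char *payload; };       /* a message event                */
    struct uq { int unused; };                /* the ULP queue from the sketch  */

    /* Trivial stubs: in the kernel, reassembly and ordering may instead park
     * the event (returning NULL) until more fragments or earlier SSNs arrive. */
    static struct ev *ev_reassemble(struct uq *q, struct ev *e) { (void)q; return e; }
    static struct ev *ev_order(struct uq *q, struct ev *e)      { (void)q; return e; }
    static void ev_deliver(struct uq *q, struct ev *e)
    {
        (void)q;
        printf("delivered: %s\n", e->payload);
    }

    /* The shape of the sctp_ulpq_tail_data() path: reassemble, order, deliver. */
    static int tail_data_sketch(struct uq *q, struct ev *e)
    {
        e = ev_reassemble(q, e);   /* fragment parked until the message is whole */
        if (!e)
            return 0;

        e = ev_order(q, e);        /* parked in the lobby until its SSN is next  */
        if (!e)
            return 0;

        ev_deliver(q, e);          /* queue the finished message to the socket   */
        return 1;
    }

    int main(void)
    {
        struct uq q = { 0 };
        struct ev e = { "hello" };

        return tail_data_sketch(&q, &e) == 1 ? 0 : 1;
    }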
163 static void sctp_ulpq_set_pd(struct sctp_ulpq *ulpq) in sctp_ulpq_set_pd() argument
165 struct sctp_sock *sp = sctp_sk(ulpq->asoc->base.sk); in sctp_ulpq_set_pd()
168 ulpq->pd_mode = 1; in sctp_ulpq_set_pd()
172 static int sctp_ulpq_clear_pd(struct sctp_ulpq *ulpq) in sctp_ulpq_clear_pd() argument
174 ulpq->pd_mode = 0; in sctp_ulpq_clear_pd()
175 sctp_ulpq_reasm_drain(ulpq); in sctp_ulpq_clear_pd()
176 return sctp_clear_pd(ulpq->asoc->base.sk, ulpq->asoc); in sctp_ulpq_clear_pd()
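
sctp_ulpq_set_pd() and sctp_ulpq_clear_pd() toggle partial-delivery mode: while pd_mode is set, the leading fragments of an oversized message are passed up before the whole message has arrived, and clearing the mode also drains the reassembly queue so anything completed in the meantime is delivered. A tiny sketch of that state, with hypothetical names (struct pd_state, pd_set, pd_clear):

    /* Hypothetical partial-delivery state: while pd_mode is set, fragments of
     * the message being partially delivered are passed up as they extend the
     * leading run, instead of waiting for the whole message.                  */
    struct pd_state {
        int pd_mode;
        void (*drain)(void *queue);   /* stands in for sctp_ulpq_reasm_drain() */
        void *queue;
    };

    void pd_set(struct pd_state *pd)
    {
        pd->pd_mode = 1;              /* the kernel also marks the owning socket */
    }

    /* Leaving partial delivery: clear the flag, then push whatever has since
     * become fully reassembled, as sctp_ulpq_clear_pd() does before telling
     * the socket layer via sctp_clear_pd().                                   */
    int pd_clear(struct pd_state *pd)
    {
        pd->pd_mode = 0;
        if (pd->drain)
            pd->drain(pd->queue);
        return 0;                     /* kernel returns sctp_clear_pd()'s result */
    }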
179 int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sk_buff_head *skb_list) in sctp_ulpq_tail_event() argument
181 struct sock *sk = ulpq->asoc->base.sk; in sctp_ulpq_tail_event()
204 if (!sctp_ulpevent_is_enabled(event, ulpq->asoc->subscribe)) in sctp_ulpq_tail_event()
215 if (ulpq->pd_mode) { in sctp_ulpq_tail_event()
249 sctp_ulpq_clear_pd(ulpq); in sctp_ulpq_tail_event()
267 static void sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq, in sctp_ulpq_store_reasm() argument
277 pos = skb_peek_tail(&ulpq->reasm); in sctp_ulpq_store_reasm()
279 __skb_queue_tail(&ulpq->reasm, sctp_event2skb(event)); in sctp_ulpq_store_reasm()
287 __skb_queue_tail(&ulpq->reasm, sctp_event2skb(event)); in sctp_ulpq_store_reasm()
292 skb_queue_walk(&ulpq->reasm, pos) { in sctp_ulpq_store_reasm()
301 __skb_queue_before(&ulpq->reasm, pos, sctp_event2skb(event)); in sctp_ulpq_store_reasm()
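
The sctp_ulpq_store_reasm() lines show the insertion policy for the reassembly queue: peek at the tail for a fast append when the new fragment carries the highest TSN seen so far, otherwise walk the queue and insert before the first entry with a larger TSN. A self-contained sketch of the same policy over a fixed array; the names (tsn_lt, struct frag, reasm_store) are hypothetical, and the comparison uses serial arithmetic so TSN wrap-around behaves like the kernel's TSN_lt():

    #include <stdint.h>
    #include <stddef.h>
    #include <string.h>

    /* Serial-number comparison: a < b even across the 2^32 TSN wrap. */
    static int tsn_lt(uint32_t a, uint32_t b)
    {
        return (int32_t)(a - b) < 0;
    }

    struct frag { uint32_t tsn; /* payload omitted in this sketch */ };

    struct reasm_q {
        struct frag frags[64];    /* hypothetical fixed capacity */
        size_t len;
    };

    /* Insert in TSN order; the fast path is an append when the TSN is the
     * largest so far, mirroring the skb_peek_tail()/__skb_queue_tail() check
     * above, and the slow path walks for the first larger TSN and inserts
     * before it, like __skb_queue_before().                                 */
    int reasm_store(struct reasm_q *q, struct frag f)
    {
        size_t i;

        if (q->len == sizeof(q->frags) / sizeof(q->frags[0]))
            return -1;                               /* full: the kernel reneges */

        if (q->len == 0 || tsn_lt(q->frags[q->len - 1].tsn, f.tsn)) {
            q->frags[q->len++] = f;                  /* append at the tail       */
            return 0;
        }

        for (i = 0; i < q->len; i++)                 /* find the insertion slot  */
            if (tsn_lt(f.tsn, q->frags[i].tsn))
                break;

        memmove(&q->frags[i + 1], &q->frags[i],
                (q->len - i) * sizeof(q->frags[0]));
        q->frags[i] = f;
        q->len++;
        return 0;
    }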
393 static struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_ulpq *ulpq) in sctp_ulpq_retrieve_reassembled() argument
425 skb_queue_walk(&ulpq->reasm, pos) { in sctp_ulpq_retrieve_reassembled()
435 if (skb_queue_is_first(&ulpq->reasm, pos)) { in sctp_ulpq_retrieve_reassembled()
469 asoc = ulpq->asoc; in sctp_ulpq_retrieve_reassembled()
484 &ulpq->reasm, in sctp_ulpq_retrieve_reassembled()
487 sctp_ulpq_set_pd(ulpq); in sctp_ulpq_retrieve_reassembled()
493 retval = sctp_make_reassembled_event(ulpq->asoc->base.net, in sctp_ulpq_retrieve_reassembled()
494 &ulpq->reasm, first_frag, pos); in sctp_ulpq_retrieve_reassembled()
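
sctp_ulpq_retrieve_reassembled() walks the TSN-sorted reasm queue looking for one complete message: a FIRST fragment followed by consecutive-TSN MIDDLE fragments and closed by a LAST fragment (or a single unfragmented event), then builds one reassembled event from that run; when nothing is complete but too much data has queued, it delivers the leading run anyway and enters partial delivery via sctp_ulpq_set_pd(). The sketch below covers only a simplified search for a complete run, ignoring the partial-delivery heuristics; all names are hypothetical:

    #include <stdint.h>
    #include <stddef.h>

    /* Hypothetical fragment descriptors; pos mirrors the SCTP B/E-bit states. */
    enum frag_pos { FRAG_FIRST, FRAG_MIDDLE, FRAG_LAST, FRAG_WHOLE };

    struct fdesc { uint32_t tsn; enum frag_pos pos; };

    /* Scan a TSN-sorted fragment list for one complete message: FIRST, then
     * MIDDLEs with consecutive TSNs, closed by LAST (or a single WHOLE event).
     * On success returns 1 and the inclusive index range [*lo, *hi].           */
    int find_reassembled(const struct fdesc *f, size_t n, size_t *lo, size_t *hi)
    {
        size_t i, start = 0;
        uint32_t next_tsn = 0;
        int have_first = 0;

        for (i = 0; i < n; i++) {
            switch (f[i].pos) {
            case FRAG_WHOLE:                 /* unfragmented message: done       */
                *lo = *hi = i;
                return 1;
            case FRAG_FIRST:
                have_first = 1;              /* (re)start a candidate run        */
                start = i;
                next_tsn = f[i].tsn + 1;     /* wrap-safe: unsigned arithmetic   */
                break;
            case FRAG_MIDDLE:
            case FRAG_LAST:
                if (!have_first || f[i].tsn != next_tsn) {
                    have_first = 0;          /* hole in the run: give it up      */
                    break;
                }
                if (f[i].pos == FRAG_LAST) {
                    *lo = start;
                    *hi = i;
                    return 1;
                }
                next_tsn++;
                break;
            }
        }
        return 0;                            /* nothing complete yet             */
    }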
501 static struct sctp_ulpevent *sctp_ulpq_retrieve_partial(struct sctp_ulpq *ulpq) in sctp_ulpq_retrieve_partial() argument
514 if (skb_queue_empty(&ulpq->reasm)) in sctp_ulpq_retrieve_partial()
522 skb_queue_walk(&ulpq->reasm, pos) { in sctp_ulpq_retrieve_partial()
559 retval = sctp_make_reassembled_event(ulpq->asoc->base.net, &ulpq->reasm, in sctp_ulpq_retrieve_partial()
571 static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq, in sctp_ulpq_reasm() argument
582 sctp_ulpq_store_reasm(ulpq, event); in sctp_ulpq_reasm()
583 if (!ulpq->pd_mode) in sctp_ulpq_reasm()
584 retval = sctp_ulpq_retrieve_reassembled(ulpq); in sctp_ulpq_reasm()
592 ctsnap = sctp_tsnmap_get_ctsn(&ulpq->asoc->peer.tsn_map); in sctp_ulpq_reasm()
594 retval = sctp_ulpq_retrieve_partial(ulpq); in sctp_ulpq_reasm()
601 static struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *ulpq) in sctp_ulpq_retrieve_first() argument
613 if (skb_queue_empty(&ulpq->reasm)) in sctp_ulpq_retrieve_first()
620 skb_queue_walk(&ulpq->reasm, pos) { in sctp_ulpq_retrieve_first()
660 retval = sctp_make_reassembled_event(ulpq->asoc->base.net, &ulpq->reasm, in sctp_ulpq_retrieve_first()
679 void sctp_ulpq_reasm_flushtsn(struct sctp_ulpq *ulpq, __u32 fwd_tsn) in sctp_ulpq_reasm_flushtsn() argument
685 if (skb_queue_empty(&ulpq->reasm)) in sctp_ulpq_reasm_flushtsn()
688 skb_queue_walk_safe(&ulpq->reasm, pos, tmp) { in sctp_ulpq_reasm_flushtsn()
698 __skb_unlink(pos, &ulpq->reasm); in sctp_ulpq_reasm_flushtsn()
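
sctp_ulpq_reasm_flushtsn() handles a FORWARD-TSN: every queued fragment whose TSN is at or below the new cumulative TSN is unlinked and freed, since the peer has abandoned that data. A sketch of the purge with hypothetical names, again using serial-number comparison for a wrap-safe "TSN <= fwd_tsn" test:

    #include <stdint.h>
    #include <stddef.h>

    /* Serial-number comparison: a <= b in mod-2^32 arithmetic. */
    static int tsn_lte(uint32_t a, uint32_t b)
    {
        return (int32_t)(a - b) <= 0;
    }

    /* Drop every queued fragment covered by the new cumulative TSN, compacting
     * the (hypothetical) array in place; returns how many fragments remain.    */
    size_t flush_upto(uint32_t *tsns, size_t n, uint32_t fwd_tsn)
    {
        size_t i, kept = 0;

        for (i = 0; i < n; i++)
            if (!tsn_lte(tsns[i], fwd_tsn))     /* keep only TSNs beyond fwd_tsn */
                tsns[kept++] = tsns[i];

        return kept;
    }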
710 static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq) in sctp_ulpq_reasm_drain() argument
714 if (skb_queue_empty(&ulpq->reasm)) in sctp_ulpq_reasm_drain()
717 while ((event = sctp_ulpq_retrieve_reassembled(ulpq)) != NULL) { in sctp_ulpq_reasm_drain()
725 event = sctp_ulpq_order(ulpq, event); in sctp_ulpq_reasm_drain()
731 sctp_ulpq_tail_event(ulpq, &temp); in sctp_ulpq_reasm_drain()
739 static void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq, in sctp_ulpq_retrieve_ordered() argument
749 stream = &ulpq->asoc->stream; in sctp_ulpq_retrieve_ordered()
754 sctp_skb_for_each(pos, &ulpq->lobby, tmp) { in sctp_ulpq_retrieve_ordered()
773 __skb_unlink(pos, &ulpq->lobby); in sctp_ulpq_retrieve_ordered()
781 static void sctp_ulpq_store_ordered(struct sctp_ulpq *ulpq, in sctp_ulpq_store_ordered() argument
789 pos = skb_peek_tail(&ulpq->lobby); in sctp_ulpq_store_ordered()
791 __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event)); in sctp_ulpq_store_ordered()
802 __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event)); in sctp_ulpq_store_ordered()
807 __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event)); in sctp_ulpq_store_ordered()
814 skb_queue_walk(&ulpq->lobby, pos) { in sctp_ulpq_store_ordered()
827 __skb_queue_before(&ulpq->lobby, pos, sctp_event2skb(event)); in sctp_ulpq_store_ordered()
830 static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq, in sctp_ulpq_order() argument
843 stream = &ulpq->asoc->stream; in sctp_ulpq_order()
850 sctp_ulpq_store_ordered(ulpq, event); in sctp_ulpq_order()
860 sctp_ulpq_retrieve_ordered(ulpq, event); in sctp_ulpq_order()
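
sctp_ulpq_order(), sctp_ulpq_store_ordered() and sctp_ulpq_retrieve_ordered() implement per-stream ordering: an event whose SSN is not the next one expected on its stream is parked in the lobby (which the kernel keeps sorted by stream id and SSN), and whenever the expected SSN advances, the lobby is re-checked for events that have become deliverable. A simplified, self-contained sketch of that behaviour; unlike the kernel it keeps the lobby unsorted in a small fixed array, and all names are hypothetical:

    #include <stdio.h>
    #include <stdint.h>
    #include <stddef.h>

    #define MAX_STREAMS 8
    #define LOBBY_CAP   32

    struct oev { uint16_t sid, ssn; };            /* hypothetical ordered event   */

    struct order_state {
        uint16_t next_ssn[MAX_STREAMS];           /* next SSN expected per stream */
        struct oev lobby[LOBBY_CAP];              /* parked out-of-order events   */
        size_t nlobby;
    };

    /* Delivery callback standing in for sctp_ulpq_tail_event(). */
    typedef void (*deliver_fn)(const struct oev *ev);

    /* If the event is the next one its stream expects, deliver it, then keep
     * draining lobby entries made deliverable by the bumped SSN; otherwise
     * park it.  (SSNs are 16-bit and wrap; exact equality against next_ssn
     * is naturally wrap-safe.)                                               */
    static void order_event(struct order_state *st, struct oev ev, deliver_fn out)
    {
        size_t i;

        if (ev.sid >= MAX_STREAMS)
            return;                               /* sketch-only bounds guard     */

        if (ev.ssn != st->next_ssn[ev.sid]) {
            if (st->nlobby < LOBBY_CAP)           /* out of order: into the lobby */
                st->lobby[st->nlobby++] = ev;
            return;
        }

        out(&ev);
        st->next_ssn[ev.sid]++;

        for (i = 0; i < st->nlobby; ) {           /* re-scan until nothing fits   */
            if (st->lobby[i].sid == ev.sid &&
                st->lobby[i].ssn == st->next_ssn[ev.sid]) {
                out(&st->lobby[i]);
                st->next_ssn[ev.sid]++;
                st->lobby[i] = st->lobby[--st->nlobby];   /* swap-remove          */
                i = 0;                                    /* restart the scan     */
            } else {
                i++;
            }
        }
    }

    static void print_ev(const struct oev *ev)
    {
        printf("deliver sid=%u ssn=%u\n", (unsigned)ev->sid, (unsigned)ev->ssn);
    }

    int main(void)
    {
        struct order_state st = {0};
        struct oev late  = { 0, 1 };              /* arrives first, SSN too high  */
        struct oev first = { 0, 0 };

        order_event(&st, late, print_ev);         /* parked in the lobby          */
        order_event(&st, first, print_ev);        /* delivers SSN 0, then SSN 1   */
        return 0;
    }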
868 static void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid) in sctp_ulpq_reap_ordered() argument
875 struct sk_buff_head *lobby = &ulpq->lobby; in sctp_ulpq_reap_ordered()
878 stream = &ulpq->asoc->stream; in sctp_ulpq_reap_ordered()
930 sctp_ulpq_retrieve_ordered(ulpq, event); in sctp_ulpq_reap_ordered()
931 sctp_ulpq_tail_event(ulpq, &temp); in sctp_ulpq_reap_ordered()
938 void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn) in sctp_ulpq_skip() argument
943 stream = &ulpq->asoc->stream; in sctp_ulpq_skip()
955 sctp_ulpq_reap_ordered(ulpq, sid); in sctp_ulpq_skip()
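
sctp_ulpq_skip() is the ordered-stream side of FORWARD-TSN: unless the skipped SSN is already old, the stream's next-expected SSN jumps past it, after which sctp_ulpq_reap_ordered() delivers any lobby events that have become deliverable. A sketch of just the SSN bump, using wrap-safe 16-bit serial comparison; the names are hypothetical:

    #include <stdint.h>

    /* SSNs are 16 bits and wrap, so compare them with serial arithmetic too. */
    static int ssn_lte(uint16_t a, uint16_t b)
    {
        return (int16_t)(a - b) <= 0;
    }

    /* FORWARD-TSN says everything up to `ssn` on this stream was abandoned:
     * unless the SSN is already old, jump the expected counter past it so the
     * stream does not stall waiting for data that will never arrive.          */
    void skip_stream(uint16_t *next_expected_ssn, uint16_t ssn)
    {
        if (ssn_lte(*next_expected_ssn, ssn))
            *next_expected_ssn = (uint16_t)(ssn + 1);
    }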
958 __u16 sctp_ulpq_renege_list(struct sctp_ulpq *ulpq, struct sk_buff_head *list, in sctp_ulpq_renege_list() argument
967 tsnmap = &ulpq->asoc->peer.tsn_map; in sctp_ulpq_renege_list()
1007 static __u16 sctp_ulpq_renege_order(struct sctp_ulpq *ulpq, __u16 needed) in sctp_ulpq_renege_order() argument
1009 return sctp_ulpq_renege_list(ulpq, &ulpq->lobby, needed); in sctp_ulpq_renege_order()
1013 static __u16 sctp_ulpq_renege_frags(struct sctp_ulpq *ulpq, __u16 needed) in sctp_ulpq_renege_frags() argument
1015 return sctp_ulpq_renege_list(ulpq, &ulpq->reasm, needed); in sctp_ulpq_renege_frags()
1019 void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq, in sctp_ulpq_partial_delivery() argument
1028 asoc = ulpq->asoc; in sctp_ulpq_partial_delivery()
1034 if (ulpq->pd_mode) in sctp_ulpq_partial_delivery()
1040 skb = skb_peek(&asoc->ulpq.reasm); in sctp_ulpq_partial_delivery()
1054 event = sctp_ulpq_retrieve_first(ulpq); in sctp_ulpq_partial_delivery()
1061 sctp_ulpq_tail_event(ulpq, &temp); in sctp_ulpq_partial_delivery()
1062 sctp_ulpq_set_pd(ulpq); in sctp_ulpq_partial_delivery()
1069 void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk, in sctp_ulpq_renege() argument
1072 struct sctp_association *asoc = ulpq->asoc; in sctp_ulpq_renege()
1080 freed = sctp_ulpq_renege_order(ulpq, needed); in sctp_ulpq_renege()
1082 freed += sctp_ulpq_renege_frags(ulpq, needed - freed); in sctp_ulpq_renege()
1087 int retval = sctp_ulpq_tail_data(ulpq, chunk, gfp); in sctp_ulpq_renege()
1093 sctp_ulpq_partial_delivery(ulpq, gfp); in sctp_ulpq_renege()
1095 sctp_ulpq_reasm_drain(ulpq); in sctp_ulpq_renege()
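
sctp_ulpq_renege() is the memory-pressure path: when a new chunk cannot be accepted, previously queued data is released, whole ordered events in the lobby first (sctp_ulpq_renege_order), then reassembly fragments (sctp_ulpq_renege_frags), each queue giving up entries from its tail until enough bytes are freed; the chunk is then retried through sctp_ulpq_tail_data(), and if the message still cannot be completed, partial delivery is attempted. A sketch of the byte-counting purge with hypothetical types; the kernel additionally marks the dropped TSNs as renegged in the peer's TSN map:

    #include <stddef.h>

    /* A queued item with its memory footprint (the kernel uses skb->truesize). */
    struct qitem { size_t truesize; };

    struct bqueue { struct qitem items[32]; size_t len; };

    /* Free items from the tail of one queue (newest first, as the kernel's
     * skb_peek_tail() loop does) until `needed` bytes have been recovered;
     * returns the number of bytes actually freed.                            */
    static size_t renege_list(struct bqueue *q, size_t needed)
    {
        size_t freed = 0;

        while (freed < needed && q->len > 0)
            freed += q->items[--q->len].truesize;

        return freed;
    }

    /* The order used by sctp_ulpq_renege(): ordered events in the lobby first,
     * then reassembly fragments, and only as much as is still needed.          */
    size_t renege(struct bqueue *lobby, struct bqueue *frags, size_t needed)
    {
        size_t freed = renege_list(lobby, needed);

        if (freed < needed)
            freed += renege_list(frags, needed - freed);

        return freed;
    }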
1102 void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp) in sctp_ulpq_abort_pd() argument
1108 if (!ulpq->pd_mode) in sctp_ulpq_abort_pd()
1111 sk = ulpq->asoc->base.sk; in sctp_ulpq_abort_pd()
1113 if (sctp_ulpevent_type_enabled(ulpq->asoc->subscribe, in sctp_ulpq_abort_pd()
1115 ev = sctp_ulpevent_make_pdapi(ulpq->asoc, in sctp_ulpq_abort_pd()
1122 if ((sctp_ulpq_clear_pd(ulpq) || ev) && !sp->data_ready_signalled) { in sctp_ulpq_abort_pd()