Lines matching defs:ulpq (cross-reference hits for the ulpq symbol in the Linux kernel's SCTP upper-layer-protocol event queue, net/sctp/ulpqueue.c; each entry below is prefixed with its line number in that file).

32 static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
36 static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq);
41 void sctp_ulpq_init(struct sctp_ulpq *ulpq, struct sctp_association *asoc)
43 memset(ulpq, 0, sizeof(struct sctp_ulpq));
45 ulpq->asoc = asoc;
46 skb_queue_head_init(&ulpq->reasm);
47 skb_queue_head_init(&ulpq->reasm_uo);
48 skb_queue_head_init(&ulpq->lobby);
49 ulpq->pd_mode = 0;
54 void sctp_ulpq_flush(struct sctp_ulpq *ulpq)
59 while ((skb = __skb_dequeue(&ulpq->lobby)) != NULL) {
64 while ((skb = __skb_dequeue(&ulpq->reasm)) != NULL) {
69 while ((skb = __skb_dequeue(&ulpq->reasm_uo)) != NULL) {
76 void sctp_ulpq_free(struct sctp_ulpq *ulpq)
78 sctp_ulpq_flush(ulpq);
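Lines 41-78 above are the queue's lifecycle: sctp_ulpq_init() zeroes the structure and initializes its three skb queues (reasm for fragments of ordered messages, reasm_uo for fragments of unordered messages, lobby for complete messages waiting on stream order), sctp_ulpq_flush() frees everything still queued, and sctp_ulpq_free() is simply a flush. A minimal user-space sketch of that lifecycle, with mock types standing in for sk_buff_head and the association:

/* Hypothetical user-space model; the kernel uses struct sk_buff_head
 * and struct sctp_association instead of these mocks. */
#include <stdio.h>
#include <string.h>

struct mock_queue { int len; };        /* stands in for sk_buff_head */

struct mock_ulpq {
	void *asoc;                    /* owning association */
	struct mock_queue reasm;       /* fragments of ordered messages */
	struct mock_queue reasm_uo;    /* fragments of unordered messages */
	struct mock_queue lobby;       /* complete events waiting on SSN order */
	int pd_mode;                   /* partial delivery in progress? */
};

static void mock_ulpq_init(struct mock_ulpq *q, void *asoc)
{
	memset(q, 0, sizeof(*q));      /* mirrors sctp_ulpq_init() */
	q->asoc = asoc;
}

static void mock_ulpq_flush(struct mock_ulpq *q)
{
	/* The kernel __skb_dequeue()s and frees every buffer on the
	 * lobby, reasm and reasm_uo queues; the model just resets. */
	q->lobby.len = q->reasm.len = q->reasm_uo.len = 0;
}

int main(void)
{
	struct mock_ulpq q;

	mock_ulpq_init(&q, NULL);
	mock_ulpq_flush(&q);           /* sctp_ulpq_free() is just a flush */
	printf("pd_mode=%d\n", q.pd_mode);
	return 0;
}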
82 int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
98 event = sctp_ulpq_reasm(ulpq, event);
107 event = sctp_ulpq_order(ulpq, event);
115 sctp_ulpq_tail_event(ulpq, &temp);
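Lines 82-115 are the receive path proper: sctp_ulpq_tail_data() turns a DATA chunk into a ulpevent, runs it through reassembly and then ordering, and only queues it on the socket if both stages let it through. A condensed, runnable sketch of that pipeline (the helper below is illustrative, not kernel code):

#include <stdio.h>

/* Each stage may swallow the event, exactly as the kernel stages
 * return NULL until they have something deliverable. */
static const char *tail_data_flow(int all_fragments_present, int ssn_in_order,
				  int unordered)
{
	if (!all_fragments_present)
		return "held on ulpq->reasm";       /* sctp_ulpq_reasm() kept it */
	if (!unordered && !ssn_in_order)
		return "held on ulpq->lobby";       /* sctp_ulpq_order() kept it */
	return "queued on sk_receive_queue";        /* sctp_ulpq_tail_event() */
}

int main(void)
{
	printf("%s\n", tail_data_flow(1, 0, 0));    /* complete but out of order */
	printf("%s\n", tail_data_flow(1, 0, 1));    /* unordered data skips the lobby */
	return 0;
}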
162 /* Set the pd_mode on the socket and ulpq */
163 static void sctp_ulpq_set_pd(struct sctp_ulpq *ulpq)
165 struct sctp_sock *sp = sctp_sk(ulpq->asoc->base.sk);
168 ulpq->pd_mode = 1;
172 static int sctp_ulpq_clear_pd(struct sctp_ulpq *ulpq)
174 ulpq->pd_mode = 0;
175 sctp_ulpq_reasm_drain(ulpq);
176 return sctp_clear_pd(ulpq->asoc->base.sk, ulpq->asoc);
179 int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sk_buff_head *skb_list)
181 struct sock *sk = ulpq->asoc->base.sk;
204 if (!sctp_ulpevent_is_enabled(event, ulpq->asoc->subscribe))
215 if (ulpq->pd_mode) {
249 sctp_ulpq_clear_pd(ulpq);
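Lines 162-249 manage partial delivery state in two places: a per-socket counter in sctp_sock (in current kernels an atomic_inc() accompanies line 168, which lets several associations be in PD at once) and the per-ulpq pd_mode flag; sctp_ulpq_clear_pd() also drains the reassembly queue before telling the socket. A toy model of the two-level bookkeeping, names illustrative:

#include <stdio.h>

struct mock_sock { int pd_mode; };             /* sctp_sk(sk)->pd_mode (atomic) */
struct mock_ulpq { struct mock_sock *sk; int pd_mode; };

static void set_pd(struct mock_ulpq *q)
{
	q->sk->pd_mode++;                      /* atomic_inc() in the kernel */
	q->pd_mode = 1;
}

static void clear_pd(struct mock_ulpq *q)
{
	q->pd_mode = 0;
	/* the kernel then runs sctp_ulpq_reasm_drain() and sctp_clear_pd() */
	q->sk->pd_mode--;
}

int main(void)
{
	struct mock_sock sk = { 0 };
	struct mock_ulpq a = { &sk, 0 }, b = { &sk, 0 };

	set_pd(&a);
	set_pd(&b);                            /* two associations in PD at once */
	clear_pd(&a);
	printf("socket still in PD: %d\n", sk.pd_mode > 0);
	return 0;
}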
267 static void sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq,
277 pos = skb_peek_tail(&ulpq->reasm);
279 __skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
287 __skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
292 skb_queue_walk(&ulpq->reasm, pos) {
301 __skb_queue_before(&ulpq->reasm, pos, sctp_event2skb(event));
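Lines 267-301: sctp_ulpq_store_reasm() keeps ulpq->reasm sorted by TSN, with a fast path that appends when the new fragment follows the current tail (the common in-order case) and a walk with __skb_queue_before() otherwise. The comparison must be 32-bit serial-number arithmetic so the sort survives TSN wraparound; a runnable model:

#include <stdint.h>
#include <stdio.h>

/* The usual serial-number comparison the kernel's TSN_lt() performs. */
#define TSN_lt(a, b) ((int32_t)((uint32_t)(a) - (uint32_t)(b)) < 0)

static void insert_by_tsn(uint32_t *q, int *len, uint32_t tsn)
{
	int i = *len;

	/* Shift back until tsn no longer sorts before its neighbor;
	 * models __skb_queue_before() on the walk position. */
	while (i > 0 && TSN_lt(tsn, q[i - 1])) {
		q[i] = q[i - 1];
		i--;
	}
	q[i] = tsn;
	(*len)++;
}

int main(void)
{
	uint32_t q[8];
	int len = 0;

	insert_by_tsn(q, &len, 0xfffffffe);
	insert_by_tsn(q, &len, 0x1);           /* sorts after 0xfffffffe: wrapped */
	insert_by_tsn(q, &len, 0xffffffff);    /* lands between the two */
	for (int i = 0; i < len; i++)
		printf("%08x\n", q[i]);
	return 0;
}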
393 static struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_ulpq *ulpq)
425 skb_queue_walk(&ulpq->reasm, pos) {
435 if (skb_queue_is_first(&ulpq->reasm, pos)) {
469 asoc = ulpq->asoc;
484 &ulpq->reasm,
487 sctp_ulpq_set_pd(ulpq);
493 retval = sctp_make_reassembled_event(ulpq->asoc->base.net,
494 &ulpq->reasm, first_frag, pos);
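Lines 393-494: sctp_ulpq_retrieve_reassembled() scans the TSN-sorted queue for a FIRST fragment followed by contiguous MIDDLE fragments and a terminating LAST fragment, then splices the run into one event with sctp_make_reassembled_event(); line 487 shows it can also push the socket into partial delivery from here. A simplified, runnable model of the contiguity scan (fragment flags reduced to an enum):

#include <stdint.h>
#include <stdio.h>

enum frag { FIRST, MIDDLE, LAST };
struct piece { uint32_t tsn; enum frag f; };

static int find_complete(const struct piece *q, int n, int *lo, int *hi)
{
	int first = -1;
	uint32_t next = 0;

	for (int i = 0; i < n; i++) {
		switch (q[i].f) {
		case FIRST:
			first = i;                     /* start a candidate run */
			next = q[i].tsn + 1;
			break;
		case MIDDLE:
			if (first < 0 || q[i].tsn != next)
				first = -1;            /* gap: abandon the run */
			else
				next++;
			break;
		case LAST:
			if (first >= 0 && q[i].tsn == next) {
				*lo = first;           /* contiguous run found */
				*hi = i;
				return 1;
			}
			first = -1;
			break;
		}
	}
	return 0;
}

int main(void)
{
	struct piece gap[]  = { {10, FIRST}, {11, MIDDLE}, {13, LAST} };
	struct piece full[] = { {10, FIRST}, {11, MIDDLE}, {12, LAST} };
	int lo = 0, hi = 0;

	printf("gap complete=%d\n", find_complete(gap, 3, &lo, &hi));     /* 0 */
	printf("full complete=%d (frags %d..%d)\n",
	       find_complete(full, 3, &lo, &hi), lo, hi);                 /* 1 (0..2) */
	return 0;
}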
501 static struct sctp_ulpevent *sctp_ulpq_retrieve_partial(struct sctp_ulpq *ulpq)
514 if (skb_queue_empty(&ulpq->reasm))
522 skb_queue_walk(&ulpq->reasm, pos) {
559 retval = sctp_make_reassembled_event(ulpq->asoc->base.net, &ulpq->reasm,
571 static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
582 sctp_ulpq_store_reasm(ulpq, event);
583 if (!ulpq->pd_mode)
584 retval = sctp_ulpq_retrieve_reassembled(ulpq);
592 ctsnap = sctp_tsnmap_get_ctsn(&ulpq->asoc->peer.tsn_map);
594 retval = sctp_ulpq_retrieve_partial(ulpq);
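Lines 501-594: sctp_ulpq_reasm() stores the fragment and then picks a retrieval strategy. Outside PD mode it hunts for a complete message; inside PD mode it only tries sctp_ulpq_retrieve_partial(), and only when the fragment's TSN is at or below the cumulative TSN ack point, i.e. its bytes are already known to be in sequence. The decision, condensed:

#include <stdint.h>
#include <stdio.h>

#define TSN_lte(a, b) ((int32_t)((uint32_t)(a) - (uint32_t)(b)) <= 0)

/* Condensed decision logic of sctp_ulpq_reasm(); the real function
 * returns a ulpevent or NULL. */
static const char *reasm_strategy(int pd_mode, uint32_t tsn, uint32_t ctsn)
{
	if (!pd_mode)
		return "retrieve_reassembled";      /* look for a full message */
	if (TSN_lte(tsn, ctsn))
		return "retrieve_partial";          /* in-order bytes: feed PD */
	return "store only";                        /* out of order: just hold it */
}

int main(void)
{
	printf("%s\n", reasm_strategy(1, 7, 9));    /* PD mode, tsn <= ctsn */
	printf("%s\n", reasm_strategy(1, 12, 9));   /* PD mode, tsn beyond ctsn */
	return 0;
}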
601 static struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *ulpq)
613 if (skb_queue_empty(&ulpq->reasm))
620 skb_queue_walk(&ulpq->reasm, pos) {
660 retval = sctp_make_reassembled_event(ulpq->asoc->base.net, &ulpq->reasm,
679 void sctp_ulpq_reasm_flushtsn(struct sctp_ulpq *ulpq, __u32 fwd_tsn)
685 if (skb_queue_empty(&ulpq->reasm))
688 skb_queue_walk_safe(&ulpq->reasm, pos, tmp) {
698 __skb_unlink(pos, &ulpq->reasm);
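Lines 601-698 cover sctp_ulpq_retrieve_first() (used by partial delivery below) and the FORWARD TSN cleanup of RFC 3758: once the peer abandons everything up to fwd_tsn, sctp_ulpq_reasm_flushtsn() unlinks and frees every fragment at or below that point, and can stop at the first newer fragment because the queue is TSN-sorted. A model of the prune:

#include <stdint.h>
#include <stdio.h>

#define TSN_lte(a, b) ((int32_t)((uint32_t)(a) - (uint32_t)(b)) <= 0)

/* Prune a TSN-sorted fragment list up to fwd_tsn, as in
 * sctp_ulpq_reasm_flushtsn(). Returns the new length. */
static int flush_tsn(uint32_t *q, int len, uint32_t fwd_tsn)
{
	int keep = 0;

	while (keep < len && TSN_lte(q[keep], fwd_tsn))
		keep++;                          /* kernel: __skb_unlink + free */
	for (int i = keep; i < len; i++)
		q[i - keep] = q[i];
	return len - keep;
}

int main(void)
{
	uint32_t q[] = { 5, 6, 9 };
	int len = flush_tsn(q, 3, 7);            /* drops 5 and 6, keeps 9 */

	printf("len=%d head=%u\n", len, q[0]);
	return 0;
}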
710 static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq)
714 if (skb_queue_empty(&ulpq->reasm))
717 while ((event = sctp_ulpq_retrieve_reassembled(ulpq)) != NULL) {
725 event = sctp_ulpq_order(ulpq, event);
731 sctp_ulpq_tail_event(ulpq, &temp);
739 static void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq,
749 stream = &ulpq->asoc->stream;
754 sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
773 __skb_unlink(pos, &ulpq->lobby);
781 static void sctp_ulpq_store_ordered(struct sctp_ulpq *ulpq,
789 pos = skb_peek_tail(&ulpq->lobby);
791 __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
802 __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
807 __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
814 skb_queue_walk(&ulpq->lobby, pos) {
827 __skb_queue_before(&ulpq->lobby, pos, sctp_event2skb(event));
830 static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq,
843 stream = &ulpq->asoc->stream;
850 sctp_ulpq_store_ordered(ulpq, event);
860 sctp_ulpq_retrieve_ordered(ulpq, event);
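Lines 739-860 implement stream ordering: sctp_ulpq_order() delivers a message only if its SSN is the one its stream expects; otherwise sctp_ulpq_store_ordered() files it in the lobby, kept sorted by stream id first and SSN second, and sctp_ulpq_retrieve_ordered() later pulls out every follower that has become deliverable. A runnable model of the decision plus the two-key insert:

#include <stdint.h>
#include <stdio.h>

/* 16-bit serial-number comparison, as the kernel's SSN_lt(). */
#define SSN_lt(a, b) ((int16_t)((uint16_t)(a) - (uint16_t)(b)) < 0)

struct held { uint16_t sid, ssn; };

/* Insert into the lobby keeping (sid, then ssn) order, as in
 * sctp_ulpq_store_ordered(). Model only; the kernel links sk_buffs. */
static void lobby_insert(struct held *q, int *len, struct held ev)
{
	int i = *len;

	while (i > 0 && (ev.sid < q[i - 1].sid ||
			 (ev.sid == q[i - 1].sid && SSN_lt(ev.ssn, q[i - 1].ssn)))) {
		q[i] = q[i - 1];
		i--;
	}
	q[i] = ev;
	(*len)++;
}

int main(void)
{
	struct held lobby[8] = { 0 };
	int len = 0;
	uint16_t expected = 4;                 /* sctp_ssn_peek(stream, in, sid) */
	struct held ev = { 1, 6 };             /* sid 1, ssn 6: out of order */

	if (ev.ssn != expected)
		lobby_insert(lobby, &len, ev); /* sctp_ulpq_order() stores it */
	printf("held=%d (sid=%u ssn=%u)\n", len, lobby[0].sid, lobby[0].ssn);
	return 0;
}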
868 static void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
875 struct sk_buff_head *lobby = &ulpq->lobby;
878 stream = &ulpq->asoc->stream;
930 sctp_ulpq_retrieve_ordered(ulpq, event);
931 sctp_ulpq_tail_event(ulpq, &temp);
938 void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn)
943 stream = &ulpq->asoc->stream;
955 sctp_ulpq_reap_ordered(ulpq, sid);
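Lines 868-955: when a FORWARD TSN abandons messages on a stream, sctp_ulpq_skip() ignores stale SSNs, advances the stream's expected SSN past the skipped one, and calls sctp_ulpq_reap_ordered() to flush the lobby entries that just became deliverable. The SSN bookkeeping, modeled:

#include <stdint.h>
#include <stdio.h>

#define SSN_lt(a, b) ((int16_t)((uint16_t)(a) - (uint16_t)(b)) < 0)

/* Model of sctp_ulpq_skip()'s SSN handling for one stream. */
static void skip_ssn(uint16_t *expected, uint16_t ssn)
{
	if (SSN_lt(ssn, *expected))    /* old SSN: nothing to do */
		return;
	*expected = ssn + 1;           /* sctp_ssn_skip(); then reap the lobby */
}

int main(void)
{
	uint16_t expected = 3;

	skip_ssn(&expected, 7);        /* sender abandoned SSNs 3..7 */
	printf("next expected ssn: %u\n", expected);
	return 0;
}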
958 __u16 sctp_ulpq_renege_list(struct sctp_ulpq *ulpq, struct sk_buff_head *list,
967 tsnmap = &ulpq->asoc->peer.tsn_map;
1007 static __u16 sctp_ulpq_renege_order(struct sctp_ulpq *ulpq, __u16 needed)
1009 return sctp_ulpq_renege_list(ulpq, &ulpq->lobby, needed);
1013 static __u16 sctp_ulpq_renege_frags(struct sctp_ulpq *ulpq, __u16 needed)
1015 return sctp_ulpq_renege_list(ulpq, &ulpq->reasm, needed);
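Lines 958-1015 are the renege machinery: sctp_ulpq_renege_list() frees events from the tail of a queue (the newest TSNs) toward the head until enough bytes are reclaimed, refuses to renege anything at or below the cumulative TSN ack point, and un-marks each reneged TSN in the peer's tsn_map so the sender will retransmit; the two wrappers aim it at the lobby first, then the reassembly queue. A simplified model:

#include <stdint.h>
#include <stdio.h>

#define TSN_lte(a, b) ((int32_t)((uint32_t)(a) - (uint32_t)(b)) <= 0)

struct ev { uint32_t tsn; uint16_t bytes; };

/* Free from the tail of a TSN-sorted queue until `needed` bytes are
 * reclaimed, never going at or below the cumulative TSN point `ctsn`. */
static uint16_t renege_list(struct ev *q, int *len, uint16_t needed,
			    uint32_t ctsn)
{
	uint16_t freed = 0;

	while (*len > 0 && freed < needed) {
		struct ev *tail = &q[*len - 1];

		if (TSN_lte(tail->tsn, ctsn))
			break;                  /* cumulatively ack'd: keep it */
		freed += tail->bytes;           /* kernel also calls sctp_tsnmap_renege() */
		(*len)--;
	}
	return freed;
}

int main(void)
{
	struct ev q[] = { {10, 100}, {11, 100}, {14, 100} };
	int len = 3;

	printf("freed=%u left=%d\n", renege_list(q, &len, 150, 10), len);
	return 0;
}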
1019 void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq,
1028 asoc = ulpq->asoc;
1034 if (ulpq->pd_mode)
1040 skb = skb_peek(&asoc->ulpq.reasm);
1054 event = sctp_ulpq_retrieve_first(ulpq);
1061 sctp_ulpq_tail_event(ulpq, &temp);
1062 sctp_ulpq_set_pd(ulpq);
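Lines 1019-1062: sctp_ulpq_partial_delivery() starts PD only if this ulpq is not already in it, the head fragment's TSN is at or below the cumulative ack point, and either fragment interleave is enabled or no other association on the socket holds PD; it then delivers the first fragment run via sctp_ulpq_retrieve_first() and sets pd_mode. The gating conditions, condensed (parameter names are mine):

#include <stdio.h>

/* Gating conditions for starting partial delivery, condensed from
 * sctp_ulpq_partial_delivery(). */
static int can_start_pd(int pd_mode, int head_tsn_le_ctsn,
			int frag_interleave, int sock_pd_count)
{
	if (pd_mode)
		return 0;                      /* already delivering partially */
	if (!head_tsn_le_ctsn)
		return 0;                      /* head fragment not yet in order */
	return frag_interleave || sock_pd_count == 0;
}

int main(void)
{
	printf("%d\n", can_start_pd(0, 1, 0, 0));   /* 1: PD may start */
	printf("%d\n", can_start_pd(0, 1, 0, 2));   /* 0: another assoc holds PD */
	return 0;
}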
1069 void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
1072 struct sctp_association *asoc = ulpq->asoc;
1080 freed = sctp_ulpq_renege_order(ulpq, needed);
1082 freed += sctp_ulpq_renege_frags(ulpq, needed - freed);
1087 int retval = sctp_ulpq_tail_data(ulpq, chunk, gfp);
1093 sctp_ulpq_partial_delivery(ulpq, gfp);
1095 sctp_ulpq_reasm_drain(ulpq);
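Lines 1069-1095 tie renege and PD together: compute the incoming chunk's payload size, renege ordered events first and fragments second until there is room, then accept the chunk with sctp_ulpq_tail_data(); if the message still cannot be delivered whole, enter partial delivery, otherwise drain the reassembly queue. The priorities as a sketch:

#include <stdio.h>

/* Control flow of sctp_ulpq_renege(), reduced to its decisions. */
static void renege_flow(int needed, int from_lobby, int from_reasm,
			int delivered)
{
	int freed = from_lobby;                 /* sctp_ulpq_renege_order() first */

	if (freed < needed)
		freed += from_reasm;            /* then sctp_ulpq_renege_frags() */
	if (freed < needed) {
		puts("chunk dropped: could not make room");
		return;
	}
	/* sctp_ulpq_tail_data() ran; <= 0 means the message is still partial */
	puts(delivered ? "delivered; drain reassembly queue"
		       : "incomplete; enter partial delivery");
}

int main(void)
{
	renege_flow(300, 200, 200, 0);          /* room found, message incomplete */
	return 0;
}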
1102 void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
1108 if (!ulpq->pd_mode)
1111 sk = ulpq->asoc->base.sk;
1113 if (sctp_ulpevent_type_enabled(ulpq->asoc->subscribe,
1115 ev = sctp_ulpevent_make_pdapi(ulpq->asoc,
1122 if ((sctp_ulpq_clear_pd(ulpq) || ev) && !sp->data_ready_signalled) {
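Lines 1102-1122: aborting partial delivery (for example at association teardown) queues an SCTP_PARTIAL_DELIVERY_EVENT marked SCTP_PARTIAL_DELIVERY_ABORTED if the user subscribed to it, clears PD state, and wakes the reader once through sk_data_ready() unless a wakeup is already signalled. A sketch of that wakeup discipline (mock types; behavior inferred from the lines above):

#include <stdio.h>

struct mock_sock { int data_ready_signalled; };

static void abort_pd(struct mock_sock *sk, int pd_mode, int subscribed,
		     int moved_data)
{
	int ev = 0;

	if (!pd_mode)
		return;
	if (subscribed)
		ev = 1;                /* sctp_ulpevent_make_pdapi(..ABORTED..), queued */
	/* sctp_ulpq_clear_pd() returns nonzero if held data just became
	 * readable; wake the reader once if there is anything to read
	 * (models the condition on line 1122). */
	if ((moved_data || ev) && !sk->data_ready_signalled) {
		sk->data_ready_signalled = 1;
		puts("sk_data_ready(sk)");
	}
}

int main(void)
{
	struct mock_sock sk = { 0 };

	abort_pd(&sk, 1, 1, 0);
	return 0;
}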