Lines matching defs:ulpq, limited to /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/net/sctp/

The matches below are the definition and use sites of struct sctp_ulpq, the SCTP upper-layer-protocol (ULP) event queue: it holds incoming DATA, converted to events, on a reassembly queue (reasm) and a per-stream ordering queue (lobby) until they can be delivered to the socket.

53 static struct sctp_ulpevent * sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
57 static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq);
62 struct sctp_ulpq *sctp_ulpq_init(struct sctp_ulpq *ulpq,
65 memset(ulpq, 0, sizeof(struct sctp_ulpq));
67 ulpq->asoc = asoc;
68 skb_queue_head_init(&ulpq->reasm);
69 skb_queue_head_init(&ulpq->lobby);
70 ulpq->pd_mode = 0;
71 ulpq->malloced = 0;
73 return ulpq;
78 void sctp_ulpq_flush(struct sctp_ulpq *ulpq)
83 while ((skb = __skb_dequeue(&ulpq->lobby)) != NULL) {
88 while ((skb = __skb_dequeue(&ulpq->reasm)) != NULL) {
96 void sctp_ulpq_free(struct sctp_ulpq *ulpq)
98 sctp_ulpq_flush(ulpq);
99 if (ulpq->malloced)
100 kfree(ulpq);
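
Together, the init/flush/free matches above sketch the queue's lifecycle: line 65 zeroes the structure, lines 68-69 initialize the two holding queues, and the kfree at line 100 fires only when the malloced flag is set. A minimal userspace sketch of the same pattern; the ulpq and event_queue types here are simplified stand-ins for the kernel's structures, not its API:

    #include <stdlib.h>
    #include <string.h>

    struct event;                                       /* opaque queued event */
    struct event_queue { struct event *head, *tail; };  /* stand-in for sk_buff_head */

    struct ulpq {
        void *asoc;                   /* owning association */
        struct event_queue reasm;     /* fragments awaiting reassembly */
        struct event_queue lobby;     /* events awaiting per-stream ordering */
        int pd_mode;                  /* partial delivery in progress? */
        int malloced;                 /* should ulpq_free() also free(q)? */
    };

    struct ulpq *ulpq_init(struct ulpq *q, void *asoc)
    {
        memset(q, 0, sizeof(*q));     /* zero everything, as the memset at line 65 */
        q->asoc = asoc;               /* both queues are empty after the memset */
        return q;
    }

    void ulpq_free(struct ulpq *q)
    {
        /* a faithful version would first flush both queues, freeing
           every pending event, as sctp_ulpq_flush does at lines 83-88 */
        if (q->malloced)
            free(q);
    }
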
104 int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
119 event = sctp_ulpq_reasm(ulpq, event);
127 event = sctp_ulpq_order(ulpq, event);
134 sctp_ulpq_tail_event(ulpq, event);
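
Lines 104-134 show the path a DATA chunk takes on receive: reassembly first, then stream ordering, then delivery to the socket, where either of the first two stages may swallow the event until more data arrives. A control-flow sketch, with hypothetical stage names standing in for the sctp_ulpq_* calls:

    struct ulpq;                      /* as sketched above */
    struct event;

    /* Hypothetical stage signatures mirroring the calls at lines 119,
     * 127 and 134. A NULL return means "parked internally, nothing
     * deliverable yet". */
    struct event *reasm(struct ulpq *q, struct event *ev);
    struct event *order(struct ulpq *q, struct event *ev);
    void tail_event(struct ulpq *q, struct event *ev);

    int tail_data(struct ulpq *q, struct event *ev)
    {
        ev = reasm(q, ev);            /* reassemble fragments, or park this one */
        if (!ev)
            return 0;                 /* fragments still missing */
        ev = order(q, ev);            /* hold until earlier SSNs have arrived */
        if (!ev)
            return 0;                 /* an earlier event in this stream is missing */
        tail_event(q, ev);            /* queue on the socket's receive queue */
        return 0;
    }
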
182 /* Set the pd_mode on the socket and ulpq */
183 static void sctp_ulpq_set_pd(struct sctp_ulpq *ulpq)
185 struct sctp_sock *sp = sctp_sk(ulpq->asoc->base.sk);
188 ulpq->pd_mode = 1;
192 static int sctp_ulpq_clear_pd(struct sctp_ulpq *ulpq)
194 ulpq->pd_mode = 0;
195 sctp_ulpq_reasm_drain(ulpq);
196 return sctp_clear_pd(ulpq->asoc->base.sk, ulpq->asoc);
202 int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
204 struct sock *sk = ulpq->asoc->base.sk;
229 if (ulpq->pd_mode) {
269 sctp_ulpq_clear_pd(ulpq);
287 static void sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq,
297 pos = skb_peek_tail(&ulpq->reasm);
299 __skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
307 __skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
312 skb_queue_walk(&ulpq->reasm, pos) {
321 __skb_queue_before(&ulpq->reasm, pos, sctp_event2skb(event));
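
Lines 287-321 keep the reassembly queue sorted by TSN: append when the new event's TSN is not smaller than the tail's, otherwise walk to the first larger TSN and insert before it. A sketch of that insertion, assuming a simplified singly linked list in place of sk_buff_head; the wraparound-safe comparison mirrors the kernel's TSN_lt():

    #include <stdint.h>

    struct frag { uint32_t tsn; struct frag *next; };

    /* Serial-number comparison, as the kernel's TSN_lt(): correct
     * across 32-bit sequence wraparound (RFC 1982 style). */
    static int tsn_lt(uint32_t a, uint32_t b)
    {
        return (int32_t)(a - b) < 0;
    }

    /* Insert so the list stays sorted by TSN. The kernel first peeks
     * the tail (line 297) so the common in-order arrival appends in
     * O(1); this sketch keeps only the ordered walk. */
    void store_reasm(struct frag **head, struct frag *ev)
    {
        struct frag **pp = head;
        while (*pp && !tsn_lt(ev->tsn, (*pp)->tsn))
            pp = &(*pp)->next;        /* advance past every TSN <= ours */
        ev->next = *pp;               /* insert before the first larger TSN */
        *pp = ev;
    }
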
409 static struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_ulpq *ulpq)
441 skb_queue_walk(&ulpq->reasm, pos) {
451 if (pos == ulpq->reasm.next) {
485 asoc = ulpq->asoc;
499 retval = sctp_make_reassembled_event(&ulpq->reasm,
503 sctp_ulpq_set_pd(ulpq);
509 retval = sctp_make_reassembled_event(&ulpq->reasm, first_frag, pos);
516 static struct sctp_ulpevent *sctp_ulpq_retrieve_partial(struct sctp_ulpq *ulpq)
529 if (skb_queue_empty(&ulpq->reasm))
537 skb_queue_walk(&ulpq->reasm, pos) {
569 retval = sctp_make_reassembled_event(&ulpq->reasm, first_frag, last_frag);
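
The two retrieve helpers walk that sorted queue looking for a usable run of fragments: retrieve_reassembled wants a gap-free FIRST..LAST sequence of consecutive TSNs, while retrieve_partial (used once partial delivery has begun) settles for the front of such a run. A sketch of the complete-run scan, with simplified fragment flags; the kernel then splices the run into one event via sctp_make_reassembled_event():

    #include <stdint.h>
    #include <stddef.h>

    enum fragtype { FIRST_FRAG, MIDDLE_FRAG, LAST_FRAG, WHOLE_MSG };

    struct frag { uint32_t tsn; enum fragtype type; struct frag *next; };

    /* Scan a TSN-sorted fragment list for a gap-free FIRST..LAST run;
     * return its first fragment, or NULL. The shape of the walk at
     * lines 441-509. */
    struct frag *find_complete_run(struct frag *head)
    {
        struct frag *first = NULL;
        uint32_t next_tsn = 0;

        for (struct frag *p = head; p; p = p->next) {
            switch (p->type) {
            case WHOLE_MSG:
                return p;                     /* unfragmented message */
            case FIRST_FRAG:
                first = p;
                next_tsn = p->tsn + 1;        /* expect consecutive TSNs */
                break;
            case MIDDLE_FRAG:
                if (first && p->tsn == next_tsn)
                    next_tsn++;
                else
                    first = NULL;             /* gap: restart the search */
                break;
            case LAST_FRAG:
                if (first && p->tsn == next_tsn)
                    return first;             /* complete run found */
                first = NULL;
                break;
            }
        }
        return NULL;
    }
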
580 static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
591 sctp_ulpq_store_reasm(ulpq, event);
592 if (!ulpq->pd_mode)
593 retval = sctp_ulpq_retrieve_reassembled(ulpq);
601 ctsnap = sctp_tsnmap_get_ctsn(&ulpq->asoc->peer.tsn_map);
603 retval = sctp_ulpq_retrieve_partial(ulpq);
610 static struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *ulpq)
622 if (skb_queue_empty(&ulpq->reasm))
629 skb_queue_walk(&ulpq->reasm, pos) {
661 retval = sctp_make_reassembled_event(&ulpq->reasm, first_frag, last_frag);
679 void sctp_ulpq_reasm_flushtsn(struct sctp_ulpq *ulpq, __u32 fwd_tsn)
685 if (skb_queue_empty(&ulpq->reasm))
688 skb_queue_walk_safe(&ulpq->reasm, pos, tmp) {
698 __skb_unlink(pos, &ulpq->reasm);
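
sctp_ulpq_reasm_flushtsn reacts to a FORWARD-TSN from the peer: every queued fragment whose TSN the peer has abandoned is unlinked and freed, since its message can never be completed. A sketch of the safe walk-and-unlink at lines 688-698, again on a simplified list:

    #include <stdint.h>
    #include <stdlib.h>

    struct frag { uint32_t tsn; struct frag *next; };

    static int tsn_lte(uint32_t a, uint32_t b)   /* serial-number <= */
    {
        return (int32_t)(a - b) <= 0;
    }

    void reasm_flushtsn(struct frag **head, uint32_t fwd_tsn)
    {
        struct frag **pp = head;
        while (*pp) {
            if (tsn_lte((*pp)->tsn, fwd_tsn)) {
                struct frag *dead = *pp;
                *pp = dead->next;     /* unlink before freeing */
                free(dead);
            } else {
                pp = &(*pp)->next;
            }
        }
    }
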
710 static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq)
715 if (skb_queue_empty(&ulpq->reasm))
718 while ((event = sctp_ulpq_retrieve_reassembled(ulpq)) != NULL) {
724 event = sctp_ulpq_order(ulpq, event);
731 sctp_ulpq_tail_event(ulpq, event);
739 static void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq,
751 in = &ulpq->asoc->ssnmap->in;
756 sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
775 __skb_unlink(pos, &ulpq->lobby);
783 static void sctp_ulpq_store_ordered(struct sctp_ulpq *ulpq,
791 pos = skb_peek_tail(&ulpq->lobby);
793 __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
804 __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
809 __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
816 skb_queue_walk(&ulpq->lobby, pos) {
829 __skb_queue_before(&ulpq->lobby, pos, sctp_event2skb(event));
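
The lobby is kept sorted on two keys: stream id first, then SSN within the stream, with the same append-to-tail fast path as the reassembly queue (line 791). A sketch of the two-key insertion; ssn_lt() is an assumed 16-bit analogue of the kernel's SSN comparison macros:

    #include <stdint.h>

    struct oevent { uint16_t sid, ssn; struct oevent *next; };

    static int ssn_lt(uint16_t a, uint16_t b)    /* 16-bit serial compare */
    {
        return (int16_t)(a - b) < 0;
    }

    /* Keep the lobby sorted by (stream id, then SSN), matching the
     * two-level walk at lines 816-829. */
    void store_ordered(struct oevent **head, struct oevent *ev)
    {
        struct oevent **pp = head;
        while (*pp && ((*pp)->sid < ev->sid ||
                       ((*pp)->sid == ev->sid && !ssn_lt(ev->ssn, (*pp)->ssn))))
            pp = &(*pp)->next;        /* advance past earlier streams and SSNs */
        ev->next = *pp;
        *pp = ev;
    }
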
832 static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq,
845 in = &ulpq->asoc->ssnmap->in;
852 sctp_ulpq_store_ordered(ulpq, event);
862 sctp_ulpq_retrieve_ordered(ulpq, event);
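
sctp_ulpq_order delivers an event only when it carries the next expected SSN for its stream; otherwise the event is parked in the lobby, and each successful delivery triggers retrieve_ordered (lines 739-775) to release any successors that have become in-order. A sketch of that loop, assuming a flat expected-SSN array in place of the kernel's ssnmap:

    #include <stdint.h>

    struct oevent { uint16_t sid, ssn; struct oevent *next; };

    static uint16_t expected_ssn[1 << 16];   /* next SSN per stream; the
                                                kernel keeps this in
                                                asoc->ssnmap->in */

    static void deliver(struct oevent *ev)
    {
        (void)ev;                    /* kernel: sctp_ulpq_tail_event() */
    }

    void order(struct oevent **lobby, struct oevent *ev)
    {
        if (ev->ssn != expected_ssn[ev->sid]) {
            ev->next = *lobby;       /* simplified head insert; the kernel
                                        inserts sorted, as in store_ordered */
            *lobby = ev;
            return;
        }
        deliver(ev);
        expected_ssn[ev->sid]++;     /* kernel: sctp_ssn_next() */

        /* drain every lobby event that is now in order for this stream */
        for (struct oevent **pp = lobby; *pp; ) {
            if ((*pp)->sid == ev->sid && (*pp)->ssn == expected_ssn[ev->sid]) {
                struct oevent *hit = *pp;
                *pp = hit->next;     /* unlink and deliver */
                deliver(hit);
                expected_ssn[ev->sid]++;
                pp = lobby;          /* rescan: the next SSN may sit
                                        earlier in this unsorted list */
            } else {
                pp = &(*pp)->next;
            }
        }
    }
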
870 static void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
877 struct sk_buff_head *lobby = &ulpq->lobby;
880 in = &ulpq->asoc->ssnmap->in;
932 sctp_ulpq_retrieve_ordered(ulpq, event);
933 sctp_ulpq_tail_event(ulpq, event);
940 void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn)
945 in = &ulpq->asoc->ssnmap->in;
957 sctp_ulpq_reap_ordered(ulpq, sid);
960 static __u16 sctp_ulpq_renege_list(struct sctp_ulpq *ulpq,
969 tsnmap = &ulpq->asoc->peer.tsn_map;
986 static __u16 sctp_ulpq_renege_order(struct sctp_ulpq *ulpq, __u16 needed)
988 return sctp_ulpq_renege_list(ulpq, &ulpq->lobby, needed);
992 static __u16 sctp_ulpq_renege_frags(struct sctp_ulpq *ulpq, __u16 needed)
994 return sctp_ulpq_renege_list(ulpq, &ulpq->reasm, needed);
998 void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq,
1006 asoc = ulpq->asoc;
1012 if (ulpq->pd_mode)
1022 event = sctp_ulpq_retrieve_first(ulpq);
1025 sctp_ulpq_tail_event(ulpq, event);
1026 sctp_ulpq_set_pd(ulpq);
1033 void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
1039 asoc = ulpq->asoc;
1050 freed = sctp_ulpq_renege_order(ulpq, needed);
1052 freed += sctp_ulpq_renege_frags(ulpq, needed - freed);
1060 sctp_ulpq_tail_data(ulpq, chunk, gfp);
1062 sctp_ulpq_partial_delivery(ulpq, chunk, gfp);
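
When receive buffer space runs out, sctp_ulpq_renege frees data that was already acknowledged, relying on the peer to retransmit it. Lines 1050-1062 show the policy: evict ordered-but-undelivered events first, then reassembly fragments, and only then retry the incoming chunk or fall back to partial delivery. A sketch of the byte-counting eviction on a simplified queue:

    #include <stdint.h>
    #include <stdlib.h>

    struct qevent { uint32_t len; struct qevent *next; };

    /* Free events until at least `needed` bytes are reclaimed; return
     * the bytes actually freed. The kernel dequeues from the tail
     * (newest data first) and marks each victim's TSN as renege-able
     * in the peer's tsn_map (line 969); this sketch pops from the
     * head for brevity. */
    static uint32_t renege_list(struct qevent **head, uint32_t needed)
    {
        uint32_t freed = 0;
        while (*head && freed < needed) {
            struct qevent *victim = *head;
            *head = victim->next;
            freed += victim->len;
            free(victim);
        }
        return freed;
    }

    /* The eviction order at lines 1050-1052: lobby first, then reasm. */
    uint32_t renege(struct qevent **lobby, struct qevent **reasm,
                    uint32_t needed)
    {
        uint32_t freed = renege_list(lobby, needed);
        if (freed < needed)
            freed += renege_list(reasm, needed - freed);
        return freed;
    }
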
1073 void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
1078 if (!ulpq->pd_mode)
1081 sk = ulpq->asoc->base.sk;
1084 ev = sctp_ulpevent_make_pdapi(ulpq->asoc,
1091 if (sctp_ulpq_clear_pd(ulpq) || ev)
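
Partial delivery is the pressure valve for a message too large to buffer whole: sctp_ulpq_set_pd (line 183) flags the socket and ulpq so the front of the message streams to the reader early, sctp_ulpq_clear_pd (line 192) drains the reassembly queue and restores normal delivery, and sctp_ulpq_abort_pd queues an SCTP_PARTIAL_DELIVERY_ABORTED notification if the mode must end with the message incomplete. A sketch of the flag lifecycle only; the kernel's event construction and draining are noted in comments:

    struct pd_state { int pd_mode; };    /* stand-in for ulpq->pd_mode */

    void pd_set(struct pd_state *s)
    {
        s->pd_mode = 1;                  /* one message now streams to the
                                            reader before it is complete */
    }

    int pd_clear(struct pd_state *s)
    {
        s->pd_mode = 0;
        /* the kernel also drains reasm (line 195) and calls
           sctp_clear_pd() on the socket so normal delivery resumes */
        return 0;
    }

    void pd_abort(struct pd_state *s)
    {
        if (!s->pd_mode)
            return;                      /* nothing mid-delivery: no event */
        /* the kernel first queues SCTP_PARTIAL_DELIVERY_ABORTED for
           the reader (sctp_ulpevent_make_pdapi, line 1084) */
        pd_clear(s);
    }
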