Matches in /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/net/ipv6/

Lines Matching defs:fq

97 static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
134 struct frag_queue *fq;
136 fq = container_of(q, struct frag_queue, q);
137 return inet6_hash_frag(fq->id, &fq->saddr, &fq->daddr, ip6_frags.rnd);
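
These matches come from the IPv6 fragment-reassembly code of this 2.6.36 tree; fq is the per-datagram queue, struct frag_queue, which embeds the generic struct inet_frag_queue as its q member. Lines 134-137 above are the body of the hash callback: the bucket index is derived from the fragment ID and both addresses, salted with the per-table random value ip6_frags.rnd so remote senders cannot aim fragments at a chosen bucket. A minimal userspace sketch of that keying scheme follows; mix3() is an illustrative stand-in for the kernel's jhash_3words(), and the table size and constants are assumptions, not values from this tree.

    /* Illustrative userspace sketch of the fragment-queue hashing.
     * mix3() stands in for the kernel's jhash_3words(); table size
     * and constants are assumed, not taken from this tree. */
    #include <stdint.h>
    #include <stdio.h>

    #define HASHSZ 64                      /* assumed power-of-two table */

    struct in6_key { uint32_t s6_addr32[4]; };

    /* Mix three 32-bit words with a seed (jhash_3words stand-in). */
    static uint32_t mix3(uint32_t a, uint32_t b, uint32_t c, uint32_t seed)
    {
        uint32_t h = seed;

        h ^= a; h *= 0x9e3779b1u;
        h ^= b; h *= 0x85ebca77u;
        h ^= c; h *= 0xc2b2ae3du;
        return h ^ (h >> 16);
    }

    /* Same shape as inet6_hash_frag(): fold id, saddr, daddr and the
     * per-table random seed into a bucket index. */
    static unsigned int hash_frag(uint32_t id, const struct in6_key *saddr,
                                  const struct in6_key *daddr, uint32_t rnd)
    {
        uint32_t c;

        c = mix3(saddr->s6_addr32[0], saddr->s6_addr32[1],
                 saddr->s6_addr32[2], rnd);
        c = mix3(saddr->s6_addr32[3], daddr->s6_addr32[0],
                 daddr->s6_addr32[1], c);
        c = mix3(daddr->s6_addr32[2], daddr->s6_addr32[3], id, c);
        return c & (HASHSZ - 1);
    }

    int main(void)
    {
        struct in6_key s = { { 0x20010db8u, 0, 0, 1 } };
        struct in6_key d = { { 0x20010db8u, 0, 0, 2 } };

        printf("bucket %u\n", hash_frag(0xabcd1234u, &s, &d, 0xdeadbeefu));
        return 0;
    }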
142 struct frag_queue *fq;
145 fq = container_of(q, struct frag_queue, q);
146 return (fq->id == arg->id && fq->user == arg->user &&
147 ipv6_addr_equal(&fq->saddr, arg->src) &&
148 ipv6_addr_equal(&fq->daddr, arg->dst));
154 struct frag_queue *fq = container_of(q, struct frag_queue, q);
157 fq->id = arg->id;
158 fq->user = arg->user;
159 ipv6_addr_copy(&fq->saddr, arg->src);
160 ipv6_addr_copy(&fq->daddr, arg->dst);
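
Lines 142-148 (the match callback) and 154-160 (the init callback) show the lookup key of a queue: the tuple (id, user, saddr, daddr). On lookup a candidate queue is compared field by field; on creation the same fields are copied in from the lookup argument. A standalone sketch of that key and its equality test, with hypothetical names (struct frag_key, key_equal):

    /* Hypothetical sketch of the reassembly lookup key shown above: a
     * queue matches only when fragment ID, owner and both addresses
     * all agree. */
    #include <stdbool.h>
    #include <stdint.h>
    #include <string.h>

    struct in6_key { uint32_t s6_addr32[4]; };

    struct frag_key {
        uint32_t id;              /* Fragment header Identification */
        uint32_t user;            /* queue owner, e.g. local delivery */
        struct in6_key saddr;
        struct in6_key daddr;
    };

    /* Mirrors the field-by-field test of the match callback. */
    static bool key_equal(const struct frag_key *a, const struct frag_key *b)
    {
        return a->id == b->id && a->user == b->user &&
               memcmp(&a->saddr, &b->saddr, sizeof(a->saddr)) == 0 &&
               memcmp(&a->daddr, &b->daddr, sizeof(a->daddr)) == 0;
    }

    int main(void)
    {
        struct frag_key a = { .id = 0x1234, .user = 0 };
        struct frag_key b = a;

        return key_equal(&a, &b) ? 0 : 1;
    }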
166 static __inline__ void fq_put(struct frag_queue *fq)
168 inet_frag_put(&fq->q, &ip6_frags);
171 /* Kill fq entry. It is not destroyed immediately,
174 static __inline__ void fq_kill(struct frag_queue *fq)
176 inet_frag_kill(&fq->q, &ip6_frags);
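
fq_put() and fq_kill() (lines 166-176) split teardown in two steps. As the truncated comment at line 171 says, killing does not destroy the entry immediately: inet_frag_kill() only unhashes it, stops its timer and marks it complete, because the caller (and possibly others) still holds a reference; the memory is freed when the last inet_frag_put() drops the count to zero. A userspace sketch of that split, with illustrative names:

    /* Userspace sketch of the kill/put split; names are illustrative,
     * not the kernel API. */
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdlib.h>

    struct queue {
        atomic_int refcnt;
        bool dead;                /* INET_FRAG_COMPLETE analogue */
    };

    /* "Kill": detach and mark dead, but leave the memory alone. */
    static void queue_kill(struct queue *q)
    {
        q->dead = true;           /* unhash + timer stop would go here */
    }

    /* "Put": the destructor runs only on the last reference. */
    static void queue_put(struct queue *q)
    {
        if (atomic_fetch_sub(&q->refcnt, 1) == 1)
            free(q);
    }

    int main(void)
    {
        struct queue *q = calloc(1, sizeof(*q));

        if (!q)
            return 1;
        atomic_store(&q->refcnt, 2); /* say: hash table + this caller */
        queue_kill(q);               /* entry is dead, still referenced */
        queue_put(q);                /* table's reference */
        queue_put(q);                /* last reference: freed here */
        return 0;
    }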
190 struct frag_queue *fq;
194 fq = container_of((struct inet_frag_queue *)data, struct frag_queue, q);
196 spin_lock(&fq->q.lock);
198 if (fq->q.last_in & INET_FRAG_COMPLETE)
201 fq_kill(fq);
203 net = container_of(fq->q.net, struct net, ipv6.frags);
205 dev = dev_get_by_index_rcu(net, fq->iif);
213 if (!(fq->q.last_in & INET_FRAG_FIRST_IN) || !fq->q.fragments)
218 segment was received. And do not use fq->dev
221 fq->q.fragments->dev = dev;
222 icmpv6_send(fq->q.fragments, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0);
226 spin_unlock(&fq->q.lock);
227 fq_put(fq);
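
Lines 190-227 are the reassembly timer handler. Under the queue spinlock it returns early if reassembly already completed (line 198), kills the queue, and sends ICMPV6_TIME_EXCEED with code ICMPV6_EXC_FRAGTIME only when the first fragment was received (line 213), since the ICMP error must quote the offending header; it then unlocks and drops the timer's reference (lines 226-227). A control-flow sketch with stubbed primitives; the struct fq fields below are illustrative:

    /* Control-flow sketch of the timer handler; the primitives are
     * stubbed and the struct fields are illustrative. */
    #include <stdbool.h>
    #include <stdio.h>

    #define FRAG_FIRST_IN 0x1
    #define FRAG_COMPLETE 0x4

    struct fq { int flags; bool has_fragments; bool dev_present; };

    static void frag_expire(struct fq *q)
    {
        /* spin_lock(&fq->q.lock) would be taken here */
        if (q->flags & FRAG_COMPLETE)
            return;                 /* reassembly already finished */

        q->flags |= FRAG_COMPLETE;  /* fq_kill(): accept no more data */

        /* Report Time Exceeded only if the first fragment (with the
         * upper-layer header) arrived and the device still exists,
         * so the ICMP error can quote the offending packet. */
        if ((q->flags & FRAG_FIRST_IN) && q->has_fragments && q->dev_present)
            puts("icmpv6: ICMPV6_TIME_EXCEED / ICMPV6_EXC_FRAGTIME");

        /* spin_unlock + fq_put() (drop the timer's reference) follow */
    }

    int main(void)
    {
        struct fq q = { .flags = FRAG_FIRST_IN,
                        .has_fragments = true, .dev_present = true };

        frag_expire(&q);
        return 0;
    }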
252 static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
260 if (fq->q.last_in & INET_FRAG_COMPLETE)
288 if (end < fq->q.len ||
289 ((fq->q.last_in & INET_FRAG_LAST_IN) && end != fq->q.len))
291 fq->q.last_in |= INET_FRAG_LAST_IN;
292 fq->q.len = end;
307 if (end > fq->q.len) {
309 if (fq->q.last_in & INET_FRAG_LAST_IN)
311 fq->q.len = end;
329 prev = fq->q.fragments_tail;
335 for(next = fq->q.fragments; next != NULL; next = next->next) {
366 fq->q.fragments_tail = skb;
370 fq->q.fragments = skb;
374 fq->iif = dev->ifindex;
377 fq->q.stamp = skb->tstamp;
378 fq->q.meat += skb->len;
379 atomic_add(skb->truesize, &fq->q.net->mem);
385 fq->nhoffset = nhoff;
386 fq->q.last_in |= INET_FRAG_FIRST_IN;
389 if (fq->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
390 fq->q.meat == fq->q.len)
391 return ip6_frag_reasm(fq, prev, dev);
394 list_move_tail(&fq->q.lru_list, &fq->q.net->lru_list);
399 fq_kill(fq);
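
ip6_frag_queue() (lines 252-399) inserts one fragment and keeps the datagram length consistent. The last fragment fixes the total length, and a "last" fragment that contradicts what is already known is rejected (lines 288-292); a middle fragment may extend the length only while no last fragment has been seen (lines 307-311). Reassembly is triggered at line 391 only once both the first and last fragments are in and the accumulated payload (fq->q.meat) equals the expected length. A minimal sketch of just that bookkeeping, with hypothetical names:

    /* Sketch of the length bookkeeping: 'end' is the byte offset just
     * past this fragment, 'len' the datagram length learned so far,
     * 'meat' the payload accumulated. Names are hypothetical. */
    #include <stdbool.h>
    #include <stdint.h>

    #define FRAG_FIRST_IN 0x1
    #define FRAG_LAST_IN  0x2

    struct reasm {
        int flags;
        uint32_t len;             /* fixed once the last fragment is seen */
        uint32_t meat;            /* bytes received across all fragments */
    };

    /* Returns false when a fragment contradicts what is already known. */
    static bool account_fragment(struct reasm *r, uint32_t end, bool last)
    {
        if (last) {
            /* The last fragment fixes the length; it may not shrink
             * it, and two "last" fragments must agree. */
            if (end < r->len ||
                ((r->flags & FRAG_LAST_IN) && end != r->len))
                return false;
            r->flags |= FRAG_LAST_IN;
            r->len = end;
        } else if (end > r->len) {
            /* A middle fragment may extend the length only while the
             * last one has not been seen yet. */
            if (r->flags & FRAG_LAST_IN)
                return false;
            r->len = end;
        }
        return true;
    }

    /* Reassemble once both ends were seen and no bytes are missing. */
    static bool reasm_ready(const struct reasm *r)
    {
        return r->flags == (FRAG_FIRST_IN | FRAG_LAST_IN) &&
               r->meat == r->len;
    }

    int main(void)
    {
        struct reasm r = { .flags = FRAG_FIRST_IN };

        if (!account_fragment(&r, 1240, false))   /* middle fragment */
            return 1;
        r.meat += 1240;
        if (!account_fragment(&r, 2480, true))    /* last fragment */
            return 1;
        r.meat += 1240;
        return reasm_ready(&r) ? 0 : 1;
    }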
412 * It is called with locked fq, and caller must check that
416 static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
419 struct net *net = container_of(fq->q.net, struct net, ipv6.frags);
420 struct sk_buff *fp, *head = fq->q.fragments;
424 fq_kill(fq);
436 fq->q.fragments_tail = fp;
439 skb_morph(head, fq->q.fragments);
440 head->next = fq->q.fragments->next;
442 kfree_skb(fq->q.fragments);
443 fq->q.fragments = head;
451 sizeof(struct ipv6hdr) + fq->q.len -
480 atomic_add(clone->truesize, &fq->q.net->mem);
485 nhoff = fq->nhoffset;
505 atomic_sub(head->truesize, &fq->q.net->mem);
509 head->tstamp = fq->q.stamp;
522 fq->q.fragments = NULL;
523 fq->q.fragments_tail = NULL;
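
ip6_frag_reasm() (lines 412-523) runs with the queue still locked: it kills the queue, stitches the in-order fragment list into one packet (morphing the head when the completing fragment was not the first, lines 439-443), adjusts the memory accounting (lines 480 and 505), and finally clears the queue's list pointers (lines 522-523). The kernel coalesces by chaining buffers rather than copying; the userspace sketch below shows only the ordering invariant, with illustrative names:

    /* Userspace sketch of the coalescing invariant only: fragments are
     * copied out in list order. The kernel chains buffers instead of
     * copying; all names here are illustrative. */
    #include <stdlib.h>
    #include <string.h>

    struct frag { struct frag *next; size_t len; unsigned char *data; };

    /* Concatenate every fragment's payload, in order, into one buffer. */
    static unsigned char *coalesce(const struct frag *head, size_t total)
    {
        unsigned char *pkt = malloc(total);
        size_t off = 0;

        if (!pkt)
            return NULL;
        for (const struct frag *fp = head; fp; fp = fp->next) {
            memcpy(pkt + off, fp->data, fp->len);
            off += fp->len;
        }
        return pkt;
    }

    int main(void)
    {
        unsigned char a[4] = "abc", b[4] = "def";
        struct frag f2 = { NULL, sizeof(b), b };
        struct frag f1 = { &f2, sizeof(a), a };

        free(coalesce(&f1, sizeof(a) + sizeof(b)));
        return 0;
    }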
543 struct frag_queue *fq;
573 fq = fq_find(net, fhdr->identification, &hdr->saddr, &hdr->daddr);
574 if (fq != NULL) {
577 spin_lock(&fq->q.lock);
579 ret = ip6_frag_queue(fq, skb, fhdr, IP6CB(skb)->nhoff);
581 spin_unlock(&fq->q.lock);
582 fq_put(fq);
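
The final matches (lines 543-582) are the receive path: fq_find() looks up or creates the queue for this fragment and returns it with a reference held, all per-queue work happens under the queue spinlock, and fq_put() releases the lookup reference afterwards. A userspace sketch of that find/lock/queue/unlock/put pattern; every name below is a stand-in:

    /* Userspace sketch of the find/lock/queue/unlock/put pattern; every
     * name is a stand-in (compile with -pthread). */
    #include <pthread.h>

    struct queue { pthread_mutex_t lock; int refcnt; };

    static struct queue the_q = { PTHREAD_MUTEX_INITIALIZER, 1 };

    /* fq_find() analogue: returns the queue with a reference held. */
    static struct queue *find_or_create(void) { return &the_q; }

    /* ip6_frag_queue() analogue: must be called with the lock held. */
    static int enqueue_fragment(struct queue *q) { (void)q; return 0; }

    /* fq_put() analogue: releases the reference from the lookup. */
    static void put_queue(struct queue *q) { q->refcnt--; }

    static int rcv_fragment(void)
    {
        struct queue *q = find_or_create();
        int ret = -1;                     /* no queue: drop the fragment */

        if (q != NULL) {
            pthread_mutex_lock(&q->lock); /* spin_lock(&fq->q.lock) */
            ret = enqueue_fragment(q);
            pthread_mutex_unlock(&q->lock);
            put_queue(q);                 /* drop the lookup reference */
        }
        return ret;
    }

    int main(void)
    {
        return rcv_fragment();
    }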