Lines matching refs:qp (references to the identifier qp in net/ipv4/ip_fragment.c, the Linux IPv4 fragment reassembly code)

78 static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
84 struct ipq *qp = container_of(q, struct ipq, q);
90 qp->ecn = 0;
91 qp->peer = q->fqdir->max_dist ?
98 struct ipq *qp;
100 qp = container_of(q, struct ipq, q);
101 if (qp->peer)
102 inet_putpeer(qp->peer);
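
Three of these matches (lines 84, 100, and 142) are the same idiom: struct ipq embeds the protocol-independent struct inet_frag_queue as its member q, and container_of() recovers the enclosing ipq from a pointer to that member. A minimal userspace sketch of the idiom, with illustrative names (not kernel code):

#include <stddef.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's container_of() macro. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct inner { int refcnt; };                /* generic part, like inet_frag_queue */

struct outer {
	int extra_state;                     /* protocol state, like ipq's ecn/peer */
	struct inner q;                      /* embedded generic object             */
};

int main(void)
{
	struct outer o = { .extra_state = 42 };
	struct inner *q = &o.q;              /* generic code sees only this pointer */
	struct outer *back = container_of(q, struct outer, q);

	printf("%d\n", back->extra_state);   /* prints 42 */
	return 0;
}

This is how the generic inet_frags machinery can call into IPv4-specific code without knowing anything about struct ipq.
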
139 struct ipq *qp;
142 qp = container_of(frag, struct ipq, q);
143 net = qp->q.fqdir->net;
148 if (READ_ONCE(qp->q.fqdir->dead))
151 spin_lock(&qp->q.lock);
153 if (qp->q.flags & INET_FRAG_COMPLETE)
156 qp->q.flags |= INET_FRAG_DROP;
157 ipq_kill(qp);
161 if (!(qp->q.flags & INET_FRAG_FIRST_IN))
168 head = inet_frag_pull_head(&qp->q);
171 head->dev = dev_get_by_index_rcu(net, qp->iif);
186 if (frag_expire_skip_icmp(qp->q.key.v4.user) &&
190 spin_unlock(&qp->q.lock);
195 spin_unlock(&qp->q.lock);
199 ipq_put(qp);
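
Lines 139-199 are ip_expire(), the reassembly-timer handler: under the queue lock it marks the unfinished queue for drop accounting, unhashes it via ipq_kill(), and, if the first fragment has arrived, pulls it to send an ICMP time-exceeded error back to the sender. A simplified sketch of that ordering, with illustrative flag names and the locking and ICMP details elided:

enum {
	FRAG_FIRST_IN = 1 << 0,
	FRAG_COMPLETE = 1 << 2,
	FRAG_DROP     = 1 << 3,
};

struct expire_sketch { unsigned int flags; };

static void on_expire(struct expire_sketch *q)
{
	if (q->flags & FRAG_COMPLETE)
		return;            /* reassembly finished first; nothing to report  */

	q->flags |= FRAG_DROP;     /* later frees are accounted as drops            */
	/* ipq_kill(): unhash the queue so no new fragments can join it  */

	if (!(q->flags & FRAG_FIRST_IN))
		return;            /* without fragment 0 no ICMP error can be built */

	/* pull the head fragment, find its input device (qp->iif), and
	 * send ICMP time exceeded (fragment reassembly time exceeded),
	 * unless the defrag user opts out of ICMP (line 186)            */
}
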
226 static int ip_frag_too_far(struct ipq *qp)
228 struct inet_peer *peer = qp->peer;
229 unsigned int max = qp->q.fqdir->max_dist;
237 start = qp->rid;
239 qp->rid = end;
241 rc = qp->q.fragments_tail && (end - start) > max;
244 __IP_INC_STATS(qp->q.fqdir->net, IPSTATS_MIB_REASMFAILS);
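
ip_frag_too_far() (lines 226-244) is a DoS heuristic: each inet_peer keeps a running per-source fragment counter, and qp->rid remembers that counter's value when this queue last accepted a fragment. If more than fqdir->max_dist fragments from the same source arrived in between, the queue is considered stale and torn down. A sketch with illustrative types (the kernel's peer counter is an atomic_t):

#include <stdbool.h>

struct peer_sketch  { unsigned int rid; };  /* per-source fragment counter */
struct queue_sketch { unsigned int rid; bool has_fragments; };

static bool too_far(struct queue_sketch *q, struct peer_sketch *peer,
		    unsigned int max_dist)
{
	unsigned int start = q->rid;        /* counter at our last fragment */
	unsigned int end   = ++peer->rid;   /* counter now                  */

	q->rid = end;
	/* Unsigned wraparound keeps the distance test safe. */
	return q->has_fragments && (end - start) > max_dist;
}
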
249 static int ip_frag_reinit(struct ipq *qp)
253 if (!mod_timer(&qp->q.timer, jiffies + qp->q.fqdir->timeout)) {
254 refcount_inc(&qp->q.refcnt);
258 sum_truesize = inet_frag_rbtree_purge(&qp->q.rb_fragments,
260 sub_frag_mem_limit(qp->q.fqdir, sum_truesize);
262 qp->q.flags = 0;
263 qp->q.len = 0;
264 qp->q.meat = 0;
265 qp->q.rb_fragments = RB_ROOT;
266 qp->q.fragments_tail = NULL;
267 qp->q.last_run_head = NULL;
268 qp->iif = 0;
269 qp->ecn = 0;
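
When ip_frag_too_far() fires, ip_frag_queue() calls ip_frag_reinit() (lines 249-269): it re-arms the timer (bailing out if the timer already fired, line 253), purges the queued fragments, returns their truesize to the fqdir memory accounting (lines 258-260), and zeroes the queue back to its empty state. A sketch of that final reset, with an illustrative struct standing in for struct inet_frag_queue:

#include <stddef.h>

struct reasm_state_sketch {
	unsigned int flags;         /* FIRST_IN/LAST_IN bits seen so far */
	unsigned int len;           /* expected total length, if known   */
	unsigned int meat;          /* payload bytes received so far     */
	void *rb_fragments;         /* rbtree of fragment runs           */
	void *fragments_tail;
	void *last_run_head;
	int iif;                    /* input interface of last fragment  */
	unsigned char ecn;          /* accumulated per-fragment ECN bits */
};

static void reinit_sketch(struct reasm_state_sketch *q)
{
	q->flags = 0;
	q->len = 0;
	q->meat = 0;
	q->rb_fragments = NULL;     /* RB_ROOT in the kernel */
	q->fragments_tail = NULL;
	q->last_run_head = NULL;
	q->iif = 0;
	q->ecn = 0;
}
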
275 static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
277 struct net *net = qp->q.fqdir->net;
287 if (qp->q.flags & INET_FRAG_COMPLETE) {
293 unlikely(ip_frag_too_far(qp)) &&
294 unlikely(err = ip_frag_reinit(qp))) {
295 ipq_kill(qp);
315 if (end < qp->q.len ||
316 ((qp->q.flags & INET_FRAG_LAST_IN) && end != qp->q.len))
318 qp->q.flags |= INET_FRAG_LAST_IN;
319 qp->q.len = end;
326 if (end > qp->q.len) {
328 if (qp->q.flags & INET_FRAG_LAST_IN)
330 qp->q.len = end;
349 prev_tail = qp->q.fragments_tail;
350 err = inet_frag_queue_insert(&qp->q, skb, offset, end);
355 qp->iif = dev->ifindex;
357 qp->q.stamp = skb->tstamp;
358 qp->q.mono_delivery_time = skb->mono_delivery_time;
359 qp->q.meat += skb->len;
360 qp->ecn |= ecn;
361 add_frag_mem_limit(qp->q.fqdir, skb->truesize);
363 qp->q.flags |= INET_FRAG_FIRST_IN;
367 if (fragsize > qp->q.max_size)
368 qp->q.max_size = fragsize;
371 fragsize > qp->max_df_size)
372 qp->max_df_size = fragsize;
374 if (qp->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
375 qp->q.meat == qp->q.len) {
379 err = ip_frag_reasm(qp, skb, prev_tail, dev);
382 inet_frag_kill(&qp->q);
399 inet_frag_kill(&qp->q);
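
ip_frag_queue() (lines 275-399) is the per-fragment workhorse: it computes the fragment's [offset, end) byte range, updates the expected total length, inserts the skb into the queue's rbtree, and triggers reassembly once both boundary fragments are present and the received byte count (meat) equals the total length (lines 374-375; because that test is an equality on flags, it also guarantees no COMPLETE bit is set). A compilable sketch of the length and completion bookkeeping, assuming overlapping fragments were already rejected by inet_frag_queue_insert() (line 350); the kernel distinguishes the corruption cases (queue discarded) from "not yet complete", which this sketch folds into a single false return:

#include <stdbool.h>

enum { FRAG_FIRST_IN = 1 << 0, FRAG_LAST_IN = 1 << 1 };

struct dgram_sketch {
	unsigned int flags;   /* which boundary fragments have arrived   */
	unsigned int len;     /* total length, fixed once LAST_IN is set */
	unsigned int meat;    /* payload bytes queued so far             */
};

/* Account for one fragment covering [offset, end); mf is the IP_MF
 * (more-fragments) bit. Returns true when the datagram is complete. */
static bool queue_fragment(struct dgram_sketch *q, unsigned int offset,
			   unsigned int end, bool mf)
{
	if (!mf) {
		/* The final fragment fixes the total length; anything
		 * shorter or conflicting means corruption (lines 315-316). */
		if (end < q->len ||
		    ((q->flags & FRAG_LAST_IN) && end != q->len))
			return false;
		q->flags |= FRAG_LAST_IN;
		q->len = end;
	} else if (end > q->len) {
		if (q->flags & FRAG_LAST_IN)
			return false;   /* grows past the known total */
		q->len = end;
	}

	if (offset == 0)
		q->flags |= FRAG_FIRST_IN;
	q->meat += end - offset;

	return q->flags == (FRAG_FIRST_IN | FRAG_LAST_IN) && q->meat == q->len;
}
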
406 static bool ip_frag_coalesce_ok(const struct ipq *qp)
408 return qp->q.key.v4.user == IP_DEFRAG_LOCAL_DELIVER;
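
ip_frag_coalesce_ok() (lines 406-408) lets inet_frag_reasm_finish() merge fragment data only when the packet is being delivered locally; for other defrag users (netfilter conntrack, for instance) the original fragment boundaries are presumably kept so the packet can be refragmented with the same geometry on output.
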
412 static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
415 struct net *net = qp->q.fqdir->net;
421 ipq_kill(qp);
423 ecn = ip_frag_ecn_table[qp->ecn];
430 reasm_data = inet_frag_reasm_prepare(&qp->q, skb, prev_tail);
434 len = ip_hdrlen(skb) + qp->q.len;
439 inet_frag_reasm_finish(&qp->q, skb, reasm_data,
440 ip_frag_coalesce_ok(qp));
443 IPCB(skb)->frag_max_size = max(qp->max_df_size, qp->q.max_size);
457 if (qp->max_df_size == qp->q.max_size) {
467 qp->q.rb_fragments = RB_ROOT;
468 qp->q.fragments_tail = NULL;
469 qp->q.last_run_head = NULL;
473 net_dbg_ratelimited("queue_glue: no memory for gluing queue %p\n", qp);
477 net_info_ratelimited("Oversized IP packet from %pI4\n", &qp->q.key.v4.saddr);
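
ip_frag_reasm() (lines 412-477) kills the queue, rebuilds one skb from the fragment tree, and fixes up the IP header. The PMTU bookkeeping at lines 443 and 457 relies on the two maxima tracked at lines 367-372: frag_max_size reports the largest on-the-wire fragment, and DF is restored only when the largest DF-carrying fragment is as large as the largest fragment overall, the kernel's size-based proxy for "every fragment had DF set". A sketch of just that decision:

#include <stdbool.h>

static unsigned int reasm_frag_max(unsigned int max_size,
				   unsigned int max_df_size, bool *set_df)
{
	/* max_df_size never exceeds max_size (lines 367-372), so
	 * equality means the largest fragment itself carried DF. */
	*set_df = (max_df_size == max_size);
	return max_df_size > max_size ? max_df_size : max_size;
}

The two ratelimited messages at lines 473 and 477 are the reassembly failure paths: allocation failure while gluing the fragments, and a reassembled datagram exceeding the maximum IP packet size.
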
488 struct ipq *qp;
493 qp = ip_find(net, ip_hdr(skb), user, vif);
494 if (qp) {
497 spin_lock(&qp->q.lock);
499 ret = ip_frag_queue(qp, skb);
501 spin_unlock(&qp->q.lock);
502 ipq_put(qp);
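
ip_defrag() (lines 488-502) is the entry point and shows the concurrency contract: ip_find() returns a referenced queue (creating one if needed), all per-fragment work happens under that queue's spinlock, and the lookup reference is dropped afterwards. A userspace analogue of that shape, with illustrative names:

#include <pthread.h>
#include <stdlib.h>

struct queue_sketch {
	pthread_mutex_t lock;       /* qp->q.lock in the kernel */
	int refcnt;                 /* refcount_t in the kernel */
};

static void queue_put(struct queue_sketch *q)      /* kernel: ipq_put() */
{
	if (__atomic_sub_fetch(&q->refcnt, 1, __ATOMIC_ACQ_REL) == 0)
		free(q);            /* last reference frees the queue */
}

static int handle_fragment(struct queue_sketch *q)
{
	int ret;

	pthread_mutex_lock(&q->lock);   /* fragments of one datagram serialize */
	ret = 0;                        /* kernel: ip_frag_queue(qp, skb)      */
	pthread_mutex_unlock(&q->lock);
	queue_put(q);                   /* drop the ip_find() reference        */
	return ret;
}
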