/*
 *	IPv6 fragment reassembly
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on: net/ipv4/ip_fragment.c
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 *	Fixes:
 *	Andi Kleen		Make it work with multiple hosts.
 *				More RFC compliance.
 *
 *	Horst von Brand		Add missing #include <linux/string.h>
 *	Alexey Kuznetsov	SMP races, threading, cleanup.
 *	Patrick McHardy		LRU queue of frag heads for evictor.
 *	Mitsuru KANDA @USAGI	Register inet6_protocol{}.
 *	David Stevens and
 *	YOSHIFUJI,H. @USAGI	Always remove fragment header to
 *				calculate ICV correctly.
 */
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/jiffies.h>
#include <linux/net.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/in6.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/skbuff.h>
#include <linux/slab.h>

#include <net/sock.h>
#include <net/snmp.h>

#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/rawv6.h>
#include <net/ndisc.h>
#include <net/addrconf.h>
#include <net/inet_frag.h>

struct ip6frag_skb_cb
{
	struct inet6_skb_parm	h;
	int			offset;
};

#define FRAG6_CB(skb)	((struct ip6frag_skb_cb*)((skb)->cb))


/*
 *	Equivalent of ipv4 struct ipq
 */

struct frag_queue
{
	struct inet_frag_queue	q;

	__be32			id;		/* fragment id		*/
	u32			user;
	struct in6_addr		saddr;
	struct in6_addr		daddr;

	int			iif;
	unsigned int		csum;
	__u16			nhoffset;
};

static struct inet_frags ip6_frags;

int ip6_frag_nqueues(struct net *net)
{
	return net->ipv6.frags.nqueues;
}

int ip6_frag_mem(struct net *net)
{
	return atomic_read(&net->ipv6.frags.mem);
}

static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
			  struct net_device *dev);

/*
 * callers should be careful not to use the hash value outside the ipfrag_lock
 * as doing so could race with ipfrag_hash_rnd being recalculated.
 */
unsigned int inet6_hash_frag(__be32 id, const struct in6_addr *saddr,
			     const struct in6_addr *daddr, u32 rnd)
{
	u32 a, b, c;

	a = (__force u32)saddr->s6_addr32[0];
	b = (__force u32)saddr->s6_addr32[1];
	c = (__force u32)saddr->s6_addr32[2];

	a += JHASH_GOLDEN_RATIO;
	b += JHASH_GOLDEN_RATIO;
	c += rnd;
	__jhash_mix(a, b, c);

	a += (__force u32)saddr->s6_addr32[3];
	b += (__force u32)daddr->s6_addr32[0];
	c += (__force u32)daddr->s6_addr32[1];
	__jhash_mix(a, b, c);

	a += (__force u32)daddr->s6_addr32[2];
	b += (__force u32)daddr->s6_addr32[3];
	c += (__force u32)id;
	__jhash_mix(a, b, c);

	return c & (INETFRAGS_HASHSZ - 1);
}
EXPORT_SYMBOL_GPL(inet6_hash_frag);

static unsigned int ip6_hashfn(struct inet_frag_queue *q)
{
	struct frag_queue *fq;

	fq = container_of(q, struct frag_queue, q);
	return inet6_hash_frag(fq->id, &fq->saddr, &fq->daddr, ip6_frags.rnd);
}

int ip6_frag_match(struct inet_frag_queue *q, void *a)
{
	struct frag_queue *fq;
	struct ip6_create_arg *arg = a;

	fq = container_of(q, struct frag_queue, q);
	return (fq->id == arg->id && fq->user == arg->user &&
		ipv6_addr_equal(&fq->saddr, arg->src) &&
		ipv6_addr_equal(&fq->daddr, arg->dst));
}
EXPORT_SYMBOL(ip6_frag_match);

void ip6_frag_init(struct inet_frag_queue *q, void *a)
{
	struct frag_queue *fq = container_of(q, struct frag_queue, q);
	struct ip6_create_arg *arg = a;

	fq->id = arg->id;
	fq->user = arg->user;
	ipv6_addr_copy(&fq->saddr, arg->src);
	ipv6_addr_copy(&fq->daddr, arg->dst);
}
EXPORT_SYMBOL(ip6_frag_init);

/* Destruction primitives. */

static __inline__ void fq_put(struct frag_queue *fq)
{
	inet_frag_put(&fq->q, &ip6_frags);
}

/* Kill fq entry. It is not destroyed immediately,
 * because the caller (and possibly others) still holds a reference.
 */
static __inline__ void fq_kill(struct frag_queue *fq)
{
	inet_frag_kill(&fq->q, &ip6_frags);
}

static void ip6_evictor(struct net *net, struct inet6_dev *idev)
{
	int evicted;

	evicted = inet_frag_evictor(&net->ipv6.frags, &ip6_frags);
	if (evicted)
		IP6_ADD_STATS_BH(net, idev, IPSTATS_MIB_REASMFAILS, evicted);
}

static void ip6_frag_expire(unsigned long data)
{
	struct frag_queue *fq;
	struct net_device *dev = NULL;
	struct net *net;

	fq = container_of((struct inet_frag_queue *)data, struct frag_queue, q);

	spin_lock(&fq->q.lock);

	if (fq->q.last_in & INET_FRAG_COMPLETE)
		goto out;

	fq_kill(fq);

	net = container_of(fq->q.net, struct net, ipv6.frags);
	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, fq->iif);
	if (!dev)
		goto out_rcu_unlock;

	IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT);
	IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);

	/* Don't send error if the first segment did not arrive. */
	if (!(fq->q.last_in & INET_FRAG_FIRST_IN) || !fq->q.fragments)
		goto out_rcu_unlock;

	/*
	 * But use the device on which the LAST ARRIVED segment was
	 * received as the source device. Do not use the fq->dev pointer
	 * directly; the device might have disappeared already.
	 */
	fq->q.fragments->dev = dev;
	icmpv6_send(fq->q.fragments, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0);
out_rcu_unlock:
	rcu_read_unlock();
out:
	spin_unlock(&fq->q.lock);
	fq_put(fq);
}

static __inline__ struct frag_queue *
fq_find(struct net *net, __be32 id, struct in6_addr *src, struct in6_addr *dst)
{
	struct inet_frag_queue *q;
	struct ip6_create_arg arg;
	unsigned int hash;

	arg.id = id;
	arg.user = IP6_DEFRAG_LOCAL_DELIVER;
	arg.src = src;
	arg.dst = dst;

	read_lock(&ip6_frags.lock);
	hash = inet6_hash_frag(id, src, dst, ip6_frags.rnd);

	q = inet_frag_find(&net->ipv6.frags, &ip6_frags, &arg, hash);
	if (q == NULL)
		return NULL;

	return container_of(q, struct frag_queue, q);
}

static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
			  struct frag_hdr *fhdr, int nhoff)
{
	struct sk_buff *prev, *next;
	struct net_device *dev;
	int offset, end;
	struct net *net = dev_net(skb_dst(skb)->dev);

	if (fq->q.last_in & INET_FRAG_COMPLETE)
		goto err;

	offset = ntohs(fhdr->frag_off) & ~0x7;
	end = offset + (ntohs(ipv6_hdr(skb)->payload_len) -
			((u8 *)(fhdr + 1) - (u8 *)(ipv6_hdr(skb) + 1)));

	if ((unsigned int)end > IPV6_MAXPLEN) {
		IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
				 IPSTATS_MIB_INHDRERRORS);
		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
				  ((u8 *)&fhdr->frag_off -
				   skb_network_header(skb)));
		return -1;
	}

	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		const unsigned char *nh = skb_network_header(skb);
		skb->csum = csum_sub(skb->csum,
				     csum_partial(nh, (u8 *)(fhdr + 1) - nh,
						  0));
	}

	/* Is this the final fragment? */
	if (!(fhdr->frag_off & htons(IP6_MF))) {
		/* If we already have some bits beyond end
		 * or have different end, the segment is corrupted.
		 */
		if (end < fq->q.len ||
		    ((fq->q.last_in & INET_FRAG_LAST_IN) && end != fq->q.len))
			goto err;
		fq->q.last_in |= INET_FRAG_LAST_IN;
		fq->q.len = end;
	} else {
		/* Check if the fragment is rounded to 8 bytes.
		 * Required by the RFC.
		 */
		if (end & 0x7) {
			/* RFC2460 says always send parameter problem in
			 * this case. -DaveM
			 */
			IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
					 IPSTATS_MIB_INHDRERRORS);
			icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
					  offsetof(struct ipv6hdr, payload_len));
			return -1;
		}
		if (end > fq->q.len) {
			/* Some bits beyond end -> corruption. */
			if (fq->q.last_in & INET_FRAG_LAST_IN)
				goto err;
			fq->q.len = end;
		}
	}

	if (end == offset)
		goto err;

	/* Point into the IP datagram 'data' part. */
	if (!pskb_pull(skb, (u8 *) (fhdr + 1) - skb->data))
		goto err;

	if (pskb_trim_rcsum(skb, end - offset))
		goto err;

	/* Find out which fragments are in front and at the back of us
	 * in the chain of fragments so far.  We must know where to put
	 * this fragment, right?
	 */
	prev = fq->q.fragments_tail;
	if (!prev || FRAG6_CB(prev)->offset < offset) {
		next = NULL;
		goto found;
	}
	prev = NULL;
	for (next = fq->q.fragments; next != NULL; next = next->next) {
		if (FRAG6_CB(next)->offset >= offset)
			break;	/* bingo! */
		prev = next;
	}

found:
	/* RFC5722, Section 4:
	 *   When reassembling an IPv6 datagram, if one or more of its
	 *   constituent fragments is determined to be an overlapping
	 *   fragment, the entire datagram (and any constituent fragments,
	 *   including those not yet received) MUST be silently discarded.
	 */
/* Bob removed start 04/01/2013 for ipv6ready */
#if 0
	/* Check for overlap with preceding fragment. */
	if (prev &&
	    (FRAG6_CB(prev)->offset + prev->len) - offset > 0)
		goto discard_fq;
#endif
/* Bob removed end 04/01/2013 for ipv6ready */

	/* Look for overlap with succeeding segment. */
	if (next && FRAG6_CB(next)->offset < end)
		goto discard_fq;

	FRAG6_CB(skb)->offset = offset;

	/* Insert this fragment in the chain of fragments. */
	skb->next = next;
	if (!next)
		fq->q.fragments_tail = skb;
	if (prev)
		prev->next = skb;
	else
		fq->q.fragments = skb;

	dev = skb->dev;
	if (dev) {
		fq->iif = dev->ifindex;
		skb->dev = NULL;
	}
	fq->q.stamp = skb->tstamp;
	fq->q.meat += skb->len;
	atomic_add(skb->truesize, &fq->q.net->mem);

	/* The first fragment.
	 * nhoffset is obtained from the first fragment, of course.
	 */
	if (offset == 0) {
		fq->nhoffset = nhoff;
		fq->q.last_in |= INET_FRAG_FIRST_IN;
	}

	if (fq->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
	    fq->q.meat == fq->q.len)
		return ip6_frag_reasm(fq, prev, dev);

	write_lock(&ip6_frags.lock);
	list_move_tail(&fq->q.lru_list, &fq->q.net->lru_list);
	write_unlock(&ip6_frags.lock);
	return -1;

discard_fq:
	fq_kill(fq);
err:
	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
		      IPSTATS_MIB_REASMFAILS);
	kfree_skb(skb);
	return -1;
}

/*
 *	Check if this packet is complete.
 *
 *	It is called with locked fq, and caller must check that
 *	queue is eligible for reassembly i.e. it is not COMPLETE,
 *	the last and the first frames arrived and all the bits are here.
 *
 *	Returns 1 when reassembly succeeded (the reassembled datagram is
 *	left in the head skb of the queue) and -1 on failure.
 */
static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
			  struct net_device *dev)
{
	struct net *net = container_of(fq->q.net, struct net, ipv6.frags);
	struct sk_buff *fp, *head = fq->q.fragments;
	int payload_len;
	unsigned int nhoff;

	fq_kill(fq);

	/* Make the one we just received the head. */
	if (prev) {
		head = prev->next;
		fp = skb_clone(head, GFP_ATOMIC);

		if (!fp)
			goto out_oom;

		fp->next = head->next;
		if (!fp->next)
			fq->q.fragments_tail = fp;
		prev->next = fp;

		skb_morph(head, fq->q.fragments);
		head->next = fq->q.fragments->next;

		kfree_skb(fq->q.fragments);
		fq->q.fragments = head;
	}

	WARN_ON(head == NULL);
	WARN_ON(FRAG6_CB(head)->offset != 0);

	/* Unfragmented part is taken from the first segment. */
	payload_len = ((head->data - skb_network_header(head)) -
		       sizeof(struct ipv6hdr) + fq->q.len -
		       sizeof(struct frag_hdr));
	if (payload_len > IPV6_MAXPLEN)
		goto out_oversize;

	/* Head of list must not be cloned. */
	if (skb_cloned(head) && pskb_expand_head(head, 0, 0, GFP_ATOMIC))
		goto out_oom;

	/* If the first fragment is fragmented itself, we split
	 * it to two chunks: the first with data and paged part
	 * and the second, holding only fragments. */
	if (skb_has_frags(head)) {
		struct sk_buff *clone;
		int i, plen = 0;

		if ((clone = alloc_skb(0, GFP_ATOMIC)) == NULL)
			goto out_oom;
		clone->next = head->next;
		head->next = clone;
		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
		skb_frag_list_init(head);
		for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
			plen += skb_shinfo(head)->frags[i].size;
		clone->len = clone->data_len = head->data_len - plen;
		head->data_len -= clone->len;
		head->len -= clone->len;
		clone->csum = 0;
		clone->ip_summed = head->ip_summed;
		atomic_add(clone->truesize, &fq->q.net->mem);
	}

	/* We have to remove fragment header from datagram and to relocate
	 * header in order to calculate ICV correctly. */
	nhoff = fq->nhoffset;
	skb_network_header(head)[nhoff] = skb_transport_header(head)[0];
	memmove(head->head + sizeof(struct frag_hdr), head->head,
		(head->data - head->head) - sizeof(struct frag_hdr));
	head->mac_header += sizeof(struct frag_hdr);
	head->network_header += sizeof(struct frag_hdr);

	skb_shinfo(head)->frag_list = head->next;
	skb_reset_transport_header(head);
	skb_push(head, head->data - skb_network_header(head));

	for (fp = head->next; fp; fp = fp->next) {
		head->data_len += fp->len;
		head->len += fp->len;
		if (head->ip_summed != fp->ip_summed)
			head->ip_summed = CHECKSUM_NONE;
		else if (head->ip_summed == CHECKSUM_COMPLETE)
			head->csum = csum_add(head->csum, fp->csum);
		head->truesize += fp->truesize;
	}
	atomic_sub(head->truesize, &fq->q.net->mem);

	head->next = NULL;
	head->dev = dev;
	head->tstamp = fq->q.stamp;
	ipv6_hdr(head)->payload_len = htons(payload_len);
	IP6CB(head)->nhoff = nhoff;

	/* Yes, and fold redundant checksum back. 8) */
	if (head->ip_summed == CHECKSUM_COMPLETE)
		head->csum = csum_partial(skb_network_header(head),
					  skb_network_header_len(head),
					  head->csum);

	rcu_read_lock();
	IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMOKS);
	rcu_read_unlock();
	fq->q.fragments = NULL;
	fq->q.fragments_tail = NULL;
	return 1;

out_oversize:
	if (net_ratelimit())
		printk(KERN_DEBUG "ip6_frag_reasm: payload len = %d\n", payload_len);
	goto out_fail;
out_oom:
	if (net_ratelimit())
		printk(KERN_DEBUG "ip6_frag_reasm: no memory for reassembly\n");
out_fail:
	rcu_read_lock();
	IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
	rcu_read_unlock();
	return -1;
}

static int ipv6_frag_rcv(struct sk_buff *skb)
{
	struct frag_hdr *fhdr;
	struct frag_queue *fq;
	struct ipv6hdr *hdr = ipv6_hdr(skb);
	struct net *net = dev_net(skb_dst(skb)->dev);

	IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMREQDS);

	/* Jumbo payload inhibits frag. header */
	if (hdr->payload_len == 0)
		goto fail_hdr;

	if (!pskb_may_pull(skb, (skb_transport_offset(skb) +
				 sizeof(struct frag_hdr))))
		goto fail_hdr;

	hdr = ipv6_hdr(skb);
	fhdr = (struct frag_hdr *)skb_transport_header(skb);

	if (!(fhdr->frag_off & htons(0xFFF9))) {
		/* It is not a fragmented frame */
		skb->transport_header += sizeof(struct frag_hdr);
		IP6_INC_STATS_BH(net,
				 ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMOKS);

		IP6CB(skb)->nhoff = (u8 *)fhdr - skb_network_header(skb);
		return 1;
	}

	if (atomic_read(&net->ipv6.frags.mem) > net->ipv6.frags.high_thresh)
		ip6_evictor(net, ip6_dst_idev(skb_dst(skb)));

	fq = fq_find(net, fhdr->identification, &hdr->saddr, &hdr->daddr);
	if (fq != NULL) {
		int ret;

		spin_lock(&fq->q.lock);

		ret = ip6_frag_queue(fq, skb, fhdr, IP6CB(skb)->nhoff);

		spin_unlock(&fq->q.lock);
		fq_put(fq);
		return ret;
	}

	IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMFAILS);
	kfree_skb(skb);
	return -1;

fail_hdr:
	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_INHDRERRORS);
	icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, skb_network_header_len(skb));
	return -1;
}

static const struct inet6_protocol frag_protocol =
{
	.handler	=	ipv6_frag_rcv,
	.flags		=	INET6_PROTO_NOPOLICY,
};

#ifdef CONFIG_SYSCTL
static struct ctl_table ip6_frags_ns_ctl_table[] = {
	{
		.procname	= "ip6frag_high_thresh",
		.data		= &init_net.ipv6.frags.high_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec
	},
	{
		.procname	= "ip6frag_low_thresh",
		.data		= &init_net.ipv6.frags.low_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec
	},
	{
		.procname	= "ip6frag_time",
		.data		= &init_net.ipv6.frags.timeout,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{ }
};

static struct ctl_table ip6_frags_ctl_table[] = {
	{
		.procname	= "ip6frag_secret_interval",
		.data		= &ip6_frags.secret_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{ }
};

static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
{
	struct ctl_table *table;
	struct ctl_table_header *hdr;

	table = ip6_frags_ns_ctl_table;
	if (!net_eq(net, &init_net)) {
		table = kmemdup(table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
		if (table == NULL)
			goto err_alloc;

		table[0].data = &net->ipv6.frags.high_thresh;
		table[1].data = &net->ipv6.frags.low_thresh;
		table[2].data = &net->ipv6.frags.timeout;
	}

	hdr = register_net_sysctl_table(net, net_ipv6_ctl_path, table);
	if (hdr == NULL)
		goto err_reg;

	net->ipv6.sysctl.frags_hdr = hdr;
	return 0;

err_reg:
	if (!net_eq(net, &init_net))
		kfree(table);
err_alloc:
	return -ENOMEM;
}

static void __net_exit ip6_frags_ns_sysctl_unregister(struct net *net)
{
	struct ctl_table *table;

	table = net->ipv6.sysctl.frags_hdr->ctl_table_arg;
	unregister_net_sysctl_table(net->ipv6.sysctl.frags_hdr);
	if (!net_eq(net, &init_net))
		kfree(table);
}

static struct ctl_table_header *ip6_ctl_header;

static int ip6_frags_sysctl_register(void)
{
	ip6_ctl_header = register_net_sysctl_rotable(net_ipv6_ctl_path,
						     ip6_frags_ctl_table);
	return ip6_ctl_header == NULL ? -ENOMEM : 0;
}

static void ip6_frags_sysctl_unregister(void)
{
	unregister_net_sysctl_table(ip6_ctl_header);
}
#else
static inline int ip6_frags_ns_sysctl_register(struct net *net)
{
	return 0;
}

static inline void ip6_frags_ns_sysctl_unregister(struct net *net)
{
}

static inline int ip6_frags_sysctl_register(void)
{
	return 0;
}

static inline void ip6_frags_sysctl_unregister(void)
{
}
#endif

static int __net_init ipv6_frags_init_net(struct net *net)
{
	net->ipv6.frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
	net->ipv6.frags.low_thresh = IPV6_FRAG_LOW_THRESH;
	net->ipv6.frags.timeout = IPV6_FRAG_TIMEOUT;

	inet_frags_init_net(&net->ipv6.frags);

	return ip6_frags_ns_sysctl_register(net);
}

static void __net_exit ipv6_frags_exit_net(struct net *net)
{
	ip6_frags_ns_sysctl_unregister(net);
	inet_frags_exit_net(&net->ipv6.frags, &ip6_frags);
}

static struct pernet_operations ip6_frags_ops = {
	.init = ipv6_frags_init_net,
	.exit = ipv6_frags_exit_net,
};

int __init ipv6_frag_init(void)
{
	int ret;

	ret = inet6_add_protocol(&frag_protocol, IPPROTO_FRAGMENT);
	if (ret)
		goto out;

	ret = ip6_frags_sysctl_register();
	if (ret)
		goto err_sysctl;

	ret = register_pernet_subsys(&ip6_frags_ops);
	if (ret)
		goto err_pernet;

	ip6_frags.hashfn = ip6_hashfn;
	ip6_frags.constructor = ip6_frag_init;
	ip6_frags.destructor = NULL;
	ip6_frags.skb_free = NULL;
	ip6_frags.qsize = sizeof(struct frag_queue);
	ip6_frags.match = ip6_frag_match;
	ip6_frags.frag_expire = ip6_frag_expire;
	ip6_frags.secret_interval = 10 * 60 * HZ;
	inet_frags_init(&ip6_frags);
out:
	return ret;

err_pernet:
	ip6_frags_sysctl_unregister();
err_sysctl:
	inet6_del_protocol(&frag_protocol, IPPROTO_FRAGMENT);
	goto out;
}

void ipv6_frag_exit(void)
{
	inet_frags_fini(&ip6_frags);
	ip6_frags_sysctl_unregister();
	unregister_pernet_subsys(&ip6_frags_ops);
	inet6_del_protocol(&frag_protocol, IPPROTO_FRAGMENT);
}