/*-
 * Copyright (c) 2015 Gleb Smirnoff <glebius@FreeBSD.org>
 * Copyright (c) 2015 Adrian Chadd <adrian@FreeBSD.org>
 * Copyright (c) 1982, 1986, 1988, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ip_input.c	8.2 (Berkeley) 1/4/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/netinet/ip_reass.c 337796 2018-08-14 17:54:39Z jtl $");

#include "opt_rss.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/eventhandler.h>
#include <sys/hash.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>

#include <net/rss_config.h>
#include <net/netisr.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/in_rss.h>
#ifdef MAC
#include <security/mac/mac_framework.h>
#endif

SYSCTL_DECL(_net_inet_ip);

/*
 * Reassembly headers are stored in hash buckets.
 */
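/*
 * Each bucket carries its own mutex and entry count (see struct
 * ipqbucket below), so unrelated datagrams can be reassembled
 * concurrently, and a per-bucket size limit can bound the damage of
 * hash-collision attacks.
 */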
#define	IPREASS_NHASH_LOG2	6
#define	IPREASS_NHASH		(1 << IPREASS_NHASH_LOG2)
#define	IPREASS_HMASK		(IPREASS_NHASH - 1)

struct ipqbucket {
	TAILQ_HEAD(ipqhead, ipq) head;
	struct mtx		 lock;
	int			 count;
};

static VNET_DEFINE(struct ipqbucket, ipq[IPREASS_NHASH]);
#define	V_ipq		VNET(ipq)
static VNET_DEFINE(uint32_t, ipq_hashseed);
#define	V_ipq_hashseed	VNET(ipq_hashseed)

#define	IPQ_LOCK(i)	mtx_lock(&V_ipq[i].lock)
#define	IPQ_TRYLOCK(i)	mtx_trylock(&V_ipq[i].lock)
#define	IPQ_UNLOCK(i)	mtx_unlock(&V_ipq[i].lock)
#define	IPQ_LOCK_ASSERT(i)	mtx_assert(&V_ipq[i].lock, MA_OWNED)

static VNET_DEFINE(int, ipreass_maxbucketsize);
#define	V_ipreass_maxbucketsize	VNET(ipreass_maxbucketsize)

void		ipreass_init(void);
void		ipreass_drain(void);
void		ipreass_slowtimo(void);
#ifdef VIMAGE
void		ipreass_destroy(void);
#endif
static int	sysctl_maxfragpackets(SYSCTL_HANDLER_ARGS);
static int	sysctl_maxfragbucketsize(SYSCTL_HANDLER_ARGS);
static void	ipreass_zone_change(void *);
static void	ipreass_drain_tomax(void);
static void	ipq_free(struct ipqbucket *, struct ipq *);
static struct ipq * ipq_reuse(int);

static inline void
ipq_timeout(struct ipqbucket *bucket, struct ipq *fp)
{

	IPSTAT_ADD(ips_fragtimeout, fp->ipq_nfrags);
	ipq_free(bucket, fp);
}

static inline void
ipq_drop(struct ipqbucket *bucket, struct ipq *fp)
{

	IPSTAT_ADD(ips_fragdropped, fp->ipq_nfrags);
	ipq_free(bucket, fp);
}

static int		maxfrags;
static volatile u_int	nfrags;
SYSCTL_INT(_net_inet_ip, OID_AUTO, maxfrags, CTLFLAG_RW,
    &maxfrags, 0,
    "Maximum number of IPv4 fragments allowed across all reassembly queues");
SYSCTL_UINT(_net_inet_ip, OID_AUTO, curfrags, CTLFLAG_RD,
    __DEVOLATILE(u_int *, &nfrags), 0,
    "Current number of IPv4 fragments across all reassembly queues");

static VNET_DEFINE(uma_zone_t, ipq_zone);
#define	V_ipq_zone	VNET(ipq_zone)
SYSCTL_PROC(_net_inet_ip, OID_AUTO, maxfragpackets, CTLFLAG_VNET |
    CTLTYPE_INT | CTLFLAG_RW, NULL, 0, sysctl_maxfragpackets, "I",
    "Maximum number of IPv4 fragment reassembly queue entries");
SYSCTL_UMA_CUR(_net_inet_ip, OID_AUTO, fragpackets, CTLFLAG_VNET,
    &VNET_NAME(ipq_zone),
    "Current number of IPv4 fragment reassembly queue entries");

static VNET_DEFINE(int, noreass);
#define	V_noreass	VNET(noreass)

static VNET_DEFINE(int, maxfragsperpacket);
#define	V_maxfragsperpacket	VNET(maxfragsperpacket)
SYSCTL_INT(_net_inet_ip, OID_AUTO, maxfragsperpacket, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(maxfragsperpacket), 0,
    "Maximum number of IPv4 fragments allowed per packet");
SYSCTL_PROC(_net_inet_ip, OID_AUTO, maxfragbucketsize,
    CTLFLAG_VNET | CTLTYPE_INT | CTLFLAG_MPSAFE | CTLFLAG_RW, NULL, 0,
    sysctl_maxfragbucketsize, "I",
    "Maximum number of IPv4 fragment reassembly queue entries per bucket");

/*
 * Take an incoming datagram fragment and try to reassemble it into a
 * whole datagram.  If the argument is the first fragment or one in
 * between, the function will return NULL and store the mbuf in the
 * fragment chain.  If the argument is the last fragment, the packet
 * will be reassembled and the pointer to the new mbuf returned for
 * further processing.  Only m_tags attached to the first
 * packet/fragment are preserved.
 * The IP header is *NOT* adjusted out of iplen.
 */
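/*
 * M_IP_FRAG marks an mbuf that was queued with IP_MF set, i.e. more
 * fragments are expected after it; the completeness check below
 * requires the last queued fragment to have it clear.  Reusing the
 * protocol-private flag M_PROTO9 is safe here because, as far as this
 * file is concerned, the flag is only consulted while the mbuf is
 * owned by the reassembly code.
 */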
#define	M_IP_FRAG	M_PROTO9
struct mbuf *
ip_reass(struct mbuf *m)
{
	struct ip *ip;
	struct mbuf *p, *q, *nq, *t;
	struct ipq *fp;
	struct ipqhead *head;
	int i, hlen, next, tmpmax;
	u_int8_t ecn, ecn0;
	uint32_t hash, hashkey[3];
#ifdef RSS
	uint32_t rss_hash, rss_type;
#endif

	/*
	 * If reassembly is disabled or maxfragsperpacket is 0, never
	 * accept fragments.  Also drop the packet if it would push the
	 * global fragment count past the maximum.
	 */
	tmpmax = maxfrags;
	if (V_noreass == 1 || V_maxfragsperpacket == 0 ||
	    (tmpmax >= 0 && nfrags >= (u_int)tmpmax)) {
		IPSTAT_INC(ips_fragments);
		IPSTAT_INC(ips_fragdropped);
		m_freem(m);
		return (NULL);
	}

	ip = mtod(m, struct ip *);
	hlen = ip->ip_hl << 2;

	/*
	 * Adjust ip_len to not reflect header,
	 * convert offset of this to bytes.
	 */
	ip->ip_len = htons(ntohs(ip->ip_len) - hlen);
	if (ip->ip_off & htons(IP_MF)) {
		/*
		 * Make sure that fragments have a data length
		 * that's a non-zero multiple of 8 bytes.
		 */
		if (ip->ip_len == htons(0) || (ntohs(ip->ip_len) & 0x7) != 0) {
			IPSTAT_INC(ips_toosmall); /* XXX */
			IPSTAT_INC(ips_fragdropped);
			m_freem(m);
			return (NULL);
		}
		m->m_flags |= M_IP_FRAG;
	} else
		m->m_flags &= ~M_IP_FRAG;
	ip->ip_off = htons(ntohs(ip->ip_off) << 3);

	/*
	 * Attempt reassembly; if it succeeds, proceed.
	 * ip_reass() will return a different mbuf.
	 */
	IPSTAT_INC(ips_fragments);
	m->m_pkthdr.PH_loc.ptr = ip;

	/*
	 * Presence of header sizes in mbufs
	 * would confuse code below.
	 */
	m->m_data += hlen;
	m->m_len -= hlen;

	hashkey[0] = ip->ip_src.s_addr;
	hashkey[1] = ip->ip_dst.s_addr;
	hashkey[2] = (uint32_t)ip->ip_p << 16;
	hashkey[2] += ip->ip_id;
	hash = jenkins_hash32(hashkey, nitems(hashkey), V_ipq_hashseed);
	hash &= IPREASS_HMASK;
	head = &V_ipq[hash].head;
	IPQ_LOCK(hash);

	/*
	 * Look for queue of fragments
	 * of this datagram.
	 */
	TAILQ_FOREACH(fp, head, ipq_list)
		if (ip->ip_id == fp->ipq_id &&
		    ip->ip_src.s_addr == fp->ipq_src.s_addr &&
		    ip->ip_dst.s_addr == fp->ipq_dst.s_addr &&
#ifdef MAC
		    mac_ipq_match(m, fp) &&
#endif
		    ip->ip_p == fp->ipq_p)
			break;
	/*
	 * If first fragment to arrive, create a reassembly queue.
	 */
	if (fp == NULL) {
		if (V_ipq[hash].count < V_ipreass_maxbucketsize)
			fp = uma_zalloc(V_ipq_zone, M_NOWAIT);
		if (fp == NULL)
			fp = ipq_reuse(hash);
		if (fp == NULL)
			goto dropfrag;
#ifdef MAC
		if (mac_ipq_init(fp, M_NOWAIT) != 0) {
			uma_zfree(V_ipq_zone, fp);
			fp = NULL;
			goto dropfrag;
		}
		mac_ipq_create(m, fp);
#endif
		TAILQ_INSERT_HEAD(head, fp, ipq_list);
		V_ipq[hash].count++;
		fp->ipq_nfrags = 1;
		atomic_add_int(&nfrags, 1);
		fp->ipq_ttl = IPFRAGTTL;
		fp->ipq_p = ip->ip_p;
		fp->ipq_id = ip->ip_id;
		fp->ipq_src = ip->ip_src;
		fp->ipq_dst = ip->ip_dst;
		fp->ipq_frags = m;
		m->m_nextpkt = NULL;
		goto done;
	} else {
		fp->ipq_nfrags++;
		atomic_add_int(&nfrags, 1);
#ifdef MAC
		mac_ipq_update(m, fp);
#endif
	}

#define GETIP(m)	((struct ip*)((m)->m_pkthdr.PH_loc.ptr))

	/*
	 * Handle ECN by comparing this segment with the first one;
	 * if CE is set, do not lose CE.
	 * Drop if CE and not-ECT are mixed for the same packet.
	 */
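	/*
	 * This follows the reassembly rule of RFC 3168: CE observed on
	 * any fragment is preserved on the reassembled datagram, while a
	 * CE fragment for a not-ECT datagram, or a not-ECT fragment for
	 * an ECN-capable datagram, marks the mix as invalid and the
	 * whole datagram is dropped.
	 */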
	ecn = ip->ip_tos & IPTOS_ECN_MASK;
	ecn0 = GETIP(fp->ipq_frags)->ip_tos & IPTOS_ECN_MASK;
	if (ecn == IPTOS_ECN_CE) {
		if (ecn0 == IPTOS_ECN_NOTECT)
			goto dropfrag;
		if (ecn0 != IPTOS_ECN_CE)
			GETIP(fp->ipq_frags)->ip_tos |= IPTOS_ECN_CE;
	}
	if (ecn == IPTOS_ECN_NOTECT && ecn0 != IPTOS_ECN_NOTECT)
		goto dropfrag;

	/*
	 * Find a segment which begins after this one does.
	 */
	for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt)
		if (ntohs(GETIP(q)->ip_off) > ntohs(ip->ip_off))
			break;

	/*
	 * If there is a preceding segment, it may provide some of
	 * our data already.  If so, drop the data from the incoming
	 * segment.  If it provides all of our data, drop us, otherwise
	 * stick new segment in the proper place.
	 *
	 * If some of the data is dropped from the preceding
	 * segment, then its checksum is invalidated.
	 */
	if (p) {
		i = ntohs(GETIP(p)->ip_off) + ntohs(GETIP(p)->ip_len) -
		    ntohs(ip->ip_off);
		if (i > 0) {
			if (i >= ntohs(ip->ip_len))
				goto dropfrag;
			m_adj(m, i);
			m->m_pkthdr.csum_flags = 0;
			ip->ip_off = htons(ntohs(ip->ip_off) + i);
			ip->ip_len = htons(ntohs(ip->ip_len) - i);
		}
		m->m_nextpkt = p->m_nextpkt;
		p->m_nextpkt = m;
	} else {
		m->m_nextpkt = fp->ipq_frags;
		fp->ipq_frags = m;
	}

	/*
	 * While we overlap succeeding segments, trim them or,
	 * if they are completely covered, dequeue them.
	 */
	for (; q != NULL && ntohs(ip->ip_off) + ntohs(ip->ip_len) >
	    ntohs(GETIP(q)->ip_off); q = nq) {
		i = (ntohs(ip->ip_off) + ntohs(ip->ip_len)) -
		    ntohs(GETIP(q)->ip_off);
		if (i < ntohs(GETIP(q)->ip_len)) {
			GETIP(q)->ip_len = htons(ntohs(GETIP(q)->ip_len) - i);
			GETIP(q)->ip_off = htons(ntohs(GETIP(q)->ip_off) + i);
			m_adj(q, i);
			q->m_pkthdr.csum_flags = 0;
			break;
		}
		nq = q->m_nextpkt;
		m->m_nextpkt = nq;
		IPSTAT_INC(ips_fragdropped);
		fp->ipq_nfrags--;
		atomic_subtract_int(&nfrags, 1);
		m_freem(q);
	}

	/*
	 * Check for complete reassembly and perform frag per packet
	 * limiting.
	 *
	 * Frag limiting is performed here so that the nth frag has
	 * a chance to complete the packet before we drop the packet.
	 * As a result, n+1 frags are actually allowed per packet, but
	 * only n will ever be stored.  (n = maxfragsperpacket.)
	 */
	next = 0;
	for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt) {
		if (ntohs(GETIP(q)->ip_off) != next) {
			if (fp->ipq_nfrags > V_maxfragsperpacket)
				ipq_drop(&V_ipq[hash], fp);
			goto done;
		}
		next += ntohs(GETIP(q)->ip_len);
	}
	/* Make sure the last packet didn't have the IP_MF flag */
	if (p->m_flags & M_IP_FRAG) {
		if (fp->ipq_nfrags > V_maxfragsperpacket)
			ipq_drop(&V_ipq[hash], fp);
		goto done;
	}

	/*
	 * Reassembly is complete.  Make sure the packet is a sane size.
	 */
	q = fp->ipq_frags;
	ip = GETIP(q);
	if (next + (ip->ip_hl << 2) > IP_MAXPACKET) {
		IPSTAT_INC(ips_toolong);
		ipq_drop(&V_ipq[hash], fp);
		goto done;
	}

	/*
	 * Concatenate fragments.
	 */
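	/*
	 * The first fragment's mbuf keeps its pkthdr and becomes the
	 * head of the chain.  Offloaded checksum state is merged
	 * conservatively: csum_flags are AND-ed so only flags asserted
	 * by every fragment survive, while the partial csum_data sums
	 * are simply added and folded once after the loop.
	 */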
	m = q;
	t = m->m_next;
	m->m_next = NULL;
	m_cat(m, t);
	nq = q->m_nextpkt;
	q->m_nextpkt = NULL;
	for (q = nq; q != NULL; q = nq) {
		nq = q->m_nextpkt;
		q->m_nextpkt = NULL;
		m->m_pkthdr.csum_flags &= q->m_pkthdr.csum_flags;
		m->m_pkthdr.csum_data += q->m_pkthdr.csum_data;
		m_demote_pkthdr(q);
		m_cat(m, q);
	}
	/*
	 * In order to do checksumming faster we do 'end-around carry' here
	 * (and not in for{} loop), though it implies we are not going to
	 * reassemble more than 64k fragments.
	 */
	while (m->m_pkthdr.csum_data & 0xffff0000)
		m->m_pkthdr.csum_data = (m->m_pkthdr.csum_data & 0xffff) +
		    (m->m_pkthdr.csum_data >> 16);
	atomic_subtract_int(&nfrags, fp->ipq_nfrags);
#ifdef MAC
	mac_ipq_reassemble(fp, m);
	mac_ipq_destroy(fp);
#endif

	/*
	 * Create header for new ip packet by modifying header of first
	 * packet; dequeue and discard fragment reassembly header.
	 * Make header visible.
	 */
	ip->ip_len = htons((ip->ip_hl << 2) + next);
	ip->ip_src = fp->ipq_src;
	ip->ip_dst = fp->ipq_dst;
	TAILQ_REMOVE(head, fp, ipq_list);
	V_ipq[hash].count--;
	uma_zfree(V_ipq_zone, fp);
	m->m_len += (ip->ip_hl << 2);
	m->m_data -= (ip->ip_hl << 2);
	/* some debugging cruft by sklower, below, will go away soon */
	if (m->m_flags & M_PKTHDR)	/* XXX this should be done elsewhere */
		m_fixhdr(m);
	IPSTAT_INC(ips_reassembled);
	IPQ_UNLOCK(hash);

#ifdef RSS
	/*
	 * Query the RSS layer for the flowid / flowtype for the
	 * mbuf payload.
	 *
	 * For now, just assume we have to calculate a new one.
	 * Later on we should check to see if the assigned flowid matches
	 * what RSS wants for the given IP protocol and if so, just keep it.
	 *
	 * We then queue into the relevant netisr so it can be dispatched
	 * to the correct CPU.
	 *
	 * Note - this may return 1, which means the flowid in the mbuf
	 * is correct for the configured RSS hash types and can be used.
	 */
	if (rss_mbuf_software_hash_v4(m, 0, &rss_hash, &rss_type) == 0) {
		m->m_pkthdr.flowid = rss_hash;
		M_HASHTYPE_SET(m, rss_type);
	}

	/*
	 * Queue/dispatch for reprocessing.
	 *
	 * Note: this is much slower than just handling the frame in the
	 * current receive context.  It's likely worth investigating
	 * why this is.
	 */
	netisr_dispatch(NETISR_IP_DIRECT, m);
	return (NULL);
#endif

	/* Handle in-line */
	return (m);

dropfrag:
	IPSTAT_INC(ips_fragdropped);
	if (fp != NULL) {
		fp->ipq_nfrags--;
		atomic_subtract_int(&nfrags, 1);
	}
	m_freem(m);
done:
	IPQ_UNLOCK(hash);
	return (NULL);

#undef GETIP
}
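/*
 * Sketch of the expected calling pattern; this mirrors the fragment
 * path in ip_input() (simplified here for illustration, not verbatim):
 *
 *	if (ip->ip_off & htons(IP_MF | IP_OFFMASK)) {
 *		m = ip_reass(m);
 *		if (m == NULL)
 *			return;		// mbuf was queued or dropped
 *		// Reassembly finished: reload the header pointer.
 *		ip = mtod(m, struct ip *);
 *	}
 */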
void
ipreass_init(void)
{
	int max;

	for (int i = 0; i < IPREASS_NHASH; i++) {
		TAILQ_INIT(&V_ipq[i].head);
		mtx_init(&V_ipq[i].lock, "IP reassembly", NULL,
		    MTX_DEF | MTX_DUPOK);
		V_ipq[i].count = 0;
	}
	V_ipq_hashseed = arc4random();
	V_maxfragsperpacket = 16;
	V_ipq_zone = uma_zcreate("ipq", sizeof(struct ipq), NULL, NULL, NULL,
	    NULL, UMA_ALIGN_PTR, 0);
	max = nmbclusters / 32;
	max = uma_zone_set_max(V_ipq_zone, max);
	V_ipreass_maxbucketsize = imax(max / (IPREASS_NHASH / 2), 1);

	if (IS_DEFAULT_VNET(curvnet)) {
		maxfrags = nmbclusters / 32;
		EVENTHANDLER_REGISTER(nmbclusters_change, ipreass_zone_change,
		    NULL, EVENTHANDLER_PRI_ANY);
	}
}

/*
 * If a timer expires on a reassembly queue, discard it.
 */
void
ipreass_slowtimo(void)
{
	struct ipq *fp, *tmp;

	for (int i = 0; i < IPREASS_NHASH; i++) {
		IPQ_LOCK(i);
		TAILQ_FOREACH_SAFE(fp, &V_ipq[i].head, ipq_list, tmp)
			if (--fp->ipq_ttl == 0)
				ipq_timeout(&V_ipq[i], fp);
		IPQ_UNLOCK(i);
	}
}

/*
 * Drain off all datagram fragments.
 */
void
ipreass_drain(void)
{

	for (int i = 0; i < IPREASS_NHASH; i++) {
		IPQ_LOCK(i);
		while (!TAILQ_EMPTY(&V_ipq[i].head))
			ipq_drop(&V_ipq[i], TAILQ_FIRST(&V_ipq[i].head));
		KASSERT(V_ipq[i].count == 0,
		    ("%s: V_ipq[%d] count %d (V_ipq=%p)", __func__, i,
		    V_ipq[i].count, V_ipq));
		IPQ_UNLOCK(i);
	}
}

#ifdef VIMAGE
/*
 * Destroy IP reassembly structures.
 */
void
ipreass_destroy(void)
{

	ipreass_drain();
	uma_zdestroy(V_ipq_zone);
	for (int i = 0; i < IPREASS_NHASH; i++)
		mtx_destroy(&V_ipq[i].lock);
}
#endif

/*
 * After maxnipq has been updated, propagate the change to UMA.  The UMA zone
 * max has slightly different semantics than the sysctl, for historical
 * reasons.
 */
static void
ipreass_drain_tomax(void)
{
	struct ipq *fp;
	int target;

	/*
	 * Make sure each bucket is under the new limit.  If
	 * necessary, drop enough of the oldest elements from
	 * each bucket to get under the new limit.
	 */
	for (int i = 0; i < IPREASS_NHASH; i++) {
		IPQ_LOCK(i);
		while (V_ipq[i].count > V_ipreass_maxbucketsize &&
		    (fp = TAILQ_LAST(&V_ipq[i].head, ipqhead)) != NULL)
			ipq_timeout(&V_ipq[i], fp);
		IPQ_UNLOCK(i);
	}

	/*
	 * If we are over the maximum number of fragments,
	 * drain off enough to get down to the new limit,
	 * stripping off last elements on queues.  Every
	 * run we strip the oldest element from each bucket.
	 */
	target = uma_zone_get_max(V_ipq_zone);
	while (uma_zone_get_cur(V_ipq_zone) > target) {
		for (int i = 0; i < IPREASS_NHASH; i++) {
			IPQ_LOCK(i);
			fp = TAILQ_LAST(&V_ipq[i].head, ipqhead);
			if (fp != NULL)
				ipq_timeout(&V_ipq[i], fp);
			IPQ_UNLOCK(i);
		}
	}
}

static void
ipreass_zone_change(void *tag)
{
	VNET_ITERATOR_DECL(vnet_iter);
	int max;

	maxfrags = nmbclusters / 32;
	max = nmbclusters / 32;
	VNET_LIST_RLOCK_NOSLEEP();
	VNET_FOREACH(vnet_iter) {
		CURVNET_SET(vnet_iter);
		max = uma_zone_set_max(V_ipq_zone, max);
		V_ipreass_maxbucketsize = imax(max / (IPREASS_NHASH / 2), 1);
		ipreass_drain_tomax();
		CURVNET_RESTORE();
	}
	VNET_LIST_RUNLOCK_NOSLEEP();
}
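/*
 * Example of driving the knob below from userland (hypothetical
 * values):
 *
 *	sysctl net.inet.ip.maxfragpackets=1024	# cap reassembly queues
 *	sysctl net.inet.ip.maxfragpackets=0	# disable reassembly
 *	sysctl net.inet.ip.maxfragpackets=-1	# uncap the zone
 */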
/*
 * Change the limit on the UMA zone, or disable fragment allocation
 * entirely.  Since 0 and -1 are special values here, we need our own
 * handler, instead of sysctl_handle_uma_zone_max().
 */
static int
sysctl_maxfragpackets(SYSCTL_HANDLER_ARGS)
{
	int error, max;

	if (V_noreass == 0) {
		max = uma_zone_get_max(V_ipq_zone);
		if (max == 0)
			max = -1;
	} else
		max = 0;
	error = sysctl_handle_int(oidp, &max, 0, req);
	if (error || !req->newptr)
		return (error);
	if (max > 0) {
		/*
		 * XXXRW: Might be a good idea to sanity check the argument
		 * and place an extreme upper bound.
		 */
		max = uma_zone_set_max(V_ipq_zone, max);
		V_ipreass_maxbucketsize = imax(max / (IPREASS_NHASH / 2), 1);
		ipreass_drain_tomax();
		V_noreass = 0;
	} else if (max == 0) {
		V_noreass = 1;
		ipreass_drain();
	} else if (max == -1) {
		V_noreass = 0;
		uma_zone_set_max(V_ipq_zone, 0);
		V_ipreass_maxbucketsize = INT_MAX;
	} else
		return (EINVAL);
	return (0);
}

/*
 * Look for an old fragment queue header that can be reused, trying the
 * currently locked hash bucket first.
 */
static struct ipq *
ipq_reuse(int start)
{
	struct ipq *fp;
	int bucket, i;

	IPQ_LOCK_ASSERT(start);

	for (i = 0; i < IPREASS_NHASH; i++) {
		bucket = (start + i) % IPREASS_NHASH;
		if (bucket != start && IPQ_TRYLOCK(bucket) == 0)
			continue;
		fp = TAILQ_LAST(&V_ipq[bucket].head, ipqhead);
		if (fp) {
			struct mbuf *m;

			IPSTAT_ADD(ips_fragtimeout, fp->ipq_nfrags);
			atomic_subtract_int(&nfrags, fp->ipq_nfrags);
			while (fp->ipq_frags) {
				m = fp->ipq_frags;
				fp->ipq_frags = m->m_nextpkt;
				m_freem(m);
			}
			TAILQ_REMOVE(&V_ipq[bucket].head, fp, ipq_list);
			V_ipq[bucket].count--;
			if (bucket != start)
				IPQ_UNLOCK(bucket);
			break;
		}
		if (bucket != start)
			IPQ_UNLOCK(bucket);
	}
	IPQ_LOCK_ASSERT(start);
	return (fp);
}

/*
 * Free a fragment reassembly header and all associated datagrams.
 */
static void
ipq_free(struct ipqbucket *bucket, struct ipq *fp)
{
	struct mbuf *q;

	atomic_subtract_int(&nfrags, fp->ipq_nfrags);
	while (fp->ipq_frags) {
		q = fp->ipq_frags;
		fp->ipq_frags = q->m_nextpkt;
		m_freem(q);
	}
	TAILQ_REMOVE(&bucket->head, fp, ipq_list);
	bucket->count--;
	uma_zfree(V_ipq_zone, fp);
}

/*
 * Get or set the maximum number of reassembly queues per bucket.
 */
static int
sysctl_maxfragbucketsize(SYSCTL_HANDLER_ARGS)
{
	int error, max;

	max = V_ipreass_maxbucketsize;
	error = sysctl_handle_int(oidp, &max, 0, req);
	if (error || !req->newptr)
		return (error);
	if (max <= 0)
		return (EINVAL);
	V_ipreass_maxbucketsize = max;
	ipreass_drain_tomax();
	return (0);
}