/*-
 * Copyright (c) 2002 Andre Oppermann, Internet Business Solutions AG
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * The tcp_hostcache moves the tcp-specific cached metrics from the routing
 * table to a dedicated structure indexed by the remote IP address.  It keeps
 * information on the measured TCP parameters of past TCP sessions to allow
 * better initial start values to be used with later connections to/from the
 * same source.  Depending on the network parameters (delay, max MTU,
 * congestion window) between local and remote sites, this can lead to
 * significant speed-ups for new TCP connections after the first one.
 *
 * Due to the tcp_hostcache, all TCP-specific metrics information in the
 * routing table has been removed.  The inpcb no longer keeps a pointer to
 * the routing entry, and protocol-initiated route cloning has been removed
 * as well.  With these changes, the routing table has gone back to being
 * more lightweight and only carries information related to packet
 * forwarding.
 *
 * tcp_hostcache is designed for multiple concurrent access in SMP
 * environments and high contention.  All bucket rows have their own lock
 * and thus multiple lookups and modifications can be done at the same time
 * as long as they are in different bucket rows.  If a request for insertion
 * of a new record can't be satisfied, it simply returns an empty structure.
 * Nobody and nothing outside of tcp_hostcache.c will ever point directly to
 * any entry in the tcp_hostcache.  All communication is done in an
 * object-oriented way and only functions of tcp_hostcache will manipulate
 * hostcache entries.  Otherwise, we are unable to achieve good behaviour in
 * concurrent access situations.  Since tcp_hostcache is only caching
 * information, there are no fatal consequences if we either can't satisfy
 * any particular request or have to drop/overwrite an existing entry
 * because of bucket limit memory constraints.
 */
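
/*
 * Illustrative sketch of how the external interface below is typically
 * consumed by the rest of the TCP stack; the seed_srtt_from_cache() helper
 * is a hypothetical placeholder, only the tcp_hc_*() calls are defined in
 * this file:
 *
 *	struct hc_metrics_lite metrics;
 *
 *	tcp_hc_get(&inp->inp_inc, &metrics);		   // on connection setup
 *	if (metrics.rmx_rtt != 0)
 *		seed_srtt_from_cache(tp, metrics.rmx_rtt); // hypothetical helper
 *	...
 *	tcp_hc_updatemtu(&inp->inp_inc, mtu);		   // on path MTU discovery
 *	...
 *	tcp_hc_update(&inp->inp_inc, &metrics);		   // on connection teardown
 */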

/*
 * Many thanks to jlemon for basic structure of tcp_syncache which is being
 * followed here.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/netinet/tcp_hostcache.c 369633 2021-04-16 22:12:41Z rscheff $");

#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/route.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/in_var.h>
#include <netinet/in_pcb.h>
#include <netinet/ip_var.h>
#ifdef INET6
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#endif
#include <netinet/tcp.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp_hostcache.h>
#ifdef INET6
#include <netinet6/tcp6_var.h>
#endif

#include <vm/uma.h>

/* Arbitrary values */
#define TCP_HOSTCACHE_HASHSIZE		512
#define TCP_HOSTCACHE_BUCKETLIMIT	30
#define TCP_HOSTCACHE_EXPIRE		60*60	/* one hour */
#define TCP_HOSTCACHE_PRUNE		5*60	/* every 5 minutes */

static VNET_DEFINE(struct tcp_hostcache, tcp_hostcache);
#define	V_tcp_hostcache		VNET(tcp_hostcache)

static VNET_DEFINE(struct callout, tcp_hc_callout);
#define	V_tcp_hc_callout	VNET(tcp_hc_callout)

static struct hc_metrics *tcp_hc_lookup(struct in_conninfo *);
static struct hc_metrics *tcp_hc_insert(struct in_conninfo *);
static int sysctl_tcp_hc_list(SYSCTL_HANDLER_ARGS);
static int sysctl_tcp_hc_purgenow(SYSCTL_HANDLER_ARGS);
static void tcp_hc_purge_internal(int);
static void tcp_hc_purge(void *);

static SYSCTL_NODE(_net_inet_tcp, OID_AUTO, hostcache, CTLFLAG_RW, 0,
    "TCP Host cache");

VNET_DEFINE(int, tcp_use_hostcache) = 1;
#define V_tcp_use_hostcache	VNET(tcp_use_hostcache)
SYSCTL_INT(_net_inet_tcp_hostcache, OID_AUTO, enable, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_use_hostcache), 0,
    "Enable the TCP hostcache");

SYSCTL_UINT(_net_inet_tcp_hostcache, OID_AUTO, cachelimit, CTLFLAG_VNET | CTLFLAG_RDTUN,
    &VNET_NAME(tcp_hostcache.cache_limit), 0,
    "Overall entry limit for hostcache");

SYSCTL_UINT(_net_inet_tcp_hostcache, OID_AUTO, hashsize, CTLFLAG_VNET | CTLFLAG_RDTUN,
    &VNET_NAME(tcp_hostcache.hashsize), 0,
    "Size of TCP hostcache hashtable");

SYSCTL_UINT(_net_inet_tcp_hostcache, OID_AUTO, bucketlimit,
    CTLFLAG_VNET | CTLFLAG_RDTUN, &VNET_NAME(tcp_hostcache.bucket_limit), 0,
    "Per-bucket hash limit for hostcache");

SYSCTL_UINT(_net_inet_tcp_hostcache, OID_AUTO, count, CTLFLAG_VNET | CTLFLAG_RD,
    &VNET_NAME(tcp_hostcache.cache_count), 0,
    "Current number of entries in hostcache");

SYSCTL_INT(_net_inet_tcp_hostcache, OID_AUTO, expire, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_hostcache.expire), 0,
    "Expire time of TCP hostcache entries");

SYSCTL_INT(_net_inet_tcp_hostcache, OID_AUTO, prune, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_hostcache.prune), 0,
    "Time between purge runs");

SYSCTL_INT(_net_inet_tcp_hostcache, OID_AUTO, purge, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_hostcache.purgeall), 0,
    "Expire all entries on next purge run");

SYSCTL_PROC(_net_inet_tcp_hostcache, OID_AUTO, list,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_SKIP, 0, 0,
    sysctl_tcp_hc_list, "A", "List of all hostcache entries");

SYSCTL_PROC(_net_inet_tcp_hostcache, OID_AUTO, purgenow,
    CTLTYPE_INT | CTLFLAG_RW, NULL, 0,
    sysctl_tcp_hc_purgenow, "I", "Immediately purge all entries");
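
/*
 * A note on tuning (illustrative): cachelimit, hashsize and bucketlimit are
 * CTLFLAG_RDTUN and therefore can only be set as loader tunables, for
 * example in /boot/loader.conf:
 *
 *	net.inet.tcp.hostcache.hashsize="1024"	  # must be a power of 2
 *	net.inet.tcp.hostcache.bucketlimit="30"
 *	net.inet.tcp.hostcache.cachelimit="30720" # capped at hashsize * bucketlimit
 *
 * The remaining knobs (enable, expire, prune, purge) are ordinary
 * read/write sysctls and can be changed at run time.
 */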

static MALLOC_DEFINE(M_HOSTCACHE, "hostcache", "TCP hostcache");

#define HOSTCACHE_HASH(ip) \
	(((ip)->s_addr ^ ((ip)->s_addr >> 7) ^ ((ip)->s_addr >> 17)) & \
	 V_tcp_hostcache.hashmask)

/* XXX: What is the recommended hash to get good entropy for IPv6 addresses? */
#define HOSTCACHE_HASH6(ip6) \
	(((ip6)->s6_addr32[0] ^ \
	  (ip6)->s6_addr32[1] ^ \
	  (ip6)->s6_addr32[2] ^ \
	  (ip6)->s6_addr32[3]) & \
	 V_tcp_hostcache.hashmask)

#define THC_LOCK(lp)		mtx_lock(lp)
#define THC_UNLOCK(lp)		mtx_unlock(lp)

void
tcp_hc_init(void)
{
	u_int cache_limit;
	int i;

	/*
	 * Initialize hostcache structures.
	 */
	atomic_store_int(&V_tcp_hostcache.cache_count, 0);
	V_tcp_hostcache.hashsize = TCP_HOSTCACHE_HASHSIZE;
	V_tcp_hostcache.bucket_limit = TCP_HOSTCACHE_BUCKETLIMIT;
	V_tcp_hostcache.expire = TCP_HOSTCACHE_EXPIRE;
	V_tcp_hostcache.prune = TCP_HOSTCACHE_PRUNE;

	TUNABLE_INT_FETCH("net.inet.tcp.hostcache.hashsize",
	    &V_tcp_hostcache.hashsize);
	if (!powerof2(V_tcp_hostcache.hashsize)) {
		printf("WARNING: hostcache hash size is not a power of 2.\n");
		V_tcp_hostcache.hashsize = TCP_HOSTCACHE_HASHSIZE; /* default */
	}
	V_tcp_hostcache.hashmask = V_tcp_hostcache.hashsize - 1;

	TUNABLE_INT_FETCH("net.inet.tcp.hostcache.bucketlimit",
	    &V_tcp_hostcache.bucket_limit);

	cache_limit = V_tcp_hostcache.hashsize * V_tcp_hostcache.bucket_limit;
	V_tcp_hostcache.cache_limit = cache_limit;
	TUNABLE_INT_FETCH("net.inet.tcp.hostcache.cachelimit",
	    &V_tcp_hostcache.cache_limit);
	if (V_tcp_hostcache.cache_limit > cache_limit)
		V_tcp_hostcache.cache_limit = cache_limit;

	/*
	 * Allocate the hash table.
	 */
	V_tcp_hostcache.hashbase = (struct hc_head *)
	    malloc(V_tcp_hostcache.hashsize * sizeof(struct hc_head),
	    M_HOSTCACHE, M_WAITOK | M_ZERO);

	/*
	 * Initialize the hash buckets.
	 */
	for (i = 0; i < V_tcp_hostcache.hashsize; i++) {
		TAILQ_INIT(&V_tcp_hostcache.hashbase[i].hch_bucket);
		V_tcp_hostcache.hashbase[i].hch_length = 0;
		mtx_init(&V_tcp_hostcache.hashbase[i].hch_mtx, "tcp_hc_entry",
		    NULL, MTX_DEF);
	}

	/*
	 * Allocate the hostcache entries.
	 */
	V_tcp_hostcache.zone =
	    uma_zcreate("hostcache", sizeof(struct hc_metrics),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	uma_zone_set_max(V_tcp_hostcache.zone, V_tcp_hostcache.cache_limit);

	/*
	 * Set up periodic cache cleanup.
	 */
	callout_init(&V_tcp_hc_callout, 1);
	callout_reset(&V_tcp_hc_callout, V_tcp_hostcache.prune * hz,
	    tcp_hc_purge, curvnet);
}

#ifdef VIMAGE
void
tcp_hc_destroy(void)
{
	int i;

	callout_drain(&V_tcp_hc_callout);

	/* Purge all hc entries. */
	tcp_hc_purge_internal(1);

	/* Free the uma zone and the allocated hash table. */
	uma_zdestroy(V_tcp_hostcache.zone);

	for (i = 0; i < V_tcp_hostcache.hashsize; i++)
		mtx_destroy(&V_tcp_hostcache.hashbase[i].hch_mtx);
	free(V_tcp_hostcache.hashbase, M_HOSTCACHE);
}
#endif

/*
 * Internal function: look up an entry in the hostcache or return NULL.
 *
 * If an entry has been returned, the caller becomes responsible for
 * unlocking the bucket row after he is done reading/modifying the entry.
 */
static struct hc_metrics *
tcp_hc_lookup(struct in_conninfo *inc)
{
	int hash;
	struct hc_head *hc_head;
	struct hc_metrics *hc_entry;

	if (!V_tcp_use_hostcache)
		return NULL;

	KASSERT(inc != NULL, ("tcp_hc_lookup with NULL in_conninfo pointer"));

	/*
	 * Hash the foreign ip address.
	 */
	if (inc->inc_flags & INC_ISIPV6)
		hash = HOSTCACHE_HASH6(&inc->inc6_faddr);
	else
		hash = HOSTCACHE_HASH(&inc->inc_faddr);

	hc_head = &V_tcp_hostcache.hashbase[hash];

	/*
	 * Acquire lock for this bucket row; we release the lock if we don't
	 * find an entry, otherwise the caller has to unlock after he is
	 * done.
	 */
	THC_LOCK(&hc_head->hch_mtx);

	/*
	 * Iterate through entries in bucket row looking for a match.
	 */
	TAILQ_FOREACH(hc_entry, &hc_head->hch_bucket, rmx_q) {
		if (inc->inc_flags & INC_ISIPV6) {
			/* XXX: check ip6_zoneid */
			if (memcmp(&inc->inc6_faddr, &hc_entry->ip6,
			    sizeof(inc->inc6_faddr)) == 0)
				return hc_entry;
		} else {
			if (memcmp(&inc->inc_faddr, &hc_entry->ip4,
			    sizeof(inc->inc_faddr)) == 0)
				return hc_entry;
		}
	}

	/*
	 * We were unsuccessful and didn't find anything.
	 */
	THC_UNLOCK(&hc_head->hch_mtx);
	return NULL;
}

/*
 * Internal function: insert an entry into the hostcache or return NULL if
 * unable to allocate a new one.
 *
 * If an entry has been returned, the caller becomes responsible for
 * unlocking the bucket row after he is done reading/modifying the entry.
 */
static struct hc_metrics *
tcp_hc_insert(struct in_conninfo *inc)
{
	int hash;
	struct hc_head *hc_head;
	struct hc_metrics *hc_entry;

	if (!V_tcp_use_hostcache)
		return NULL;

	KASSERT(inc != NULL, ("tcp_hc_insert with NULL in_conninfo pointer"));

	/*
	 * Hash the foreign ip address.
	 */
	if (inc->inc_flags & INC_ISIPV6)
		hash = HOSTCACHE_HASH6(&inc->inc6_faddr);
	else
		hash = HOSTCACHE_HASH(&inc->inc_faddr);

	hc_head = &V_tcp_hostcache.hashbase[hash];

	/*
	 * Acquire lock for this bucket row; we release the lock if we don't
	 * find an entry, otherwise the caller has to unlock after he is
	 * done.
	 */
	THC_LOCK(&hc_head->hch_mtx);

	/*
	 * If the bucket limit is reached, reuse the least-used element.
	 */
	if (hc_head->hch_length >= V_tcp_hostcache.bucket_limit ||
	    atomic_load_int(&V_tcp_hostcache.cache_count) >=
	    V_tcp_hostcache.cache_limit) {
		hc_entry = TAILQ_LAST(&hc_head->hch_bucket, hc_qhead);
		/*
		 * At first we were dropping the last element, just to
		 * reacquire it in the next two lines again, which isn't very
		 * efficient.  Instead just reuse the least used element.
		 * We may drop something that is still "in-use" but we can be
		 * "lossy".
		 * Just give up if this bucket row is empty and we don't have
		 * anything to replace.
		 */
		if (hc_entry == NULL) {
			THC_UNLOCK(&hc_head->hch_mtx);
			return NULL;
		}
		TAILQ_REMOVE(&hc_head->hch_bucket, hc_entry, rmx_q);
		V_tcp_hostcache.hashbase[hash].hch_length--;
		atomic_subtract_int(&V_tcp_hostcache.cache_count, 1);
		TCPSTAT_INC(tcps_hc_bucketoverflow);
#if 0
		uma_zfree(V_tcp_hostcache.zone, hc_entry);
#endif
	} else {
		/*
		 * Allocate a new entry, or balk if not possible.
		 */
		hc_entry = uma_zalloc(V_tcp_hostcache.zone, M_NOWAIT);
		if (hc_entry == NULL) {
			THC_UNLOCK(&hc_head->hch_mtx);
			return NULL;
		}
	}

	/*
	 * Initialize basic information of hostcache entry.
	 */
	bzero(hc_entry, sizeof(*hc_entry));
	if (inc->inc_flags & INC_ISIPV6) {
		hc_entry->ip6 = inc->inc6_faddr;
		hc_entry->ip6_zoneid = inc->inc6_zoneid;
	} else
		hc_entry->ip4 = inc->inc_faddr;
	hc_entry->rmx_head = hc_head;
	hc_entry->rmx_expire = V_tcp_hostcache.expire;

	/*
	 * Put it upfront.
	 */
	TAILQ_INSERT_HEAD(&hc_head->hch_bucket, hc_entry, rmx_q);
	V_tcp_hostcache.hashbase[hash].hch_length++;
	atomic_add_int(&V_tcp_hostcache.cache_count, 1);
	TCPSTAT_INC(tcps_hc_added);

	return hc_entry;
}

/*
 * External function: look up an entry in the hostcache and fill out the
 * supplied TCP metrics structure.  Fills in zero when no entry was found or
 * a value is not set.
 */
void
tcp_hc_get(struct in_conninfo *inc, struct hc_metrics_lite *hc_metrics_lite)
{
	struct hc_metrics *hc_entry;

	if (!V_tcp_use_hostcache) {
		bzero(hc_metrics_lite, sizeof(*hc_metrics_lite));
		return;
	}

	/*
	 * Find the right bucket.
	 */
	hc_entry = tcp_hc_lookup(inc);

	/*
	 * If we don't have an existing object.
	 */
	if (hc_entry == NULL) {
		bzero(hc_metrics_lite, sizeof(*hc_metrics_lite));
		return;
	}
	hc_entry->rmx_hits++;
	hc_entry->rmx_expire = V_tcp_hostcache.expire; /* start over again */

	hc_metrics_lite->rmx_mtu = hc_entry->rmx_mtu;
	hc_metrics_lite->rmx_ssthresh = hc_entry->rmx_ssthresh;
	hc_metrics_lite->rmx_rtt = hc_entry->rmx_rtt;
	hc_metrics_lite->rmx_rttvar = hc_entry->rmx_rttvar;
	hc_metrics_lite->rmx_cwnd = hc_entry->rmx_cwnd;
	hc_metrics_lite->rmx_sendpipe = hc_entry->rmx_sendpipe;
	hc_metrics_lite->rmx_recvpipe = hc_entry->rmx_recvpipe;

	/*
	 * Unlock bucket row.
	 */
	THC_UNLOCK(&hc_entry->rmx_head->hch_mtx);
}

/*
 * External function: look up an entry in the hostcache and return the
 * discovered path MTU.  Returns 0 if no entry is found or value is not
 * set.
 */
u_long
tcp_hc_getmtu(struct in_conninfo *inc)
{
	struct hc_metrics *hc_entry;
	u_long mtu;

	if (!V_tcp_use_hostcache)
		return 0;

	hc_entry = tcp_hc_lookup(inc);
	if (hc_entry == NULL) {
		return 0;
	}
	hc_entry->rmx_hits++;
	hc_entry->rmx_expire = V_tcp_hostcache.expire; /* start over again */

	mtu = hc_entry->rmx_mtu;
	THC_UNLOCK(&hc_entry->rmx_head->hch_mtx);
	return mtu;
}

/*
 * External function: update the MTU value of an entry in the hostcache.
 * Creates a new entry if none was found.
 */
void
tcp_hc_updatemtu(struct in_conninfo *inc, u_long mtu)
{
	struct hc_metrics *hc_entry;

	if (!V_tcp_use_hostcache)
		return;

	/*
	 * Find the right bucket.
	 */
	hc_entry = tcp_hc_lookup(inc);

	/*
	 * If we don't have an existing object, try to insert a new one.
	 */
	if (hc_entry == NULL) {
		hc_entry = tcp_hc_insert(inc);
		if (hc_entry == NULL)
			return;
	}
	hc_entry->rmx_updates++;
	hc_entry->rmx_expire = V_tcp_hostcache.expire; /* start over again */

	hc_entry->rmx_mtu = mtu;

	/*
	 * Put it upfront so we find it faster next time.
	 */
	TAILQ_REMOVE(&hc_entry->rmx_head->hch_bucket, hc_entry, rmx_q);
	TAILQ_INSERT_HEAD(&hc_entry->rmx_head->hch_bucket, hc_entry, rmx_q);

	/*
	 * Unlock bucket row.
	 */
	THC_UNLOCK(&hc_entry->rmx_head->hch_mtx);
}

/*
 * External function: update the TCP metrics of an entry in the hostcache.
 * Creates a new entry if none was found.
 */
void
tcp_hc_update(struct in_conninfo *inc, struct hc_metrics_lite *hcml)
{
	struct hc_metrics *hc_entry;

	if (!V_tcp_use_hostcache)
		return;

	hc_entry = tcp_hc_lookup(inc);
	if (hc_entry == NULL) {
		hc_entry = tcp_hc_insert(inc);
		if (hc_entry == NULL)
			return;
	}
	hc_entry->rmx_updates++;
	hc_entry->rmx_expire = V_tcp_hostcache.expire; /* start over again */

	if (hcml->rmx_rtt != 0) {
		if (hc_entry->rmx_rtt == 0)
			hc_entry->rmx_rtt = hcml->rmx_rtt;
		else
			hc_entry->rmx_rtt =
			    (hc_entry->rmx_rtt + hcml->rmx_rtt) / 2;
		TCPSTAT_INC(tcps_cachedrtt);
	}
	if (hcml->rmx_rttvar != 0) {
		if (hc_entry->rmx_rttvar == 0)
			hc_entry->rmx_rttvar = hcml->rmx_rttvar;
		else
			hc_entry->rmx_rttvar =
			    (hc_entry->rmx_rttvar + hcml->rmx_rttvar) / 2;
		TCPSTAT_INC(tcps_cachedrttvar);
	}
	if (hcml->rmx_ssthresh != 0) {
		if (hc_entry->rmx_ssthresh == 0)
			hc_entry->rmx_ssthresh = hcml->rmx_ssthresh;
		else
			hc_entry->rmx_ssthresh =
			    (hc_entry->rmx_ssthresh + hcml->rmx_ssthresh) / 2;
		TCPSTAT_INC(tcps_cachedssthresh);
	}
	if (hcml->rmx_cwnd != 0) {
		if (hc_entry->rmx_cwnd == 0)
			hc_entry->rmx_cwnd = hcml->rmx_cwnd;
		else
			hc_entry->rmx_cwnd =
			    (hc_entry->rmx_cwnd + hcml->rmx_cwnd) / 2;
		/* TCPSTAT_INC(tcps_cachedcwnd); */
	}
	if (hcml->rmx_sendpipe != 0) {
		if (hc_entry->rmx_sendpipe == 0)
			hc_entry->rmx_sendpipe = hcml->rmx_sendpipe;
		else
			hc_entry->rmx_sendpipe =
			    (hc_entry->rmx_sendpipe + hcml->rmx_sendpipe) / 2;
		/* TCPSTAT_INC(tcps_cachedsendpipe); */
	}
	if (hcml->rmx_recvpipe != 0) {
		if (hc_entry->rmx_recvpipe == 0)
			hc_entry->rmx_recvpipe = hcml->rmx_recvpipe;
		else
			hc_entry->rmx_recvpipe =
			    (hc_entry->rmx_recvpipe + hcml->rmx_recvpipe) / 2;
		/* TCPSTAT_INC(tcps_cachedrecvpipe); */
	}

	TAILQ_REMOVE(&hc_entry->rmx_head->hch_bucket, hc_entry, rmx_q);
	TAILQ_INSERT_HEAD(&hc_entry->rmx_head->hch_bucket, hc_entry, rmx_q);
	THC_UNLOCK(&hc_entry->rmx_head->hch_mtx);
}
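
/*
 * Note on the update policy above: a non-zero sample is blended with the
 * cached value as (cached + new) / 2, i.e. an exponentially weighted moving
 * average with weight 1/2, so a single outlier moves a cached metric by at
 * most half the difference while repeated samples converge quickly.
 */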

/*
 * Sysctl function: prints the list and values of all hostcache entries in
 * unsorted order.
 */
static int
sysctl_tcp_hc_list(SYSCTL_HANDLER_ARGS)
{
	const int linesize = 128;
	struct sbuf sb;
	int i, error, len;
	struct hc_metrics *hc_entry;
	char ip4buf[INET_ADDRSTRLEN];
#ifdef INET6
	char ip6buf[INET6_ADDRSTRLEN];
#endif

	if (jailed_without_vnet(curthread->td_ucred) != 0)
		return (EPERM);

	/* Optimize buffer length query by sbin/sysctl */
	if (req->oldptr == NULL) {
		len = (atomic_load_int(&V_tcp_hostcache.cache_count) + 1) *
		    linesize;
		return (SYSCTL_OUT(req, NULL, len));
	}

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0) {
		return (error);
	}

	/* Use a buffer sized for one full bucket */
	sbuf_new_for_sysctl(&sb, NULL, V_tcp_hostcache.bucket_limit *
	    linesize, req);

	sbuf_printf(&sb,
	    "\nIP address        MTU SSTHRESH      RTT   RTTVAR "
	    "    CWND SENDPIPE RECVPIPE HITS  UPD  EXP\n");
	sbuf_drain(&sb);

#define msec(u) (((u) + 500) / 1000)
	for (i = 0; i < V_tcp_hostcache.hashsize; i++) {
		THC_LOCK(&V_tcp_hostcache.hashbase[i].hch_mtx);
		TAILQ_FOREACH(hc_entry, &V_tcp_hostcache.hashbase[i].hch_bucket,
		    rmx_q) {
			sbuf_printf(&sb,
			    "%-15s %5lu %8lu %6lums %6lums %8lu %8lu %8lu %4lu "
			    "%4lu %4i\n",
			    hc_entry->ip4.s_addr ?
			        inet_ntoa_r(hc_entry->ip4, ip4buf) :
#ifdef INET6
				ip6_sprintf(ip6buf, &hc_entry->ip6),
#else
				"IPv6?",
#endif
			    hc_entry->rmx_mtu,
			    hc_entry->rmx_ssthresh,
			    msec(hc_entry->rmx_rtt *
				(RTM_RTTUNIT / (hz * TCP_RTT_SCALE))),
			    msec(hc_entry->rmx_rttvar *
				(RTM_RTTUNIT / (hz * TCP_RTTVAR_SCALE))),
			    hc_entry->rmx_cwnd,
			    hc_entry->rmx_sendpipe,
			    hc_entry->rmx_recvpipe,
			    hc_entry->rmx_hits,
			    hc_entry->rmx_updates,
			    hc_entry->rmx_expire);
		}
		THC_UNLOCK(&V_tcp_hostcache.hashbase[i].hch_mtx);
		sbuf_drain(&sb);
	}
#undef msec
	error = sbuf_finish(&sb);
	sbuf_delete(&sb);
	return (error);
}

/*
 * Caller has to make sure the curvnet is set properly.
 */
static void
tcp_hc_purge_internal(int all)
{
	struct hc_metrics *hc_entry, *hc_next;
	int i;

	for (i = 0; i < V_tcp_hostcache.hashsize; i++) {
		THC_LOCK(&V_tcp_hostcache.hashbase[i].hch_mtx);
		TAILQ_FOREACH_SAFE(hc_entry,
		    &V_tcp_hostcache.hashbase[i].hch_bucket, rmx_q, hc_next) {
			if (all || hc_entry->rmx_expire <= 0) {
				TAILQ_REMOVE(&V_tcp_hostcache.hashbase[i].hch_bucket,
				    hc_entry, rmx_q);
				uma_zfree(V_tcp_hostcache.zone, hc_entry);
				V_tcp_hostcache.hashbase[i].hch_length--;
				atomic_subtract_int(&V_tcp_hostcache.cache_count, 1);
			} else
				hc_entry->rmx_expire -= V_tcp_hostcache.prune;
		}
		THC_UNLOCK(&V_tcp_hostcache.hashbase[i].hch_mtx);
	}
}

/*
 * Expire and purge (old|all) entries in the tcp_hostcache.  Runs
 * periodically from the callout.
 */
static void
tcp_hc_purge(void *arg)
{
	CURVNET_SET((struct vnet *) arg);
	int all = 0;

	if (V_tcp_hostcache.purgeall) {
		all = 1;
		V_tcp_hostcache.purgeall = 0;
	}

	tcp_hc_purge_internal(all);

	callout_reset(&V_tcp_hc_callout, V_tcp_hostcache.prune * hz,
	    tcp_hc_purge, arg);
	CURVNET_RESTORE();
}

/*
 * Expire and purge all entries in hostcache immediately.
 */
static int
sysctl_tcp_hc_purgenow(SYSCTL_HANDLER_ARGS)
{
	int error, val;

	val = 0;
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return (error);

	tcp_hc_purge_internal(1);

	callout_reset(&V_tcp_hc_callout, V_tcp_hostcache.prune * hz,
	    tcp_hc_purge, curvnet);

	return (0);
}
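
/*
 * Illustrative usage from userland: the cache can be inspected and flushed
 * through the sysctl knobs defined above, for example:
 *
 *	sysctl net.inet.tcp.hostcache.list		# dump all entries
 *	sysctl net.inet.tcp.hostcache.purge=1		# expire all on next prune run
 *	sysctl net.inet.tcp.hostcache.purgenow=1	# purge immediately
 */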