Deleted Added
tcp_hostcache.c (172467) tcp_hostcache.c (181803)
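The change below is largely mechanical: direct references to the global tcp_hostcache, tcp_hc_callout and tcpstat objects are wrapped in the V_ accessor macro and <sys/vimage.h> is included, part of FreeBSD's network-stack virtualization work. A minimal sketch of the idiom follows, assuming a deliberately simplified layout (every name in it is illustrative, not the real <sys/vimage.h>): V_x resolves either to the plain global or to a field of a per-instance container, so call sites are written only once.

/*
 * Illustrative sketch only -- not the real <sys/vimage.h>.
 */
struct hostcache_state {
	int	cache_count;
	int	cache_limit;
};

struct instance_ctx {				/* hypothetical per-stack container */
	struct hostcache_state	hc;
};

#ifdef VIRTUALIZE_SKETCH
extern struct instance_ctx *cur_instance;
#define	V_hostcache	(cur_instance->hc)	/* per-instance copy */
#else
static struct hostcache_state hostcache;	/* single global */
#define	V_hostcache	hostcache
#endif

static int
hostcache_full(void)
{
	/* The same source line works in both configurations. */
	return (V_hostcache.cache_count >= V_hostcache.cache_limit);
}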
1/*-
2 * Copyright (c) 2002 Andre Oppermann, Internet Business Solutions AG
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright

--- 49 unchanged lines hidden ---

58 */
59
60/*
61 * Many thanks to jlemon for basic structure of tcp_syncache which is being
62 * followed here.
63 */
64
65#include <sys/cdefs.h>
1/*-
2 * Copyright (c) 2002 Andre Oppermann, Internet Business Solutions AG
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright

--- 49 unchanged lines hidden ---

58 */
59
60/*
61 * Many thanks to jlemon for basic structure of tcp_syncache which is being
62 * followed here.
63 */
64
65#include <sys/cdefs.h>
66__FBSDID("$FreeBSD: head/sys/netinet/tcp_hostcache.c 172467 2007-10-07 20:44:24Z silby $");
66__FBSDID("$FreeBSD: head/sys/netinet/tcp_hostcache.c 181803 2008-08-17 23:27:27Z bz $");
67
68#include "opt_inet6.h"
69
70#include <sys/param.h>
71#include <sys/systm.h>
72#include <sys/kernel.h>
73#include <sys/lock.h>
74#include <sys/mutex.h>
75#include <sys/malloc.h>
76#include <sys/socket.h>
77#include <sys/socketvar.h>
78#include <sys/sysctl.h>
67
68#include "opt_inet6.h"
69
70#include <sys/param.h>
71#include <sys/systm.h>
72#include <sys/kernel.h>
73#include <sys/lock.h>
74#include <sys/mutex.h>
75#include <sys/malloc.h>
76#include <sys/socket.h>
77#include <sys/socketvar.h>
78#include <sys/sysctl.h>
79#include <sys/vimage.h>
79
80#include <net/if.h>
81
82#include <netinet/in.h>
83#include <netinet/in_systm.h>
84#include <netinet/ip.h>
85#include <netinet/in_var.h>
86#include <netinet/in_pcb.h>

--- 94 unchanged lines hidden ---

181 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_SKIP, 0, 0,
182 sysctl_tcp_hc_list, "A", "List of all hostcache entries");
183
184
185static MALLOC_DEFINE(M_HOSTCACHE, "hostcache", "TCP hostcache");
186
187#define HOSTCACHE_HASH(ip) \
188 (((ip)->s_addr ^ ((ip)->s_addr >> 7) ^ ((ip)->s_addr >> 17)) & \
80
81#include <net/if.h>
82
83#include <netinet/in.h>
84#include <netinet/in_systm.h>
85#include <netinet/ip.h>
86#include <netinet/in_var.h>
87#include <netinet/in_pcb.h>

--- 94 unchanged lines hidden ---

182 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_SKIP, 0, 0,
183 sysctl_tcp_hc_list, "A", "List of all hostcache entries");
184
185
186static MALLOC_DEFINE(M_HOSTCACHE, "hostcache", "TCP hostcache");
187
188#define HOSTCACHE_HASH(ip) \
189 (((ip)->s_addr ^ ((ip)->s_addr >> 7) ^ ((ip)->s_addr >> 17)) & \
189 tcp_hostcache.hashmask)
190 V_tcp_hostcache.hashmask)
190
191/* XXX: What is the recommended hash to get good entropy for IPv6 addresses? */
192#define HOSTCACHE_HASH6(ip6) \
193 (((ip6)->s6_addr32[0] ^ \
194 (ip6)->s6_addr32[1] ^ \
195 (ip6)->s6_addr32[2] ^ \
196 (ip6)->s6_addr32[3]) & \
191
192/* XXX: What is the recommended hash to get good entropy for IPv6 addresses? */
193#define HOSTCACHE_HASH6(ip6) \
194 (((ip6)->s6_addr32[0] ^ \
195 (ip6)->s6_addr32[1] ^ \
196 (ip6)->s6_addr32[2] ^ \
197 (ip6)->s6_addr32[3]) & \
197 tcp_hostcache.hashmask)
198 V_tcp_hostcache.hashmask)
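Both hash macros above reduce the foreign address to a bucket index: xor-fold the address, then mask with hashmask (hashsize - 1). A standalone sketch of the IPv4 variant, with an illustrative helper name:

#include <sys/types.h>
#include <netinet/in.h>

/* Sketch of HOSTCACHE_HASH as a function: fold the address, mask to a bucket. */
static u_long
hc_bucket_index(const struct in_addr *ip, u_long hashmask)
{
	u_long a = ip->s_addr;

	return ((a ^ (a >> 7) ^ (a >> 17)) & hashmask);
}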
198
199#define THC_LOCK(lp) mtx_lock(lp)
200#define THC_UNLOCK(lp) mtx_unlock(lp)
201
202void
203tcp_hc_init(void)
204{
205 int i;
206
207 /*
208 * Initialize hostcache structures.
209 */
199
200#define THC_LOCK(lp) mtx_lock(lp)
201#define THC_UNLOCK(lp) mtx_unlock(lp)
202
203void
204tcp_hc_init(void)
205{
206 int i;
207
208 /*
209 * Initialize hostcache structures.
210 */
210 tcp_hostcache.cache_count = 0;
211 tcp_hostcache.hashsize = TCP_HOSTCACHE_HASHSIZE;
212 tcp_hostcache.bucket_limit = TCP_HOSTCACHE_BUCKETLIMIT;
213 tcp_hostcache.cache_limit =
214 tcp_hostcache.hashsize * tcp_hostcache.bucket_limit;
215 tcp_hostcache.expire = TCP_HOSTCACHE_EXPIRE;
216 tcp_hostcache.prune = TCP_HOSTCACHE_PRUNE;
211 V_tcp_hostcache.cache_count = 0;
212 V_tcp_hostcache.hashsize = TCP_HOSTCACHE_HASHSIZE;
213 V_tcp_hostcache.bucket_limit = TCP_HOSTCACHE_BUCKETLIMIT;
214 V_tcp_hostcache.cache_limit =
215 V_tcp_hostcache.hashsize * V_tcp_hostcache.bucket_limit;
216 V_tcp_hostcache.expire = TCP_HOSTCACHE_EXPIRE;
217 V_tcp_hostcache.prune = TCP_HOSTCACHE_PRUNE;
217
218 TUNABLE_INT_FETCH("net.inet.tcp.hostcache.hashsize",
218
219 TUNABLE_INT_FETCH("net.inet.tcp.hostcache.hashsize",
219 &tcp_hostcache.hashsize);
220 &V_tcp_hostcache.hashsize);
220 TUNABLE_INT_FETCH("net.inet.tcp.hostcache.cachelimit",
221 TUNABLE_INT_FETCH("net.inet.tcp.hostcache.cachelimit",
221 &tcp_hostcache.cache_limit);
222 &V_tcp_hostcache.cache_limit);
222 TUNABLE_INT_FETCH("net.inet.tcp.hostcache.bucketlimit",
223 TUNABLE_INT_FETCH("net.inet.tcp.hostcache.bucketlimit",
223 &tcp_hostcache.bucket_limit);
224 if (!powerof2(tcp_hostcache.hashsize)) {
224 &V_tcp_hostcache.bucket_limit);
225 if (!powerof2(V_tcp_hostcache.hashsize)) {
225 printf("WARNING: hostcache hash size is not a power of 2.\n");
226 printf("WARNING: hostcache hash size is not a power of 2.\n");
226 tcp_hostcache.hashsize = TCP_HOSTCACHE_HASHSIZE; /* default */
227 V_tcp_hostcache.hashsize = TCP_HOSTCACHE_HASHSIZE; /* default */
227 }
228 }
228 tcp_hostcache.hashmask = tcp_hostcache.hashsize - 1;
229 V_tcp_hostcache.hashmask = V_tcp_hostcache.hashsize - 1;
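The power-of-2 check above matters because hashmask = hashsize - 1 behaves like hash % hashsize only when hashsize is a power of two; otherwise the mask has zero bits and some buckets can never be selected. A small userland illustration (not from the source):

#include <stdio.h>

int
main(void)
{
	unsigned h = 12345;

	printf("%u %u\n", h & (512 - 1), h % 512);	/* 57 57: mask == modulo  */
	printf("%u %u\n", h & (500 - 1), h % 500);	/* 49 345: they diverge   */
	return (0);
}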
229
230 /*
231 * Allocate the hash table.
232 */
230
231 /*
232 * Allocate the hash table.
233 */
233 tcp_hostcache.hashbase = (struct hc_head *)
234 malloc(tcp_hostcache.hashsize * sizeof(struct hc_head),
234 V_tcp_hostcache.hashbase = (struct hc_head *)
235 malloc(V_tcp_hostcache.hashsize * sizeof(struct hc_head),
235 M_HOSTCACHE, M_WAITOK | M_ZERO);
236
237 /*
238 * Initialize the hash buckets.
239 */
236 M_HOSTCACHE, M_WAITOK | M_ZERO);
237
238 /*
239 * Initialize the hash buckets.
240 */
240 for (i = 0; i < tcp_hostcache.hashsize; i++) {
241 TAILQ_INIT(&tcp_hostcache.hashbase[i].hch_bucket);
242 tcp_hostcache.hashbase[i].hch_length = 0;
243 mtx_init(&tcp_hostcache.hashbase[i].hch_mtx, "tcp_hc_entry",
241 for (i = 0; i < V_tcp_hostcache.hashsize; i++) {
242 TAILQ_INIT(&V_tcp_hostcache.hashbase[i].hch_bucket);
243 V_tcp_hostcache.hashbase[i].hch_length = 0;
244 mtx_init(&V_tcp_hostcache.hashbase[i].hch_mtx, "tcp_hc_entry",
244 NULL, MTX_DEF);
245 }
246
247 /*
248 * Allocate the hostcache entries.
249 */
245 NULL, MTX_DEF);
246 }
247
248 /*
249 * Allocate the hostcache entries.
250 */
250 tcp_hostcache.zone = uma_zcreate("hostcache", sizeof(struct hc_metrics),
251 V_tcp_hostcache.zone = uma_zcreate("hostcache", sizeof(struct hc_metrics),
251 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
252 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
252 uma_zone_set_max(tcp_hostcache.zone, tcp_hostcache.cache_limit);
253 uma_zone_set_max(V_tcp_hostcache.zone, V_tcp_hostcache.cache_limit);
253
254 /*
255 * Set up periodic cache cleanup.
256 */
254
255 /*
256 * Set up periodic cache cleanup.
257 */
257 callout_init(&tcp_hc_callout, CALLOUT_MPSAFE);
258 callout_reset(&tcp_hc_callout, tcp_hostcache.prune * hz, tcp_hc_purge, 0);
258 callout_init(&V_tcp_hc_callout, CALLOUT_MPSAFE);
259 callout_reset(&V_tcp_hc_callout, V_tcp_hostcache.prune * hz, tcp_hc_purge, 0);
259}
260
261/*
262 * Internal function: look up an entry in the hostcache or return NULL.
263 *
264 * If an entry has been returned, the caller becomes responsible for
265 * unlocking the bucket row after he is done reading/modifying the entry.
266 */

--- 9 unchanged lines hidden ---

276 /*
277 * Hash the foreign ip address.
278 */
279 if (inc->inc_isipv6)
280 hash = HOSTCACHE_HASH6(&inc->inc6_faddr);
281 else
282 hash = HOSTCACHE_HASH(&inc->inc_faddr);
283
260}
261
262/*
263 * Internal function: look up an entry in the hostcache or return NULL.
264 *
265 * If an entry has been returned, the caller becomes responsible for
266 * unlocking the bucket row after he is done reading/modifying the entry.
267 */

--- 9 unchanged lines hidden ---

277 /*
278 * Hash the foreign ip address.
279 */
280 if (inc->inc_isipv6)
281 hash = HOSTCACHE_HASH6(&inc->inc6_faddr);
282 else
283 hash = HOSTCACHE_HASH(&inc->inc_faddr);
284
284 hc_head = &tcp_hostcache.hashbase[hash];
285 hc_head = &V_tcp_hostcache.hashbase[hash];
285
286 /*
287 * Acquire lock for this bucket row; we release the lock if we don't
288 * find an entry, otherwise the caller has to unlock after he is
289 * done.
290 */
291 THC_LOCK(&hc_head->hch_mtx);
292

--- 38 unchanged lines hidden ---

331 /*
332 * Hash the foreign ip address.
333 */
334 if (inc->inc_isipv6)
335 hash = HOSTCACHE_HASH6(&inc->inc6_faddr);
336 else
337 hash = HOSTCACHE_HASH(&inc->inc_faddr);
338
286
287 /*
288 * Acquire lock for this bucket row; we release the lock if we don't
289 * find an entry, otherwise the caller has to unlock after he is
290 * done.
291 */
292 THC_LOCK(&hc_head->hch_mtx);
293

--- 38 unchanged lines hidden ---

332 /*
333 * Hash the foreign ip address.
334 */
335 if (inc->inc_isipv6)
336 hash = HOSTCACHE_HASH6(&inc->inc6_faddr);
337 else
338 hash = HOSTCACHE_HASH(&inc->inc_faddr);
339
339 hc_head = &tcp_hostcache.hashbase[hash];
340 hc_head = &V_tcp_hostcache.hashbase[hash];
340
341 /*
342 * Acquire lock for this bucket row; we release the lock if we don't
343 * find an entry, otherwise the caller has to unlock after he is
344 * done.
345 */
346 THC_LOCK(&hc_head->hch_mtx);
347
348 /*
349 * If the bucket limit is reached, reuse the least-used element.
350 */
341
342 /*
343 * Acquire lock for this bucket row; we release the lock if we don't
344 * find an entry, otherwise the caller has to unlock after he is
345 * done.
346 */
347 THC_LOCK(&hc_head->hch_mtx);
348
349 /*
350 * If the bucket limit is reached, reuse the least-used element.
351 */
351 if (hc_head->hch_length >= tcp_hostcache.bucket_limit ||
352 tcp_hostcache.cache_count >= tcp_hostcache.cache_limit) {
352 if (hc_head->hch_length >= V_tcp_hostcache.bucket_limit ||
353 V_tcp_hostcache.cache_count >= V_tcp_hostcache.cache_limit) {
353 hc_entry = TAILQ_LAST(&hc_head->hch_bucket, hc_qhead);
354 /*
355 * At first we were dropping the last element, just to
356 * reacquire it in the next two lines again, which isn't very
357 * efficient. Instead just reuse the least used element.
358 * We may drop something that is still "in-use" but we can be
359 * "lossy".
360 * Just give up if this bucket row is empty and we don't have
361 * anything to replace.
362 */
363 if (hc_entry == NULL) {
364 THC_UNLOCK(&hc_head->hch_mtx);
365 return NULL;
366 }
367 TAILQ_REMOVE(&hc_head->hch_bucket, hc_entry, rmx_q);
354 hc_entry = TAILQ_LAST(&hc_head->hch_bucket, hc_qhead);
355 /*
356 * At first we were dropping the last element, just to
357 * reacquire it in the next two lines again, which isn't very
358 * efficient. Instead just reuse the least used element.
359 * We may drop something that is still "in-use" but we can be
360 * "lossy".
361 * Just give up if this bucket row is empty and we don't have
362 * anything to replace.
363 */
364 if (hc_entry == NULL) {
365 THC_UNLOCK(&hc_head->hch_mtx);
366 return NULL;
367 }
368 TAILQ_REMOVE(&hc_head->hch_bucket, hc_entry, rmx_q);
368 tcp_hostcache.hashbase[hash].hch_length--;
369 tcp_hostcache.cache_count--;
370 tcpstat.tcps_hc_bucketoverflow++;
369 V_tcp_hostcache.hashbase[hash].hch_length--;
370 V_tcp_hostcache.cache_count--;
371 V_tcpstat.tcps_hc_bucketoverflow++;
371#if 0
372#if 0
372 uma_zfree(tcp_hostcache.zone, hc_entry);
373 uma_zfree(V_tcp_hostcache.zone, hc_entry);
373#endif
374 } else {
375 /*
376 * Allocate a new entry, or balk if not possible.
377 */
374#endif
375 } else {
376 /*
377 * Allocate a new entry, or balk if not possible.
378 */
378 hc_entry = uma_zalloc(tcp_hostcache.zone, M_NOWAIT);
379 hc_entry = uma_zalloc(V_tcp_hostcache.zone, M_NOWAIT);
379 if (hc_entry == NULL) {
380 THC_UNLOCK(&hc_head->hch_mtx);
381 return NULL;
382 }
383 }
384
385 /*
386 * Initialize basic information of hostcache entry.
387 */
388 bzero(hc_entry, sizeof(*hc_entry));
389 if (inc->inc_isipv6)
390 bcopy(&inc->inc6_faddr, &hc_entry->ip6, sizeof(hc_entry->ip6));
391 else
392 hc_entry->ip4 = inc->inc_faddr;
393 hc_entry->rmx_head = hc_head;
380 if (hc_entry == NULL) {
381 THC_UNLOCK(&hc_head->hch_mtx);
382 return NULL;
383 }
384 }
385
386 /*
387 * Initialize basic information of hostcache entry.
388 */
389 bzero(hc_entry, sizeof(*hc_entry));
390 if (inc->inc_isipv6)
391 bcopy(&inc->inc6_faddr, &hc_entry->ip6, sizeof(hc_entry->ip6));
392 else
393 hc_entry->ip4 = inc->inc_faddr;
394 hc_entry->rmx_head = hc_head;
394 hc_entry->rmx_expire = tcp_hostcache.expire;
395 hc_entry->rmx_expire = V_tcp_hostcache.expire;
395
396 /*
397 * Put it upfront.
398 */
399 TAILQ_INSERT_HEAD(&hc_head->hch_bucket, hc_entry, rmx_q);
396
397 /*
398 * Put it upfront.
399 */
400 TAILQ_INSERT_HEAD(&hc_head->hch_bucket, hc_entry, rmx_q);
400 tcp_hostcache.hashbase[hash].hch_length++;
401 tcp_hostcache.cache_count++;
402 tcpstat.tcps_hc_added++;
401 V_tcp_hostcache.hashbase[hash].hch_length++;
402 V_tcp_hostcache.cache_count++;
403 V_tcpstat.tcps_hc_added++;
403
404 return hc_entry;
405}
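As the comment in the overflow path explains, a full bucket recycles the storage of its tail entry in place rather than freeing it and immediately allocating a replacement; the cache is allowed to be lossy. A reduced userland sketch of that pattern, with hypothetical types and plain malloc standing in for the UMA zone:

#include <sys/queue.h>
#include <stdlib.h>
#include <string.h>

struct entry {
	TAILQ_ENTRY(entry) link;
	/* cached metrics would live here */
};
TAILQ_HEAD(bucket, entry);

/* Hypothetical helper, condensed from the shape of tcp_hc_insert(). */
static struct entry *
bucket_get_slot(struct bucket *b, int length, int limit)
{
	struct entry *e;

	if (length >= limit) {
		e = TAILQ_LAST(b, bucket);	/* least recently promoted */
		if (e == NULL)
			return (NULL);		/* nothing to replace */
		TAILQ_REMOVE(b, e, link);	/* reuse its storage in place */
	} else {
		e = malloc(sizeof(*e));
		if (e == NULL)
			return (NULL);
	}
	memset(e, 0, sizeof(*e));
	TAILQ_INSERT_HEAD(b, e, link);		/* fresh entries go upfront */
	return (e);
}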
406
407/*
408 * External function: look up an entry in the hostcache and fill out the
409 * supplied TCP metrics structure. Fills in NULL when no entry was found or
410 * a value is not set.

--- 11 unchanged lines hidden ---

422 /*
423 * If we don't have an existing object.
424 */
425 if (hc_entry == NULL) {
426 bzero(hc_metrics_lite, sizeof(*hc_metrics_lite));
427 return;
428 }
429 hc_entry->rmx_hits++;
404
405 return hc_entry;
406}
407
408/*
409 * External function: look up an entry in the hostcache and fill out the
410 * supplied TCP metrics structure. Fills in NULL when no entry was found or
411 * a value is not set.

--- 11 unchanged lines hidden ---

423 /*
424 * If we don't have an existing object.
425 */
426 if (hc_entry == NULL) {
427 bzero(hc_metrics_lite, sizeof(*hc_metrics_lite));
428 return;
429 }
430 hc_entry->rmx_hits++;
430 hc_entry->rmx_expire = tcp_hostcache.expire; /* start over again */
431 hc_entry->rmx_expire = V_tcp_hostcache.expire; /* start over again */
431
432 hc_metrics_lite->rmx_mtu = hc_entry->rmx_mtu;
433 hc_metrics_lite->rmx_ssthresh = hc_entry->rmx_ssthresh;
434 hc_metrics_lite->rmx_rtt = hc_entry->rmx_rtt;
435 hc_metrics_lite->rmx_rttvar = hc_entry->rmx_rttvar;
436 hc_metrics_lite->rmx_bandwidth = hc_entry->rmx_bandwidth;
437 hc_metrics_lite->rmx_cwnd = hc_entry->rmx_cwnd;
438 hc_metrics_lite->rmx_sendpipe = hc_entry->rmx_sendpipe;

--- 16 unchanged lines hidden ---

455 struct hc_metrics *hc_entry;
456 u_long mtu;
457
458 hc_entry = tcp_hc_lookup(inc);
459 if (hc_entry == NULL) {
460 return 0;
461 }
462 hc_entry->rmx_hits++;
432
433 hc_metrics_lite->rmx_mtu = hc_entry->rmx_mtu;
434 hc_metrics_lite->rmx_ssthresh = hc_entry->rmx_ssthresh;
435 hc_metrics_lite->rmx_rtt = hc_entry->rmx_rtt;
436 hc_metrics_lite->rmx_rttvar = hc_entry->rmx_rttvar;
437 hc_metrics_lite->rmx_bandwidth = hc_entry->rmx_bandwidth;
438 hc_metrics_lite->rmx_cwnd = hc_entry->rmx_cwnd;
439 hc_metrics_lite->rmx_sendpipe = hc_entry->rmx_sendpipe;

--- 16 unchanged lines hidden ---

456 struct hc_metrics *hc_entry;
457 u_long mtu;
458
459 hc_entry = tcp_hc_lookup(inc);
460 if (hc_entry == NULL) {
461 return 0;
462 }
463 hc_entry->rmx_hits++;
463 hc_entry->rmx_expire = tcp_hostcache.expire; /* start over again */
464 hc_entry->rmx_expire = V_tcp_hostcache.expire; /* start over again */
464
465 mtu = hc_entry->rmx_mtu;
466 THC_UNLOCK(&hc_entry->rmx_head->hch_mtx);
467 return mtu;
468}
469
470/*
471 * External function: update the MTU value of an entry in the hostcache.

--- 13 unchanged lines hidden ---

485 * If we don't have an existing object, try to insert a new one.
486 */
487 if (hc_entry == NULL) {
488 hc_entry = tcp_hc_insert(inc);
489 if (hc_entry == NULL)
490 return;
491 }
492 hc_entry->rmx_updates++;
465
466 mtu = hc_entry->rmx_mtu;
467 THC_UNLOCK(&hc_entry->rmx_head->hch_mtx);
468 return mtu;
469}
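tcp_hc_getmtu() hands back the cached MTU for the peer, or 0 when no entry exists, so the return value doubles as a "have we seen this host" test. A hedged usage sketch in kernel context (the caller and default value are assumptions, not from the source):

/* Illustrative caller: prefer the cached value when one exists. */
static u_long
peer_mtu_or_default(struct in_conninfo *inc, u_long def_mtu)
{
	u_long mtu;

	mtu = tcp_hc_getmtu(inc);	/* 0 means "no cached entry" */
	return (mtu != 0 ? mtu : def_mtu);
}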
470
471/*
472 * External function: update the MTU value of an entry in the hostcache.

--- 13 unchanged lines hidden ---

486 * If we don't have an existing object, try to insert a new one.
487 */
488 if (hc_entry == NULL) {
489 hc_entry = tcp_hc_insert(inc);
490 if (hc_entry == NULL)
491 return;
492 }
493 hc_entry->rmx_updates++;
493 hc_entry->rmx_expire = tcp_hostcache.expire; /* start over again */
494 hc_entry->rmx_expire = V_tcp_hostcache.expire; /* start over again */
494
495 hc_entry->rmx_mtu = mtu;
496
497 /*
498 * Put it upfront so we find it faster next time.
499 */
500 TAILQ_REMOVE(&hc_entry->rmx_head->hch_bucket, hc_entry, rmx_q);
501 TAILQ_INSERT_HEAD(&hc_entry->rmx_head->hch_bucket, hc_entry, rmx_q);

--- 15 unchanged lines hidden ---

517
518 hc_entry = tcp_hc_lookup(inc);
519 if (hc_entry == NULL) {
520 hc_entry = tcp_hc_insert(inc);
521 if (hc_entry == NULL)
522 return;
523 }
524 hc_entry->rmx_updates++;
495
496 hc_entry->rmx_mtu = mtu;
497
498 /*
499 * Put it upfront so we find it faster next time.
500 */
501 TAILQ_REMOVE(&hc_entry->rmx_head->hch_bucket, hc_entry, rmx_q);
502 TAILQ_INSERT_HEAD(&hc_entry->rmx_head->hch_bucket, hc_entry, rmx_q);

--- 15 unchanged lines hidden ---

518
519 hc_entry = tcp_hc_lookup(inc);
520 if (hc_entry == NULL) {
521 hc_entry = tcp_hc_insert(inc);
522 if (hc_entry == NULL)
523 return;
524 }
525 hc_entry->rmx_updates++;
525 hc_entry->rmx_expire = tcp_hostcache.expire; /* start over again */
526 hc_entry->rmx_expire = V_tcp_hostcache.expire; /* start over again */
526
527 if (hcml->rmx_rtt != 0) {
528 if (hc_entry->rmx_rtt == 0)
529 hc_entry->rmx_rtt = hcml->rmx_rtt;
530 else
531 hc_entry->rmx_rtt =
532 (hc_entry->rmx_rtt + hcml->rmx_rtt) / 2;
527
528 if (hcml->rmx_rtt != 0) {
529 if (hc_entry->rmx_rtt == 0)
530 hc_entry->rmx_rtt = hcml->rmx_rtt;
531 else
532 hc_entry->rmx_rtt =
533 (hc_entry->rmx_rtt + hcml->rmx_rtt) / 2;
533 tcpstat.tcps_cachedrtt++;
534 V_tcpstat.tcps_cachedrtt++;
534 }
535 if (hcml->rmx_rttvar != 0) {
536 if (hc_entry->rmx_rttvar == 0)
537 hc_entry->rmx_rttvar = hcml->rmx_rttvar;
538 else
539 hc_entry->rmx_rttvar =
540 (hc_entry->rmx_rttvar + hcml->rmx_rttvar) / 2;
535 }
536 if (hcml->rmx_rttvar != 0) {
537 if (hc_entry->rmx_rttvar == 0)
538 hc_entry->rmx_rttvar = hcml->rmx_rttvar;
539 else
540 hc_entry->rmx_rttvar =
541 (hc_entry->rmx_rttvar + hcml->rmx_rttvar) / 2;
541 tcpstat.tcps_cachedrttvar++;
542 V_tcpstat.tcps_cachedrttvar++;
542 }
543 if (hcml->rmx_ssthresh != 0) {
544 if (hc_entry->rmx_ssthresh == 0)
545 hc_entry->rmx_ssthresh = hcml->rmx_ssthresh;
546 else
547 hc_entry->rmx_ssthresh =
548 (hc_entry->rmx_ssthresh + hcml->rmx_ssthresh) / 2;
543 }
544 if (hcml->rmx_ssthresh != 0) {
545 if (hc_entry->rmx_ssthresh == 0)
546 hc_entry->rmx_ssthresh = hcml->rmx_ssthresh;
547 else
548 hc_entry->rmx_ssthresh =
549 (hc_entry->rmx_ssthresh + hcml->rmx_ssthresh) / 2;
549 tcpstat.tcps_cachedssthresh++;
550 V_tcpstat.tcps_cachedssthresh++;
550 }
551 if (hcml->rmx_bandwidth != 0) {
552 if (hc_entry->rmx_bandwidth == 0)
553 hc_entry->rmx_bandwidth = hcml->rmx_bandwidth;
554 else
555 hc_entry->rmx_bandwidth =
556 (hc_entry->rmx_bandwidth + hcml->rmx_bandwidth) / 2;
551 }
552 if (hcml->rmx_bandwidth != 0) {
553 if (hc_entry->rmx_bandwidth == 0)
554 hc_entry->rmx_bandwidth = hcml->rmx_bandwidth;
555 else
556 hc_entry->rmx_bandwidth =
557 (hc_entry->rmx_bandwidth + hcml->rmx_bandwidth) / 2;
557 /* tcpstat.tcps_cachedbandwidth++; */
558 /* V_tcpstat.tcps_cachedbandwidth++; */
558 }
559 if (hcml->rmx_cwnd != 0) {
560 if (hc_entry->rmx_cwnd == 0)
561 hc_entry->rmx_cwnd = hcml->rmx_cwnd;
562 else
563 hc_entry->rmx_cwnd =
564 (hc_entry->rmx_cwnd + hcml->rmx_cwnd) / 2;
559 }
560 if (hcml->rmx_cwnd != 0) {
561 if (hc_entry->rmx_cwnd == 0)
562 hc_entry->rmx_cwnd = hcml->rmx_cwnd;
563 else
564 hc_entry->rmx_cwnd =
565 (hc_entry->rmx_cwnd + hcml->rmx_cwnd) / 2;
565 /* tcpstat.tcps_cachedcwnd++; */
566 /* V_tcpstat.tcps_cachedcwnd++; */
566 }
567 if (hcml->rmx_sendpipe != 0) {
568 if (hc_entry->rmx_sendpipe == 0)
569 hc_entry->rmx_sendpipe = hcml->rmx_sendpipe;
570 else
571 hc_entry->rmx_sendpipe =
572 (hc_entry->rmx_sendpipe + hcml->rmx_sendpipe) /2;
567 }
568 if (hcml->rmx_sendpipe != 0) {
569 if (hc_entry->rmx_sendpipe == 0)
570 hc_entry->rmx_sendpipe = hcml->rmx_sendpipe;
571 else
572 hc_entry->rmx_sendpipe =
573 (hc_entry->rmx_sendpipe + hcml->rmx_sendpipe) /2;
573 /* tcpstat.tcps_cachedsendpipe++; */
574 /* V_tcpstat.tcps_cachedsendpipe++; */
574 }
575 if (hcml->rmx_recvpipe != 0) {
576 if (hc_entry->rmx_recvpipe == 0)
577 hc_entry->rmx_recvpipe = hcml->rmx_recvpipe;
578 else
579 hc_entry->rmx_recvpipe =
580 (hc_entry->rmx_recvpipe + hcml->rmx_recvpipe) /2;
575 }
576 if (hcml->rmx_recvpipe != 0) {
577 if (hc_entry->rmx_recvpipe == 0)
578 hc_entry->rmx_recvpipe = hcml->rmx_recvpipe;
579 else
580 hc_entry->rmx_recvpipe =
581 (hc_entry->rmx_recvpipe + hcml->rmx_recvpipe) /2;
581 /* tcpstat.tcps_cachedrecvpipe++; */
582 /* V_tcpstat.tcps_cachedrecvpipe++; */
582 }
583
584 TAILQ_REMOVE(&hc_entry->rmx_head->hch_bucket, hc_entry, rmx_q);
585 TAILQ_INSERT_HEAD(&hc_entry->rmx_head->hch_bucket, hc_entry, rmx_q);
586 THC_UNLOCK(&hc_entry->rmx_head->hch_mtx);
587}
588
589/*

--- 7 unchanged lines hidden (view full) ---

597 int linesize = 128;
598 char *p, *buf;
599 int len, i, error;
600 struct hc_metrics *hc_entry;
601#ifdef INET6
602 char ip6buf[INET6_ADDRSTRLEN];
603#endif
604
583 }
584
585 TAILQ_REMOVE(&hc_entry->rmx_head->hch_bucket, hc_entry, rmx_q);
586 TAILQ_INSERT_HEAD(&hc_entry->rmx_head->hch_bucket, hc_entry, rmx_q);
587 THC_UNLOCK(&hc_entry->rmx_head->hch_mtx);
588}
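Every metric in tcp_hc_update() follows the same rule: a zero sample leaves the cached value alone, a first sample is taken as-is, and thereafter old and new are averaged with equal weight. The source writes this out inline per field; expressed as one hypothetical helper:

#include <sys/types.h>

/* Sketch of the per-field smoothing rule used above. */
static u_long
hc_blend(u_long cached, u_long sample)
{
	if (sample == 0)
		return (cached);		/* no new information */
	if (cached == 0)
		return (sample);		/* first sample wins outright */
	return ((cached + sample) / 2);		/* 50/50 smoothing afterwards */
}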
589
590/*

--- 7 unchanged lines hidden (view full) ---

598 int linesize = 128;
599 char *p, *buf;
600 int len, i, error;
601 struct hc_metrics *hc_entry;
602#ifdef INET6
603 char ip6buf[INET6_ADDRSTRLEN];
604#endif
605
605 bufsize = linesize * (tcp_hostcache.cache_count + 1);
606 bufsize = linesize * (V_tcp_hostcache.cache_count + 1);
606
607 p = buf = (char *)malloc(bufsize, M_TEMP, M_WAITOK|M_ZERO);
608
609 len = snprintf(p, linesize,
610 "\nIP address MTU SSTRESH RTT RTTVAR BANDWIDTH "
611 " CWND SENDPIPE RECVPIPE HITS UPD EXP\n");
612 p += len;
613
614#define msec(u) (((u) + 500) / 1000)
607
608 p = buf = (char *)malloc(bufsize, M_TEMP, M_WAITOK|M_ZERO);
609
610 len = snprintf(p, linesize,
611 "\nIP address MTU SSTRESH RTT RTTVAR BANDWIDTH "
612 " CWND SENDPIPE RECVPIPE HITS UPD EXP\n");
613 p += len;
614
615#define msec(u) (((u) + 500) / 1000)
615 for (i = 0; i < tcp_hostcache.hashsize; i++) {
616 THC_LOCK(&tcp_hostcache.hashbase[i].hch_mtx);
617 TAILQ_FOREACH(hc_entry, &tcp_hostcache.hashbase[i].hch_bucket,
616 for (i = 0; i < V_tcp_hostcache.hashsize; i++) {
617 THC_LOCK(&V_tcp_hostcache.hashbase[i].hch_mtx);
618 TAILQ_FOREACH(hc_entry, &V_tcp_hostcache.hashbase[i].hch_bucket,
618 rmx_q) {
619 len = snprintf(p, linesize,
620 "%-15s %5lu %8lu %6lums %6lums %9lu %8lu %8lu %8lu "
621 "%4lu %4lu %4i\n",
622 hc_entry->ip4.s_addr ? inet_ntoa(hc_entry->ip4) :
623#ifdef INET6
624 ip6_sprintf(ip6buf, &hc_entry->ip6),
625#else

--- 9 unchanged lines hidden ---

635 hc_entry->rmx_cwnd,
636 hc_entry->rmx_sendpipe,
637 hc_entry->rmx_recvpipe,
638 hc_entry->rmx_hits,
639 hc_entry->rmx_updates,
640 hc_entry->rmx_expire);
641 p += len;
642 }
619 rmx_q) {
620 len = snprintf(p, linesize,
621 "%-15s %5lu %8lu %6lums %6lums %9lu %8lu %8lu %8lu "
622 "%4lu %4lu %4i\n",
623 hc_entry->ip4.s_addr ? inet_ntoa(hc_entry->ip4) :
624#ifdef INET6
625 ip6_sprintf(ip6buf, &hc_entry->ip6),
626#else

--- 9 unchanged lines hidden ---

636 hc_entry->rmx_cwnd,
637 hc_entry->rmx_sendpipe,
638 hc_entry->rmx_recvpipe,
639 hc_entry->rmx_hits,
640 hc_entry->rmx_updates,
641 hc_entry->rmx_expire);
642 p += len;
643 }
643 THC_UNLOCK(&tcp_hostcache.hashbase[i].hch_mtx);
644 THC_UNLOCK(&V_tcp_hostcache.hashbase[i].hch_mtx);
644 }
645#undef msec
646 error = SYSCTL_OUT(req, buf, p - buf);
647 free(buf, M_TEMP);
648 return(error);
649}
650
651/*
652 * Expire and purge (old|all) entries in the tcp_hostcache. Runs
653 * periodically from the callout.
654 */
655static void
656tcp_hc_purge(void *arg)
657{
658 struct hc_metrics *hc_entry, *hc_next;
659 int all = (intptr_t)arg;
660 int i;
661
645 }
646#undef msec
647 error = SYSCTL_OUT(req, buf, p - buf);
648 free(buf, M_TEMP);
649 return(error);
650}
651
652/*
653 * Expire and purge (old|all) entries in the tcp_hostcache. Runs
654 * periodically from the callout.
655 */
656static void
657tcp_hc_purge(void *arg)
658{
659 struct hc_metrics *hc_entry, *hc_next;
660 int all = (intptr_t)arg;
661 int i;
662
662 if (tcp_hostcache.purgeall) {
663 if (V_tcp_hostcache.purgeall) {
663 all = 1;
664 all = 1;
664 tcp_hostcache.purgeall = 0;
665 V_tcp_hostcache.purgeall = 0;
665 }
666
666 }
667
667 for (i = 0; i < tcp_hostcache.hashsize; i++) {
668 THC_LOCK(&tcp_hostcache.hashbase[i].hch_mtx);
669 TAILQ_FOREACH_SAFE(hc_entry, &tcp_hostcache.hashbase[i].hch_bucket,
668 for (i = 0; i < V_tcp_hostcache.hashsize; i++) {
669 THC_LOCK(&V_tcp_hostcache.hashbase[i].hch_mtx);
670 TAILQ_FOREACH_SAFE(hc_entry, &V_tcp_hostcache.hashbase[i].hch_bucket,
670 rmx_q, hc_next) {
671 if (all || hc_entry->rmx_expire <= 0) {
671 rmx_q, hc_next) {
672 if (all || hc_entry->rmx_expire <= 0) {
672 TAILQ_REMOVE(&tcp_hostcache.hashbase[i].hch_bucket,
673 TAILQ_REMOVE(&V_tcp_hostcache.hashbase[i].hch_bucket,
673 hc_entry, rmx_q);
674 hc_entry, rmx_q);
674 uma_zfree(tcp_hostcache.zone, hc_entry);
675 tcp_hostcache.hashbase[i].hch_length--;
676 tcp_hostcache.cache_count--;
675 uma_zfree(V_tcp_hostcache.zone, hc_entry);
676 V_tcp_hostcache.hashbase[i].hch_length--;
677 V_tcp_hostcache.cache_count--;
677 } else
678 } else
678 hc_entry->rmx_expire -= tcp_hostcache.prune;
679 hc_entry->rmx_expire -= V_tcp_hostcache.prune;
679 }
680 }
680 THC_UNLOCK(&tcp_hostcache.hashbase[i].hch_mtx);
681 THC_UNLOCK(&V_tcp_hostcache.hashbase[i].hch_mtx);
681 }
682 }
682 callout_reset(&tcp_hc_callout, tcp_hostcache.prune * hz, tcp_hc_purge, 0);
683 callout_reset(&V_tcp_hc_callout, V_tcp_hostcache.prune * hz, tcp_hc_purge, 0);
683}
684}
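tcp_hc_purge() ages each entry by subtracting the prune interval from rmx_expire on every pass, frees it once the counter reaches zero (or unconditionally when a full purge was requested through the purgeall flag), and then re-arms the callout for the next pass. A condensed sketch of the per-entry decision, with illustrative types:

/*
 * Sketch of the countdown used in tcp_hc_purge(); returns 1 if the caller
 * should unlink and free the entry, 0 if it merely aged.
 */
struct cache_entry {
	int	expire;			/* seconds of lifetime left */
};

static int
purge_or_age(struct cache_entry *e, int prune_interval, int purge_all)
{
	if (purge_all || e->expire <= 0)
		return (1);		/* drop it now */
	e->expire -= prune_interval;	/* check again on the next pass */
	return (0);
}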