netflow.c revision 186119
/*-
 * Copyright (c) 2004-2005 Gleb Smirnoff <glebius@FreeBSD.org>
 * Copyright (c) 2001-2003 Roman V. Palagin <romanp@unshadow.net>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $SourceForge: netflow.c,v 1.41 2004/09/05 11:41:10 glebius Exp $
 */

static const char rcs_id[] =
    "@(#) $FreeBSD: head/sys/netgraph/netflow/netflow.c 186119 2008-12-15 06:10:57Z qingli $";

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/mbuf.h>
#include <sys/syslog.h>
#include <sys/systm.h>
#include <sys/socket.h>

#include <machine/atomic.h>

#include <net/if.h>
#include <net/route.h>
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>

#include <netgraph/ng_message.h>
#include <netgraph/netgraph.h>

#include <netgraph/netflow/netflow.h>
#include <netgraph/netflow/ng_netflow.h>

#define	NBUCKETS	(65536)		/* must be power of 2 */

/* This hash is for TCP or UDP packets. */
#define FULL_HASH(addr1, addr2, port1, port2)	\
	(((addr1 ^ (addr1 >> 16) ^		\
	htons(addr2 ^ (addr2 >> 16))) ^		\
	port1 ^ htons(port2)) &			\
	(NBUCKETS - 1))

/* This hash is for all other IP packets. */
#define ADDR_HASH(addr1, addr2)			\
	((addr1 ^ (addr1 >> 16) ^		\
	htons(addr2 ^ (addr2 >> 16))) &		\
	(NBUCKETS - 1))
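
/*
 * Illustrative note: NBUCKETS is 65536, so the mask keeps only the
 * low 16 bits of the folded value, and each 32-bit address is first
 * XOR-ed with its own upper half so that both halves influence the
 * bucket.  For example, a TCP packet 10.0.0.1:1234 -> 10.0.0.2:80
 * (example values, not from this file) lands in bucket
 * FULL_HASH(src.s_addr, dst.s_addr, sport, dport), with all four
 * arguments in network byte order, exactly as they are stored in
 * struct flow_rec by ng_netflow_flow_add() below.
 */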

/* Macros to shorten logical constructions */
/* XXX: priv must exist in namespace */
#define	INACTIVE(fle)	(time_uptime - fle->f.last > priv->info.nfinfo_inact_t)
#define	AGED(fle)	(time_uptime - fle->f.first > priv->info.nfinfo_act_t)
#define	ISFREE(fle)	(fle->f.packets == 0)

/*
 * 4 is a magical number: statistically, the number of 4-packet flows
 * is bigger than that of 5-, 6-, 7-...-packet flows by an order of
 * magnitude.  Most UDP/ICMP scans are 1 packet (~90% of the flow cache).
 * TCP scans are 2 packets for a reachable host and 4 packets otherwise.
 */
#define	SMALL(fle)	(fle->f.packets <= 4)

/*
 * Cisco uses milliseconds for uptime.  Bad idea, since it overflows
 * every 48+ days, but we do the same to keep compatibility.  This
 * macro does the (overflowable) multiplication by 1000.
 */
#define	MILLIUPTIME(t)	(((t) << 9) +	/* 512 */	\
			 ((t) << 8) +	/* 256 */	\
			 ((t) << 7) +	/* 128 */	\
			 ((t) << 6) +	/* 64  */	\
			 ((t) << 5) +	/* 32  */	\
			 ((t) << 3))	/* 8   */
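
/*
 * Illustrative arithmetic: 512 + 256 + 128 + 64 + 32 + 8 = 1000, so
 * MILLIUPTIME(t) equals t * 1000 modulo 2^32.  With t in seconds,
 * the 32-bit millisecond counter wraps after 2^32 ms, i.e. roughly
 * 49.7 days -- the "48+ days" mentioned above.
 */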

MALLOC_DECLARE(M_NETFLOW_HASH);
MALLOC_DEFINE(M_NETFLOW_HASH, "netflow_hash", "NetFlow hash");

static int export_add(item_p, struct flow_entry *);
static int export_send(priv_p, item_p, int flags);

/* Generate hash for a given flow record. */
static __inline uint32_t
ip_hash(struct flow_rec *r)
{
	switch (r->r_ip_p) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
		return FULL_HASH(r->r_src.s_addr, r->r_dst.s_addr,
		    r->r_sport, r->r_dport);
	default:
		return ADDR_HASH(r->r_src.s_addr, r->r_dst.s_addr);
	}
}

/* This is a callback from uma(9), called on alloc. */
static int
uma_ctor_flow(void *mem, int size, void *arg, int how)
{
	priv_p priv = (priv_p)arg;

	if (atomic_load_acq_32(&priv->info.nfinfo_used) >= CACHESIZE)
		return (ENOMEM);

	atomic_add_32(&priv->info.nfinfo_used, 1);

	return (0);
}

/* This is a callback from uma(9), called on free. */
static void
uma_dtor_flow(void *mem, int size, void *arg)
{
	priv_p priv = (priv_p)arg;

	atomic_subtract_32(&priv->info.nfinfo_used, 1);
}

/*
 * Detach the export datagram from priv, if there is one.
 * If there is none, allocate a new one.
 */
static item_p
get_export_dgram(priv_p priv)
{
	item_p	item = NULL;

	mtx_lock(&priv->export_mtx);
	if (priv->export_item != NULL) {
		item = priv->export_item;
		priv->export_item = NULL;
	}
	mtx_unlock(&priv->export_mtx);

	if (item == NULL) {
		struct netflow_v5_export_dgram *dgram;
		struct mbuf *m;

		m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
		if (m == NULL)
			return (NULL);
		item = ng_package_data(m, NG_NOFLAGS);
		if (item == NULL)
			return (NULL);
		dgram = mtod(m, struct netflow_v5_export_dgram *);
		dgram->header.count = 0;
		dgram->header.version = htons(NETFLOW_V5);
	}

	return (item);
}

/*
 * Re-attach an incomplete datagram back to priv.
 * If there is already another one there, send the incomplete one.
 */
static void
return_export_dgram(priv_p priv, item_p item, int flags)
{
	/*
	 * On SMP it may happen that some other thread has already
	 * put its item there; in this case we bail out and send
	 * what we have to the collector.
	 */
	mtx_lock(&priv->export_mtx);
	if (priv->export_item == NULL) {
		priv->export_item = item;
		mtx_unlock(&priv->export_mtx);
	} else {
		mtx_unlock(&priv->export_mtx);
		export_send(priv, item, flags);
	}
}
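
/*
 * Illustrative sketch of how the functions above cooperate (this is
 * the pattern used by expire_flow() and the expiry/flush paths below,
 * not additional API):
 *
 *	item = get_export_dgram(priv);		(reuse or allocate)
 *	if (export_add(item, fle) > 0) {	(datagram became full)
 *		export_send(priv, item, flags);
 *		item = NULL;
 *	}
 *	...
 *	if (item != NULL)			(partially filled)
 *		return_export_dgram(priv, item, flags);
 */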

/*
 * The flow is over.  Call export_add() and free the entry.  If the
 * datagram is full, call export_send().
 */
static __inline void
expire_flow(priv_p priv, item_p *item, struct flow_entry *fle, int flags)
{
	if (*item == NULL)
		*item = get_export_dgram(priv);
	if (*item == NULL) {
		atomic_add_32(&priv->info.nfinfo_export_failed, 1);
		uma_zfree_arg(priv->zone, fle, priv);
		return;
	}
	if (export_add(*item, fle) > 0) {
		export_send(priv, *item, flags);
		*item = NULL;
	}
	uma_zfree_arg(priv->zone, fle, priv);
}

/* Get a snapshot of node statistics. */
void
ng_netflow_copyinfo(priv_p priv, struct ng_netflow_info *i)
{
	/* XXX: atomic */
	memcpy((void *)i, (void *)&priv->info, sizeof(priv->info));
}

/*
 * Insert a record into the defined slot.
 *
 * First we obtain a free flow entry, then fill in all
 * the fields we can.
 *
 * TODO: consider dropping the hash mutex while filling in the datagram,
 * as was done in a previous version.  Need to test & profile
 * to be sure.
 */
static __inline int
hash_insert(priv_p priv, struct flow_hash_entry *hsh, struct flow_rec *r,
	int plen, uint8_t tcp_flags)
{
	struct flow_entry *fle;
	struct sockaddr_in sin;
	struct rtentry *rt;

	mtx_assert(&hsh->mtx, MA_OWNED);

	fle = uma_zalloc_arg(priv->zone, priv, M_NOWAIT);
	if (fle == NULL) {
		atomic_add_32(&priv->info.nfinfo_alloc_failed, 1);
		return (ENOMEM);
	}

	/*
	 * Now fle is totally ours.  It is detached from all lists,
	 * so we can safely edit it.
	 */

	bcopy(r, &fle->f.r, sizeof(struct flow_rec));
	fle->f.bytes = plen;
	fle->f.packets = 1;
	fle->f.tcp_flags = tcp_flags;

	fle->f.first = fle->f.last = time_uptime;

	/*
	 * First we do a route table lookup on the destination address,
	 * so we can fill in out_ifx, dst_mask and nexthop (and dst_as
	 * in future releases).
	 */
	bzero(&sin, sizeof(sin));
	sin.sin_len = sizeof(struct sockaddr_in);
	sin.sin_family = AF_INET;
	sin.sin_addr = fle->f.r.r_dst;
	/* XXX MRT: 0 as a default; need the mbuf here to get the fib. */
	rt = rtalloc1_fib((struct sockaddr *)&sin, 0, 0, 0);
	if (rt != NULL) {
		fle->f.fle_o_ifx = rt->rt_ifp->if_index;

		if (rt->rt_flags & RTF_GATEWAY &&
		    rt->rt_gateway->sa_family == AF_INET)
			fle->f.next_hop =
			    ((struct sockaddr_in *)(rt->rt_gateway))->sin_addr;

		if (rt_mask(rt))
			fle->f.dst_mask = bitcount32(((struct sockaddr_in *)
			    rt_mask(rt))->sin_addr.s_addr);
		else if (rt->rt_flags & RTF_HOST)
			/* Give up. We can't determine mask :( */
			fle->f.dst_mask = 32;

		RTFREE_LOCKED(rt);
	}

	/* Do a route lookup on the source address, to fill in src_mask. */
	bzero(&sin, sizeof(sin));
	sin.sin_len = sizeof(struct sockaddr_in);
	sin.sin_family = AF_INET;
	sin.sin_addr = fle->f.r.r_src;
	/* XXX MRT: 0 as a default; revisit, need the mbuf for the fib. */
	rt = rtalloc1_fib((struct sockaddr *)&sin, 0, 0, 0);
	if (rt != NULL) {
		if (rt_mask(rt))
			fle->f.src_mask = bitcount32(((struct sockaddr_in *)
			    rt_mask(rt))->sin_addr.s_addr);
		else if (rt->rt_flags & RTF_HOST)
			/* Give up. We can't determine mask :( */
			fle->f.src_mask = 32;

		RTFREE_LOCKED(rt);
	}

	/* Push the new flow at the end of the hash chain. */
	TAILQ_INSERT_TAIL(&hsh->head, fle, fle_hash);

	return (0);
}

/*
 * Non-static functions called from ng_netflow.c
 */

/* Allocate memory and set up flow cache */
int
ng_netflow_cache_init(priv_p priv)
{
	struct flow_hash_entry	*hsh;
	int i;

	/* Initialize cache UMA zone. */
	priv->zone = uma_zcreate("NetFlow cache", sizeof(struct flow_entry),
	    uma_ctor_flow, uma_dtor_flow, NULL, NULL, UMA_ALIGN_CACHE, 0);
	uma_zone_set_max(priv->zone, CACHESIZE);

	/* Allocate hash. */
	priv->hash = malloc(NBUCKETS * sizeof(struct flow_hash_entry),
	    M_NETFLOW_HASH, M_WAITOK | M_ZERO);

	if (priv->hash == NULL) {
		uma_zdestroy(priv->zone);
		return (ENOMEM);
	}

	/* Initialize hash. */
	for (i = 0, hsh = priv->hash; i < NBUCKETS; i++, hsh++) {
		mtx_init(&hsh->mtx, "hash mutex", NULL, MTX_DEF);
		TAILQ_INIT(&hsh->head);
	}

	mtx_init(&priv->export_mtx, "export dgram lock", NULL, MTX_DEF);

	return (0);
}

/* Free all flow cache memory.  Called from the node close method. */
void
ng_netflow_cache_flush(priv_p priv)
{
	struct flow_entry	*fle, *fle1;
	struct flow_hash_entry	*hsh;
	item_p			item = NULL;
	int i;

	/*
	 * We are going to free probably billable data.
	 * Expire everything before freeing it.
	 * No locking is required, since the callout is already drained.
	 */
	for (hsh = priv->hash, i = 0; i < NBUCKETS; hsh++, i++)
		TAILQ_FOREACH_SAFE(fle, &hsh->head, fle_hash, fle1) {
			TAILQ_REMOVE(&hsh->head, fle, fle_hash);
			expire_flow(priv, &item, fle, NG_QUEUE);
		}

	if (item != NULL)
		export_send(priv, item, NG_QUEUE);

	uma_zdestroy(priv->zone);

	/* Destroy hash mutexes. */
	for (i = 0, hsh = priv->hash; i < NBUCKETS; i++, hsh++)
		mtx_destroy(&hsh->mtx);

	/* Free hash memory. */
	if (priv->hash)
		free(priv->hash, M_NETFLOW_HASH);

	mtx_destroy(&priv->export_mtx);
}
/* Insert a packet into the flow cache. */
int
ng_netflow_flow_add(priv_p priv, struct ip *ip, unsigned int src_if_index)
{
	register struct flow_entry	*fle, *fle1;
	struct flow_hash_entry		*hsh;
	struct flow_rec		r;
	item_p			item = NULL;
	int			hlen, plen;
	int			error = 0;
	uint8_t			tcp_flags = 0;

	/* Try to fill in flow_rec r. */
	bzero(&r, sizeof(r));

	/* Check version. */
	if (ip->ip_v != IPVERSION)
		return (EINVAL);

	/* Verify minimum header length. */
	hlen = ip->ip_hl << 2;

	if (hlen < sizeof(struct ip))
		return (EINVAL);

	r.r_src = ip->ip_src;
	r.r_dst = ip->ip_dst;

	/* Save packet length. */
	plen = ntohs(ip->ip_len);

	r.r_ip_p = ip->ip_p;
	r.r_tos = ip->ip_tos;

	r.r_i_ifx = src_if_index;

	/*
	 * XXX NOTE: only the first fragment of a fragmented TCP, UDP or
	 * ICMP packet will be recorded with proper s_port and d_port.
	 * Subsequent fragments will be recorded simply as IP packets with
	 * ip_proto = ip->ip_p and s_port, d_port set to zero.
	 * I know it looks like a bug, but I don't want to re-implement
	 * IP packet reassembly here.  Anyway, the (in)famous trafd works
	 * this way, and nobody has complained yet :)
	 */
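	/*
	 * Illustrative note: (ip->ip_off & htons(IP_OFFMASK)) == 0 means
	 * the fragment offset is zero, i.e. an unfragmented packet or a
	 * first fragment, so the transport header is present and the
	 * ports can be read safely.  ip_off is still in network byte
	 * order here, hence the htons() on the mask.
	 */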
	if ((ip->ip_off & htons(IP_OFFMASK)) == 0)
		switch (r.r_ip_p) {
		case IPPROTO_TCP:
		{
			register struct tcphdr *tcp;

			tcp = (struct tcphdr *)((caddr_t)ip + hlen);
			r.r_sport = tcp->th_sport;
			r.r_dport = tcp->th_dport;
			tcp_flags = tcp->th_flags;
			break;
		}
		case IPPROTO_UDP:
			r.r_ports = *(uint32_t *)((caddr_t)ip + hlen);
			break;
		}

	/* Update node statistics. XXX: race... */
	priv->info.nfinfo_packets++;
	priv->info.nfinfo_bytes += plen;

	/* Find the hash slot. */
	hsh = &priv->hash[ip_hash(&r)];

	mtx_lock(&hsh->mtx);

	/*
	 * Go through the hash and find our entry.  If we encounter an
	 * entry that should be expired, purge it.  We do a reverse
	 * search, since the most active entries are kept at the tail,
	 * and most searches are for the most active entries.
	 */
	TAILQ_FOREACH_REVERSE_SAFE(fle, &hsh->head, fhead, fle_hash, fle1) {
		if (bcmp(&r, &fle->f.r, sizeof(struct flow_rec)) == 0)
			break;
		if ((INACTIVE(fle) && SMALL(fle)) || AGED(fle)) {
			TAILQ_REMOVE(&hsh->head, fle, fle_hash);
			expire_flow(priv, &item, fle, NG_QUEUE);
			atomic_add_32(&priv->info.nfinfo_act_exp, 1);
		}
	}

	if (fle) {			/* An existing entry. */

		fle->f.bytes += plen;
		fle->f.packets++;
		fle->f.tcp_flags |= tcp_flags;
		fle->f.last = time_uptime;

		/*
		 * We have the following reasons to expire a flow actively:
		 * - it hit the active timeout;
		 * - a TCP connection was closed;
		 * - its byte counter is about to overflow (we expire once
		 *   it is within IF_MAXMTU of UINT_MAX, so one more
		 *   packet cannot wrap the 32-bit counter).
		 */
		if (tcp_flags & TH_FIN || tcp_flags & TH_RST || AGED(fle) ||
		    (fle->f.bytes >= (UINT_MAX - IF_MAXMTU))) {
			TAILQ_REMOVE(&hsh->head, fle, fle_hash);
			expire_flow(priv, &item, fle, NG_QUEUE);
			atomic_add_32(&priv->info.nfinfo_act_exp, 1);
		} else {
			/*
			 * It is the newest entry; move it to the tail
			 * if it isn't there already, so the next search
			 * will locate it quicker.
			 */
			if (fle != TAILQ_LAST(&hsh->head, fhead)) {
				TAILQ_REMOVE(&hsh->head, fle, fle_hash);
				TAILQ_INSERT_TAIL(&hsh->head, fle, fle_hash);
			}
		}
	} else				/* A new flow entry. */
		error = hash_insert(priv, hsh, &r, plen, tcp_flags);

	mtx_unlock(&hsh->mtx);

	if (item != NULL)
		return_export_dgram(priv, item, NG_QUEUE);

	return (error);
}

/*
 * Return records from the cache to userland.
 *
 * TODO: matching a particular IP should be done in the kernel, here.
 */
int
ng_netflow_flow_show(priv_p priv, uint32_t last, struct ng_mesg *resp)
{
	struct flow_hash_entry *hsh;
	struct flow_entry *fle;
	struct ngnf_flows *data;
	int i;

	data = (struct ngnf_flows *)resp->data;
	data->last = 0;
	data->nentries = 0;

	/* Check if this is the first run. */
	if (last == 0) {
		hsh = priv->hash;
		i = 0;
	} else {
		if (last > NBUCKETS - 1)
			return (EINVAL);
		hsh = priv->hash + last;
		i = last;
	}

	/*
	 * We will transfer no more than NREC_AT_ONCE records; more data
	 * will come in the next message.
	 * We send the current hash index to userland, and userland should
	 * return it back to us.  Then we will restart at that entry.
	 *
	 * The resulting cache snapshot is inaccurate for the
	 * following reasons:
	 *  - we skip locked hash entries;
	 *  - we bail out if someone wants our entry;
	 *  - we skip the rest of an entry when we hit NREC_AT_ONCE.
	 */
	for (; i < NBUCKETS; hsh++, i++) {
		if (mtx_trylock(&hsh->mtx) == 0)
			continue;

		TAILQ_FOREACH(fle, &hsh->head, fle_hash) {
			if (hsh->mtx.mtx_lock & MTX_CONTESTED)
				break;

			bcopy(&fle->f, &(data->entries[data->nentries]),
			    sizeof(fle->f));
			data->nentries++;
			if (data->nentries == NREC_AT_ONCE) {
				mtx_unlock(&hsh->mtx);
				if (++i < NBUCKETS)
					data->last = i;
				return (0);
			}
		}
		mtx_unlock(&hsh->mtx);
	}

	return (0);
}

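/*
 * Background note for reference (standard NetFlow v5 wire format, not
 * taken from this file): a datagram is a fixed 24-byte header followed
 * by up to NETFLOW_V5_MAX_RECORDS fixed-size 48-byte records, so even
 * a full datagram fits into a single Ethernet-MTU UDP packet.  The
 * header fields filled in below follow that layout.
 */
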
/* We have a full datagram; send it to the export hook. */
static int
export_send(priv_p priv, item_p item, int flags)
{
	struct mbuf *m = NGI_M(item);
	struct netflow_v5_export_dgram *dgram = mtod(m,
	    struct netflow_v5_export_dgram *);
	struct netflow_v5_header *header = &dgram->header;
	struct timespec ts;
	int error = 0;

	/* Fill in the mbuf header. */
	m->m_len = m->m_pkthdr.len = sizeof(struct netflow_v5_record) *
	    header->count + sizeof(struct netflow_v5_header);

	/* Fill in the export header. */
	header->sys_uptime = htonl(MILLIUPTIME(time_uptime));
	getnanotime(&ts);
	header->unix_secs  = htonl(ts.tv_sec);
	header->unix_nsecs = htonl(ts.tv_nsec);
	header->engine_type = 0;
	header->engine_id = 0;
	header->pad = 0;
	header->flow_seq = htonl(atomic_fetchadd_32(&priv->flow_seq,
	    header->count));
	header->count = htons(header->count);

	if (priv->export != NULL)
		NG_FWD_ITEM_HOOK_FLAGS(error, item, priv->export, flags);
	else
		NG_FREE_ITEM(item);

	return (error);
}

/* Add an export record to the dgram. */
static int
export_add(item_p item, struct flow_entry *fle)
{
	struct netflow_v5_export_dgram *dgram = mtod(NGI_M(item),
	    struct netflow_v5_export_dgram *);
	struct netflow_v5_header *header = &dgram->header;
	struct netflow_v5_record *rec;

	rec = &dgram->r[header->count];
	header->count++;

	KASSERT(header->count <= NETFLOW_V5_MAX_RECORDS,
	    ("ng_netflow: export too big"));

	/* Fill in the export record. */
	rec->src_addr = fle->f.r.r_src.s_addr;
	rec->dst_addr = fle->f.r.r_dst.s_addr;
	rec->next_hop = fle->f.next_hop.s_addr;
	rec->i_ifx    = htons(fle->f.fle_i_ifx);
	rec->o_ifx    = htons(fle->f.fle_o_ifx);
	rec->packets  = htonl(fle->f.packets);
	rec->octets   = htonl(fle->f.bytes);
	rec->first    = htonl(MILLIUPTIME(fle->f.first));
	rec->last     = htonl(MILLIUPTIME(fle->f.last));
	rec->s_port   = fle->f.r.r_sport;
	rec->d_port   = fle->f.r.r_dport;
	rec->flags    = fle->f.tcp_flags;
	rec->prot     = fle->f.r.r_ip_p;
	rec->tos      = fle->f.r.r_tos;
	rec->dst_mask = fle->f.dst_mask;
	rec->src_mask = fle->f.src_mask;

	/* Unsupported fields. */
	rec->src_as = rec->dst_as = 0;

	if (header->count == NETFLOW_V5_MAX_RECORDS)
		return (1); /* end of datagram */
	else
		return (0);
}

/* Periodic flow expiry run. */
void
ng_netflow_expire(void *arg)
{
	struct flow_entry	*fle, *fle1;
	struct flow_hash_entry	*hsh;
	priv_p			priv = (priv_p)arg;
	item_p			item = NULL;
	uint32_t		used;
	int			i;

	/*
	 * Go through the whole cache.
	 */
	for (hsh = priv->hash, i = 0; i < NBUCKETS; hsh++, i++) {
		/*
		 * Skip entries that are already being worked on.
		 */
		if (mtx_trylock(&hsh->mtx) == 0)
			continue;

		used = atomic_load_acq_32(&priv->info.nfinfo_used);
		TAILQ_FOREACH_SAFE(fle, &hsh->head, fle_hash, fle1) {
			/*
			 * The interrupt thread wants this entry!
			 * Quick! Quick! Bail out!
			 */
			if (hsh->mtx.mtx_lock & MTX_CONTESTED)
				break;

			/*
			 * Don't expire aggressively while the hash collision
			 * ratio is predicted to be small: used <= NBUCKETS*2
			 * means the average chain is at most two entries long.
			 */
			if (used <= (NBUCKETS*2) && !INACTIVE(fle))
				break;

			if ((INACTIVE(fle) && (SMALL(fle) ||
			    (used > (NBUCKETS*2)))) || AGED(fle)) {
				TAILQ_REMOVE(&hsh->head, fle, fle_hash);
				expire_flow(priv, &item, fle, NG_NOFLAGS);
				used--;
				atomic_add_32(&priv->info.nfinfo_inact_exp, 1);
			}
		}
		mtx_unlock(&hsh->mtx);
	}

	if (item != NULL)
		return_export_dgram(priv, item, NG_NOFLAGS);

	/* Schedule the next expiry run. */
	callout_reset(&priv->exp_callout, (1*hz), &ng_netflow_expire,
	    (void *)priv);
}