/*-
 * Copyright (c) 2004-2005 Gleb Smirnoff <glebius@FreeBSD.org>
 * Copyright (c) 2001-2003 Roman V. Palagin <romanp@unshadow.net>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $SourceForge: netflow.c,v 1.41 2004/09/05 11:41:10 glebius Exp $
 */

static const char rcs_id[] =
    "@(#) $FreeBSD: head/sys/netgraph/netflow/netflow.c 163241 2006-10-11 13:28:37Z glebius $";

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/mbuf.h>
#include <sys/syslog.h>
#include <sys/systm.h>
#include <sys/socket.h>

#include <machine/atomic.h>

#include <net/if.h>
#include <net/route.h>
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>

#include <netgraph/ng_message.h>
#include <netgraph/netgraph.h>

#include <netgraph/netflow/netflow.h>
#include <netgraph/netflow/ng_netflow.h>

#define	NBUCKETS	(65536)		/* must be power of 2 */
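
/*
 * Keeping NBUCKETS a power of 2 lets the hash macros below select a
 * bucket with a cheap mask by (NBUCKETS - 1) instead of a modulo.
 */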

/* This hash is for TCP or UDP packets. */
#define FULL_HASH(addr1, addr2, port1, port2)	\
	(((addr1 ^ (addr1 >> 16) ^		\
	htons(addr2 ^ (addr2 >> 16))) ^		\
	port1 ^ htons(port2)) &			\
	(NBUCKETS - 1))

/* This hash is for all other IP packets. */
#define ADDR_HASH(addr1, addr2)			\
	((addr1 ^ (addr1 >> 16) ^		\
	htons(addr2 ^ (addr2 >> 16))) &		\
	(NBUCKETS - 1))
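
/*
 * Both hashes XOR-fold the upper half of each 32-bit address into the
 * lower 16 bits (mixing the ports in as well for TCP/UDP); the final
 * mask with (NBUCKETS - 1) keeps just those folded bits as the bucket
 * index. htons() is applied to one operand so the two addresses are
 * mixed in with different byte orders.
 */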

/* Macros to shorten logical constructions */
/* XXX: priv must exist in namespace */
#define	INACTIVE(fle)	(time_uptime - fle->f.last > priv->info.nfinfo_inact_t)
#define	AGED(fle)	(time_uptime - fle->f.first > priv->info.nfinfo_act_t)
#define	ISFREE(fle)	(fle->f.packets == 0)

/*
 * 4 is a magic number: statistically, the number of 4-packet flows is
 * bigger than that of 5-, 6-, 7-...-packet flows by an order of magnitude.
 * Most UDP/ICMP scans are 1 packet (~90% of the flow cache). TCP scans
 * take 2 packets for a reachable host and 4 packets otherwise.
 */
#define	SMALL(fle)	(fle->f.packets <= 4)

/*
 * Cisco uses milliseconds for uptime. Bad idea, since it overflows
 * every 48+ days. But we do the same to keep compatibility. This macro
 * performs a multiplication by 1000 that is allowed to overflow.
 */
#define	MILLIUPTIME(t)	(((t) << 9) +	/* 512 */	\
			 ((t) << 8) +	/* 256 */	\
			 ((t) << 7) +	/* 128 */	\
			 ((t) << 6) +	/* 64  */	\
			 ((t) << 5) +	/* 32  */	\
			 ((t) << 3))	/* 8   */
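
/*
 * 512 + 256 + 128 + 64 + 32 + 8 == 1000, so MILLIUPTIME(t) equals
 * 1000 * t modulo 2^32.
 */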

MALLOC_DECLARE(M_NETFLOW_HASH);
MALLOC_DEFINE(M_NETFLOW_HASH, "netflow_hash", "NetFlow hash");

static int export_add(item_p, struct flow_entry *);
static int export_send(priv_p, item_p, int flags);

/* Generate hash for a given flow record. */
static __inline uint32_t
ip_hash(struct flow_rec *r)
{
	switch (r->r_ip_p) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
		return FULL_HASH(r->r_src.s_addr, r->r_dst.s_addr,
		    r->r_sport, r->r_dport);
	default:
		return ADDR_HASH(r->r_src.s_addr, r->r_dst.s_addr);
	}
}

/* This is a callback from uma(9), invoked when a flow entry is allocated. */
static int
uma_ctor_flow(void *mem, int size, void *arg, int how)
{
	priv_p priv = (priv_p)arg;

	if (atomic_load_acq_32(&priv->info.nfinfo_used) >= CACHESIZE)
		return (ENOMEM);

	atomic_add_32(&priv->info.nfinfo_used, 1);

	return (0);
}

/* This is a callback from uma(9), invoked when a flow entry is freed. */
static void
uma_dtor_flow(void *mem, int size, void *arg)
{
	priv_p priv = (priv_p)arg;

	atomic_subtract_32(&priv->info.nfinfo_used, 1);
}
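
/*
 * The ctor/dtor pair above keeps nfinfo_used equal to the number of
 * live flow entries; the ctor refuses new allocations once the cache
 * already holds CACHESIZE entries.
 */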

/*
 * Detach the export datagram from priv, if there is one.
 * If there is none, allocate a new one.
 */
static item_p
get_export_dgram(priv_p priv)
{
	item_p	item = NULL;

	mtx_lock(&priv->export_mtx);
	if (priv->export_item != NULL) {
		item = priv->export_item;
		priv->export_item = NULL;
	}
	mtx_unlock(&priv->export_mtx);

	if (item == NULL) {
		struct netflow_v5_export_dgram *dgram;
		struct mbuf *m;

		m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
		if (m == NULL)
			return (NULL);
		item = ng_package_data(m, NG_NOFLAGS);
		if (item == NULL)
			return (NULL);
		dgram = mtod(m, struct netflow_v5_export_dgram *);
		dgram->header.count = 0;
		dgram->header.version = htons(NETFLOW_V5);
	}

	return (item);
}

/*
 * Re-attach an incomplete datagram back to priv.
 * If there is already another one there, send the incomplete one.
 */
static void
return_export_dgram(priv_p priv, item_p item, int flags)
{
	/*
	 * It may happen on SMP that some other thread has already
	 * put its item there; in this case we bail out and
	 * send what we have to the collector.
	 */
	mtx_lock(&priv->export_mtx);
	if (priv->export_item == NULL) {
		priv->export_item = item;
		mtx_unlock(&priv->export_mtx);
	} else {
		mtx_unlock(&priv->export_mtx);
		export_send(priv, item, flags);
	}
}

/*
 * The flow is over. Call export_add() on it and free it. If the
 * export datagram is full, call export_send().
 */
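/*
 * Callers pass a pointer to their current export item so that several
 * expired flows can be batched into a single export datagram.
 */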
static __inline void
expire_flow(priv_p priv, item_p *item, struct flow_entry *fle, int flags)
{
	if (*item == NULL)
		*item = get_export_dgram(priv);
	if (*item == NULL) {
		atomic_add_32(&priv->info.nfinfo_export_failed, 1);
		uma_zfree_arg(priv->zone, fle, priv);
		return;
	}
	if (export_add(*item, fle) > 0) {
		export_send(priv, *item, flags);
		*item = NULL;
	}
	uma_zfree_arg(priv->zone, fle, priv);
}

/* Get a snapshot of node statistics */
void
ng_netflow_copyinfo(priv_p priv, struct ng_netflow_info *i)
{
	/* XXX: atomic */
	memcpy((void *)i, (void *)&priv->info, sizeof(priv->info));
}

/*
 * Insert a record into the defined slot.
 *
 * First we grab a free flow entry, then fill in all
 * the fields we can.
 *
 * TODO: consider dropping the hash mutex while filling in the datagram,
 * as was done in a previous version. Need to test & profile
 * to be sure.
 */
static __inline int
hash_insert(priv_p priv, struct flow_hash_entry *hsh, struct flow_rec *r,
	int plen, uint8_t tcp_flags)
{
	struct flow_entry	*fle;
	struct route ro;
	struct sockaddr_in *sin;

	mtx_assert(&hsh->mtx, MA_OWNED);

	fle = uma_zalloc_arg(priv->zone, priv, M_NOWAIT);
	if (fle == NULL) {
		atomic_add_32(&priv->info.nfinfo_alloc_failed, 1);
		return (ENOMEM);
	}

	/*
	 * Now fle is totally ours. It is detached from all lists,
	 * so we can safely edit it.
	 */

	bcopy(r, &fle->f.r, sizeof(struct flow_rec));
	fle->f.bytes = plen;
	fle->f.packets = 1;
	fle->f.tcp_flags = tcp_flags;

	fle->f.first = fle->f.last = time_uptime;

	/*
	 * First we do a route table lookup on the destination address,
	 * so that we can fill in out_ifx, dst_mask, nexthop and
	 * (in future releases) dst_as.
	 */
	bzero((caddr_t)&ro, sizeof(ro));
	sin = (struct sockaddr_in *)&ro.ro_dst;
	sin->sin_len = sizeof(*sin);
	sin->sin_family = AF_INET;
	sin->sin_addr = fle->f.r.r_dst;
	rtalloc_ign(&ro, RTF_CLONING);
	if (ro.ro_rt != NULL) {
		struct rtentry *rt = ro.ro_rt;

		fle->f.fle_o_ifx = rt->rt_ifp->if_index;

		if (rt->rt_flags & RTF_GATEWAY &&
		    rt->rt_gateway->sa_family == AF_INET)
			fle->f.next_hop =
			    ((struct sockaddr_in *)(rt->rt_gateway))->sin_addr;

		if (rt_mask(rt))
			fle->f.dst_mask = bitcount32(((struct sockaddr_in *)
			    rt_mask(rt))->sin_addr.s_addr);
		else if (rt->rt_flags & RTF_HOST)
			/* Give up. We can't determine mask :( */
			fle->f.dst_mask = 32;

		RTFREE(ro.ro_rt);
	}

	/* Do a route lookup on the source address to fill in src_mask. */

	bzero((caddr_t)&ro, sizeof(ro));
	sin = (struct sockaddr_in *)&ro.ro_dst;
	sin->sin_len = sizeof(*sin);
	sin->sin_family = AF_INET;
	sin->sin_addr = fle->f.r.r_src;
	rtalloc_ign(&ro, RTF_CLONING);
	if (ro.ro_rt != NULL) {
		struct rtentry *rt = ro.ro_rt;

		if (rt_mask(rt))
			fle->f.src_mask = bitcount32(((struct sockaddr_in *)
			    rt_mask(rt))->sin_addr.s_addr);
		else if (rt->rt_flags & RTF_HOST)
			/* Give up. We can't determine mask :( */
			fle->f.src_mask = 32;

		RTFREE(ro.ro_rt);
	}

	/* Push the new flow to the end of the hash chain. */
	TAILQ_INSERT_TAIL(&hsh->head, fle, fle_hash);

	return (0);
}


/*
 * Non-static functions called from ng_netflow.c
 */

/* Allocate memory and set up flow cache */
int
ng_netflow_cache_init(priv_p priv)
{
	struct flow_hash_entry	*hsh;
	int i;

	/* Initialize cache UMA zone. */
	priv->zone = uma_zcreate("NetFlow cache", sizeof(struct flow_entry),
	    uma_ctor_flow, uma_dtor_flow, NULL, NULL, UMA_ALIGN_CACHE, 0);
	uma_zone_set_max(priv->zone, CACHESIZE);

	/* Allocate hash. */
	MALLOC(priv->hash, struct flow_hash_entry *,
	    NBUCKETS * sizeof(struct flow_hash_entry),
	    M_NETFLOW_HASH, M_WAITOK | M_ZERO);

	if (priv->hash == NULL) {
		uma_zdestroy(priv->zone);
		return (ENOMEM);
	}

	/* Initialize hash. */
	for (i = 0, hsh = priv->hash; i < NBUCKETS; i++, hsh++) {
		mtx_init(&hsh->mtx, "hash mutex", NULL, MTX_DEF);
		TAILQ_INIT(&hsh->head);
	}

	mtx_init(&priv->export_mtx, "export dgram lock", NULL, MTX_DEF);

	return (0);
}

/* Free all flow cache memory. Called from the node close method. */
void
ng_netflow_cache_flush(priv_p priv)
{
	struct flow_entry	*fle, *fle1;
	struct flow_hash_entry	*hsh;
	item_p			item = NULL;
	int i;

	/*
	 * We are going to free data that is probably billable, so
	 * expire everything before freeing it.
	 * No locking is required since the callout is already drained.
	 */
	for (hsh = priv->hash, i = 0; i < NBUCKETS; hsh++, i++)
		TAILQ_FOREACH_SAFE(fle, &hsh->head, fle_hash, fle1) {
			TAILQ_REMOVE(&hsh->head, fle, fle_hash);
			expire_flow(priv, &item, fle, NG_QUEUE);
		}

	if (item != NULL)
		export_send(priv, item, NG_QUEUE);

	uma_zdestroy(priv->zone);

	/* Destroy hash mutexes. */
	for (i = 0, hsh = priv->hash; i < NBUCKETS; i++, hsh++)
		mtx_destroy(&hsh->mtx);

	/* Free hash memory. */
	if (priv->hash)
		FREE(priv->hash, M_NETFLOW_HASH);

	mtx_destroy(&priv->export_mtx);
}

/* Insert a packet into the flow cache. */
int
ng_netflow_flow_add(priv_p priv, struct ip *ip, iface_p iface,
	struct ifnet *ifp)
{
	register struct flow_entry	*fle, *fle1;
	struct flow_hash_entry		*hsh;
	struct flow_rec		r;
	item_p			item = NULL;
	int			hlen, plen;
	int			error = 0;
	uint8_t			tcp_flags = 0;

	/* Try to fill flow_rec r */
	bzero(&r, sizeof(r));
	/* check version */
	if (ip->ip_v != IPVERSION)
		return (EINVAL);

	/* verify min header length */
	hlen = ip->ip_hl << 2;

	if (hlen < sizeof(struct ip))
		return (EINVAL);

	r.r_src = ip->ip_src;
	r.r_dst = ip->ip_dst;

	/* save packet length */
	plen = ntohs(ip->ip_len);

	r.r_ip_p = ip->ip_p;
	r.r_tos = ip->ip_tos;

	/* Configured in_ifx overrides mbuf's */
	if (iface->info.ifinfo_index == 0) {
		if (ifp != NULL)
			r.r_i_ifx = ifp->if_index;
	} else
		r.r_i_ifx = iface->info.ifinfo_index;

	/*
	 * XXX NOTE: only the first fragment of a fragmented TCP, UDP or
	 * ICMP packet will be recorded with proper s_port and d_port.
	 * Subsequent fragments will be recorded simply as IP packets with
	 * ip_proto = ip->ip_p and s_port, d_port set to zero.
	 * I know it looks like a bug, but I don't want to re-implement
	 * IP packet reassembly here. Anyway, the (in)famous trafd works
	 * this way - and nobody has complained yet :)
	 */
	if ((ip->ip_off & htons(IP_OFFMASK)) == 0)
		switch (r.r_ip_p) {
		case IPPROTO_TCP:
		{
			register struct tcphdr *tcp;

			tcp = (struct tcphdr *)((caddr_t)ip + hlen);
			r.r_sport = tcp->th_sport;
			r.r_dport = tcp->th_dport;
			tcp_flags = tcp->th_flags;
			break;
		}
		case IPPROTO_UDP:
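			/*
			 * r_ports overlays r_sport and r_dport in struct
			 * flow_rec (see ng_netflow.h), so a single 32-bit
			 * load copies both UDP ports at once.
			 */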
			r.r_ports = *(uint32_t *)((caddr_t)ip + hlen);
			break;
		}

	/* Update node statistics. XXX: race... */
	priv->info.nfinfo_packets++;
	priv->info.nfinfo_bytes += plen;

	/* Find hash slot. */
	hsh = &priv->hash[ip_hash(&r)];

	mtx_lock(&hsh->mtx);

	/*
	 * Go through the hash and find our entry. If we encounter an
	 * entry that should be expired, purge it. We do a reverse
	 * search, since most active entries are first and most
	 * searches are done on the most active entries.
	 */
	TAILQ_FOREACH_REVERSE_SAFE(fle, &hsh->head, fhead, fle_hash, fle1) {
		if (bcmp(&r, &fle->f.r, sizeof(struct flow_rec)) == 0)
			break;
		if ((INACTIVE(fle) && SMALL(fle)) || AGED(fle)) {
			TAILQ_REMOVE(&hsh->head, fle, fle_hash);
			expire_flow(priv, &item, fle, NG_QUEUE);
			atomic_add_32(&priv->info.nfinfo_act_exp, 1);
		}
	}

	if (fle) {			/* An existing entry. */

		fle->f.bytes += plen;
		fle->f.packets++;
		fle->f.tcp_flags |= tcp_flags;
		fle->f.last = time_uptime;

		/*
		 * We have the following reasons to expire a flow actively:
		 * - it hit the active timeout
		 * - a TCP connection closed
		 * - its byte counter is about to overflow
		 */
		if (tcp_flags & TH_FIN || tcp_flags & TH_RST || AGED(fle) ||
		    (fle->f.bytes >= (UINT_MAX - IF_MAXMTU))) {
			TAILQ_REMOVE(&hsh->head, fle, fle_hash);
			expire_flow(priv, &item, fle, NG_QUEUE);
			atomic_add_32(&priv->info.nfinfo_act_exp, 1);
		} else {
			/*
			 * It is the newest; move it to the tail
			 * if it isn't there already, so the next
			 * search will locate it more quickly.
			 */
			if (fle != TAILQ_LAST(&hsh->head, fhead)) {
				TAILQ_REMOVE(&hsh->head, fle, fle_hash);
				TAILQ_INSERT_TAIL(&hsh->head, fle, fle_hash);
			}
		}
	} else				/* A new flow entry. */
		error = hash_insert(priv, hsh, &r, plen, tcp_flags);

	mtx_unlock(&hsh->mtx);

	if (item != NULL)
		return_export_dgram(priv, item, NG_QUEUE);

	return (error);
}

/*
 * Return records from the cache to userland.
 *
 * TODO: matching of a particular IP should be done in the kernel, here.
 */
int
ng_netflow_flow_show(priv_p priv, uint32_t last, struct ng_mesg *resp)
{
	struct flow_hash_entry *hsh;
	struct flow_entry *fle;
	struct ngnf_flows *data;
	int i;

	data = (struct ngnf_flows *)resp->data;
	data->last = 0;
	data->nentries = 0;

	/* Check if this is the first run */
	if (last == 0) {
		hsh = priv->hash;
		i = 0;
	} else {
		if (last > NBUCKETS - 1)
			return (EINVAL);
		hsh = priv->hash + last;
		i = last;
	}

	/*
	 * We will transfer no more than NREC_AT_ONCE records; more data
	 * will come in the next message.
	 * We send the current hash index to userland, and userland should
	 * return it back to us. Then, we will restart with the new entry.
	 *
	 * The resulting cache snapshot is inaccurate for the
	 * following reasons:
	 *  - we skip locked hash entries
	 *  - we bail out if someone wants our entry
	 *  - we skip the rest of the entries when we hit NREC_AT_ONCE
	 */
	for (; i < NBUCKETS; hsh++, i++) {
		if (mtx_trylock(&hsh->mtx) == 0)
			continue;

		TAILQ_FOREACH(fle, &hsh->head, fle_hash) {
			if (hsh->mtx.mtx_lock & MTX_CONTESTED)
				break;

			bcopy(&fle->f, &(data->entries[data->nentries]),
			    sizeof(fle->f));
			data->nentries++;
			if (data->nentries == NREC_AT_ONCE) {
				mtx_unlock(&hsh->mtx);
				if (++i < NBUCKETS)
					data->last = i;
				return (0);
			}
		}
		mtx_unlock(&hsh->mtx);
	}

	return (0);
}

/* We have a full datagram in the item. Send it to the export hook. */
static int
export_send(priv_p priv, item_p item, int flags)
{
	struct mbuf *m = NGI_M(item);
	struct netflow_v5_export_dgram *dgram = mtod(m,
					struct netflow_v5_export_dgram *);
	struct netflow_v5_header *header = &dgram->header;
	struct timespec ts;
	int error = 0;

	/* Fill mbuf header. */
	m->m_len = m->m_pkthdr.len = sizeof(struct netflow_v5_record) *
	    header->count + sizeof(struct netflow_v5_header);
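
	/*
	 * The datagram laid out in the mbuf is a struct netflow_v5_header
	 * followed by header->count fixed-size netflow_v5_record entries,
	 * which is exactly the length computed above.
	 */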

	/* Fill export header. */
	header->sys_uptime = htonl(MILLIUPTIME(time_uptime));
	getnanotime(&ts);
	header->unix_secs  = htonl(ts.tv_sec);
	header->unix_nsecs = htonl(ts.tv_nsec);
	header->engine_type = 0;
	header->engine_id = 0;
	header->pad = 0;
	header->flow_seq = htonl(atomic_fetchadd_32(&priv->flow_seq,
	    header->count));
	header->count = htons(header->count);

	if (priv->export != NULL)
		/* Should also NET_LOCK_GIANT(). */
		NG_FWD_ITEM_HOOK_FLAGS(error, item, priv->export, flags);

	return (error);
}


/* Add export record to dgram. */
static int
export_add(item_p item, struct flow_entry *fle)
{
	struct netflow_v5_export_dgram *dgram = mtod(NGI_M(item),
					struct netflow_v5_export_dgram *);
	struct netflow_v5_header *header = &dgram->header;
	struct netflow_v5_record *rec;

	if (header->count == 0) {	/* first record */
		rec = &dgram->r[0];
		header->count = 1;
	} else {			/* continue filling datagram */
		rec = &dgram->r[header->count];
		header->count++;
	}

	KASSERT(header->count <= NETFLOW_V5_MAX_RECORDS,
	    ("ng_netflow: export too big"));

	/* Fill in export record. */
	rec->src_addr = fle->f.r.r_src.s_addr;
	rec->dst_addr = fle->f.r.r_dst.s_addr;
	rec->next_hop = fle->f.next_hop.s_addr;
	rec->i_ifx    = htons(fle->f.fle_i_ifx);
	rec->o_ifx    = htons(fle->f.fle_o_ifx);
	rec->packets  = htonl(fle->f.packets);
	rec->octets   = htonl(fle->f.bytes);
	rec->first    = htonl(MILLIUPTIME(fle->f.first));
	rec->last     = htonl(MILLIUPTIME(fle->f.last));
	rec->s_port   = fle->f.r.r_sport;
	rec->d_port   = fle->f.r.r_dport;
	rec->flags    = fle->f.tcp_flags;
	rec->prot     = fle->f.r.r_ip_p;
	rec->tos      = fle->f.r.r_tos;
	rec->dst_mask = fle->f.dst_mask;
	rec->src_mask = fle->f.src_mask;

	/* Fields not supported yet. */
	rec->src_as = rec->dst_as = 0;

	if (header->count == NETFLOW_V5_MAX_RECORDS)
		return (1); /* end of datagram */
	else
		return (0);
}

/* Periodic flow expiry run. */
void
ng_netflow_expire(void *arg)
{
	struct flow_entry	*fle, *fle1;
	struct flow_hash_entry	*hsh;
	priv_p			priv = (priv_p)arg;
	item_p			item = NULL;
	uint32_t		used;
	int			i;

	/*
	 * Go through the whole cache.
	 */
	for (hsh = priv->hash, i = 0; i < NBUCKETS; hsh++, i++) {
		/*
		 * Skip entries that are already being worked on.
		 */
		if (mtx_trylock(&hsh->mtx) == 0)
			continue;

		used = atomic_load_acq_32(&priv->info.nfinfo_used);
		TAILQ_FOREACH_SAFE(fle, &hsh->head, fle_hash, fle1) {
			/*
			 * Interrupt thread wants this entry!
			 * Quick! Quick! Bail out!
			 */
			if (hsh->mtx.mtx_lock & MTX_CONTESTED)
				break;

			/*
			 * Don't expire aggressively while the hash
			 * collision ratio is predicted to be small.
			 */
			if (used <= (NBUCKETS*2) && !INACTIVE(fle))
				break;

			if ((INACTIVE(fle) && (SMALL(fle) ||
			    (used > (NBUCKETS*2)))) || AGED(fle)) {
				TAILQ_REMOVE(&hsh->head, fle, fle_hash);
				expire_flow(priv, &item, fle, NG_NOFLAGS);
				used--;
				atomic_add_32(&priv->info.nfinfo_inact_exp, 1);
			}
		}
		mtx_unlock(&hsh->mtx);
	}

	if (item != NULL)
		return_export_dgram(priv, item, NG_NOFLAGS);

	/* Schedule next expire. */
	callout_reset(&priv->exp_callout, (1*hz), &ng_netflow_expire,
	    (void *)priv);
}