/*-
 * Copyright (c) 2004-2005 Gleb Smirnoff <glebius@FreeBSD.org>
 * Copyright (c) 2001-2003 Roman V. Palagin <romanp@unshadow.net>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $SourceForge: netflow.c,v 1.41 2004/09/05 11:41:10 glebius Exp $
 */

static const char rcs_id[] =
    "@(#) $FreeBSD: head/sys/netgraph/netflow/netflow.c 146285 2005-05-16 17:10:08Z glebius $";

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/mbuf.h>
#include <sys/syslog.h>
#include <sys/systm.h>
#include <sys/socket.h>

#include <machine/atomic.h>

#include <net/if.h>
#include <net/route.h>
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>

#include <netgraph/ng_message.h>
#include <netgraph/netgraph.h>

#include <netgraph/netflow/netflow.h>
#include <netgraph/netflow/ng_netflow.h>
#define	NBUCKETS	(65536)		/* must be power of 2 */

/* This hash is for TCP or UDP packets */
#define FULL_HASH(addr1, addr2, port1, port2)	\
	(((addr1 >> 16) ^		\
	  (addr2 & 0x00FF) ^		\
	  ((port1 ^ port2) << 8)) &	\
	 (NBUCKETS - 1))

/* This hash is for all other IP packets */
#define ADDR_HASH(addr1, addr2)		\
	(((addr1 >> 16) ^		\
	  (addr2 & 0x00FF)) &		\
	 (NBUCKETS - 1))
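
/*
 * Both hashes combine the high 16 bits of one address, the low byte of
 * the other and (for TCP/UDP) the XOR of the two ports, then reduce the
 * result with "& (NBUCKETS - 1)".  That mask is a cheap modulo only
 * because NBUCKETS is a power of 2, hence the requirement above.
 */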

/* Macros to shorten logical constructions */
/* XXX: priv must exist in namespace */
#define	INACTIVE(fle)	(time_uptime - fle->f.last > priv->info.nfinfo_inact_t)
#define	AGED(fle)	(time_uptime - fle->f.first > priv->info.nfinfo_act_t)
#define	ISFREE(fle)	(fle->f.packets == 0)

/*
 * 4 is a magic number: statistically, the number of 4-packet flows is
 * an order of magnitude bigger than that of 5-, 6-, 7-...-packet flows.
 * Most UDP/ICMP scans are 1 packet (~90% of the flow cache). TCP scans
 * are 2 packets for a reachable host and 4 packets otherwise.
 */
#define	SMALL(fle)	(fle->f.packets <= 4)

/*
 * Cisco uses milliseconds for uptime. A bad idea, since it overflows
 * every 48+ days; but we do the same to keep compatibility. This macro
 * does the (overflowing) multiplication by 1000.
 */
#define	MILLIUPTIME(t)	(((t) << 9) +	/* 512 */	\
			 ((t) << 8) +	/* 256 */	\
			 ((t) << 7) +	/* 128 */	\
			 ((t) << 6) +	/* 64  */	\
			 ((t) << 5) +	/* 32  */	\
			 ((t) << 3))	/* 8   */
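
/*
 * A quick check of the shifts above: 512 + 256 + 128 + 64 + 32 + 8 = 1000,
 * so MILLIUPTIME(t) equals 1000 * t modulo 2^32.  For example, one day of
 * uptime, MILLIUPTIME(86400), yields 86400000 milliseconds.
 */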

MALLOC_DECLARE(M_NETFLOW_HASH);
MALLOC_DEFINE(M_NETFLOW_HASH, "NetFlow hash", "NetFlow hash");

static int export_add(item_p, struct flow_entry *);
static int export_send(priv_p, item_p);

/* Generate hash for a given flow record. */
static __inline uint32_t
ip_hash(struct flow_rec *r)
{
	switch (r->r_ip_p) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
		return FULL_HASH(r->r_src.s_addr, r->r_dst.s_addr,
		    r->r_sport, r->r_dport);
	default:
		return ADDR_HASH(r->r_src.s_addr, r->r_dst.s_addr);
	}
}

/* This is a callback from uma(9), called on alloc. */
static int
uma_ctor_flow(void *mem, int size, void *arg, int how)
{
	priv_p priv = (priv_p)arg;

	if (atomic_load_acq_32(&priv->info.nfinfo_used) >= CACHESIZE)
		return (ENOMEM);

	atomic_add_32(&priv->info.nfinfo_used, 1);

	return (0);
}
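
/*
 * Note: together with the uma_zone_set_max() call in
 * ng_netflow_cache_init(), failing the ctor here caps the flow cache
 * at CACHESIZE entries.
 */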

/* This is a callback from uma(9), called on free. */
static void
uma_dtor_flow(void *mem, int size, void *arg)
{
	priv_p priv = (priv_p)arg;

	atomic_subtract_32(&priv->info.nfinfo_used, 1);
}

/*
 * Detach the export datagram from priv, if there is one;
 * otherwise allocate a new one.
 */
static item_p
get_export_dgram(priv_p priv)
{
	item_p	item = NULL;

	mtx_lock(&priv->export_mtx);
	if (priv->export_item != NULL) {
		item = priv->export_item;
		priv->export_item = NULL;
	}
	mtx_unlock(&priv->export_mtx);

	if (item == NULL) {
		struct netflow_v5_export_dgram *dgram;
		struct mbuf *m;

		m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
		if (m == NULL)
			return (NULL);
		item = ng_package_data(m, NG_NOFLAGS);
		if (item == NULL)
			return (NULL);
		dgram = mtod(m, struct netflow_v5_export_dgram *);
		dgram->header.count = 0;
		dgram->header.version = htons(NETFLOW_V5);
	}

	return (item);
}

/*
 * Re-attach an incomplete datagram back to priv.
 * If there is already another one there, send the incomplete one.
 */
static void
return_export_dgram(priv_p priv, item_p item)
{
	/*
	 * On SMP it may happen that some other thread has already
	 * put its item there; in this case we bail out and send
	 * what we have to the collector.
	 */
	mtx_lock(&priv->export_mtx);
	if (priv->export_item == NULL) {
		priv->export_item = item;
		mtx_unlock(&priv->export_mtx);
	} else {
		mtx_unlock(&priv->export_mtx);
		export_send(priv, item);
	}
}

/*
 * The flow is over. Call export_add() and free the entry. If the
 * export datagram is full, call export_send().
 */
static __inline void
expire_flow(priv_p priv, item_p *item, struct flow_entry *fle)
{
	if (*item == NULL)
		*item = get_export_dgram(priv);
	if (*item == NULL) {
		atomic_add_32(&priv->info.nfinfo_export_failed, 1);
		uma_zfree_arg(priv->zone, fle, priv);
		return;
	}
	if (export_add(*item, fle) > 0) {
		export_send(priv, *item);
		*item = NULL;
	}
	uma_zfree_arg(priv->zone, fle, priv);
}
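
/*
 * Note that expire_flow() accumulates records into a single export
 * datagram: *item is kept across calls, so when a caller is done
 * expiring, it must pass any leftover datagram to return_export_dgram()
 * or export_send().
 */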

/* Get a snapshot of node statistics. */
void
ng_netflow_copyinfo(priv_p priv, struct ng_netflow_info *i)
{
	/* XXX: atomic */
	memcpy((void *)i, (void *)&priv->info, sizeof(priv->info));
}

/* Calculate the number of bits set in a netmask. */
#define	g21	0x55555555ul	/* = 0101_0101_0101_0101_0101_0101_0101_0101 */
#define	g22	0x33333333ul	/* = 0011_0011_0011_0011_0011_0011_0011_0011 */
#define	g23	0x0f0f0f0ful	/* = 0000_1111_0000_1111_0000_1111_0000_1111 */
static __inline u_char
bit_count(uint32_t v)
{
	v = (v & g21) + ((v >> 1) & g21);
	v = (v & g22) + ((v >> 2) & g22);
	v = (v + (v >> 4)) & g23;
	return (v + (v >> 8) + (v >> 16) + (v >> 24)) & 0x3f;
}
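
/*
 * bit_count() is the classic SWAR population count, summing bit counts
 * of progressively wider fields.  For a contiguous netmask the result is
 * the prefix length, e.g. bit_count(0xffffff00) == 24 for 255.255.255.0
 * (a /24); popcount is byte-order independent, so this also holds for a
 * mask in network byte order.
 */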

/*
 * Insert a record into the given hash bucket.
 *
 * First we get a free flow entry, then fill in all
 * the fields we can.
 *
 * TODO: consider dropping the hash mutex while filling in the datagram,
 * as was done in a previous version. Needs testing and profiling
 * to be sure.
 */
static __inline int
hash_insert(priv_p priv, struct flow_hash_entry *hsh, struct flow_rec *r,
	int plen, uint8_t tcp_flags)
253	struct flow_entry	*fle;
254	struct route ro;
255	struct sockaddr_in *sin;
256
257	mtx_assert(&hsh->mtx, MA_OWNED);
258
259	fle = uma_zalloc_arg(priv->zone, priv, M_NOWAIT);
260	if (fle == NULL) {
261		atomic_add_32(&priv->info.nfinfo_alloc_failed, 1);
262		return (ENOMEM);
263	}
264
265	/*
266	 * Now fle is totally ours. It is detached from all lists,
267	 * we can safely edit it.
268	 */
269
270	bcopy(r, &fle->f.r, sizeof(struct flow_rec));
271	fle->f.bytes = plen;
272	fle->f.packets = 1;
273	fle->f.tcp_flags = tcp_flags;
274
275	fle->f.first = fle->f.last = time_uptime;
276
277	/*
278	 * First we do route table lookup on destination address. So we can
279	 * fill in out_ifx, dst_mask, nexthop, and dst_as in future releases.
280	 */
281	bzero((caddr_t)&ro, sizeof(ro));
282	sin = (struct sockaddr_in *)&ro.ro_dst;
283	sin->sin_len = sizeof(*sin);
284	sin->sin_family = AF_INET;
285	sin->sin_addr = fle->f.r.r_dst;
286	rtalloc_ign(&ro, RTF_CLONING);
287	if (ro.ro_rt != NULL) {
288		struct rtentry *rt = ro.ro_rt;
289
290		fle->f.fle_o_ifx = rt->rt_ifp->if_index;
291
292		if (rt->rt_flags & RTF_GATEWAY &&
293		    rt->rt_gateway->sa_family == AF_INET)
294			fle->f.next_hop =
295			    ((struct sockaddr_in *)(rt->rt_gateway))->sin_addr;
296
297		if (rt_mask(rt))
298			fle->f.dst_mask =
299			    bit_count(((struct sockaddr_in *)rt_mask(rt))->sin_addr.s_addr);
300		else if (rt->rt_flags & RTF_HOST)
301			/* Give up. We can't determine mask :( */
302			fle->f.dst_mask = 32;
303
304		RTFREE(ro.ro_rt);
305	}
306
307	/* Do route lookup on source address, to fill in src_mask. */
308
309	bzero((caddr_t)&ro, sizeof(ro));
310	sin = (struct sockaddr_in *)&ro.ro_dst;
311	sin->sin_len = sizeof(*sin);
312	sin->sin_family = AF_INET;
313	sin->sin_addr = fle->f.r.r_src;
314	rtalloc_ign(&ro, RTF_CLONING);
315	if (ro.ro_rt != NULL) {
316		struct rtentry *rt = ro.ro_rt;
317
318		if (rt_mask(rt))
319			fle->f.src_mask =
320			    bit_count(((struct sockaddr_in *)rt_mask(rt))->sin_addr.s_addr);
321		else if (rt->rt_flags & RTF_HOST)
322			/* Give up. We can't determine mask :( */
323			fle->f.src_mask = 32;
324
325		RTFREE(ro.ro_rt);
326	}
327
328	/* Push new flow at the and of hash. */
329	TAILQ_INSERT_TAIL(&hsh->head, fle, fle_hash);
330
331	return (0);
332}

/*
 * Non-static functions called from ng_netflow.c.
 */

/* Allocate memory and set up flow cache. */
int
ng_netflow_cache_init(priv_p priv)
{
	struct flow_hash_entry	*hsh;
	int i;

	/* Initialize cache UMA zone. */
	priv->zone = uma_zcreate("NetFlow cache", sizeof(struct flow_entry),
	    uma_ctor_flow, uma_dtor_flow, NULL, NULL, UMA_ALIGN_CACHE, 0);
	uma_zone_set_max(priv->zone, CACHESIZE);

	/* Allocate hash. */
	MALLOC(priv->hash, struct flow_hash_entry *,
	    NBUCKETS * sizeof(struct flow_hash_entry),
	    M_NETFLOW_HASH, M_WAITOK | M_ZERO);

	if (priv->hash == NULL) {
		uma_zdestroy(priv->zone);
		return (ENOMEM);
	}

	/* Initialize hash. */
	for (i = 0, hsh = priv->hash; i < NBUCKETS; i++, hsh++) {
		mtx_init(&hsh->mtx, "hash mutex", NULL, MTX_DEF);
		TAILQ_INIT(&hsh->head);
	}

	mtx_init(&priv->export_mtx, "export dgram lock", NULL, MTX_DEF);

	return (0);
}

/* Free all flow cache memory. Called from the node close method. */
void
ng_netflow_cache_flush(priv_p priv)
{
	struct flow_entry	*fle, *fle1;
	struct flow_hash_entry	*hsh;
	item_p			item = NULL;
	int i;

	/*
	 * We are going to free probably billable data.
	 * Expire everything before freeing it.
	 * No locking is required, since the callout is already drained.
	 */
	for (hsh = priv->hash, i = 0; i < NBUCKETS; hsh++, i++)
		TAILQ_FOREACH_SAFE(fle, &hsh->head, fle_hash, fle1) {
			TAILQ_REMOVE(&hsh->head, fle, fle_hash);
			expire_flow(priv, &item, fle);
		}

	if (item != NULL)
		export_send(priv, item);

	uma_zdestroy(priv->zone);

	/* Destroy hash mutexes. */
	for (i = 0, hsh = priv->hash; i < NBUCKETS; i++, hsh++)
		mtx_destroy(&hsh->mtx);

	/* Free hash memory. */
	if (priv->hash)
		FREE(priv->hash, M_NETFLOW_HASH);

	mtx_destroy(&priv->export_mtx);
}

/* Insert a packet into the flow cache. */
int
ng_netflow_flow_add(priv_p priv, struct ip *ip, iface_p iface,
	struct ifnet *ifp)
{
	register struct flow_entry	*fle, *fle1;
	struct flow_hash_entry		*hsh;
	struct flow_rec		r;
	item_p			item = NULL;
	int			hlen, plen;
	int			error = 0;
	uint8_t			tcp_flags = 0;

	/* Try to fill in flow_rec r. */
	bzero(&r, sizeof(r));

	/* Check IP version. */
	if (ip->ip_v != IPVERSION)
		return (EINVAL);

	/* Verify minimum header length. */
	hlen = ip->ip_hl << 2;
	if (hlen < sizeof(struct ip))
		return (EINVAL);

	r.r_src = ip->ip_src;
	r.r_dst = ip->ip_dst;

	/* Save packet length. */
	plen = ntohs(ip->ip_len);

	r.r_ip_p = ip->ip_p;
	r.r_tos = ip->ip_tos;

	/* A configured in_ifx overrides the mbuf's. */
	if (iface->info.ifinfo_index == 0) {
		if (ifp != NULL)
			r.r_i_ifx = ifp->if_index;
	} else
		r.r_i_ifx = iface->info.ifinfo_index;

	/*
	 * XXX NOTE: only the first fragment of a fragmented TCP, UDP or
	 * ICMP packet will be recorded with proper s_port and d_port.
	 * Subsequent fragments will be recorded simply as IP packets with
	 * ip_proto = ip->ip_p and s_port, d_port set to zero.
	 * I know it looks like a bug, but I don't want to re-implement
	 * IP packet reassembly here. Anyway, the (in)famous trafd works
	 * this way, and nobody has complained yet :)
	 */
	if ((ip->ip_off & htons(IP_OFFMASK)) == 0)
		switch (r.r_ip_p) {
		case IPPROTO_TCP:
		{
			register struct tcphdr *tcp;

			tcp = (struct tcphdr *)((caddr_t)ip + hlen);
			r.r_sport = tcp->th_sport;
			r.r_dport = tcp->th_dport;
			tcp_flags = tcp->th_flags;
			break;
		}
		case IPPROTO_UDP:
			r.r_ports = *(uint32_t *)((caddr_t)ip + hlen);
			break;
		}

	/* Update node statistics. XXX: race... */
	priv->info.nfinfo_packets++;
	priv->info.nfinfo_bytes += plen;

	/* Find hash slot. */
	hsh = &priv->hash[ip_hash(&r)];

	mtx_lock(&hsh->mtx);

	/*
	 * Go through the hash chain and find our entry. If we encounter
	 * an entry that should be expired, purge it. We do a reverse
	 * search, since the most active entries are kept at the tail
	 * and most searches are for active entries.
	 */
	TAILQ_FOREACH_REVERSE_SAFE(fle, &hsh->head, fhead, fle_hash, fle1) {
		if (bcmp(&r, &fle->f.r, sizeof(struct flow_rec)) == 0)
			break;
		if ((INACTIVE(fle) && SMALL(fle)) || AGED(fle)) {
			TAILQ_REMOVE(&hsh->head, fle, fle_hash);
			expire_flow(priv, &item, fle);
			atomic_add_32(&priv->info.nfinfo_act_exp, 1);
		}
	}

	if (fle) {			/* An existing entry. */

		fle->f.bytes += plen;
		fle->f.packets++;
		fle->f.tcp_flags |= tcp_flags;
		fle->f.last = time_uptime;

		/*
		 * We have the following reasons to expire a flow actively:
		 * - it hit the active timeout
		 * - the TCP connection has closed
		 * - its byte counter is about to overflow
		 */
		if (tcp_flags & TH_FIN || tcp_flags & TH_RST || AGED(fle) ||
		    (fle->f.bytes >= (UINT_MAX - IF_MAXMTU))) {
			TAILQ_REMOVE(&hsh->head, fle, fle_hash);
			expire_flow(priv, &item, fle);
		} else {
			/*
			 * It is the newest entry; move it to the tail,
			 * if it isn't there already, so the next search
			 * will locate it quicker.
			 */
			if (fle != TAILQ_LAST(&hsh->head, fhead)) {
				TAILQ_REMOVE(&hsh->head, fle, fle_hash);
				TAILQ_INSERT_TAIL(&hsh->head, fle, fle_hash);
			}
		}
	} else				/* A new flow entry. */
		error = hash_insert(priv, hsh, &r, plen, tcp_flags);

	mtx_unlock(&hsh->mtx);

	if (item != NULL)
		return_export_dgram(priv, item);

	return (error);
}

/*
 * Return records from the cache to userland.
 *
 * TODO: consider NGM_READONLY
 * TODO: matching a particular IP should be done here, in the kernel
 */
int
ng_netflow_flow_show(priv_p priv, uint32_t last, struct ng_mesg *resp)
{
	struct flow_hash_entry *hsh;
	struct flow_entry *fle;
	struct ngnf_flows *data;
	int i;

	data = (struct ngnf_flows *)resp->data;
	data->last = 0;
	data->nentries = 0;

	/* Check if this is the first run. */
	if (last == 0) {
		hsh = priv->hash;
		i = 0;
	} else {
		if (last > NBUCKETS - 1)
			return (EINVAL);
		hsh = priv->hash + last;
		i = last;
	}

	/*
	 * We will transfer no more than NREC_AT_ONCE records; more data
	 * will come in the next message.
	 * We send the current hash index to userland, and userland should
	 * return it back to us. Then we will restart from that entry.
	 *
	 * The resulting cache snapshot is inaccurate for the
	 * following reasons:
	 *  - we skip locked hash buckets
	 *  - we bail out if someone else wants our bucket
	 *  - we skip the rest of a bucket when we hit NREC_AT_ONCE
	 */
	for (; i < NBUCKETS; hsh++, i++) {
		if (mtx_trylock(&hsh->mtx) == 0)
			continue;

		TAILQ_FOREACH(fle, &hsh->head, fle_hash) {
			if (hsh->mtx.mtx_lock & MTX_CONTESTED)
				break;

			bcopy(&fle->f, &(data->entries[data->nentries]),
			    sizeof(fle->f));
			data->nentries++;
			if (data->nentries == NREC_AT_ONCE) {
				mtx_unlock(&hsh->mtx);
				if (++i < NBUCKETS)
					data->last = i;
				return (0);
			}
		}
		mtx_unlock(&hsh->mtx);
	}

	return (0);
}

/* We have a full datagram in the item. Send it to the export hook. */
static int
export_send(priv_p priv, item_p item)
{
	struct mbuf *m = NGI_M(item);
	struct netflow_v5_export_dgram *dgram = mtod(m,
					struct netflow_v5_export_dgram *);
	struct netflow_v5_header *header = &dgram->header;
	struct timespec ts;
	uint16_t count = header->count;
	int error = 0;

	/* Fill in the mbuf header. */
	m->m_len = m->m_pkthdr.len = sizeof(struct netflow_v5_record) *
	   header->count + sizeof(struct netflow_v5_header);

	/* Fill in the export header. */
	header->sys_uptime = htonl(MILLIUPTIME(time_uptime));
	getnanotime(&ts);
	header->unix_secs  = htonl(ts.tv_sec);
	header->unix_nsecs = htonl(ts.tv_nsec);
	header->count = htons(header->count);
	header->flow_seq = htonl(atomic_load_acq_32(&priv->flow_seq));

	/*
	 * The flow sequence number is the number of the first record in
	 * the datagram, so it is updated after being put into the header.
	 * Use the host-order count saved above: header->count has already
	 * been byte-swapped at this point.
	 */
	atomic_add_32(&priv->flow_seq, count);

	if (priv->export != NULL)
		/* Should also NET_LOCK_GIANT(). */
		NG_FWD_ITEM_HOOK(error, item, priv->export);

	return (error);
}

/* Add an export record to the dgram. */
static int
export_add(item_p item, struct flow_entry *fle)
{
	struct netflow_v5_export_dgram *dgram = mtod(NGI_M(item),
					struct netflow_v5_export_dgram *);
	struct netflow_v5_header *header = &dgram->header;
	struct netflow_v5_record *rec;

	if (header->count == 0) {	/* first record */
		rec = &dgram->r[0];
		header->count = 1;
	} else {			/* continue filling datagram */
		rec = &dgram->r[header->count];
		header->count++;
	}

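	/*
	 * A NetFlow v5 datagram carries at most NETFLOW_V5_MAX_RECORDS
	 * flow records (30 for classic v5); the "datagram full" return
	 * value below tells expire_flow() to flush it with export_send().
	 */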
	KASSERT(header->count <= NETFLOW_V5_MAX_RECORDS,
	    ("ng_netflow: export too big"));

	/* Fill in the export record. */
	rec->src_addr = fle->f.r.r_src.s_addr;
	rec->dst_addr = fle->f.r.r_dst.s_addr;
	rec->next_hop = fle->f.next_hop.s_addr;
	rec->i_ifx    = htons(fle->f.fle_i_ifx);
	rec->o_ifx    = htons(fle->f.fle_o_ifx);
	rec->packets  = htonl(fle->f.packets);
	rec->octets   = htonl(fle->f.bytes);
	rec->first    = htonl(MILLIUPTIME(fle->f.first));
	rec->last     = htonl(MILLIUPTIME(fle->f.last));
	rec->s_port   = fle->f.r.r_sport;
	rec->d_port   = fle->f.r.r_dport;
	rec->flags    = fle->f.tcp_flags;
	rec->prot     = fle->f.r.r_ip_p;
	rec->tos      = fle->f.r.r_tos;
	rec->dst_mask = fle->f.dst_mask;
	rec->src_mask = fle->f.src_mask;

	/* Unsupported fields. */
	rec->src_as = rec->dst_as = 0;

	if (header->count == NETFLOW_V5_MAX_RECORDS)
		return (1); /* end of datagram */
	else
		return (0);
}

/* Periodic flow expiry run. */
void
ng_netflow_expire(void *arg)
{
	struct flow_entry	*fle, *fle1;
	struct flow_hash_entry	*hsh;
	priv_p			priv = (priv_p)arg;
	item_p			item = NULL;
	uint32_t		used;
	int			i;

	/*
	 * Go through the whole cache.
	 */
	for (hsh = priv->hash, i = 0; i < NBUCKETS; hsh++, i++) {
		/*
		 * Skip buckets that are already being worked on.
		 */
		if (mtx_trylock(&hsh->mtx) == 0)
			continue;

		used = atomic_load_acq_32(&priv->info.nfinfo_used);
		TAILQ_FOREACH_SAFE(fle, &hsh->head, fle_hash, fle1) {
			/*
			 * The interrupt thread wants this entry!
			 * Quick! Quick! Bail out!
			 */
			if (hsh->mtx.mtx_lock & MTX_CONTESTED)
				break;

			/*
			 * Don't expire aggressively while the predicted
			 * hash collision ratio is small.
			 */
			if (used <= (NBUCKETS*2) && !INACTIVE(fle))
				break;

			if ((INACTIVE(fle) && (SMALL(fle) ||
			    (used > (NBUCKETS*2)))) || AGED(fle)) {
				TAILQ_REMOVE(&hsh->head, fle, fle_hash);
				expire_flow(priv, &item, fle);
				used--;
				atomic_add_32(&priv->info.nfinfo_inact_exp, 1);
			}
		}
		mtx_unlock(&hsh->mtx);
	}

	if (item != NULL)
		return_export_dgram(priv, item);

	/* Schedule the next expiry run. */
	callout_reset(&priv->exp_callout, (1*hz), &ng_netflow_expire,
	    (void *)priv);
}