radix.c revision 3443
/*
 * Copyright (c) 1988, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)radix.c	8.2 (Berkeley) 1/4/94
 * $Id: radix.c,v 1.3 1994/08/02 07:46:29 davidg Exp $
 */

/*
 * Routines to build and maintain radix trees for routing lookups.
 */
#ifndef RNF_NORMAL
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#define	M_DONTWAIT M_NOWAIT
#ifdef	KERNEL
#include <sys/domain.h>
#endif
#endif

#include <net/radix.h>

int	max_keylen;
struct radix_mask *rn_mkfreelist;
struct radix_node_head *mask_rnhead;
static int gotOddMasks;
static char *maskedKey;
static char *rn_zeros, *rn_ones;

#define rn_masktop (mask_rnhead->rnh_treetop)
#undef Bcmp
#define Bcmp(a, b, l) (l == 0 ? 0 : bcmp((caddr_t)(a), (caddr_t)(b), (u_long)l))
/*
 * The data structure for the keys is a radix tree with one-way
 * branching removed.  The index rn_b at an internal node n represents a bit
 * position to be tested.  The tree is arranged so that all descendants
 * of a node n have keys whose bits all agree up to position rn_b - 1.
 * (We say the index of n is rn_b.)
 *
 * There is at least one descendant which has a one bit at position rn_b,
 * and at least one with a zero there.
 *
 * A route is determined by a pair of key and mask.  We require that the
 * bit-wise logical AND of the key and mask be the key.
 * We define the index of a route (associated with its mask) to be
 * the first bit number in the mask where 0 occurs (with bit number 0
 * representing the highest order bit).
 *
 * We say a mask is normal if every bit past the index of the mask is 0.
 * If a node n has a descendant (k, m) with index(m) == index(n) == rn_b,
 * and m is a normal mask, then the route applies to every descendant of n.
 * If index(m) < rn_b, this implies that the last few bits of k
 * before bit rn_b are all 0 (and hence the same is true of every descendant
 * of n), so the route applies to all descendants of the node as well.
 *
 * The present version of the code makes no use of normal routes,
 * but similar logic shows that a non-normal mask m such that
 * index(m) <= index(n) could potentially apply to many children of n.
 * Thus, for each non-host route, we attach its mask to a list at an internal
 * node as high in the tree as we can go.
 */
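/*
 * Illustrative example (added commentary, not from the original sources):
 * a mask whose leading 16 bits are ones and whose remaining bits are all
 * zero has its first zero at bit 16, so index(m) == 16; the route (k, m)
 * then applies to every key that agrees with k in bits 0 through 15.
 * A host route carries no mask at all, and its leaf keeps rn_b == -1.
 */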

struct radix_node *
rn_search(v_arg, head)
	void *v_arg;
	struct radix_node *head;
{
	register struct radix_node *x;
	register caddr_t v;

	for (x = head, v = v_arg; x->rn_b >= 0;) {
		if (x->rn_bmask & v[x->rn_off])
			x = x->rn_r;
		else
			x = x->rn_l;
	}
	return (x);
}

struct radix_node *
rn_search_m(v_arg, head, m_arg)
	struct radix_node *head;
	void *v_arg, *m_arg;
{
	register struct radix_node *x;
	register caddr_t v = v_arg, m = m_arg;

	for (x = head; x->rn_b >= 0;) {
		if ((x->rn_bmask & m[x->rn_off]) &&
		    (x->rn_bmask & v[x->rn_off]))
			x = x->rn_r;
		else
			x = x->rn_l;
	}
	return x;
}

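/*
 * Added descriptive comment: rn_refines(m, n) returns nonzero iff the mask
 * m is strictly more specific than the mask n, i.e. every bit set in n is
 * also set in m and the two masks are not identical.  Both arguments are
 * length-prefixed mask keys as stored in the mask tree.
 */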
int
rn_refines(m_arg, n_arg)
	void *m_arg, *n_arg;
{
	register caddr_t m = m_arg, n = n_arg;
	register caddr_t lim, lim2 = lim = n + *(u_char *)n;
	int longer = (*(u_char *)n++) - (int)(*(u_char *)m++);
	int masks_are_equal = 1;

	if (longer > 0)
		lim -= longer;
	while (n < lim) {
		if (*n & ~(*m))
			return 0;
		if (*n++ != *m++)
			masks_are_equal = 0;
	}
	while (n < lim2)
		if (*n++)
			return 0;
	if (masks_are_equal && (longer < 0))
		for (lim2 = m - longer; m < lim2; )
			if (*m++)
				return 1;
	return (!masks_are_equal);
}

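/*
 * Added descriptive comment: rn_match looks up the best route for a key.
 * It first descends to a leaf and checks for an exact (host) match, then
 * walks that leaf's rn_dupedkey chain applying each entry's mask, and
 * finally backtracks toward the root, trying the masks hung on each
 * ancestor's rn_mklist against a masked copy of the search key.
 */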
struct radix_node *
rn_match(v_arg, head)
	void *v_arg;
	struct radix_node_head *head;
{
	caddr_t v = v_arg;
	register struct radix_node *t = head->rnh_treetop, *x;
	register caddr_t cp = v, cp2, cp3;
	caddr_t cplim, mstart;
	struct radix_node *saved_t, *top = t;
	int off = t->rn_off, vlen = *(u_char *)cp, matched_off;

	/*
	 * Open code rn_search(v, top) to avoid overhead of extra
	 * subroutine call.
	 */
	for (; t->rn_b >= 0; ) {
		if (t->rn_bmask & cp[t->rn_off])
			t = t->rn_r;
		else
			t = t->rn_l;
	}
	/*
	 * See if we match exactly as a host destination
	 */
	cp += off; cp2 = t->rn_key + off; cplim = v + vlen;
	for (; cp < cplim; cp++, cp2++)
		if (*cp != *cp2)
			goto on1;
	/*
	 * This extra grot is in case we are explicitly asked
	 * to look up the default.  Ugh!
	 */
	if ((t->rn_flags & RNF_ROOT) && t->rn_dupedkey)
		t = t->rn_dupedkey;
	return t;
on1:
	matched_off = cp - v;
	saved_t = t;
	do {
	    if (t->rn_mask) {
		/*
		 * Even if we don't match exactly as a host,
		 * we may match if the leaf we wound up at is
		 * a route to a net.
		 */
		cp3 = matched_off + t->rn_mask;
		cp2 = matched_off + t->rn_key;
		for (; cp < cplim; cp++)
			if ((*cp2++ ^ *cp) & *cp3++)
				break;
		if (cp == cplim)
			return t;
		cp = matched_off + v;
	    }
	} while ((t = t->rn_dupedkey) != 0);
	t = saved_t;
	/* start searching up the tree */
	do {
		register struct radix_mask *m;
		t = t->rn_p;
		m = t->rn_mklist;
		if (m) {
			/*
			 * After doing measurements here, it may
			 * turn out to be faster to open code
			 * rn_search_m here instead of always
			 * copying and masking.
			 */
			off = min(t->rn_off, matched_off);
			mstart = maskedKey + off;
			do {
				cp2 = mstart;
				cp3 = m->rm_mask + off;
				for (cp = v + off; cp < cplim;)
					*cp2++ = *cp++ & *cp3++;
				x = rn_search(maskedKey, t);
				while (x && x->rn_mask != m->rm_mask)
					x = x->rn_dupedkey;
				if (x &&
				    (Bcmp(mstart, x->rn_key + off,
					vlen - off) == 0))
					    return x;
			} while ((m = m->rm_mklist) != 0);
		}
	} while (t != top);
	return 0;
}

#ifdef RN_DEBUG
int	rn_nodenum;
struct	radix_node *rn_clist;
int	rn_saveinfo;
int	rn_debug = 1;
#endif

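/*
 * Added descriptive comment: rn_newpair initializes the caller-supplied
 * pair of nodes as a leaf holding key v (nodes[0], rn_b == -1) and an
 * internal node testing bit b (nodes[1]) whose left child is that leaf,
 * and returns the internal node.
 */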
struct radix_node *
rn_newpair(v, b, nodes)
	void *v;
	int b;
	struct radix_node nodes[2];
{
	register struct radix_node *tt = nodes, *t = tt + 1;
	t->rn_b = b; t->rn_bmask = 0x80 >> (b & 7);
	t->rn_l = tt; t->rn_off = b >> 3;
	tt->rn_b = -1; tt->rn_key = (caddr_t)v; tt->rn_p = t;
	tt->rn_flags = t->rn_flags = RNF_ACTIVE;
#ifdef RN_DEBUG
	tt->rn_info = rn_nodenum++; t->rn_info = rn_nodenum++;
	tt->rn_twin = t; tt->rn_ybro = rn_clist; rn_clist = tt;
#endif
	return t;
}

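/*
 * Added descriptive comment: rn_insert adds the key v to the tree.  It
 * finds the first bit position at which v differs from the key already in
 * the tree, sets *dupentry and returns the existing leaf if there is no
 * such bit (the key is a duplicate), and otherwise splices the
 * caller-supplied node pair in at the appropriate depth and returns the
 * new leaf.
 */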
struct radix_node *
rn_insert(v_arg, head, dupentry, nodes)
	void *v_arg;
	struct radix_node_head *head;
	int *dupentry;
	struct radix_node nodes[2];
{
	caddr_t v = v_arg;
	struct radix_node *top = head->rnh_treetop;
	int head_off = top->rn_off, vlen = (int)*((u_char *)v);
	register struct radix_node *t = rn_search(v_arg, top);
	register caddr_t cp = v + head_off;
	register int b;
	struct radix_node *tt;
	/*
	 * find first bit at which v and t->rn_key differ
	 */
    {
	register caddr_t cp2 = t->rn_key + head_off;
	register int cmp_res;
	caddr_t cplim = v + vlen;

	while (cp < cplim)
		if (*cp2++ != *cp++)
			goto on1;
	*dupentry = 1;
	return t;
on1:
	*dupentry = 0;
	cmp_res = (cp[-1] ^ cp2[-1]) & 0xff;
	for (b = (cp - v) << 3; cmp_res; b--)
		cmp_res >>= 1;
    }
    {
	register struct radix_node *p, *x = top;
	cp = v;
	do {
		p = x;
		if (cp[x->rn_off] & x->rn_bmask)
			x = x->rn_r;
		else x = x->rn_l;
	} while (b > (unsigned) x->rn_b); /* x->rn_b < b && x->rn_b >= 0 */
#ifdef RN_DEBUG
	if (rn_debug)
		printf("Going In:\n"), traverse(p);
#endif
	t = rn_newpair(v_arg, b, nodes); tt = t->rn_l;
	if ((cp[p->rn_off] & p->rn_bmask) == 0)
		p->rn_l = t;
	else
		p->rn_r = t;
	x->rn_p = t; t->rn_p = p; /* frees x, p as temp vars below */
	if ((cp[t->rn_off] & t->rn_bmask) == 0) {
		t->rn_r = x;
	} else {
		t->rn_r = tt; t->rn_l = x;
	}
#ifdef RN_DEBUG
	if (rn_debug)
		printf("Coming out:\n"), traverse(p);
#endif
    }
	return (tt);
}

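/*
 * Added descriptive comment: rn_addmask enters a netmask in the separate
 * mask tree (mask_rnhead) so that equal masks share one copy.  If `search'
 * is set and an identical mask is already present, that entry is returned;
 * otherwise the mask is copied, inserted, and its node's rn_b is set to
 * -1 - index(mask), where the index is computed starting `skip' bytes in.
 */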
struct radix_node *
rn_addmask(n_arg, search, skip)
	int search, skip;
	void *n_arg;
{
	caddr_t netmask = (caddr_t)n_arg;
	register struct radix_node *x;
	register caddr_t cp, cplim;
	register int b, mlen, j;
	int maskduplicated;

	mlen = *(u_char *)netmask;
	if (search) {
		x = rn_search(netmask, rn_masktop);
		mlen = *(u_char *)netmask;
		if (Bcmp(netmask, x->rn_key, mlen) == 0)
			return (x);
	}
	R_Malloc(x, struct radix_node *, max_keylen + 2 * sizeof (*x));
	if (x == 0)
		return (0);
	Bzero(x, max_keylen + 2 * sizeof (*x));
	cp = (caddr_t)(x + 2);
	Bcopy(netmask, cp, mlen);
	netmask = cp;
	x = rn_insert(netmask, mask_rnhead, &maskduplicated, x);
	/*
	 * Calculate index of mask.
	 */
	cplim = netmask + mlen;
	for (cp = netmask + skip; cp < cplim; cp++)
		if (*(u_char *)cp != 0xff)
			break;
	b = (cp - netmask) << 3;
	if (cp != cplim) {
		if (*cp != 0) {
			gotOddMasks = 1;
			for (j = 0x80; j; b++, j >>= 1)
				if ((j & *cp) == 0)
					break;
		}
	}
	x->rn_b = -1 - b;
	return (x);
}

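/*
 * Added descriptive comment: rn_addroute inserts a route with key v_arg
 * and optional netmask n_arg, using the two caller-supplied radix_node
 * structures.  The mask is first looked up (or entered) in the mask tree
 * so duplicates share storage, the key is inserted or chained onto an
 * existing leaf with the same key, and the mask is then attached to the
 * rn_mklist of the highest ancestor to which it can apply.
 */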
struct radix_node *
rn_addroute(v_arg, n_arg, head, treenodes)
	void *v_arg, *n_arg;
	struct radix_node_head *head;
	struct radix_node treenodes[2];
{
	caddr_t v = (caddr_t)v_arg, netmask = (caddr_t)n_arg;
	register struct radix_node *t, *x = 0, *tt;
	struct radix_node *saved_tt, *top = head->rnh_treetop;
	short b = 0, b_leaf;
	int mlen, keyduplicated;
	caddr_t cplim;
	struct radix_mask *m, **mp;

	/*
	 * In dealing with non-contiguous masks, there may be
	 * many different routes which have the same mask.
	 * We will find it useful to have a unique pointer to
	 * the mask, which speeds up avoiding duplicate references
	 * at nodes and may save time in calculating indices.
	 */
	if (netmask) {
		x = rn_search(netmask, rn_masktop);
		mlen = *(u_char *)netmask;
		if (Bcmp(netmask, x->rn_key, mlen) != 0) {
			x = rn_addmask(netmask, 0, top->rn_off);
			if (x == 0)
				return (0);
		}
		netmask = x->rn_key;
		b = -1 - x->rn_b;
	}
	/*
	 * Deal with duplicated keys: attach node to previous instance
	 */
	saved_tt = tt = rn_insert(v, head, &keyduplicated, treenodes);
	if (keyduplicated) {
		do {
			if (tt->rn_mask == netmask)
				return (0);
			t = tt;
			if (netmask == 0 ||
			    (tt->rn_mask && rn_refines(netmask, tt->rn_mask)))
				break;
		} while ((tt = tt->rn_dupedkey) != 0);
		/*
		 * If the mask is not duplicated, we wouldn't
		 * find it among possible duplicate key entries
		 * anyway, so the above test doesn't hurt.
		 *
		 * We sort the masks for a duplicated key the same way as
		 * in a masklist -- most specific to least specific.
		 * This may require the unfortunate nuisance of relocating
		 * the head of the list.
		 */
		if (tt && t == saved_tt) {
			struct	radix_node *xx = x;
			/* link in at head of list */
			(tt = treenodes)->rn_dupedkey = t;
			tt->rn_flags = t->rn_flags;
			tt->rn_p = x = t->rn_p;
			if (x->rn_l == t) x->rn_l = tt; else x->rn_r = tt;
			saved_tt = tt; x = xx;
		} else {
			(tt = treenodes)->rn_dupedkey = t->rn_dupedkey;
			t->rn_dupedkey = tt;
		}
#ifdef RN_DEBUG
		t=tt+1; tt->rn_info = rn_nodenum++; t->rn_info = rn_nodenum++;
		tt->rn_twin = t; tt->rn_ybro = rn_clist; rn_clist = tt;
#endif
		t = saved_tt;
		tt->rn_key = (caddr_t) v;
		tt->rn_b = -1;
		tt->rn_flags = t->rn_flags & ~RNF_ROOT;
	}
	/*
	 * Put mask in tree.
	 */
	if (netmask) {
		tt->rn_mask = netmask;
		tt->rn_b = x->rn_b;
	}
	t = saved_tt->rn_p;
	b_leaf = -1 - t->rn_b;
	if (t->rn_r == saved_tt) x = t->rn_l; else x = t->rn_r;
	/* Promote general routes from below */
	if (x->rn_b < 0) {
		if (x->rn_mask && (x->rn_b >= b_leaf) && x->rn_mklist == 0) {
			MKGet(m);
			if (m) {
				Bzero(m, sizeof *m);
				m->rm_b = x->rn_b;
				m->rm_mask = x->rn_mask;
				x->rn_mklist = t->rn_mklist = m;
			}
		}
	} else if (x->rn_mklist) {
		/*
		 * Skip over masks whose index is > that of new node
		 */
		for (mp = &x->rn_mklist; (m = *mp) != 0; mp = &m->rm_mklist)
			if (m->rm_b >= b_leaf)
				break;
		t->rn_mklist = m; *mp = 0;
	}
	/* Add new route to highest possible ancestor's list */
	if ((netmask == 0) || (b > t->rn_b))
		return tt; /* can't lift at all */
	b_leaf = tt->rn_b;
	do {
		x = t;
		t = t->rn_p;
	} while (b <= t->rn_b && x != top);
	/*
	 * Search through routes associated with node to
	 * insert new route according to index.
	 * For nodes of equal index, place more specific
	 * masks first.
	 */
	cplim = netmask + mlen;
	for (mp = &x->rn_mklist; (m = *mp) != 0; mp = &m->rm_mklist) {
		if (m->rm_b < b_leaf)
			continue;
		if (m->rm_b > b_leaf)
			break;
		if (m->rm_mask == netmask) {
			m->rm_refs++;
			tt->rn_mklist = m;
			return tt;
		}
		if (rn_refines(netmask, m->rm_mask))
			break;
	}
	MKGet(m);
	if (m == 0) {
		printf("Mask for route not entered\n");
		return (tt);
	}
	Bzero(m, sizeof *m);
	m->rm_b = b_leaf;
	m->rm_mask = netmask;
	m->rm_mklist = *mp;
	*mp = m;
	tt->rn_mklist = m;
	return tt;
}

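/*
 * Added descriptive comment: rn_delete removes the leaf matching the given
 * key and netmask.  It first detaches the route's mask annotation from the
 * ancestor rn_mklist it was lifted to, then unlinks the leaf (or dupedkey
 * entry) and its companion internal node from the tree, returning the
 * deleted pair or 0 if no matching route exists.
 */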
struct radix_node *
rn_delete(v_arg, netmask_arg, head)
	void *v_arg, *netmask_arg;
	struct radix_node_head *head;
{
	register struct radix_node *t, *p, *x, *tt;
	struct radix_mask *m, *saved_m, **mp;
	struct radix_node *dupedkey, *saved_tt, *top;
	caddr_t v, netmask;
	int b, head_off, vlen;

	v = v_arg;
	netmask = netmask_arg;
	x = head->rnh_treetop;
	tt = rn_search(v, x);
	head_off = x->rn_off;
	vlen = *(u_char *)v;
	saved_tt = tt;
	top = x;
	if (tt == 0 ||
	    Bcmp(v + head_off, tt->rn_key + head_off, vlen - head_off))
		return (0);
	/*
	 * Delete our route from mask lists.
	 */
	dupedkey = tt->rn_dupedkey;
	if (dupedkey) {
		if (netmask)
			netmask = rn_search(netmask, rn_masktop)->rn_key;
		while (tt->rn_mask != netmask)
			if ((tt = tt->rn_dupedkey) == 0)
				return (0);
	}
	if (tt->rn_mask == 0 || (saved_m = m = tt->rn_mklist) == 0)
		goto on1;
	if (m->rm_mask != tt->rn_mask) {
		printf("rn_delete: inconsistent annotation\n");
		goto on1;
	}
	if (--m->rm_refs >= 0)
		goto on1;
	b = -1 - tt->rn_b;
	t = saved_tt->rn_p;
	if (b > t->rn_b)
		goto on1; /* Wasn't lifted at all */
	do {
		x = t;
		t = t->rn_p;
	} while (b <= t->rn_b && x != top);
	for (mp = &x->rn_mklist; (m = *mp) != 0; mp = &m->rm_mklist)
		if (m == saved_m) {
			*mp = m->rm_mklist;
			MKFree(m);
			break;
		}
	if (m == 0)
		printf("rn_delete: couldn't find our annotation\n");
on1:
	/*
	 * Eliminate us from tree
	 */
	if (tt->rn_flags & RNF_ROOT)
		return (0);
#ifdef RN_DEBUG
	/* Get us out of the creation list */
	for (t = rn_clist; t && t->rn_ybro != tt; t = t->rn_ybro) {}
	if (t) t->rn_ybro = tt->rn_ybro;
#endif
	t = tt->rn_p;
	if (dupedkey) {
		if (tt == saved_tt) {
			x = dupedkey; x->rn_p = t;
			if (t->rn_l == tt) t->rn_l = x; else t->rn_r = x;
		} else {
			for (x = p = saved_tt; p && p->rn_dupedkey != tt;)
				p = p->rn_dupedkey;
			if (p) p->rn_dupedkey = tt->rn_dupedkey;
			else printf("rn_delete: couldn't find us\n");
		}
		t = tt + 1;
		if (t->rn_flags & RNF_ACTIVE) {
#ifndef RN_DEBUG
			*++x = *t; p = t->rn_p;
#else
			b = t->rn_info; *++x = *t; t->rn_info = b; p = t->rn_p;
#endif
			if (p->rn_l == t) p->rn_l = x; else p->rn_r = x;
			x->rn_l->rn_p = x; x->rn_r->rn_p = x;
		}
		goto out;
	}
	if (t->rn_l == tt) x = t->rn_r; else x = t->rn_l;
	p = t->rn_p;
	if (p->rn_r == t) p->rn_r = x; else p->rn_l = x;
	x->rn_p = p;
	/*
	 * Demote routes attached to us.
	 */
	if (t->rn_mklist) {
		if (x->rn_b >= 0) {
			for (mp = &x->rn_mklist; (m = *mp) != 0;)
				mp = &m->rm_mklist;
			*mp = t->rn_mklist;
		} else {
			for (m = t->rn_mklist; m;) {
				struct radix_mask *mm = m->rm_mklist;
				if (m == x->rn_mklist && (--(m->rm_refs) < 0)) {
					x->rn_mklist = 0;
					MKFree(m);
				} else
					printf("%s %p at %p\n",
					    "rn_delete: Orphaned Mask", m, x);
				m = mm;
			}
		}
	}
	/*
	 * We may be holding an active internal node in the tree.
	 */
	x = tt + 1;
	if (t != x) {
#ifndef RN_DEBUG
		*t = *x;
#else
		b = t->rn_info; *t = *x; t->rn_info = b;
#endif
		t->rn_l->rn_p = t; t->rn_r->rn_p = t;
		p = x->rn_p;
		if (p->rn_l == x) p->rn_l = t; else p->rn_r = t;
	}
out:
	tt->rn_flags &= ~RNF_ACTIVE;
	tt[1].rn_flags &= ~RNF_ACTIVE;
	return (tt);
}

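/*
 * Added descriptive comment: rn_walktree applies f(rn, w) to every leaf in
 * the tree, including each entry on a leaf's rn_dupedkey chain.  Because f
 * is allowed to delete the leaf it is given, the next leaf is located
 * before the current batch of leaves is processed.
 */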
int
rn_walktree(h, f, w)
	struct radix_node_head *h;
	register int (*f)();
	void *w;
{
	int error;
	struct radix_node *base, *next;
	register struct radix_node *rn = h->rnh_treetop;
	/*
	 * This gets complicated because we may delete the node
	 * while applying the function f to it, so we need to calculate
	 * the successor node in advance.
	 */
	/* First time through node, go left */
	while (rn->rn_b >= 0)
		rn = rn->rn_l;
	for (;;) {
		base = rn;
		/* If at right child go back up, otherwise, go right */
		while (rn->rn_p->rn_r == rn && (rn->rn_flags & RNF_ROOT) == 0)
			rn = rn->rn_p;
		/* Find the next *leaf* since next node might vanish, too */
		for (rn = rn->rn_p->rn_r; rn->rn_b >= 0;)
			rn = rn->rn_l;
		next = rn;
		/* Process leaves */
		while ((rn = base) != 0) {
			base = rn->rn_dupedkey;
			if (!(rn->rn_flags & RNF_ROOT) && (error = (*f)(rn, w)))
				return (error);
		}
		rn = next;
		if (rn->rn_flags & RNF_ROOT)
			return (0);
	}
	/* NOTREACHED */
}

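/*
 * Added descriptive comment: rn_inithead allocates and initializes a tree
 * head whose root node tests bit `off' (a bit offset into the key), with
 * left and right end markers keyed by all-zeros and all-ones, and fills in
 * the rnh_* function pointers.  It returns 1 on success and 0 on failure.
 */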
int
rn_inithead(head, off)
	void **head;
	int off;
{
	register struct radix_node_head *rnh;
	register struct radix_node *t, *tt, *ttt;
	if (*head)
		return (1);
	R_Malloc(rnh, struct radix_node_head *, sizeof (*rnh));
	if (rnh == 0)
		return (0);
	Bzero(rnh, sizeof (*rnh));
	*head = rnh;
	t = rn_newpair(rn_zeros, off, rnh->rnh_nodes);
	ttt = rnh->rnh_nodes + 2;
	t->rn_r = ttt;
	t->rn_p = t;
	tt = t->rn_l;
	tt->rn_flags = t->rn_flags = RNF_ROOT | RNF_ACTIVE;
	tt->rn_b = -1 - off;
	*ttt = *tt;
	ttt->rn_key = rn_ones;
	rnh->rnh_addaddr = rn_addroute;
	rnh->rnh_deladdr = rn_delete;
	rnh->rnh_matchaddr = rn_match;
	rnh->rnh_walktree = rn_walktree;
	rnh->rnh_treetop = t;
	return (1);
}

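/*
 * Added descriptive comment: rn_init sizes max_keylen from the registered
 * domains (in the kernel), allocates the shared all-zeros key, all-ones
 * key, and masked-key scratch buffer, and creates the mask tree.  It must
 * run before any other routine in this file is used.
 */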
void
rn_init()
{
	char *cp, *cplim;
#ifdef KERNEL
	struct domain *dom;

	for (dom = domains; dom; dom = dom->dom_next)
		if (dom->dom_maxrtkey > max_keylen)
			max_keylen = dom->dom_maxrtkey;
#endif
	if (max_keylen == 0) {
		printf("rn_init: radix functions require max_keylen be set\n");
		return;
	}
	R_Malloc(rn_zeros, char *, 3 * max_keylen);
	if (rn_zeros == NULL)
		panic("rn_init");
	Bzero(rn_zeros, 3 * max_keylen);
	rn_ones = cp = rn_zeros + max_keylen;
	maskedKey = cplim = rn_ones + max_keylen;
	while (cp < cplim)
		*cp++ = -1;
	if (rn_inithead((void **)&mask_rnhead, 0) == 0)
		panic("rn_init 2");
}
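
/*
 * Usage sketch (editorial addition; the variable names and the bit offset
 * below are illustrative assumptions, not part of this file):
 *
 *	struct radix_node_head *rnh = 0;
 *	struct radix_node nodes[2];
 *
 *	rn_init();				// once max_keylen is known
 *	rn_inithead((void **)&rnh, 32);		// e.g. 32 = bit offset of sin_addr
 *	rn_addroute(key, mask, rnh, nodes);
 *	leaf = rn_match(dst, rnh);
 *
 * Keys and masks are length-prefixed byte strings: the first byte gives the
 * total length, as with a struct sockaddr.
 */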