gencode.c revision 39294
1/*
2 * Copyright (c) 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998
3 *	The Regents of the University of California.  All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that: (1) source code distributions
7 * retain the above copyright notice and this paragraph in its entirety, (2)
8 * distributions including binary code include the above copyright notice and
9 * this paragraph in its entirety in the documentation or other materials
10 * provided with the distribution, and (3) all advertising materials mentioning
11 * features or use of this software display the following acknowledgement:
12 * ``This product includes software developed by the University of California,
13 * Lawrence Berkeley Laboratory and its contributors.'' Neither the name of
14 * the University nor the names of its contributors may be used to endorse
15 * or promote products derived from this software without specific prior
16 * written permission.
17 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED
18 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF
19 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
20 */
21#ifndef lint
22static const char rcsid[] =
23    "@(#) $Header: gencode.c,v 1.94 98/07/12 13:06:49 leres Exp $ (LBL)";
24#endif
25
26#include <sys/types.h>
27#include <sys/socket.h>
28#include <sys/time.h>
29
30#if __STDC__
31struct mbuf;
32struct rtentry;
33#endif
34
35#include <net/if.h>
36#include <net/ethernet.h>
37
38#include <netinet/in.h>
39
40#include <stdlib.h>
41#include <memory.h>
42#include <setjmp.h>
43#include <net/if_llc.h>
44#if __STDC__
45#include <stdarg.h>
46#else
47#include <varargs.h>
48#endif
49
50#include "pcap-int.h"
51
52#include "ethertype.h"
53#include "nlpid.h"
54#include "gencode.h"
55#include "ppp.h"
56#include <pcap-namedb.h>
57
58#include "gnuc.h"
59#ifdef HAVE_OS_PROTO_H
60#include "os-proto.h"
61#endif
62
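/*
 * Build the opcode for a conditional jump that compares the accumulator
 * against a literal operand (BPF_K), e.g. JMP(BPF_JEQ) or JMP(BPF_JGT).
 * The jump targets themselves are resolved later by backpatch() and the
 * flattening pass.
 */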
63#define JMP(c) ((c)|BPF_JMP|BPF_K)
64
65/* Locals */
66static jmp_buf top_ctx;
67static pcap_t *bpf_pcap;
68
69/* XXX */
70#ifdef PCAP_FDDIPAD
71int	pcap_fddipad = PCAP_FDDIPAD;
72#else
73int	pcap_fddipad;
74#endif
75
76/* VARARGS */
77__dead void
78#if __STDC__
79bpf_error(const char *fmt, ...)
80#else
81bpf_error(fmt, va_alist)
82	const char *fmt;
83	va_dcl
84#endif
85{
86	va_list ap;
87
88#if __STDC__
89	va_start(ap, fmt);
90#else
91	va_start(ap);
92#endif
93	if (bpf_pcap != NULL)
94		(void)vsprintf(pcap_geterr(bpf_pcap), fmt, ap);
95	va_end(ap);
96	longjmp(top_ctx, 1);
97	/* NOTREACHED */
98}
99
100static void init_linktype(int);
101
102static int alloc_reg(void);
103static void free_reg(int);
104
105static struct block *root;
106
107/*
108 * We divvy out chunks of memory rather than call malloc each time so
109 * we don't have to worry about leaking memory.  It's probably
110 * not a big deal if all this memory is wasted, but if this ever
111 * goes into a library, that would probably not be a good idea.
112 */
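/*
 * Chunk k is allocated with CHUNK0SIZE << k bytes, so successive chunks
 * grow geometrically; with NCHUNKS == 16 the allocator can hand out
 * roughly 64 MB in total before newchunk() reports "out of memory".
 */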
113#define NCHUNKS 16
114#define CHUNK0SIZE 1024
115struct chunk {
116	u_int n_left;
117	void *m;
118};
119
120static struct chunk chunks[NCHUNKS];
121static int cur_chunk;
122
123static void *newchunk(u_int);
124static void freechunks(void);
125static inline struct block *new_block(int);
126static inline struct slist *new_stmt(int);
127static struct block *gen_retblk(int);
128static inline void syntax(void);
129
130static void backpatch(struct block *, struct block *);
131static void merge(struct block *, struct block *);
132static struct block *gen_cmp(u_int, u_int, bpf_int32);
133static struct block *gen_mcmp(u_int, u_int, bpf_int32, bpf_u_int32);
134static struct block *gen_bcmp(u_int, u_int, const u_char *);
135static struct block *gen_uncond(int);
136static inline struct block *gen_true(void);
137static inline struct block *gen_false(void);
138static struct block *gen_linktype(int);
139static struct block *gen_hostop(bpf_u_int32, bpf_u_int32, int, int, u_int, u_int);
140static struct block *gen_ehostop(const u_char *, int);
141static struct block *gen_fhostop(const u_char *, int);
142static struct block *gen_dnhostop(bpf_u_int32, int, u_int);
143static struct block *gen_host(bpf_u_int32, bpf_u_int32, int, int);
144static struct block *gen_gateway(const u_char *, bpf_u_int32 **, int, int);
145static struct block *gen_ipfrag(void);
146static struct block *gen_portatom(int, bpf_int32);
147struct block *gen_portop(int, int, int);
148static struct block *gen_port(int, int, int);
149static int lookup_proto(const char *, int);
150static struct block *gen_proto(int, int, int);
151static struct slist *xfer_to_x(struct arth *);
152static struct slist *xfer_to_a(struct arth *);
153static struct block *gen_len(int, int);
154
155static void *
156newchunk(n)
157	u_int n;
158{
159	struct chunk *cp;
160	int k, size;
161
162	/* XXX Round up to nearest long. */
163	n = (n + sizeof(long) - 1) & ~(sizeof(long) - 1);
164
165	cp = &chunks[cur_chunk];
166	if (n > cp->n_left) {
167		++cp, k = ++cur_chunk;
168		if (k >= NCHUNKS)
169			bpf_error("out of memory");
170		size = CHUNK0SIZE << k;
171		cp->m = (void *)malloc(size);
		if (cp->m == NULL)
			bpf_error("out of memory");
172		memset((char *)cp->m, 0, size);
173		cp->n_left = size;
174		if (n > size)
175			bpf_error("out of memory");
176	}
177	cp->n_left -= n;
178	return (void *)((char *)cp->m + cp->n_left);
179}
180
181static void
182freechunks()
183{
184	int i;
185
186	cur_chunk = 0;
187	for (i = 0; i < NCHUNKS; ++i)
188		if (chunks[i].m != NULL) {
189			free(chunks[i].m);
190			chunks[i].m = NULL;
191		}
192}
193
194/*
195 * A strdup whose allocations are freed after code generation is over.
196 */
197char *
198sdup(s)
199	register const char *s;
200{
201	int n = strlen(s) + 1;
202	char *cp = newchunk(n);
203
204	strcpy(cp, s);
205	return (cp);
206}
207
208static inline struct block *
209new_block(code)
210	int code;
211{
212	struct block *p;
213
214	p = (struct block *)newchunk(sizeof(*p));
215	p->s.code = code;
216	p->head = p;
217
218	return p;
219}
220
221static inline struct slist *
222new_stmt(code)
223	int code;
224{
225	struct slist *p;
226
227	p = (struct slist *)newchunk(sizeof(*p));
228	p->s.code = code;
229
230	return p;
231}
232
233static struct block *
234gen_retblk(v)
235	int v;
236{
237	struct block *b = new_block(BPF_RET|BPF_K);
238
239	b->s.k = v;
240	return b;
241}
242
243static inline void
244syntax()
245{
246	bpf_error("syntax error in filter expression");
247}
248
249static bpf_u_int32 netmask;
250static int snaplen;
251
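/*
 * Compile the filter expression in 'buf' into a BPF program: parse it
 * into a control-flow graph of blocks rooted at 'root', optionally run
 * the optimizer over it, then flatten the graph into the instruction
 * array returned in 'program'.  Errors longjmp back here via bpf_error().
 */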
252int
253pcap_compile(pcap_t *p, struct bpf_program *program,
254	     char *buf, int optimize, bpf_u_int32 mask)
255{
256	extern int n_errors;
257	int len;
258
259	n_errors = 0;
260	root = NULL;
261	bpf_pcap = p;
262	if (setjmp(top_ctx)) {
263		freechunks();
264		return (-1);
265	}
266
267	netmask = mask;
268	snaplen = pcap_snapshot(p);
269
270	lex_init(buf ? buf : "");
271	init_linktype(pcap_datalink(p));
272	(void)pcap_parse();
273
274	if (n_errors)
275		syntax();
276
277	if (root == NULL)
278		root = gen_retblk(snaplen);
279
280	if (optimize) {
281		bpf_optimize(&root);
282		if (root == NULL ||
283		    (root->s.code == (BPF_RET|BPF_K) && root->s.k == 0))
284			bpf_error("expression rejects all packets");
285	}
286	program->bf_insns = icode_to_fcode(root, &len);
287	program->bf_len = len;
288
289	freechunks();
290	return (0);
291}
292
293/*
294 * Backpatch the blocks in 'list' to 'target'.  The 'sense' field indicates
295 * which of the jt and jf fields has been resolved and which is a pointer
296 * back to another unresolved block (or nil).  At least one of the fields
297 * in each block is already resolved.
298 */
299static void
300backpatch(list, target)
301	struct block *list, *target;
302{
303	struct block *next;
304
305	while (list) {
306		if (!list->sense) {
307			next = JT(list);
308			JT(list) = target;
309		} else {
310			next = JF(list);
311			JF(list) = target;
312		}
313		list = next;
314	}
315}
316
317/*
318 * Merge the lists in b0 and b1, using the 'sense' field to indicate
319 * which of jt and jf is the link.
320 */
321static void
322merge(b0, b1)
323	struct block *b0, *b1;
324{
325	register struct block **p = &b0;
326
327	/* Find end of list. */
328	while (*p)
329		p = !((*p)->sense) ? &JT(*p) : &JF(*p);
330
331	/* Concatenate the lists. */
332	*p = b1;
333}
334
335void
336finish_parse(p)
337	struct block *p;
338{
339	backpatch(p, gen_retblk(snaplen));
340	p->sense = !p->sense;
341	backpatch(p, gen_retblk(0));
342	root = p->head;
343}
344
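/*
 * gen_and() and gen_or() splice two partially-built blocks together.
 * For "A and B", the exits of A that mean "true so far" are backpatched
 * to jump into B while A's failing exits join B's pending-false list;
 * for "A or B", A's failing exits are patched into B and its true exits
 * join B's pending-true list.  The 'sense' bit records which of jt/jf
 * is still unresolved; finish_parse() finally points the surviving
 * lists at the accept and reject return blocks.
 */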
345void
346gen_and(b0, b1)
347	struct block *b0, *b1;
348{
349	backpatch(b0, b1->head);
350	b0->sense = !b0->sense;
351	b1->sense = !b1->sense;
352	merge(b1, b0);
353	b1->sense = !b1->sense;
354	b1->head = b0->head;
355}
356
357void
358gen_or(b0, b1)
359	struct block *b0, *b1;
360{
361	b0->sense = !b0->sense;
362	backpatch(b0, b1->head);
363	b0->sense = !b0->sense;
364	merge(b1, b0);
365	b1->head = b0->head;
366}
367
368void
369gen_not(b)
370	struct block *b;
371{
372	b->sense = !b->sense;
373}
374
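/*
 * Build a block that loads 'size' bytes (BPF_B, BPF_H or BPF_W) from
 * absolute packet offset 'offset' and jumps on equality with 'v'.  Most
 * of the generators below are thin wrappers around this primitive;
 * gen_mcmp() additionally masks the loaded value before the comparison.
 */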
375static struct block *
376gen_cmp(offset, size, v)
377	u_int offset, size;
378	bpf_int32 v;
379{
380	struct slist *s;
381	struct block *b;
382
383	s = new_stmt(BPF_LD|BPF_ABS|size);
384	s->s.k = offset;
385
386	b = new_block(JMP(BPF_JEQ));
387	b->stmts = s;
388	b->s.k = v;
389
390	return b;
391}
392
393static struct block *
394gen_mcmp(offset, size, v, mask)
395	u_int offset, size;
396	bpf_int32 v;
397	bpf_u_int32 mask;
398{
399	struct block *b = gen_cmp(offset, size, v);
400	struct slist *s;
401
402	if (mask != 0xffffffff) {
403		s = new_stmt(BPF_ALU|BPF_AND|BPF_K);
404		s->s.k = mask;
405		b->stmts->next = s;
406	}
407	return b;
408}
409
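/*
 * Compare the 'size' bytes at 'offset' against the byte string 'v' by
 * chaining 32-, 16- and 8-bit gen_cmp() blocks from the end of the
 * string toward the front and ANDing them together.
 */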
410static struct block *
411gen_bcmp(offset, size, v)
412	register u_int offset, size;
413	register const u_char *v;
414{
415	register struct block *b, *tmp;
416
417	b = NULL;
418	while (size >= 4) {
419		register const u_char *p = &v[size - 4];
420		bpf_int32 w = ((bpf_int32)p[0] << 24) |
421		    ((bpf_int32)p[1] << 16) | ((bpf_int32)p[2] << 8) | p[3];
422
423		tmp = gen_cmp(offset + size - 4, BPF_W, w);
424		if (b != NULL)
425			gen_and(b, tmp);
426		b = tmp;
427		size -= 4;
428	}
429	while (size >= 2) {
430		register const u_char *p = &v[size - 2];
431		bpf_int32 w = ((bpf_int32)p[0] << 8) | p[1];
432
433		tmp = gen_cmp(offset + size - 2, BPF_H, w);
434		if (b != NULL)
435			gen_and(b, tmp);
436		b = tmp;
437		size -= 2;
438	}
439	if (size > 0) {
440		tmp = gen_cmp(offset, BPF_B, (bpf_int32)v[0]);
441		if (b != NULL)
442			gen_and(b, tmp);
443		b = tmp;
444	}
445	return b;
446}
447
448/*
449 * Various code constructs need to know the layout of the data link
450 * layer.  These variables give the necessary offsets.  off_linktype
451 * is set to -1 for no encapsulation, in which case IP is assumed.
452 */
453static u_int off_linktype;
454static u_int off_nl;
455static int linktype;
456
457static void
458init_linktype(type)
459	int type;
460{
461	linktype = type;
462
463	switch (type) {
464
465	case DLT_EN10MB:
466		off_linktype = 12;
467		off_nl = 14;
468		return;
469
470	case DLT_SLIP:
471		/*
472		 * SLIP doesn't have a link level type.  The 16 byte
473		 * header is hacked into our SLIP driver.
474		 */
475		off_linktype = -1;
476		off_nl = 16;
477		return;
478
479	case DLT_SLIP_BSDOS:
480		/* XXX this may be the same as the DLT_PPP_BSDOS case */
481		off_linktype = -1;
482		/* XXX end */
483		off_nl = 24;
484		return;
485
486	case DLT_NULL:
487		off_linktype = 0;
488		off_nl = 4;
489		return;
490
491	case DLT_PPP:
492		off_linktype = 2;
493		off_nl = 4;
494		return;
495
496	case DLT_PPP_BSDOS:
497		off_linktype = 5;
498		off_nl = 24;
499		return;
500
501	case DLT_FDDI:
502		/*
503		 * FDDI doesn't really have a link-level type field.
504		 * We assume that SSAP = SNAP is being used and pick
505		 * out the encapsulated Ethernet type.
506		 */
507		off_linktype = 19;
508#ifdef PCAP_FDDIPAD
509		off_linktype += pcap_fddipad;
510#endif
511		off_nl = 21;
512#ifdef PCAP_FDDIPAD
513		off_nl += pcap_fddipad;
514#endif
515		return;
516
517	case DLT_IEEE802:
518		off_linktype = 20;
519		off_nl = 22;
520		return;
521
522	case DLT_ATM_RFC1483:
523		/*
524		 * assume routed, non-ISO PDUs
525	 * (i.e., LLC = 0xAA-AA-03, OUI = 0x00-00-00)
526		 */
527		off_linktype = 6;
528		off_nl = 8;
529		return;
530
531	case DLT_RAW:
532		off_linktype = -1;
533		off_nl = 0;
534		return;
535	}
536	bpf_error("unknown data link type 0x%x", linktype);
537	/* NOTREACHED */
538}
539
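/*
 * Build a block whose outcome is constant: it loads !rsense into the
 * accumulator and tests it against zero (s.k of the jump is left zero
 * by the chunk allocator), so the test always succeeds when rsense is 1
 * and always fails when it is 0.  gen_true() and gen_false() wrap this.
 */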
540static struct block *
541gen_uncond(rsense)
542	int rsense;
543{
544	struct block *b;
545	struct slist *s;
546
547	s = new_stmt(BPF_LD|BPF_IMM);
548	s->s.k = !rsense;
549	b = new_block(JMP(BPF_JEQ));
550	b->stmts = s;
551
552	return b;
553}
554
555static inline struct block *
556gen_true()
557{
558	return gen_uncond(1);
559}
560
561static inline struct block *
562gen_false()
563{
564	return gen_uncond(0);
565}
566
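/*
 * Generate a test on the link-level protocol field (or an equivalent
 * check for link types that don't carry one) for 'proto'.
 */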
567static struct block *
568gen_linktype(proto)
569	register int proto;
570{
571	struct block *b0, *b1;
572
573	/* If we're not using encapsulation and checking for IP, we're done */
574	if (off_linktype == -1 && proto == ETHERTYPE_IP)
575		return gen_true();
576
577	switch (linktype) {
578
579	case DLT_SLIP:
580		return gen_false();
581
582	case DLT_PPP:
583		if (proto == ETHERTYPE_IP)
584			proto = PPP_IP;			/* XXX was 0x21 */
585		break;
586
587	case DLT_PPP_BSDOS:
588		switch (proto) {
589
590		case ETHERTYPE_IP:
591			b0 = gen_cmp(off_linktype, BPF_H, PPP_IP);
592			b1 = gen_cmp(off_linktype, BPF_H, PPP_VJC);
593			gen_or(b0, b1);
594			b0 = gen_cmp(off_linktype, BPF_H, PPP_VJNC);
595			gen_or(b1, b0);
596			return b0;
597
598		case ETHERTYPE_DN:
599			proto = PPP_DECNET;
600			break;
601
602		case ETHERTYPE_ATALK:
603			proto = PPP_APPLE;
604			break;
605
606		case ETHERTYPE_NS:
607			proto = PPP_NS;
608			break;
609		}
610		break;
611
612	case DLT_NULL:
613		/* XXX */
614		if (proto == ETHERTYPE_IP)
615			return (gen_cmp(0, BPF_W, (bpf_int32)htonl(AF_INET)));
616		else
617			return gen_false();
618	case DLT_EN10MB:
619		/*
620		 * Having to look at SAPs here is quite disgusting,
621		 * but given an internal architecture that _knows_ that
622		 * it's looking at IP on Ethernet...
623		 */
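		/*
		 * An 802.3 frame carries a length (<= ETHERMTU) where an
		 * Ethernet II frame carries a type, so the two blocks below
		 * check "length field, and DSAP and SSAP both ISO".
		 */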
624		if (proto == LLC_ISO_LSAP) {
625			struct block *b0, *b1;
626
627			b0 = gen_cmp(off_linktype, BPF_H, (long)ETHERMTU);
628			b0->s.code = JMP(BPF_JGT);
629			gen_not(b0);
630			b1 = gen_cmp(off_linktype + 2, BPF_H, (long)
631				     ((LLC_ISO_LSAP << 8) | LLC_ISO_LSAP));
632			gen_and(b0, b1);
633			return b1;
634		}
635		break;
636	}
637	return gen_cmp(off_linktype, BPF_H, (bpf_int32)proto);
638}
639
640static struct block *
641gen_hostop(addr, mask, dir, proto, src_off, dst_off)
642	bpf_u_int32 addr;
643	bpf_u_int32 mask;
644	int dir, proto;
645	u_int src_off, dst_off;
646{
647	struct block *b0, *b1;
648	u_int offset;
649
650	switch (dir) {
651
652	case Q_SRC:
653		offset = src_off;
654		break;
655
656	case Q_DST:
657		offset = dst_off;
658		break;
659
660	case Q_AND:
661		b0 = gen_hostop(addr, mask, Q_SRC, proto, src_off, dst_off);
662		b1 = gen_hostop(addr, mask, Q_DST, proto, src_off, dst_off);
663		gen_and(b0, b1);
664		return b1;
665
666	case Q_OR:
667	case Q_DEFAULT:
668		b0 = gen_hostop(addr, mask, Q_SRC, proto, src_off, dst_off);
669		b1 = gen_hostop(addr, mask, Q_DST, proto, src_off, dst_off);
670		gen_or(b0, b1);
671		return b1;
672
673	default:
674		abort();
675	}
676	b0 = gen_linktype(proto);
677	b1 = gen_mcmp(offset, BPF_W, (bpf_int32)addr, mask);
678	gen_and(b0, b1);
679	return b1;
680}
681
682static struct block *
683gen_ehostop(eaddr, dir)
684	register const u_char *eaddr;
685	register int dir;
686{
687	register struct block *b0, *b1;
688
689	switch (dir) {
690	case Q_SRC:
691		return gen_bcmp(6, 6, eaddr);
692
693	case Q_DST:
694		return gen_bcmp(0, 6, eaddr);
695
696	case Q_AND:
697		b0 = gen_ehostop(eaddr, Q_SRC);
698		b1 = gen_ehostop(eaddr, Q_DST);
699		gen_and(b0, b1);
700		return b1;
701
702	case Q_DEFAULT:
703	case Q_OR:
704		b0 = gen_ehostop(eaddr, Q_SRC);
705		b1 = gen_ehostop(eaddr, Q_DST);
706		gen_or(b0, b1);
707		return b1;
708	}
709	abort();
710	/* NOTREACHED */
711}
712
713/*
714 * Like gen_ehostop, but for DLT_FDDI
715 */
716static struct block *
717gen_fhostop(eaddr, dir)
718	register const u_char *eaddr;
719	register int dir;
720{
721	struct block *b0, *b1;
722
723	switch (dir) {
724	case Q_SRC:
725#ifdef PCAP_FDDIPAD
726		return gen_bcmp(6 + 1 + pcap_fddipad, 6, eaddr);
727#else
728		return gen_bcmp(6 + 1, 6, eaddr);
729#endif
730
731	case Q_DST:
732#ifdef PCAP_FDDIPAD
733		return gen_bcmp(0 + 1 + pcap_fddipad, 6, eaddr);
734#else
735		return gen_bcmp(0 + 1, 6, eaddr);
736#endif
737
738	case Q_AND:
739		b0 = gen_fhostop(eaddr, Q_SRC);
740		b1 = gen_fhostop(eaddr, Q_DST);
741		gen_and(b0, b1);
742		return b1;
743
744	case Q_DEFAULT:
745	case Q_OR:
746		b0 = gen_fhostop(eaddr, Q_SRC);
747		b1 = gen_fhostop(eaddr, Q_DST);
748		gen_or(b0, b1);
749		return b1;
750	}
751	abort();
752	/* NOTREACHED */
753}
754
755/*
756 * This is quite tricky because there may be pad bytes in front of the
757 * DECNET header, and then there are two possible data packet formats that
758 * carry both src and dst addresses, plus 5 packet types in a format that
759 * carries only the src node, plus 2 types that use a different format and
760 * also carry just the src node.
761 *
762 * Yuck.
763 *
764 * Instead of handling all of those correctly, we just look for data packets with
765 * 0 or 1 bytes of padding.  If you want to look at other packets, that
766 * will require a lot more hacking.
767 *
768 * To add support for filtering on DECNET "areas" (network numbers)
769 * one would want to add a "mask" argument to this routine.  That would
770 * make the filter even more inefficient, although one could be clever
771 * and not generate masking instructions if the mask is 0xFFFF.
772 */
773static struct block *
774gen_dnhostop(addr, dir, base_off)
775	bpf_u_int32 addr;
776	int dir;
777	u_int base_off;
778{
779	struct block *b0, *b1, *b2, *tmp;
780	u_int offset_lh;	/* offset if long header is received */
781	u_int offset_sh;	/* offset if short header is received */
782
783	switch (dir) {
784
785	case Q_DST:
786		offset_sh = 1;	/* follows flags */
787		offset_lh = 7;	/* flgs,darea,dsubarea,HIORD */
788		break;
789
790	case Q_SRC:
791		offset_sh = 3;	/* follows flags, dstnode */
792		offset_lh = 15;	/* flgs,darea,dsubarea,did,sarea,ssub,HIORD */
793		break;
794
795	case Q_AND:
796		/* Inefficient because we do our Calvinball dance twice */
797		b0 = gen_dnhostop(addr, Q_SRC, base_off);
798		b1 = gen_dnhostop(addr, Q_DST, base_off);
799		gen_and(b0, b1);
800		return b1;
801
802	case Q_OR:
803	case Q_DEFAULT:
804		/* Inefficient because we do our Calvinball dance twice */
805		b0 = gen_dnhostop(addr, Q_SRC, base_off);
806		b1 = gen_dnhostop(addr, Q_DST, base_off);
807		gen_or(b0, b1);
808		return b1;
809
810	default:
811		abort();
812	}
813	b0 = gen_linktype(ETHERTYPE_DN);
814	/* Check for pad = 1, long header case */
815	tmp = gen_mcmp(base_off + 2, BPF_H,
816	    (bpf_int32)ntohs(0x0681), (bpf_int32)ntohs(0x07FF));
817	b1 = gen_cmp(base_off + 2 + 1 + offset_lh,
818	    BPF_H, (bpf_int32)ntohs(addr));
819	gen_and(tmp, b1);
820	/* Check for pad = 0, long header case */
821	tmp = gen_mcmp(base_off + 2, BPF_B, (bpf_int32)0x06, (bpf_int32)0x7);
822	b2 = gen_cmp(base_off + 2 + offset_lh, BPF_H, (bpf_int32)ntohs(addr));
823	gen_and(tmp, b2);
824	gen_or(b2, b1);
825	/* Check for pad = 1, short header case */
826	tmp = gen_mcmp(base_off + 2, BPF_H,
827	    (bpf_int32)ntohs(0x0281), (bpf_int32)ntohs(0x07FF));
828	b2 = gen_cmp(base_off + 2 + 1 + offset_sh,
829	    BPF_H, (bpf_int32)ntohs(addr));
830	gen_and(tmp, b2);
831	gen_or(b2, b1);
832	/* Check for pad = 0, short header case */
833	tmp = gen_mcmp(base_off + 2, BPF_B, (bpf_int32)0x02, (bpf_int32)0x7);
834	b2 = gen_cmp(base_off + 2 + offset_sh, BPF_H, (bpf_int32)ntohs(addr));
835	gen_and(tmp, b2);
836	gen_or(b2, b1);
837
838	/* Combine with test for linktype */
839	gen_and(b0, b1);
840	return b1;
841}
842
843static struct block *
844gen_host(addr, mask, proto, dir)
845	bpf_u_int32 addr;
846	bpf_u_int32 mask;
847	int proto;
848	int dir;
849{
850	struct block *b0, *b1;
851
852	switch (proto) {
853
854	case Q_DEFAULT:
855		b0 = gen_host(addr, mask, Q_IP, dir);
856		b1 = gen_host(addr, mask, Q_ARP, dir);
857		gen_or(b0, b1);
858		b0 = gen_host(addr, mask, Q_RARP, dir);
859		gen_or(b1, b0);
860		return b0;
861
862	case Q_IP:
863		return gen_hostop(addr, mask, dir, ETHERTYPE_IP,
864				  off_nl + 12, off_nl + 16);
865
866	case Q_RARP:
867		return gen_hostop(addr, mask, dir, ETHERTYPE_REVARP,
868				  off_nl + 14, off_nl + 24);
869
870	case Q_ARP:
871		return gen_hostop(addr, mask, dir, ETHERTYPE_ARP,
872				  off_nl + 14, off_nl + 24);
873
874	case Q_TCP:
875		bpf_error("'tcp' modifier applied to host");
876
877	case Q_UDP:
878		bpf_error("'udp' modifier applied to host");
879
880	case Q_ICMP:
881		bpf_error("'icmp' modifier applied to host");
882
883	case Q_IGMP:
884		bpf_error("'igmp' modifier applied to host");
885
886	case Q_IGRP:
887		bpf_error("'igrp' modifier applied to host");
888
889	case Q_ATALK:
890		bpf_error("ATALK host filtering not implemented");
891
892	case Q_DECNET:
893		return gen_dnhostop(addr, dir, off_nl);
894
895	case Q_SCA:
896		bpf_error("SCA host filtering not implemented");
897
898	case Q_LAT:
899		bpf_error("LAT host filtering not implemented");
900
901	case Q_MOPDL:
902		bpf_error("MOPDL host filtering not implemented");
903
904	case Q_MOPRC:
905		bpf_error("MOPRC host filtering not implemented");
906
907	case Q_ISO:
908	        bpf_error("ISO host filtering not implemented");
909
910	default:
911		abort();
912	}
913	/* NOTREACHED */
914}
915
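/*
 * "gateway <host>": the packet's link-level source or destination
 * address must be the host's, while neither IP address may be one of
 * the host's, i.e. the host is forwarding the packet rather than
 * originating or receiving it.
 */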
916static struct block *
917gen_gateway(eaddr, alist, proto, dir)
918	const u_char *eaddr;
919	bpf_u_int32 **alist;
920	int proto;
921	int dir;
922{
923	struct block *b0, *b1, *tmp;
924
925	if (dir != 0)
926		bpf_error("direction applied to 'gateway'");
927
928	switch (proto) {
929	case Q_DEFAULT:
930	case Q_IP:
931	case Q_ARP:
932	case Q_RARP:
933		if (linktype == DLT_EN10MB)
934			b0 = gen_ehostop(eaddr, Q_OR);
935		else if (linktype == DLT_FDDI)
936			b0 = gen_fhostop(eaddr, Q_OR);
937		else
938			bpf_error(
939			    "'gateway' supported only on ethernet or FDDI");
940
941		b1 = gen_host(**alist++, 0xffffffff, proto, Q_OR);
942		while (*alist) {
943			tmp = gen_host(**alist++, 0xffffffff, proto, Q_OR);
944			gen_or(b1, tmp);
945			b1 = tmp;
946		}
947		gen_not(b1);
948		gen_and(b0, b1);
949		return b1;
950	}
951	bpf_error("illegal modifier of 'gateway'");
952	/* NOTREACHED */
953}
954
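/*
 * Map a bare protocol keyword ("ip", "tcp", "arp", ...) onto the
 * corresponding link-level and, where needed, IP-protocol tests.
 */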
955struct block *
956gen_proto_abbrev(proto)
957	int proto;
958{
959	struct block *b0, *b1;
960
961	switch (proto) {
962
963	case Q_TCP:
964		b0 = gen_linktype(ETHERTYPE_IP);
965		b1 = gen_cmp(off_nl + 9, BPF_B, (bpf_int32)IPPROTO_TCP);
966		gen_and(b0, b1);
967		break;
968
969	case Q_UDP:
970		b0 =  gen_linktype(ETHERTYPE_IP);
971		b1 = gen_cmp(off_nl + 9, BPF_B, (bpf_int32)IPPROTO_UDP);
972		gen_and(b0, b1);
973		break;
974
975	case Q_ICMP:
976		b0 =  gen_linktype(ETHERTYPE_IP);
977		b1 = gen_cmp(off_nl + 9, BPF_B, (bpf_int32)IPPROTO_ICMP);
978		gen_and(b0, b1);
979		break;
980
981	case Q_IGMP:
982		b0 =  gen_linktype(ETHERTYPE_IP);
983		b1 = gen_cmp(off_nl + 9, BPF_B, (bpf_int32)2);
984		gen_and(b0, b1);
985		break;
986
987#ifndef	IPPROTO_IGRP
988#define	IPPROTO_IGRP	9
989#endif
990	case Q_IGRP:
991		b0 = gen_linktype(ETHERTYPE_IP);
992		b1 = gen_cmp(off_nl + 9, BPF_B, (long)IPPROTO_IGRP);
993		gen_and(b0, b1);
994		break;
995
996	case Q_IP:
997		b1 =  gen_linktype(ETHERTYPE_IP);
998		break;
999
1000	case Q_ARP:
1001		b1 =  gen_linktype(ETHERTYPE_ARP);
1002		break;
1003
1004	case Q_RARP:
1005		b1 =  gen_linktype(ETHERTYPE_REVARP);
1006		break;
1007
1008	case Q_LINK:
1009		bpf_error("link layer applied in wrong context");
1010
1011	case Q_ATALK:
1012		b1 =  gen_linktype(ETHERTYPE_ATALK);
1013		break;
1014
1015	case Q_DECNET:
1016		b1 =  gen_linktype(ETHERTYPE_DN);
1017		break;
1018
1019	case Q_SCA:
1020		b1 =  gen_linktype(ETHERTYPE_SCA);
1021		break;
1022
1023	case Q_LAT:
1024		b1 =  gen_linktype(ETHERTYPE_LAT);
1025		break;
1026
1027	case Q_MOPDL:
1028		b1 =  gen_linktype(ETHERTYPE_MOPDL);
1029		break;
1030
1031	case Q_MOPRC:
1032		b1 =  gen_linktype(ETHERTYPE_MOPRC);
1033		break;
1034
1035	case Q_ISO:
1036	        b1 = gen_linktype(LLC_ISO_LSAP);
1037		break;
1038
1039	case Q_ESIS:
1040	        b1 = gen_proto(ISO9542_ESIS, Q_ISO, Q_DEFAULT);
1041		break;
1042
1043	case Q_ISIS:
1044	        b1 = gen_proto(ISO10589_ISIS, Q_ISO, Q_DEFAULT);
1045		break;
1046
1047	default:
1048		abort();
1049	}
1050	return b1;
1051}
1052
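/*
 * Match only unfragmented datagrams or the first fragment of a
 * fragmented datagram: the block tests the 13-bit fragment-offset
 * field and is then negated, since port numbers are only present when
 * that offset is zero.
 */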
1053static struct block *
1054gen_ipfrag()
1055{
1056	struct slist *s;
1057	struct block *b;
1058
1059	/* not ip frag */
1060	s = new_stmt(BPF_LD|BPF_H|BPF_ABS);
1061	s->s.k = off_nl + 6;
1062	b = new_block(JMP(BPF_JSET));
1063	b->s.k = 0x1fff;
1064	b->stmts = s;
1065	gen_not(b);
1066
1067	return b;
1068}
1069
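/*
 * Compare a 16-bit port at offset 'off' within the transport header.
 * The BPF_LDX|BPF_MSH|BPF_B statement loads 4 * (p[off_nl] & 0xf), i.e.
 * the IP header length in bytes, into the index register, so the
 * indirect load that follows is relative to the start of the TCP or
 * UDP header.
 */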
1070static struct block *
1071gen_portatom(off, v)
1072	int off;
1073	bpf_int32 v;
1074{
1075	struct slist *s;
1076	struct block *b;
1077
1078	s = new_stmt(BPF_LDX|BPF_MSH|BPF_B);
1079	s->s.k = off_nl;
1080
1081	s->next = new_stmt(BPF_LD|BPF_IND|BPF_H);
1082	s->next->s.k = off_nl + off;
1083
1084	b = new_block(JMP(BPF_JEQ));
1085	b->stmts = s;
1086	b->s.k = v;
1087
1088	return b;
1089}
1090
1091struct block *
1092gen_portop(port, proto, dir)
1093	int port, proto, dir;
1094{
1095	struct block *b0, *b1, *tmp;
1096
1097	/* ip proto 'proto' */
1098	tmp = gen_cmp(off_nl + 9, BPF_B, (bpf_int32)proto);
1099	b0 = gen_ipfrag();
1100	gen_and(tmp, b0);
1101
1102	switch (dir) {
1103	case Q_SRC:
1104		b1 = gen_portatom(0, (bpf_int32)port);
1105		break;
1106
1107	case Q_DST:
1108		b1 = gen_portatom(2, (bpf_int32)port);
1109		break;
1110
1111	case Q_OR:
1112	case Q_DEFAULT:
1113		tmp = gen_portatom(0, (bpf_int32)port);
1114		b1 = gen_portatom(2, (bpf_int32)port);
1115		gen_or(tmp, b1);
1116		break;
1117
1118	case Q_AND:
1119		tmp = gen_portatom(0, (bpf_int32)port);
1120		b1 = gen_portatom(2, (bpf_int32)port);
1121		gen_and(tmp, b1);
1122		break;
1123
1124	default:
1125		abort();
1126	}
1127	gen_and(b0, b1);
1128
1129	return b1;
1130}
1131
1132static struct block *
1133gen_port(port, ip_proto, dir)
1134	int port;
1135	int ip_proto;
1136	int dir;
1137{
1138	struct block *b0, *b1, *tmp;
1139
1140	/* ether proto ip */
1141	b0 =  gen_linktype(ETHERTYPE_IP);
1142
1143	switch (ip_proto) {
1144	case IPPROTO_UDP:
1145	case IPPROTO_TCP:
1146		b1 = gen_portop(port, ip_proto, dir);
1147		break;
1148
1149	case PROTO_UNDEF:
1150		tmp = gen_portop(port, IPPROTO_TCP, dir);
1151		b1 = gen_portop(port, IPPROTO_UDP, dir);
1152		gen_or(tmp, b1);
1153		break;
1154
1155	default:
1156		abort();
1157	}
1158	gen_and(b0, b1);
1159	return b1;
1160}
1161
1162static int
1163lookup_proto(name, proto)
1164	register const char *name;
1165	register int proto;
1166{
1167	register int v;
1168
1169	switch (proto) {
1170
1171	case Q_DEFAULT:
1172	case Q_IP:
1173		v = pcap_nametoproto(name);
1174		if (v == PROTO_UNDEF)
1175			bpf_error("unknown ip proto '%s'", name);
1176		break;
1177
1178	case Q_LINK:
1179		/* XXX should look up h/w protocol type based on linktype */
1180		v = pcap_nametoeproto(name);
1181		if (v == PROTO_UNDEF)
1182			bpf_error("unknown ether proto '%s'", name);
1183		break;
1184
1185	default:
1186		v = PROTO_UNDEF;
1187		break;
1188	}
1189	return v;
1190}
1191
1192static struct block *
1193gen_proto(v, proto, dir)
1194	int v;
1195	int proto;
1196	int dir;
1197{
1198	struct block *b0, *b1;
1199
1200	if (dir != Q_DEFAULT)
1201		bpf_error("direction applied to 'proto'");
1202
1203	switch (proto) {
1204	case Q_DEFAULT:
1205	case Q_IP:
1206		b0 = gen_linktype(ETHERTYPE_IP);
1207		b1 = gen_cmp(off_nl + 9, BPF_B, (bpf_int32)v);
1208		gen_and(b0, b1);
1209		return b1;
1210
1211	case Q_ISO:
1212		b0 = gen_linktype(LLC_ISO_LSAP);
1213		b1 = gen_cmp(off_nl + 3, BPF_B, (long)v);
1214		gen_and(b0, b1);
1215		return b1;
1216
1217	case Q_ARP:
1218		bpf_error("arp does not encapsulate another protocol");
1219		/* NOTREACHED */
1220
1221	case Q_RARP:
1222		bpf_error("rarp does not encapsulate another protocol");
1223		/* NOTREACHED */
1224
1225	case Q_ATALK:
1226		bpf_error("atalk encapsulation is not specifiable");
1227		/* NOTREACHED */
1228
1229	case Q_DECNET:
1230		bpf_error("decnet encapsulation is not specifiable");
1231		/* NOTREACHED */
1232
1233	case Q_SCA:
1234		bpf_error("sca does not encapsulate another protocol");
1235		/* NOTREACHED */
1236
1237	case Q_LAT:
1238		bpf_error("lat does not encapsulate another protocol");
1239		/* NOTREACHED */
1240
1241	case Q_MOPRC:
1242		bpf_error("moprc does not encapsulate another protocol");
1243		/* NOTREACHED */
1244
1245	case Q_MOPDL:
1246		bpf_error("mopdl does not encapsulate another protocol");
1247		/* NOTREACHED */
1248
1249	case Q_LINK:
1250		return gen_linktype(v);
1251
1252	case Q_UDP:
1253		bpf_error("'udp proto' is bogus");
1254		/* NOTREACHED */
1255
1256	case Q_TCP:
1257		bpf_error("'tcp proto' is bogus");
1258		/* NOTREACHED */
1259
1260	case Q_ICMP:
1261		bpf_error("'icmp proto' is bogus");
1262		/* NOTREACHED */
1263
1264	case Q_IGMP:
1265		bpf_error("'igmp proto' is bogus");
1266		/* NOTREACHED */
1267
1268	case Q_IGRP:
1269		bpf_error("'igrp proto' is bogus");
1270		/* NOTREACHED */
1271
1272	default:
1273		abort();
1274		/* NOTREACHED */
1275	}
1276	/* NOTREACHED */
1277}
1278
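/*
 * Generate the test for a name (host, network, port, gateway or
 * protocol name) under the qualifiers collected in 'q'.
 */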
1279struct block *
1280gen_scode(name, q)
1281	register const char *name;
1282	struct qual q;
1283{
1284	int proto = q.proto;
1285	int dir = q.dir;
1286	int tproto;
1287	u_char *eaddr;
1288	bpf_u_int32 mask, addr, **alist;
1289	struct block *b, *tmp;
1290	int port, real_proto;
1291
1292	switch (q.addr) {
1293
1294	case Q_NET:
1295		addr = pcap_nametonetaddr(name);
1296		if (addr == 0)
1297			bpf_error("unknown network '%s'", name);
1298		/* Left justify network addr and calculate its network mask */
1299		mask = 0xffffffff;
1300		while (addr && (addr & 0xff000000) == 0) {
1301			addr <<= 8;
1302			mask <<= 8;
1303		}
1304		return gen_host(addr, mask, proto, dir);
1305
1306	case Q_DEFAULT:
1307	case Q_HOST:
1308		if (proto == Q_LINK) {
1309			switch (linktype) {
1310
1311			case DLT_EN10MB:
1312				eaddr = pcap_ether_hostton(name);
1313				if (eaddr == NULL)
1314					bpf_error(
1315					    "unknown ether host '%s'", name);
1316				return gen_ehostop(eaddr, dir);
1317
1318			case DLT_FDDI:
1319				eaddr = pcap_ether_hostton(name);
1320				if (eaddr == NULL)
1321					bpf_error(
1322					    "unknown FDDI host '%s'", name);
1323				return gen_fhostop(eaddr, dir);
1324
1325			default:
1326				bpf_error(
1327			"only ethernet/FDDI supports link-level host name");
1328				break;
1329			}
1330		} else if (proto == Q_DECNET) {
1331			unsigned short dn_addr = __pcap_nametodnaddr(name);
1332			/*
1333			 * I don't think DECNET hosts can be multihomed, so
1334			 * there is no need to build up a list of addresses
1335			 */
1336			return (gen_host(dn_addr, 0, proto, dir));
1337		} else {
1338			alist = pcap_nametoaddr(name);
1339			if (alist == NULL || *alist == NULL)
1340				bpf_error("unknown host '%s'", name);
1341			tproto = proto;
1342			if (off_linktype == -1 && tproto == Q_DEFAULT)
1343				tproto = Q_IP;
1344			b = gen_host(**alist++, 0xffffffff, tproto, dir);
1345			while (*alist) {
1346				tmp = gen_host(**alist++, 0xffffffff,
1347					       tproto, dir);
1348				gen_or(b, tmp);
1349				b = tmp;
1350			}
1351			return b;
1352		}
1353
1354	case Q_PORT:
1355		if (proto != Q_DEFAULT && proto != Q_UDP && proto != Q_TCP)
1356			bpf_error("illegal qualifier of 'port'");
1357		if (pcap_nametoport(name, &port, &real_proto) == 0)
1358			bpf_error("unknown port '%s'", name);
1359		if (proto == Q_UDP) {
1360			if (real_proto == IPPROTO_TCP)
1361				bpf_error("port '%s' is tcp", name);
1362			else
1363				/* override PROTO_UNDEF */
1364				real_proto = IPPROTO_UDP;
1365		}
1366		if (proto == Q_TCP) {
1367			if (real_proto == IPPROTO_UDP)
1368				bpf_error("port '%s' is udp", name);
1369			else
1370				/* override PROTO_UNDEF */
1371				real_proto = IPPROTO_TCP;
1372		}
1373		return gen_port(port, real_proto, dir);
1374
1375	case Q_GATEWAY:
1376		eaddr = pcap_ether_hostton(name);
1377		if (eaddr == NULL)
1378			bpf_error("unknown ether host: %s", name);
1379
1380		alist = pcap_nametoaddr(name);
1381		if (alist == NULL || *alist == NULL)
1382			bpf_error("unknown host '%s'", name);
1383		return gen_gateway(eaddr, alist, proto, dir);
1384
1385	case Q_PROTO:
1386		real_proto = lookup_proto(name, proto);
1387		if (real_proto >= 0)
1388			return gen_proto(real_proto, proto, dir);
1389		else
1390			bpf_error("unknown protocol: %s", name);
1391
1392	case Q_UNDEF:
1393		syntax();
1394		/* NOTREACHED */
1395	}
1396	abort();
1397	/* NOTREACHED */
1398}
1399
1400struct block *
1401gen_mcode(s1, s2, masklen, q)
1402	register const char *s1, *s2;
1403	register int masklen;
1404	struct qual q;
1405{
1406	register int nlen, mlen;
1407	bpf_u_int32 n, m;
1408
1409	nlen = __pcap_atoin(s1, &n);
1410	/* Promote short ipaddr */
1411	n <<= 32 - nlen;
1412
1413	if (s2 != NULL) {
1414		mlen = __pcap_atoin(s2, &m);
1415		/* Promote short ipaddr */
1416		m <<= 32 - mlen;
1417		if ((n & ~m) != 0)
1418			bpf_error("non-network bits set in \"%s mask %s\"",
1419			    s1, s2);
1420	} else {
1421		/* Convert mask len to mask */
1422		if (masklen > 32)
1423			bpf_error("mask length must be <= 32");
1424		m = (masklen == 0) ? 0 : 0xffffffff << (32 - masklen);
1425		if ((n & ~m) != 0)
1426			bpf_error("non-network bits set in \"%s/%d\"",
1427			    s1, masklen);
1428	}
1429
1430	switch (q.addr) {
1431
1432	case Q_NET:
1433		return gen_host(n, m, q.proto, q.dir);
1434
1435	default:
1436		bpf_error("Mask syntax for networks only");
1437		/* NOTREACHED */
1438	}
1439}
1440
1441struct block *
1442gen_ncode(s, v, q)
1443	register const char *s;
1444	bpf_u_int32 v;
1445	struct qual q;
1446{
1447	bpf_u_int32 mask;
1448	int proto = q.proto;
1449	int dir = q.dir;
1450	register int vlen;
1451
1452	if (s == NULL)
1453		vlen = 32;
1454	else if (q.proto == Q_DECNET)
1455		vlen = __pcap_atodn(s, &v);
1456	else
1457		vlen = __pcap_atoin(s, &v);
1458
1459	switch (q.addr) {
1460
1461	case Q_DEFAULT:
1462	case Q_HOST:
1463	case Q_NET:
1464		if (proto == Q_DECNET)
1465			return gen_host(v, 0, proto, dir);
1466		else if (proto == Q_LINK) {
1467			bpf_error("illegal link layer address");
1468		} else {
1469			mask = 0xffffffff;
1470			if (s == NULL && q.addr == Q_NET) {
1471				/* Promote short net number */
1472				while (v && (v & 0xff000000) == 0) {
1473					v <<= 8;
1474					mask <<= 8;
1475				}
1476			} else {
1477				/* Promote short ipaddr */
1478				v <<= 32 - vlen;
1479				mask <<= 32 - vlen;
1480			}
1481			return gen_host(v, mask, proto, dir);
1482		}
1483
1484	case Q_PORT:
1485		if (proto == Q_UDP)
1486			proto = IPPROTO_UDP;
1487		else if (proto == Q_TCP)
1488			proto = IPPROTO_TCP;
1489		else if (proto == Q_DEFAULT)
1490			proto = PROTO_UNDEF;
1491		else
1492			bpf_error("illegal qualifier of 'port'");
1493
1494		return gen_port((int)v, proto, dir);
1495
1496	case Q_GATEWAY:
1497		bpf_error("'gateway' requires a name");
1498		/* NOTREACHED */
1499
1500	case Q_PROTO:
1501		return gen_proto((int)v, proto, dir);
1502
1503	case Q_UNDEF:
1504		syntax();
1505		/* NOTREACHED */
1506
1507	default:
1508		abort();
1509		/* NOTREACHED */
1510	}
1511	/* NOTREACHED */
1512}
1513
1514struct block *
1515gen_ecode(eaddr, q)
1516	register const u_char *eaddr;
1517	struct qual q;
1518{
1519	if ((q.addr == Q_HOST || q.addr == Q_DEFAULT) && q.proto == Q_LINK) {
1520		if (linktype == DLT_EN10MB)
1521			return gen_ehostop(eaddr, (int)q.dir);
1522		if (linktype == DLT_FDDI)
1523			return gen_fhostop(eaddr, (int)q.dir);
1524	}
1525	bpf_error("ethernet address used in non-ether expression");
1526	/* NOTREACHED */
1527}
1528
1529void
1530sappend(s0, s1)
1531	struct slist *s0, *s1;
1532{
1533	/*
1534	 * This is definitely not the best way to do this, but the
1535	 * lists will rarely get long.
1536	 */
1537	while (s0->next)
1538		s0 = s0->next;
1539	s0->next = s1;
1540}
1541
1542static struct slist *
1543xfer_to_x(a)
1544	struct arth *a;
1545{
1546	struct slist *s;
1547
1548	s = new_stmt(BPF_LDX|BPF_MEM);
1549	s->s.k = a->regno;
1550	return s;
1551}
1552
1553static struct slist *
1554xfer_to_a(a)
1555	struct arth *a;
1556{
1557	struct slist *s;
1558
1559	s = new_stmt(BPF_LD|BPF_MEM);
1560	s->s.k = a->regno;
1561	return s;
1562}
1563
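/*
 * Generate code for a packet data accessor such as "ip[0]": the loaded
 * value ends up in the scratch memory register recorded in the returned
 * arth, and any protocol check needed to make the load meaningful is
 * accumulated in index->b, to be ANDed into the enclosing relation by
 * gen_relation().
 */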
1564struct arth *
1565gen_load(proto, index, size)
1566	int proto;
1567	struct arth *index;
1568	int size;
1569{
1570	struct slist *s, *tmp;
1571	struct block *b;
1572	int regno = alloc_reg();
1573
1574	free_reg(index->regno);
1575	switch (size) {
1576
1577	default:
1578		bpf_error("data size must be 1, 2, or 4");
1579
1580	case 1:
1581		size = BPF_B;
1582		break;
1583
1584	case 2:
1585		size = BPF_H;
1586		break;
1587
1588	case 4:
1589		size = BPF_W;
1590		break;
1591	}
1592	switch (proto) {
1593	default:
1594		bpf_error("unsupported index operation");
1595
1596	case Q_LINK:
1597		s = xfer_to_x(index);
1598		tmp = new_stmt(BPF_LD|BPF_IND|size);
1599		sappend(s, tmp);
1600		sappend(index->s, s);
1601		break;
1602
1603	case Q_IP:
1604	case Q_ARP:
1605	case Q_RARP:
1606	case Q_ATALK:
1607	case Q_DECNET:
1608	case Q_SCA:
1609	case Q_LAT:
1610	case Q_MOPRC:
1611	case Q_MOPDL:
1612		/* XXX Note that we assume a fixed link header here. */
1613		s = xfer_to_x(index);
1614		tmp = new_stmt(BPF_LD|BPF_IND|size);
1615		tmp->s.k = off_nl;
1616		sappend(s, tmp);
1617		sappend(index->s, s);
1618
1619		b = gen_proto_abbrev(proto);
1620		if (index->b)
1621			gen_and(index->b, b);
1622		index->b = b;
1623		break;
1624
1625	case Q_TCP:
1626	case Q_UDP:
1627	case Q_ICMP:
1628	case Q_IGMP:
1629	case Q_IGRP:
1630		s = new_stmt(BPF_LDX|BPF_MSH|BPF_B);
1631		s->s.k = off_nl;
1632		sappend(s, xfer_to_a(index));
1633		sappend(s, new_stmt(BPF_ALU|BPF_ADD|BPF_X));
1634		sappend(s, new_stmt(BPF_MISC|BPF_TAX));
1635		sappend(s, tmp = new_stmt(BPF_LD|BPF_IND|size));
1636		tmp->s.k = off_nl;
1637		sappend(index->s, s);
1638
1639		gen_and(gen_proto_abbrev(proto), b = gen_ipfrag());
1640		if (index->b)
1641			gen_and(index->b, b);
1642		index->b = b;
1643		break;
1644	}
1645	index->regno = regno;
1646	s = new_stmt(BPF_ST);
1647	s->s.k = regno;
1648	sappend(index->s, s);
1649
1650	return index;
1651}
1652
1653struct block *
1654gen_relation(code, a0, a1, reversed)
1655	int code;
1656	struct arth *a0, *a1;
1657	int reversed;
1658{
1659	struct slist *s0, *s1, *s2;
1660	struct block *b, *tmp;
1661
1662	s0 = xfer_to_x(a1);
1663	s1 = xfer_to_a(a0);
1664	s2 = new_stmt(BPF_ALU|BPF_SUB|BPF_X);
1665	b = new_block(JMP(code));
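	/*
	 * BPF conditional jumps are unsigned, so ordered comparisons are
	 * done by subtracting the operands and testing the sign bit of
	 * the difference (hence k = 0x80000000), with the branch sense
	 * flipped to compensate.
	 */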
1666	if (code == BPF_JGT || code == BPF_JGE) {
1667		reversed = !reversed;
1668		b->s.k = 0x80000000;
1669	}
1670	if (reversed)
1671		gen_not(b);
1672
1673	sappend(s1, s2);
1674	sappend(s0, s1);
1675	sappend(a1->s, s0);
1676	sappend(a0->s, a1->s);
1677
1678	b->stmts = a0->s;
1679
1680	free_reg(a0->regno);
1681	free_reg(a1->regno);
1682
1683	/* 'and' together protocol checks */
1684	if (a0->b) {
1685		if (a1->b) {
1686			gen_and(a0->b, tmp = a1->b);
1687		}
1688		else
1689			tmp = a0->b;
1690	} else
1691		tmp = a1->b;
1692
1693	if (tmp)
1694		gen_and(tmp, b);
1695
1696	return b;
1697}
1698
1699struct arth *
1700gen_loadlen()
1701{
1702	int regno = alloc_reg();
1703	struct arth *a = (struct arth *)newchunk(sizeof(*a));
1704	struct slist *s;
1705
1706	s = new_stmt(BPF_LD|BPF_LEN);
1707	s->next = new_stmt(BPF_ST);
1708	s->next->s.k = regno;
1709	a->s = s;
1710	a->regno = regno;
1711
1712	return a;
1713}
1714
1715struct arth *
1716gen_loadi(val)
1717	int val;
1718{
1719	struct arth *a;
1720	struct slist *s;
1721	int reg;
1722
1723	a = (struct arth *)newchunk(sizeof(*a));
1724
1725	reg = alloc_reg();
1726
1727	s = new_stmt(BPF_LD|BPF_IMM);
1728	s->s.k = val;
1729	s->next = new_stmt(BPF_ST);
1730	s->next->s.k = reg;
1731	a->s = s;
1732	a->regno = reg;
1733
1734	return a;
1735}
1736
1737struct arth *
1738gen_neg(a)
1739	struct arth *a;
1740{
1741	struct slist *s;
1742
1743	s = xfer_to_a(a);
1744	sappend(a->s, s);
1745	s = new_stmt(BPF_ALU|BPF_NEG);
1746	s->s.k = 0;
1747	sappend(a->s, s);
1748	s = new_stmt(BPF_ST);
1749	s->s.k = a->regno;
1750	sappend(a->s, s);
1751
1752	return a;
1753}
1754
1755struct arth *
1756gen_arth(code, a0, a1)
1757	int code;
1758	struct arth *a0, *a1;
1759{
1760	struct slist *s0, *s1, *s2;
1761
1762	s0 = xfer_to_x(a1);
1763	s1 = xfer_to_a(a0);
1764	s2 = new_stmt(BPF_ALU|BPF_X|code);
1765
1766	sappend(s1, s2);
1767	sappend(s0, s1);
1768	sappend(a1->s, s0);
1769	sappend(a0->s, a1->s);
1770
1771	free_reg(a1->regno);
1772
1773	s0 = new_stmt(BPF_ST);
1774	a0->regno = s0->s.k = alloc_reg();
1775	sappend(a0->s, s0);
1776
1777	return a0;
1778}
1779
1780/*
1781 * Here we handle simple allocation of the scratch registers.
1782 * If too many registers are alloc'd, the allocator punts.
1783 */
1784static int regused[BPF_MEMWORDS];
1785static int curreg;
1786
1787/*
1788 * Return the next free register.
1789 */
1790static int
1791alloc_reg()
1792{
1793	int n = BPF_MEMWORDS;
1794
1795	while (--n >= 0) {
1796		if (regused[curreg])
1797			curreg = (curreg + 1) % BPF_MEMWORDS;
1798		else {
1799			regused[curreg] = 1;
1800			return curreg;
1801		}
1802	}
1803	bpf_error("too many registers needed to evaluate expression");
1804	/* NOTREACHED */
1805}
1806
1807/*
1808 * Return a register to the table so it can
1809 * be used later.
1810 */
1811static void
1812free_reg(n)
1813	int n;
1814{
1815	regused[n] = 0;
1816}
1817
1818static struct block *
1819gen_len(jmp, n)
1820	int jmp, n;
1821{
1822	struct slist *s;
1823	struct block *b;
1824
1825	s = new_stmt(BPF_LD|BPF_LEN);
1826	b = new_block(JMP(jmp));
1827	b->stmts = s;
1828	b->s.k = n;
1829
1830	return b;
1831}
1832
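/*
 * Actually, this is greater than or equal (see the matching note on
 * gen_less() below); tcpdump's "greater" and "less" are inclusive.
 */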
1833struct block *
1834gen_greater(n)
1835	int n;
1836{
1837	return gen_len(BPF_JGE, n);
1838}
1839
1840/*
1841 * Actually, this is less than or equal.
1842 */
1843
1844struct block *
1845gen_less(n)
1846	int n;
1847{
1848	struct block *b;
1849
1850	b = gen_len(BPF_JGT, n);
1851	gen_not(b);
1852
1853	return b;
1854}
1855
1856struct block *
1857gen_byteop(op, idx, val)
1858	int op, idx, val;
1859{
1860	struct block *b;
1861	struct slist *s;
1862
1863	switch (op) {
1864	default:
1865		abort();
1866
1867	case '=':
1868		return gen_cmp((u_int)idx, BPF_B, (bpf_int32)val);
1869
1870	case '<':
1871		b = gen_cmp((u_int)idx, BPF_B, (bpf_int32)val);
1872		b->s.code = JMP(BPF_JGE);
1873		gen_not(b);
1874		return b;
1875
1876	case '>':
1877		b = gen_cmp((u_int)idx, BPF_B, (bpf_int32)val);
1878		b->s.code = JMP(BPF_JGT);
1879		return b;
1880
1881	case '|':
1882		s = new_stmt(BPF_ALU|BPF_OR|BPF_K);
1883		break;
1884
1885	case '&':
1886		s = new_stmt(BPF_ALU|BPF_AND|BPF_K);
1887		break;
1888	}
1889	s->s.k = val;
1890	b = new_block(JMP(BPF_JEQ));
1891	b->stmts = s;
1892	gen_not(b);
1893
1894	return b;
1895}
1896
1897struct block *
1898gen_broadcast(proto)
1899	int proto;
1900{
1901	bpf_u_int32 hostmask;
1902	struct block *b0, *b1, *b2;
1903	static u_char ebroadcast[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
1904
1905	switch (proto) {
1906
1907	case Q_DEFAULT:
1908	case Q_LINK:
1909		if (linktype == DLT_EN10MB)
1910			return gen_ehostop(ebroadcast, Q_DST);
1911		if (linktype == DLT_FDDI)
1912			return gen_fhostop(ebroadcast, Q_DST);
1913		bpf_error("not a broadcast link");
1914		break;
1915
1916	case Q_IP:
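		/*
		 * Match IP destinations whose host part, under the netmask
		 * handed to pcap_compile(), is either all zeros (the old
		 * all-zeros broadcast) or all ones.
		 */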
1917		b0 = gen_linktype(ETHERTYPE_IP);
1918		hostmask = ~netmask;
1919		b1 = gen_mcmp(off_nl + 16, BPF_W, (bpf_int32)0, hostmask);
1920		b2 = gen_mcmp(off_nl + 16, BPF_W,
1921			      (bpf_int32)(~0 & hostmask), hostmask);
1922		gen_or(b1, b2);
1923		gen_and(b0, b2);
1924		return b2;
1925	}
1926	bpf_error("only ether/ip broadcast filters supported");
1927}
1928
1929struct block *
1930gen_multicast(proto)
1931	int proto;
1932{
1933	register struct block *b0, *b1;
1934	register struct slist *s;
1935
1936	switch (proto) {
1937
1938	case Q_DEFAULT:
1939	case Q_LINK:
1940		if (linktype == DLT_EN10MB) {
1941			/* ether[0] & 1 != 0 */
1942			s = new_stmt(BPF_LD|BPF_B|BPF_ABS);
1943			s->s.k = 0;
1944			b0 = new_block(JMP(BPF_JSET));
1945			b0->s.k = 1;
1946			b0->stmts = s;
1947			return b0;
1948		}
1949
1950		if (linktype == DLT_FDDI) {
1951			/* XXX TEST THIS: MIGHT NOT PORT PROPERLY XXX */
1952			/* fddi[1] & 1 != 0 */
1953			s = new_stmt(BPF_LD|BPF_B|BPF_ABS);
1954			s->s.k = 1;
1955			b0 = new_block(JMP(BPF_JSET));
1956			b0->s.k = 1;
1957			b0->stmts = s;
1958			return b0;
1959		}
1960		/* Link not known to support multicasts */
1961		break;
1962
1963	case Q_IP:
1964		b0 = gen_linktype(ETHERTYPE_IP);
1965		b1 = gen_cmp(off_nl + 16, BPF_B, (bpf_int32)224);
1966		b1->s.code = JMP(BPF_JGE);
1967		gen_and(b0, b1);
1968		return b1;
1969	}
1970	bpf_error("only IP multicast filters supported on ethernet/FDDI");
1971}
1972
1973/*
1974 * Generate code for inbound/outbound.  It's here so we can
1975 * make it link-type specific.  'dir' = 0 implies "inbound",
1976 * = 1 implies "outbound".
1977 */
1978struct block *
1979gen_inbound(dir)
1980	int dir;
1981{
1982	register struct block *b0;
1983
1984	b0 = gen_relation(BPF_JEQ,
1985			  gen_load(Q_LINK, gen_loadi(0), 1),
1986			  gen_loadi(0),
1987			  dir);
1988	return (b0);
1989}
1990