optimize.c revision 75107
/*
 * Copyright (c) 1988, 1989, 1990, 1991, 1993, 1994, 1995, 1996
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that: (1) source code distributions
 * retain the above copyright notice and this paragraph in its entirety, (2)
 * distributions including binary code include the above copyright notice and
 * this paragraph in its entirety in the documentation or other materials
 * provided with the distribution, and (3) all advertising materials mentioning
 * features or use of this software display the following acknowledgement:
 * ``This product includes software developed by the University of California,
 * Lawrence Berkeley Laboratory and its contributors.'' Neither the name of
 * the University nor the names of its contributors may be used to endorse
 * or promote products derived from this software without specific prior
 * written permission.
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 *
 *  Optimization module for tcpdump intermediate representation.
 */
#ifndef lint
static const char rcsid[] =
    "@(#) $Header: /tcpdump/master/libpcap/optimize.c,v 1.67 2000/11/19 13:37:20 itojun Exp $ (LBL)";
#endif

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <sys/types.h>
#include <sys/time.h>

#include <stdio.h>
#include <stdlib.h>
#include <memory.h>

#include <errno.h>

#include "pcap-int.h"

#include "gencode.h"

#ifdef HAVE_OS_PROTO_H
#include "os-proto.h"
#endif

#ifdef BDEBUG
extern int dflag;
#endif

#define A_ATOM BPF_MEMWORDS
#define X_ATOM (BPF_MEMWORDS+1)

#define NOP -1

/*
 * This define is used to represent *both* the accumulator and
 * x register in use-def computations.
 * Currently, the use-def code assumes only one definition per instruction.
 */
#define AX_ATOM N_ATOMS

/*
 * A flag to indicate that further optimization is needed.
 * Iterative passes are continued until a given pass yields no
 * branch movement.
 */
static int done;

/*
 * A block is marked if and only if its mark equals the current mark.
 * Rather than traverse the code array, marking each item, 'cur_mark' is
 * incremented.  This automatically makes each element unmarked.
 */
static int cur_mark;
#define isMarked(p) ((p)->mark == cur_mark)
#define unMarkAll() cur_mark += 1
#define Mark(p) ((p)->mark = cur_mark)

static void opt_init(struct block *);
static void opt_cleanup(void);

static void make_marks(struct block *);
static void mark_code(struct block *);

static void intern_blocks(struct block *);

static int eq_slist(struct slist *, struct slist *);

static void find_levels_r(struct block *);

static void find_levels(struct block *);
static void find_dom(struct block *);
static void propedom(struct edge *);
static void find_edom(struct block *);
static void find_closure(struct block *);
static int atomuse(struct stmt *);
static int atomdef(struct stmt *);
static void compute_local_ud(struct block *);
static void find_ud(struct block *);
static void init_val(void);
static int F(int, int, int);
static inline void vstore(struct stmt *, int *, int, int);
static void opt_blk(struct block *, int);
static int use_conflict(struct block *, struct block *);
static void opt_j(struct edge *);
static void or_pullup(struct block *);
static void and_pullup(struct block *);
static void opt_blks(struct block *, int);
static inline void link_inedge(struct edge *, struct block *);
static void find_inedges(struct block *);
static void opt_root(struct block **);
static void opt_loop(struct block *, int);
static void fold_op(struct stmt *, int, int);
static inline struct slist *this_op(struct slist *);
static void opt_not(struct block *);
static void opt_peep(struct block *);
static void opt_stmt(struct stmt *, int[], int);
static void deadstmt(struct stmt *, struct stmt *[]);
static void opt_deadstores(struct block *);
static struct block *fold_edge(struct block *, struct edge *);
static inline int eq_blk(struct block *, struct block *);
static int slength(struct slist *);
static int count_blocks(struct block *);
static void number_blks_r(struct block *);
static int count_stmts(struct block *);
static int convert_code_r(struct block *);
#ifdef BDEBUG
static void opt_dump(struct block *);
#endif

static int n_blocks;
struct block **blocks;
static int n_edges;
struct edge **edges;

/*
 * A bit vector set representation of the dominators.
 * We round up the set size to the next multiple of the word size.
 */
static int nodewords;
static int edgewords;
struct block **levels;
bpf_u_int32 *space;
#define BITS_PER_WORD (8*sizeof(bpf_u_int32))
/*
 * True if 'a' is in uset 'p'.
 */
#define SET_MEMBER(p, a) \
((p)[(unsigned)(a) / BITS_PER_WORD] & (1 << ((unsigned)(a) % BITS_PER_WORD)))

/*
 * Add 'a' to uset p.
 */
#define SET_INSERT(p, a) \
(p)[(unsigned)(a) / BITS_PER_WORD] |= (1 << ((unsigned)(a) % BITS_PER_WORD))

/*
 * Delete 'a' from uset p.
 */
#define SET_DELETE(p, a) \
(p)[(unsigned)(a) / BITS_PER_WORD] &= ~(1 << ((unsigned)(a) % BITS_PER_WORD))

/*
 * a := a intersect b
 */
#define SET_INTERSECT(a, b, n)\
{\
	register bpf_u_int32 *_x = a, *_y = b;\
	register int _n = n;\
	while (--_n >= 0) *_x++ &= *_y++;\
}

/*
 * a := a - b
 */
#define SET_SUBTRACT(a, b, n)\
{\
	register bpf_u_int32 *_x = a, *_y = b;\
	register int _n = n;\
	while (--_n >= 0) *_x++ &=~ *_y++;\
}

/*
 * a := a union b
 */
#define SET_UNION(a, b, n)\
{\
	register bpf_u_int32 *_x = a, *_y = b;\
	register int _n = n;\
	while (--_n >= 0) *_x++ |= *_y++;\
}
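
/*
 * Illustrative example: with BITS_PER_WORD == 32, SET_INSERT(p, 37)
 * sets bit 5 of p[1], and SET_MEMBER(p, 37) tests that same bit.
 * find_dom() and find_closure() below combine whole sets a word at a
 * time with SET_INTERSECT and SET_UNION.
 */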

static uset all_dom_sets;
static uset all_closure_sets;
static uset all_edge_sets;

#ifndef MAX
#define MAX(a,b) ((a)>(b)?(a):(b))
#endif

static void
find_levels_r(b)
	struct block *b;
{
	int level;

	if (isMarked(b))
		return;

	Mark(b);
	b->link = 0;

	if (JT(b)) {
		find_levels_r(JT(b));
		find_levels_r(JF(b));
		level = MAX(JT(b)->level, JF(b)->level) + 1;
	} else
		level = 0;
	b->level = level;
	b->link = levels[level];
	levels[level] = b;
}

/*
 * Level graph.  The levels go from 0 at the leaves to
 * N_LEVELS at the root.  The levels[] array points to the
 * first node of the level list, whose elements are linked
 * with the 'link' field of the struct block.
 */
static void
find_levels(root)
	struct block *root;
{
	memset((char *)levels, 0, n_blocks * sizeof(*levels));
	unMarkAll();
	find_levels_r(root);
}

/*
 * Find dominator relationships.
 * Assumes graph has been leveled.
 */
static void
find_dom(root)
	struct block *root;
{
	int i;
	struct block *b;
	bpf_u_int32 *x;

	/*
	 * Initialize sets to contain all nodes.
	 */
	x = all_dom_sets;
	i = n_blocks * nodewords;
	while (--i >= 0)
		*x++ = ~0;
	/* Root starts off empty. */
	for (i = nodewords; --i >= 0;)
		root->dom[i] = 0;

	/* root->level is the highest level number found. */
	for (i = root->level; i >= 0; --i) {
		for (b = levels[i]; b; b = b->link) {
			SET_INSERT(b->dom, b->id);
			if (JT(b) == 0)
				continue;
			SET_INTERSECT(JT(b)->dom, b->dom, nodewords);
			SET_INTERSECT(JF(b)->dom, b->dom, nodewords);
		}
	}
}
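
/*
 * Illustrative sketch: for the two-block graph in which both branches
 * of the root go to a single leaf, the root's set converges to {root}
 * and the leaf's to {root, leaf}; the leaf's all-ones initial set is
 * intersected with the root's set and then has the leaf's own id
 * inserted on the level-0 pass.
 */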

static void
propedom(ep)
	struct edge *ep;
{
	SET_INSERT(ep->edom, ep->id);
	if (ep->succ) {
		SET_INTERSECT(ep->succ->et.edom, ep->edom, edgewords);
		SET_INTERSECT(ep->succ->ef.edom, ep->edom, edgewords);
	}
}

/*
 * Compute edge dominators.
 * Assumes graph has been leveled and predecessors established.
 */
static void
find_edom(root)
	struct block *root;
{
	int i;
	uset x;
	struct block *b;

	x = all_edge_sets;
	for (i = n_edges * edgewords; --i >= 0; )
		x[i] = ~0;

	/* root->level is the highest level number found. */
	memset(root->et.edom, 0, edgewords * sizeof(*(uset)0));
	memset(root->ef.edom, 0, edgewords * sizeof(*(uset)0));
	for (i = root->level; i >= 0; --i) {
		for (b = levels[i]; b != 0; b = b->link) {
			propedom(&b->et);
			propedom(&b->ef);
		}
	}
}

/*
 * Find the backwards transitive closure of the flow graph.  These sets
 * are backwards in the sense that we find the set of nodes that reach
 * a given node, not the set of nodes that can be reached by a node.
 *
 * Assumes graph has been leveled.
 */
static void
find_closure(root)
	struct block *root;
{
	int i;
	struct block *b;

	/*
	 * Initialize sets to contain no nodes.
	 */
	memset((char *)all_closure_sets, 0,
	      n_blocks * nodewords * sizeof(*all_closure_sets));

	/* root->level is the highest level number found. */
	for (i = root->level; i >= 0; --i) {
		for (b = levels[i]; b; b = b->link) {
			SET_INSERT(b->closure, b->id);
			if (JT(b) == 0)
				continue;
			SET_UNION(JT(b)->closure, b->closure, nodewords);
			SET_UNION(JF(b)->closure, b->closure, nodewords);
		}
	}
}

/*
 * Return the register number that is used by s.  If A and X are both
 * used, return AX_ATOM.  If no register is used, return -1.
 *
 * The implementation should probably change to an array access.
 */
static int
atomuse(s)
	struct stmt *s;
{
	register int c = s->code;

	if (c == NOP)
		return -1;

	switch (BPF_CLASS(c)) {

	case BPF_RET:
		return (BPF_RVAL(c) == BPF_A) ? A_ATOM :
			(BPF_RVAL(c) == BPF_X) ? X_ATOM : -1;

	case BPF_LD:
	case BPF_LDX:
		return (BPF_MODE(c) == BPF_IND) ? X_ATOM :
			(BPF_MODE(c) == BPF_MEM) ? s->k : -1;

	case BPF_ST:
		return A_ATOM;

	case BPF_STX:
		return X_ATOM;

	case BPF_JMP:
	case BPF_ALU:
		if (BPF_SRC(c) == BPF_X)
			return AX_ATOM;
		return A_ATOM;

	case BPF_MISC:
		return BPF_MISCOP(c) == BPF_TXA ? X_ATOM : A_ATOM;
	}
	abort();
	/* NOTREACHED */
}
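
/*
 * For example: atomuse() on "ld M[2]" (BPF_LD|BPF_MEM, k == 2)
 * returns 2, the scratch-memory atom being read; on "add x"
 * (BPF_ALU|BPF_ADD|BPF_X) it returns AX_ATOM, since both registers
 * are read; on "ld #5" (BPF_LD|BPF_IMM) it returns -1.
 */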

/*
 * Return the register number that is defined by 's'.  We assume that
 * a single stmt cannot define more than one register.  If no register
 * is defined, return -1.
 *
 * The implementation should probably change to an array access.
 */
static int
atomdef(s)
	struct stmt *s;
{
	if (s->code == NOP)
		return -1;

	switch (BPF_CLASS(s->code)) {

	case BPF_LD:
	case BPF_ALU:
		return A_ATOM;

	case BPF_LDX:
		return X_ATOM;

	case BPF_ST:
	case BPF_STX:
		return s->k;

	case BPF_MISC:
		return BPF_MISCOP(s->code) == BPF_TAX ? X_ATOM : A_ATOM;
	}
	return -1;
}

static void
compute_local_ud(b)
	struct block *b;
{
	struct slist *s;
	atomset def = 0, use = 0, kill = 0;
	int atom;

	for (s = b->stmts; s; s = s->next) {
		if (s->s.code == NOP)
			continue;
		atom = atomuse(&s->s);
		if (atom >= 0) {
			if (atom == AX_ATOM) {
				if (!ATOMELEM(def, X_ATOM))
					use |= ATOMMASK(X_ATOM);
				if (!ATOMELEM(def, A_ATOM))
					use |= ATOMMASK(A_ATOM);
			}
			else if (atom < N_ATOMS) {
				if (!ATOMELEM(def, atom))
					use |= ATOMMASK(atom);
			}
			else
				abort();
		}
		atom = atomdef(&s->s);
		if (atom >= 0) {
			if (!ATOMELEM(use, atom))
				kill |= ATOMMASK(atom);
			def |= ATOMMASK(atom);
		}
	}
	if (!ATOMELEM(def, A_ATOM) && BPF_CLASS(b->s.code) == BPF_JMP)
		use |= ATOMMASK(A_ATOM);

	b->def = def;
	b->kill = kill;
	b->in_use = use;
}

/*
 * Assume graph is already leveled.
 */
static void
find_ud(root)
	struct block *root;
{
	int i, maxlevel;
	struct block *p;

	/*
	 * root->level is the highest level number found;
	 * count down from there.
	 */
	maxlevel = root->level;
	for (i = maxlevel; i >= 0; --i)
		for (p = levels[i]; p; p = p->link) {
			compute_local_ud(p);
			p->out_use = 0;
		}

	for (i = 1; i <= maxlevel; ++i) {
		for (p = levels[i]; p; p = p->link) {
			p->out_use |= JT(p)->in_use | JF(p)->in_use;
			p->in_use |= p->out_use &~ p->kill;
		}
	}
}

/*
 * These data structures are used in a Cocke and Schwartz style
 * value numbering scheme.  Since the flowgraph is acyclic,
 * exit values can be propagated from a node's predecessors
 * provided it is uniquely defined.
 */
struct valnode {
	int code;
	int v0, v1;
	int val;
	struct valnode *next;
};

#define MODULUS 213
static struct valnode *hashtbl[MODULUS];
static int curval;
static int maxval;

/* Integer constants mapped with the load immediate opcode. */
#define K(i) F(BPF_LD|BPF_IMM|BPF_W, i, 0L)

struct vmapinfo {
	int is_const;
	bpf_int32 const_val;
};

struct vmapinfo *vmap;
struct valnode *vnode_base;
struct valnode *next_vnode;

static void
init_val()
{
	curval = 0;
	next_vnode = vnode_base;
	memset((char *)vmap, 0, maxval * sizeof(*vmap));
	memset((char *)hashtbl, 0, sizeof hashtbl);
}

/* Because we really don't have an IR, this stuff is a little messy. */
static int
F(code, v0, v1)
	int code;
	int v0, v1;
{
	u_int hash;
	int val;
	struct valnode *p;

	hash = (u_int)code ^ (v0 << 4) ^ (v1 << 8);
	hash %= MODULUS;

	for (p = hashtbl[hash]; p; p = p->next)
		if (p->code == code && p->v0 == v0 && p->v1 == v1)
			return p->val;

	val = ++curval;
	if (BPF_MODE(code) == BPF_IMM &&
	    (BPF_CLASS(code) == BPF_LD || BPF_CLASS(code) == BPF_LDX)) {
		vmap[val].const_val = v0;
		vmap[val].is_const = 1;
	}
	p = next_vnode++;
	p->val = val;
	p->code = code;
	p->v0 = v0;
	p->v1 = v1;
	p->next = hashtbl[hash];
	hashtbl[hash] = p;

	return val;
}
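
/*
 * Illustrative example: two separate "ld #5" statements hash to the
 * same (code, v0, v1) triple, so F() returns the same value number
 * for both.  If opt_stmt() then sees the second load while the
 * accumulator already holds that value number, vstore() turns the
 * load into a NOP.
 */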

static inline void
vstore(s, valp, newval, alter)
	struct stmt *s;
	int *valp;
	int newval;
	int alter;
{
	if (alter && *valp == newval)
		s->code = NOP;
	else
		*valp = newval;
}

static void
fold_op(s, v0, v1)
	struct stmt *s;
	int v0, v1;
{
	bpf_int32 a, b;

	a = vmap[v0].const_val;
	b = vmap[v1].const_val;

	switch (BPF_OP(s->code)) {
	case BPF_ADD:
		a += b;
		break;

	case BPF_SUB:
		a -= b;
		break;

	case BPF_MUL:
		a *= b;
		break;

	case BPF_DIV:
		if (b == 0)
			bpf_error("division by zero");
		a /= b;
		break;

	case BPF_AND:
		a &= b;
		break;

	case BPF_OR:
		a |= b;
		break;

	case BPF_LSH:
		a <<= b;
		break;

	case BPF_RSH:
		a >>= b;
		break;

	case BPF_NEG:
		a = -a;
		break;

	default:
		abort();
	}
	s->k = a;
	s->code = BPF_LD|BPF_IMM;
	done = 0;
}
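
/*
 * Illustrative example: if the accumulator is known to hold the
 * constant 2 and the next statement is "add #3" (BPF_ALU|BPF_ADD|BPF_K,
 * k == 3), opt_stmt() calls fold_op(), which rewrites the statement
 * in place as "ld #5" (BPF_LD|BPF_IMM, k == 5).
 */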

static inline struct slist *
this_op(s)
	struct slist *s;
{
	while (s != 0 && s->s.code == NOP)
		s = s->next;
	return s;
}

static void
opt_not(b)
	struct block *b;
{
	struct block *tmp = JT(b);

	JT(b) = JF(b);
	JF(b) = tmp;
}

static void
opt_peep(b)
	struct block *b;
{
	struct slist *s;
	struct slist *next, *last;
	int val;

	s = b->stmts;
	if (s == 0)
		return;

	last = s;
	while (1) {
		s = this_op(s);
		if (s == 0)
			break;
		next = this_op(s->next);
		if (next == 0)
			break;
		last = next;

		/*
		 * st  M[k]	-->	st  M[k]
		 * ldx M[k]		tax
		 */
		if (s->s.code == BPF_ST &&
		    next->s.code == (BPF_LDX|BPF_MEM) &&
		    s->s.k == next->s.k) {
			done = 0;
			next->s.code = BPF_MISC|BPF_TAX;
		}
		/*
		 * ld  #k	-->	ldx  #k
		 * tax			txa
		 */
		if (s->s.code == (BPF_LD|BPF_IMM) &&
		    next->s.code == (BPF_MISC|BPF_TAX)) {
			s->s.code = BPF_LDX|BPF_IMM;
			next->s.code = BPF_MISC|BPF_TXA;
			done = 0;
		}
		/*
		 * This is an ugly special case, but it happens
		 * when you say tcp[k] or udp[k] where k is a constant.
		 */
		if (s->s.code == (BPF_LD|BPF_IMM)) {
			struct slist *add, *tax, *ild;

			/*
			 * Check that X isn't used on exit from this
			 * block (which the optimizer might cause).
			 * We know the code generator won't generate
			 * any local dependencies.
			 */
			if (ATOMELEM(b->out_use, X_ATOM))
				break;

			if (next->s.code != (BPF_LDX|BPF_MSH|BPF_B))
				add = next;
			else
				add = this_op(next->next);
			if (add == 0 || add->s.code != (BPF_ALU|BPF_ADD|BPF_X))
				break;

			tax = this_op(add->next);
			if (tax == 0 || tax->s.code != (BPF_MISC|BPF_TAX))
				break;

			ild = this_op(tax->next);
			if (ild == 0 || BPF_CLASS(ild->s.code) != BPF_LD ||
			    BPF_MODE(ild->s.code) != BPF_IND)
				break;
			/*
			 * XXX We need to check that X is not
			 * subsequently used.  We know we can eliminate the
			 * accumulator modifications since it is defined
			 * by the last stmt of this sequence.
			 *
			 * We want to turn this sequence:
			 *
			 * (004) ldi     #0x2		{s}
			 * (005) ldxms   [14]		{next}  -- optional
			 * (006) addx			{add}
			 * (007) tax			{tax}
			 * (008) ild     [x+0]		{ild}
			 *
			 * into this sequence:
			 *
			 * (004) nop
			 * (005) ldxms   [14]
			 * (006) nop
			 * (007) nop
			 * (008) ild     [x+2]
			 *
			 */
			ild->s.k += s->s.k;
			s->s.code = NOP;
			add->s.code = NOP;
			tax->s.code = NOP;
			done = 0;
		}
		s = next;
	}
	/*
	 * If we have a subtract to do a comparison, and the X register
	 * is a known constant, we can merge this value into the
	 * comparison.
	 */
	if (last->s.code == (BPF_ALU|BPF_SUB|BPF_X) &&
	    !ATOMELEM(b->out_use, A_ATOM)) {
		val = b->val[X_ATOM];
		if (vmap[val].is_const) {
			int op;

			b->s.k += vmap[val].const_val;
			op = BPF_OP(b->s.code);
			if (op == BPF_JGT || op == BPF_JGE) {
				struct block *t = JT(b);
				JT(b) = JF(b);
				JF(b) = t;
				b->s.k += 0x80000000;
			}
			last->s.code = NOP;
			done = 0;
		} else if (b->s.k == 0) {
			/*
			 * sub x  ->	nop
			 * j  #0	j  x
			 */
			last->s.code = NOP;
			b->s.code = BPF_CLASS(b->s.code) | BPF_OP(b->s.code) |
				BPF_X;
			done = 0;
		}
	}
	/*
	 * Likewise, a constant subtract can be simplified.
	 */
	else if (last->s.code == (BPF_ALU|BPF_SUB|BPF_K) &&
		 !ATOMELEM(b->out_use, A_ATOM)) {
		int op;

		b->s.k += last->s.k;
		last->s.code = NOP;
		op = BPF_OP(b->s.code);
		if (op == BPF_JGT || op == BPF_JGE) {
			struct block *t = JT(b);
			JT(b) = JF(b);
			JF(b) = t;
			b->s.k += 0x80000000;
		}
		done = 0;
	}
	/*
	 * and #k	nop
	 * jeq #0  ->	jset #k
	 */
	if (last->s.code == (BPF_ALU|BPF_AND|BPF_K) &&
	    !ATOMELEM(b->out_use, A_ATOM) && b->s.k == 0) {
		b->s.k = last->s.k;
		b->s.code = BPF_JMP|BPF_K|BPF_JSET;
		last->s.code = NOP;
		done = 0;
		opt_not(b);
	}
	/*
	 * If the accumulator is a known constant, we can compute the
	 * comparison result.
	 */
	val = b->val[A_ATOM];
	if (vmap[val].is_const && BPF_SRC(b->s.code) == BPF_K) {
		bpf_int32 v = vmap[val].const_val;
		switch (BPF_OP(b->s.code)) {

		case BPF_JEQ:
			v = v == b->s.k;
			break;

		case BPF_JGT:
			v = (unsigned)v > b->s.k;
			break;

		case BPF_JGE:
			v = (unsigned)v >= b->s.k;
			break;

		case BPF_JSET:
			v &= b->s.k;
			break;

		default:
			abort();
		}
		if (JF(b) != JT(b))
			done = 0;
		if (v)
			JF(b) = JT(b);
		else
			JT(b) = JF(b);
	}
}

/*
 * Compute the symbolic value of the expression of 's', and update
 * anything it defines in the value table 'val'.  If 'alter' is true,
 * do various optimizations.  This code would be cleaner if symbolic
 * evaluation and code transformations weren't folded together.
 */
static void
opt_stmt(s, val, alter)
	struct stmt *s;
	int val[];
	int alter;
{
	int op;
	int v;

	switch (s->code) {

	case BPF_LD|BPF_ABS|BPF_W:
	case BPF_LD|BPF_ABS|BPF_H:
	case BPF_LD|BPF_ABS|BPF_B:
		v = F(s->code, s->k, 0L);
		vstore(s, &val[A_ATOM], v, alter);
		break;

	case BPF_LD|BPF_IND|BPF_W:
	case BPF_LD|BPF_IND|BPF_H:
	case BPF_LD|BPF_IND|BPF_B:
		v = val[X_ATOM];
		if (alter && vmap[v].is_const) {
			s->code = BPF_LD|BPF_ABS|BPF_SIZE(s->code);
			s->k += vmap[v].const_val;
			v = F(s->code, s->k, 0L);
			done = 0;
		}
		else
			v = F(s->code, s->k, v);
		vstore(s, &val[A_ATOM], v, alter);
		break;

	case BPF_LD|BPF_LEN:
		v = F(s->code, 0L, 0L);
		vstore(s, &val[A_ATOM], v, alter);
		break;

	case BPF_LD|BPF_IMM:
		v = K(s->k);
		vstore(s, &val[A_ATOM], v, alter);
		break;

	case BPF_LDX|BPF_IMM:
		v = K(s->k);
		vstore(s, &val[X_ATOM], v, alter);
		break;

	case BPF_LDX|BPF_MSH|BPF_B:
		v = F(s->code, s->k, 0L);
		vstore(s, &val[X_ATOM], v, alter);
		break;

	case BPF_ALU|BPF_NEG:
		if (alter && vmap[val[A_ATOM]].is_const) {
			s->code = BPF_LD|BPF_IMM;
			s->k = -vmap[val[A_ATOM]].const_val;
			val[A_ATOM] = K(s->k);
		}
		else
			val[A_ATOM] = F(s->code, val[A_ATOM], 0L);
		break;

	case BPF_ALU|BPF_ADD|BPF_K:
	case BPF_ALU|BPF_SUB|BPF_K:
	case BPF_ALU|BPF_MUL|BPF_K:
	case BPF_ALU|BPF_DIV|BPF_K:
	case BPF_ALU|BPF_AND|BPF_K:
	case BPF_ALU|BPF_OR|BPF_K:
	case BPF_ALU|BPF_LSH|BPF_K:
	case BPF_ALU|BPF_RSH|BPF_K:
		op = BPF_OP(s->code);
		if (alter) {
			if (s->k == 0) {
				if (op == BPF_ADD || op == BPF_SUB ||
				    op == BPF_LSH || op == BPF_RSH ||
				    op == BPF_OR) {
					s->code = NOP;
					break;
				}
				if (op == BPF_MUL || op == BPF_AND) {
					s->code = BPF_LD|BPF_IMM;
					val[A_ATOM] = K(s->k);
					break;
				}
			}
			if (vmap[val[A_ATOM]].is_const) {
				fold_op(s, val[A_ATOM], K(s->k));
				val[A_ATOM] = K(s->k);
				break;
			}
		}
		val[A_ATOM] = F(s->code, val[A_ATOM], K(s->k));
		break;

	case BPF_ALU|BPF_ADD|BPF_X:
	case BPF_ALU|BPF_SUB|BPF_X:
	case BPF_ALU|BPF_MUL|BPF_X:
	case BPF_ALU|BPF_DIV|BPF_X:
	case BPF_ALU|BPF_AND|BPF_X:
	case BPF_ALU|BPF_OR|BPF_X:
	case BPF_ALU|BPF_LSH|BPF_X:
	case BPF_ALU|BPF_RSH|BPF_X:
		op = BPF_OP(s->code);
		if (alter && vmap[val[X_ATOM]].is_const) {
			if (vmap[val[A_ATOM]].is_const) {
				fold_op(s, val[A_ATOM], val[X_ATOM]);
				val[A_ATOM] = K(s->k);
			}
			else {
				s->code = BPF_ALU|BPF_K|op;
				s->k = vmap[val[X_ATOM]].const_val;
				done = 0;
				val[A_ATOM] =
					F(s->code, val[A_ATOM], K(s->k));
			}
			break;
		}
		/*
		 * Check if we're doing something to an accumulator
		 * that is 0, and simplify.  This may not seem like
		 * much of a simplification but it could open up further
		 * optimizations.
		 * XXX We could also check for mul by 1, and -1, etc.
		 */
		if (alter && vmap[val[A_ATOM]].is_const
		    && vmap[val[A_ATOM]].const_val == 0) {
			if (op == BPF_ADD || op == BPF_OR ||
			    op == BPF_LSH || op == BPF_RSH || op == BPF_SUB) {
				s->code = BPF_MISC|BPF_TXA;
				vstore(s, &val[A_ATOM], val[X_ATOM], alter);
				break;
			}
			else if (op == BPF_MUL || op == BPF_DIV ||
				 op == BPF_AND) {
				s->code = BPF_LD|BPF_IMM;
				s->k = 0;
				vstore(s, &val[A_ATOM], K(s->k), alter);
				break;
			}
			else if (op == BPF_NEG) {
				s->code = NOP;
				break;
			}
		}
		val[A_ATOM] = F(s->code, val[A_ATOM], val[X_ATOM]);
		break;

	case BPF_MISC|BPF_TXA:
		vstore(s, &val[A_ATOM], val[X_ATOM], alter);
		break;

	case BPF_LD|BPF_MEM:
		v = val[s->k];
		if (alter && vmap[v].is_const) {
			s->code = BPF_LD|BPF_IMM;
			s->k = vmap[v].const_val;
			done = 0;
		}
		vstore(s, &val[A_ATOM], v, alter);
		break;

	case BPF_MISC|BPF_TAX:
		vstore(s, &val[X_ATOM], val[A_ATOM], alter);
		break;

	case BPF_LDX|BPF_MEM:
		v = val[s->k];
		if (alter && vmap[v].is_const) {
			s->code = BPF_LDX|BPF_IMM;
			s->k = vmap[v].const_val;
			done = 0;
		}
		vstore(s, &val[X_ATOM], v, alter);
		break;

	case BPF_ST:
		vstore(s, &val[s->k], val[A_ATOM], alter);
		break;

	case BPF_STX:
		vstore(s, &val[s->k], val[X_ATOM], alter);
		break;
	}
}
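
/*
 * Illustrative example of the BPF_IND rewrite above: given "ldx #4"
 * followed by "ld [x+10]", the X register's value number maps to the
 * constant 4, so the indirect load is rewritten as the absolute load
 * "ld [14]", which no longer depends on X.
 */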

static void
deadstmt(s, last)
	register struct stmt *s;
	register struct stmt *last[];
{
	register int atom;

	atom = atomuse(s);
	if (atom >= 0) {
		if (atom == AX_ATOM) {
			last[X_ATOM] = 0;
			last[A_ATOM] = 0;
		}
		else
			last[atom] = 0;
	}
	atom = atomdef(s);
	if (atom >= 0) {
		if (last[atom]) {
			done = 0;
			last[atom]->code = NOP;
		}
		last[atom] = s;
	}
}

static void
opt_deadstores(b)
	register struct block *b;
{
	register struct slist *s;
	register int atom;
	struct stmt *last[N_ATOMS];

	memset((char *)last, 0, sizeof last);

	for (s = b->stmts; s != 0; s = s->next)
		deadstmt(&s->s, last);
	deadstmt(&b->s, last);

	for (atom = 0; atom < N_ATOMS; ++atom)
		if (last[atom] && !ATOMELEM(b->out_use, atom)) {
			last[atom]->code = NOP;
			done = 0;
		}
}
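
/*
 * Illustrative example: in "st M[3]; ld #1; st M[3]", the first store
 * is overwritten before M[3] is ever read, so deadstmt() turns it into
 * a NOP; if atom 3 is also absent from b->out_use, the final store is
 * dead as well and is NOPed by the loop above.
 */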

static void
opt_blk(b, do_stmts)
	struct block *b;
	int do_stmts;
{
	struct slist *s;
	struct edge *p;
	int i;
	bpf_int32 aval;

#if 0
	for (s = b->stmts; s && s->next; s = s->next)
		if (BPF_CLASS(s->s.code) == BPF_JMP) {
			do_stmts = 0;
			break;
		}
#endif

	/*
	 * Initialize the atom values.
	 * If we have no predecessors, everything is undefined.
	 * Otherwise, we inherit our values from our predecessors.
	 * If any register has an ambiguous value (i.e. control paths are
	 * merging) give it the undefined value of 0.
	 */
	p = b->in_edges;
	if (p == 0)
		memset((char *)b->val, 0, sizeof(b->val));
	else {
		memcpy((char *)b->val, (char *)p->pred->val, sizeof(b->val));
		while ((p = p->next) != NULL) {
			for (i = 0; i < N_ATOMS; ++i)
				if (b->val[i] != p->pred->val[i])
					b->val[i] = 0;
		}
	}
	aval = b->val[A_ATOM];
	for (s = b->stmts; s; s = s->next)
		opt_stmt(&s->s, b->val, do_stmts);

	/*
	 * This is a special case: if we don't use anything from this
	 * block, and we load the accumulator with a value that is
	 * already there, or if this block is a return,
	 * eliminate all the statements.
	 */
	if (do_stmts &&
	    ((b->out_use == 0 && aval != 0 && b->val[A_ATOM] == aval) ||
	     BPF_CLASS(b->s.code) == BPF_RET)) {
		if (b->stmts != 0) {
			b->stmts = 0;
			done = 0;
		}
	} else {
		opt_peep(b);
		opt_deadstores(b);
	}
	/*
	 * Set up values for branch optimizer.
	 */
	if (BPF_SRC(b->s.code) == BPF_K)
		b->oval = K(b->s.k);
	else
		b->oval = b->val[X_ATOM];
	b->et.code = b->s.code;
	b->ef.code = -b->s.code;
}

/*
 * Return true if any register that is used on exit from 'succ' has
 * an exit value that is different from the corresponding exit value
 * from 'b'.
 */
static int
use_conflict(b, succ)
	struct block *b, *succ;
{
	int atom;
	atomset use = succ->out_use;

	if (use == 0)
		return 0;

	for (atom = 0; atom < N_ATOMS; ++atom)
		if (ATOMELEM(use, atom))
			if (b->val[atom] != succ->val[atom])
				return 1;
	return 0;
}

static struct block *
fold_edge(child, ep)
	struct block *child;
	struct edge *ep;
{
	int sense;
	int aval0, aval1, oval0, oval1;
	int code = ep->code;

	if (code < 0) {
		code = -code;
		sense = 0;
	} else
		sense = 1;

	if (child->s.code != code)
		return 0;

	aval0 = child->val[A_ATOM];
	oval0 = child->oval;
	aval1 = ep->pred->val[A_ATOM];
	oval1 = ep->pred->oval;

	if (aval0 != aval1)
		return 0;

	if (oval0 == oval1)
		/*
		 * The operands are identical, so the
		 * result is true if a true branch was
		 * taken to get here, otherwise false.
		 */
		return sense ? JT(child) : JF(child);

	if (sense && code == (BPF_JMP|BPF_JEQ|BPF_K))
		/*
		 * At this point, we only know the comparison if we
		 * came down the true branch, and it was an equality
		 * comparison with a constant.  We rely on the fact that
		 * distinct constants have distinct value numbers.
		 */
		return JF(child);

	return 0;
}
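
/*
 * Illustrative example: if a dominating edge has already decided
 * "jeq #0x800" on the same accumulator value number, then when a
 * successor block repeats the identical test, fold_edge() returns
 * JT(child) if we arrived along the true edge and JF(child) along
 * the false edge, letting opt_j() bypass the redundant comparison.
 */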

static void
opt_j(ep)
	struct edge *ep;
{
	register int i, k;
	register struct block *target;

	if (JT(ep->succ) == 0)
		return;

	if (JT(ep->succ) == JF(ep->succ)) {
		/*
		 * Common branch targets can be eliminated, provided
		 * there is no data dependency.
		 */
		if (!use_conflict(ep->pred, ep->succ->et.succ)) {
			done = 0;
			ep->succ = JT(ep->succ);
		}
	}
	/*
	 * For each edge dominator that matches the successor of this
	 * edge, promote the edge successor to its grandchild.
	 *
	 * XXX We violate the set abstraction here in favor of a reasonably
	 * efficient loop.
	 */
 top:
	for (i = 0; i < edgewords; ++i) {
		register bpf_u_int32 x = ep->edom[i];

		while (x != 0) {
			k = ffs(x) - 1;
			x &=~ (1 << k);
			k += i * BITS_PER_WORD;

			target = fold_edge(ep->succ, edges[k]);
			/*
			 * Check that there is no data dependency between
			 * nodes that will be violated if we move the edge.
			 */
			if (target != 0 && !use_conflict(ep->pred, target)) {
				done = 0;
				ep->succ = target;
				if (JT(target) != 0)
					/*
					 * Start over unless we hit a leaf.
					 */
					goto top;
				return;
			}
		}
	}
}


static void
or_pullup(b)
	struct block *b;
{
	int val, at_top;
	struct block *pull;
	struct block **diffp, **samep;
	struct edge *ep;

	ep = b->in_edges;
	if (ep == 0)
		return;

	/*
	 * Make sure each predecessor loads the same value.
	 * XXX why?
	 */
	val = ep->pred->val[A_ATOM];
	for (ep = ep->next; ep != 0; ep = ep->next)
		if (val != ep->pred->val[A_ATOM])
			return;

	if (JT(b->in_edges->pred) == b)
		diffp = &JT(b->in_edges->pred);
	else
		diffp = &JF(b->in_edges->pred);

	at_top = 1;
	while (1) {
		if (*diffp == 0)
			return;

		if (JT(*diffp) != JT(b))
			return;

		if (!SET_MEMBER((*diffp)->dom, b->id))
			return;

		if ((*diffp)->val[A_ATOM] != val)
			break;

		diffp = &JF(*diffp);
		at_top = 0;
	}
	samep = &JF(*diffp);
	while (1) {
		if (*samep == 0)
			return;

		if (JT(*samep) != JT(b))
			return;

		if (!SET_MEMBER((*samep)->dom, b->id))
			return;

		if ((*samep)->val[A_ATOM] == val)
			break;

		/* XXX Need to check that there are no data dependencies
		   between dp0 and dp1.  Currently, the code generator
		   will not produce such dependencies. */
		samep = &JF(*samep);
	}
#ifdef notdef
	/* XXX This doesn't cover everything. */
	for (i = 0; i < N_ATOMS; ++i)
		if ((*samep)->val[i] != pred->val[i])
			return;
#endif
	/* Pull up the node. */
	pull = *samep;
	*samep = JF(pull);
	JF(pull) = *diffp;

	/*
	 * At the top of the chain, each predecessor needs to point at the
	 * pulled up node.  Inside the chain, there is only one predecessor
	 * to worry about.
	 */
	if (at_top) {
		for (ep = b->in_edges; ep != 0; ep = ep->next) {
			if (JT(ep->pred) == b)
				JT(ep->pred) = pull;
			else
				JF(ep->pred) = pull;
		}
	}
	else
		*diffp = pull;

	done = 0;
}

static void
and_pullup(b)
	struct block *b;
{
	int val, at_top;
	struct block *pull;
	struct block **diffp, **samep;
	struct edge *ep;

	ep = b->in_edges;
	if (ep == 0)
		return;

	/*
	 * Make sure each predecessor loads the same value.
	 */
	val = ep->pred->val[A_ATOM];
	for (ep = ep->next; ep != 0; ep = ep->next)
		if (val != ep->pred->val[A_ATOM])
			return;

	if (JT(b->in_edges->pred) == b)
		diffp = &JT(b->in_edges->pred);
	else
		diffp = &JF(b->in_edges->pred);

	at_top = 1;
	while (1) {
		if (*diffp == 0)
			return;

		if (JF(*diffp) != JF(b))
			return;

		if (!SET_MEMBER((*diffp)->dom, b->id))
			return;

		if ((*diffp)->val[A_ATOM] != val)
			break;

		diffp = &JT(*diffp);
		at_top = 0;
	}
	samep = &JT(*diffp);
	while (1) {
		if (*samep == 0)
			return;

		if (JF(*samep) != JF(b))
			return;

		if (!SET_MEMBER((*samep)->dom, b->id))
			return;

		if ((*samep)->val[A_ATOM] == val)
			break;

		/* XXX Need to check that there are no data dependencies
		   between diffp and samep.  Currently, the code generator
		   will not produce such dependencies. */
		samep = &JT(*samep);
	}
#ifdef notdef
	/* XXX This doesn't cover everything. */
	for (i = 0; i < N_ATOMS; ++i)
		if ((*samep)->val[i] != pred->val[i])
			return;
#endif
	/* Pull up the node. */
	pull = *samep;
	*samep = JT(pull);
	JT(pull) = *diffp;

	/*
	 * At the top of the chain, each predecessor needs to point at the
	 * pulled up node.  Inside the chain, there is only one predecessor
	 * to worry about.
	 */
	if (at_top) {
		for (ep = b->in_edges; ep != 0; ep = ep->next) {
			if (JT(ep->pred) == b)
				JT(ep->pred) = pull;
			else
				JF(ep->pred) = pull;
		}
	}
	else
		*diffp = pull;

	done = 0;
}

static void
opt_blks(root, do_stmts)
	struct block *root;
	int do_stmts;
{
	int i, maxlevel;
	struct block *p;

	init_val();
	maxlevel = root->level;

	find_inedges(root);
	for (i = maxlevel; i >= 0; --i)
		for (p = levels[i]; p; p = p->link)
			opt_blk(p, do_stmts);

	if (do_stmts)
		/*
		 * No point trying to move branches; it can't possibly
		 * make a difference at this point.
		 */
		return;

	for (i = 1; i <= maxlevel; ++i) {
		for (p = levels[i]; p; p = p->link) {
			opt_j(&p->et);
			opt_j(&p->ef);
		}
	}

	find_inedges(root);
	for (i = 1; i <= maxlevel; ++i) {
		for (p = levels[i]; p; p = p->link) {
			or_pullup(p);
			and_pullup(p);
		}
	}
}

static inline void
link_inedge(parent, child)
	struct edge *parent;
	struct block *child;
{
	parent->next = child->in_edges;
	child->in_edges = parent;
}

static void
find_inedges(root)
	struct block *root;
{
	int i;
	struct block *b;

	for (i = 0; i < n_blocks; ++i)
		blocks[i]->in_edges = 0;

	/*
	 * Traverse the graph, adding each edge to the predecessor
	 * list of its successors.  Skip the leaves (i.e. level 0).
	 */
	for (i = root->level; i > 0; --i) {
		for (b = levels[i]; b != 0; b = b->link) {
			link_inedge(&b->et, JT(b));
			link_inedge(&b->ef, JF(b));
		}
	}
}

static void
opt_root(b)
	struct block **b;
{
	struct slist *tmp, *s;

	s = (*b)->stmts;
	(*b)->stmts = 0;
	while (BPF_CLASS((*b)->s.code) == BPF_JMP && JT(*b) == JF(*b))
		*b = JT(*b);

	tmp = (*b)->stmts;
	if (tmp != 0)
		sappend(s, tmp);
	(*b)->stmts = s;

	/*
	 * If the root node is a return, then there is no
	 * point executing any statements (since the bpf machine
	 * has no side effects).
	 */
	if (BPF_CLASS((*b)->s.code) == BPF_RET)
		(*b)->stmts = 0;
}

static void
opt_loop(root, do_stmts)
	struct block *root;
	int do_stmts;
{

#ifdef BDEBUG
	if (dflag > 1)
		opt_dump(root);
#endif
	do {
		done = 1;
		find_levels(root);
		find_dom(root);
		find_closure(root);
		find_ud(root);
		find_edom(root);
		opt_blks(root, do_stmts);
#ifdef BDEBUG
		if (dflag > 1)
			opt_dump(root);
#endif
	} while (!done);
}

/*
 * Optimize the filter code in its DAG representation.
 */
void
bpf_optimize(rootp)
	struct block **rootp;
{
	struct block *root;

	root = *rootp;

	opt_init(root);
	opt_loop(root, 0);
	opt_loop(root, 1);
	intern_blocks(root);
	opt_root(rootp);
	opt_cleanup();
}
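
/*
 * Usage sketch (illustrative): the code generator hands the root of
 * the DAG it built to this routine, as in
 *
 *	struct block *root = ...;
 *	bpf_optimize(&root);
 *
 * The root is passed by reference because opt_root() may replace the
 * root block itself.
 */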

static void
make_marks(p)
	struct block *p;
{
	if (!isMarked(p)) {
		Mark(p);
		if (BPF_CLASS(p->s.code) != BPF_RET) {
			make_marks(JT(p));
			make_marks(JF(p));
		}
	}
}

/*
 * Mark code array such that isMarked(i) is true
 * only for nodes that are alive.
 */
static void
mark_code(p)
	struct block *p;
{
	cur_mark += 1;
	make_marks(p);
}

/*
 * True iff the two stmt lists load the same value from the packet into
 * the accumulator.
 */
static int
eq_slist(x, y)
	struct slist *x, *y;
{
	while (1) {
		while (x && x->s.code == NOP)
			x = x->next;
		while (y && y->s.code == NOP)
			y = y->next;
		if (x == 0)
			return y == 0;
		if (y == 0)
			return x == 0;
		if (x->s.code != y->s.code || x->s.k != y->s.k)
			return 0;
		x = x->next;
		y = y->next;
	}
}

static inline int
eq_blk(b0, b1)
	struct block *b0, *b1;
{
	if (b0->s.code == b1->s.code &&
	    b0->s.k == b1->s.k &&
	    b0->et.succ == b1->et.succ &&
	    b0->ef.succ == b1->ef.succ)
		return eq_slist(b0->stmts, b1->stmts);
	return 0;
}

static void
intern_blocks(root)
	struct block *root;
{
	struct block *p;
	int i, j;
	int done;
 top:
	done = 1;
	for (i = 0; i < n_blocks; ++i)
		blocks[i]->link = 0;

	mark_code(root);

	for (i = n_blocks - 1; --i >= 0; ) {
		if (!isMarked(blocks[i]))
			continue;
		for (j = i + 1; j < n_blocks; ++j) {
			if (!isMarked(blocks[j]))
				continue;
			if (eq_blk(blocks[i], blocks[j])) {
				blocks[i]->link = blocks[j]->link ?
					blocks[j]->link : blocks[j];
				break;
			}
		}
	}
	for (i = 0; i < n_blocks; ++i) {
		p = blocks[i];
		if (JT(p) == 0)
			continue;
		if (JT(p)->link) {
			done = 0;
			JT(p) = JT(p)->link;
		}
		if (JF(p)->link) {
			done = 0;
			JF(p) = JF(p)->link;
		}
	}
	if (!done)
		goto top;
}

static void
opt_cleanup()
{
	free((void *)vnode_base);
	free((void *)vmap);
	free((void *)edges);
	free((void *)space);
	free((void *)levels);
	free((void *)blocks);
}

/*
 * Return the number of stmts in 's'.
 */
static int
slength(s)
	struct slist *s;
{
	int n = 0;

	for (; s; s = s->next)
		if (s->s.code != NOP)
			++n;
	return n;
}

/*
 * Return the number of nodes reachable by 'p'.
 * All nodes should be initially unmarked.
 */
static int
count_blocks(p)
	struct block *p;
{
	if (p == 0 || isMarked(p))
		return 0;
	Mark(p);
	return count_blocks(JT(p)) + count_blocks(JF(p)) + 1;
}

/*
 * Do a depth first search on the flow graph, numbering the
 * basic blocks, and entering them into the 'blocks' array.
 */
static void
number_blks_r(p)
	struct block *p;
{
	int n;

	if (p == 0 || isMarked(p))
		return;

	Mark(p);
	n = n_blocks++;
	p->id = n;
	blocks[n] = p;

	number_blks_r(JT(p));
	number_blks_r(JF(p));
}

/*
 * Return the number of stmts in the flowgraph reachable by 'p'.
 * The nodes should be unmarked before calling.
 *
 * Note that "stmts" means "instructions", and that this includes
 *
 *	side-effect statements in 'p' (slength(p->stmts));
 *
 *	statements in the true branch from 'p' (count_stmts(JT(p)));
 *
 *	statements in the false branch from 'p' (count_stmts(JF(p)));
 *
 *	the conditional jump itself (1);
 *
 *	an extra long jump if the true branch requires it (p->longjt);
 *
 *	an extra long jump if the false branch requires it (p->longjf).
 */
static int
count_stmts(p)
	struct block *p;
{
	int n;

	if (p == 0 || isMarked(p))
		return 0;
	Mark(p);
	n = count_stmts(JT(p)) + count_stmts(JF(p));
	return slength(p->stmts) + n + 1 + p->longjt + p->longjf;
}

/*
 * Allocate memory.  All allocation is done before optimization
 * is begun.  A linear bound on the size of all data structures is computed
 * from the total number of blocks and/or statements.
 */
static void
opt_init(root)
	struct block *root;
{
	bpf_u_int32 *p;
	int i, n, max_stmts;

	/*
	 * First, count the blocks, so we can malloc an array to map
	 * block number to block.  Then, put the blocks into the array.
	 */
	unMarkAll();
	n = count_blocks(root);
	blocks = (struct block **)malloc(n * sizeof(*blocks));
	unMarkAll();
	n_blocks = 0;
	number_blks_r(root);

	n_edges = 2 * n_blocks;
	edges = (struct edge **)malloc(n_edges * sizeof(*edges));

	/*
	 * The number of levels is bounded by the number of nodes.
	 */
	levels = (struct block **)malloc(n_blocks * sizeof(*levels));

	edgewords = n_edges / (8 * sizeof(bpf_u_int32)) + 1;
	nodewords = n_blocks / (8 * sizeof(bpf_u_int32)) + 1;

	/* XXX */
	space = (bpf_u_int32 *)malloc(2 * n_blocks * nodewords * sizeof(*space)
				 + n_edges * edgewords * sizeof(*space));
	p = space;
	all_dom_sets = p;
	for (i = 0; i < n; ++i) {
		blocks[i]->dom = p;
		p += nodewords;
	}
	all_closure_sets = p;
	for (i = 0; i < n; ++i) {
		blocks[i]->closure = p;
		p += nodewords;
	}
	all_edge_sets = p;
	for (i = 0; i < n; ++i) {
		register struct block *b = blocks[i];

		b->et.edom = p;
		p += edgewords;
		b->ef.edom = p;
		p += edgewords;
		b->et.id = i;
		edges[i] = &b->et;
		b->ef.id = n_blocks + i;
		edges[n_blocks + i] = &b->ef;
		b->et.pred = b;
		b->ef.pred = b;
	}
	max_stmts = 0;
	for (i = 0; i < n; ++i)
		max_stmts += slength(blocks[i]->stmts) + 1;
	/*
	 * We allocate at most 3 value numbers per statement,
	 * so this is an upper bound on the number of valnodes
	 * we'll need.
	 */
	maxval = 3 * max_stmts;
	vmap = (struct vmapinfo *)malloc(maxval * sizeof(*vmap));
	vnode_base = (struct valnode *)malloc(maxval * sizeof(*vnode_base));
}

/*
 * Some pointers used to convert the basic block form of the code
 * into the array form that BPF requires.  'fstart' will point to
 * the malloc'd array while 'ftail' is used during the recursive traversal.
 */
static struct bpf_insn *fstart;
static struct bpf_insn *ftail;

#ifdef BDEBUG
int bids[1000];
#endif

/*
 * Returns true if successful.  Returns false if a branch has
 * an offset that is too large.  If so, we have marked that
 * branch so that on a subsequent iteration, it will be treated
 * properly.
 */
static int
convert_code_r(p)
	struct block *p;
{
	struct bpf_insn *dst;
	struct slist *src;
	int slen;
	u_int off;
	int extrajmps;		/* number of extra jumps inserted */
	struct slist **offset = NULL;

	if (p == 0 || isMarked(p))
		return (1);
	Mark(p);

	if (convert_code_r(JF(p)) == 0)
		return (0);
	if (convert_code_r(JT(p)) == 0)
		return (0);

	slen = slength(p->stmts);
	dst = ftail -= (slen + 1 + p->longjt + p->longjf);
		/* inflate length by any extra jumps */

	p->offset = dst - fstart;

	/* generate offset[] for convenience  */
	if (slen) {
		offset = (struct slist **)calloc(slen, sizeof(struct slist *));
		if (!offset) {
			bpf_error("not enough core");
			/*NOTREACHED*/
		}
	}
	src = p->stmts;
	for (off = 0; off < slen && src; off++) {
#if 0
		printf("off=%d src=%x\n", off, src);
#endif
		offset[off] = src;
		src = src->next;
	}

	off = 0;
	for (src = p->stmts; src; src = src->next) {
		if (src->s.code == NOP)
			continue;
		dst->code = (u_short)src->s.code;
		dst->k = src->s.k;

		/* fill block-local relative jump */
		if (BPF_CLASS(src->s.code) != BPF_JMP || src->s.code == (BPF_JMP|BPF_JA)) {
#if 0
			if (src->s.jt || src->s.jf) {
				bpf_error("illegal jmp destination");
				/*NOTREACHED*/
			}
#endif
			goto filled;
		}
		if (off == slen - 2)	/*???*/
			goto filled;

	    {
		int i;
		int jt, jf;
		char *ljerr = "%s for block-local relative jump: off=%d";

#if 0
		printf("code=%x off=%d %x %x\n", src->s.code,
			off, src->s.jt, src->s.jf);
#endif

		if (!src->s.jt || !src->s.jf) {
			bpf_error(ljerr, "no jmp destination", off);
			/*NOTREACHED*/
		}

		jt = jf = 0;
		for (i = 0; i < slen; i++) {
			if (offset[i] == src->s.jt) {
				if (jt) {
					bpf_error(ljerr, "multiple matches", off);
					/*NOTREACHED*/
				}

				dst->jt = i - off - 1;
				jt++;
			}
			if (offset[i] == src->s.jf) {
				if (jf) {
					bpf_error(ljerr, "multiple matches", off);
					/*NOTREACHED*/
				}
				dst->jf = i - off - 1;
				jf++;
			}
		}
		if (!jt || !jf) {
			bpf_error(ljerr, "no destination found", off);
			/*NOTREACHED*/
		}
	    }
filled:
		++dst;
		++off;
	}
	if (offset)
		free(offset);

#ifdef BDEBUG
	bids[dst - fstart] = p->id + 1;
#endif
	dst->code = (u_short)p->s.code;
	dst->k = p->s.k;
	if (JT(p)) {
		extrajmps = 0;
		off = JT(p)->offset - (p->offset + slen) - 1;
		if (off >= 256) {
		    /* offset too large for branch, must add a jump */
		    if (p->longjt == 0) {
		    	/* mark this instruction and retry */
			p->longjt++;
			return(0);
		    }
		    /* branch if T to following jump */
		    dst->jt = extrajmps;
		    extrajmps++;
		    dst[extrajmps].code = BPF_JMP|BPF_JA;
		    dst[extrajmps].k = off - extrajmps;
		}
		else
		    dst->jt = off;
		off = JF(p)->offset - (p->offset + slen) - 1;
		if (off >= 256) {
		    /* offset too large for branch, must add a jump */
		    if (p->longjf == 0) {
		    	/* mark this instruction and retry */
			p->longjf++;
			return(0);
		    }
		    /* branch if F to following jump */
		    /* if two jumps are inserted, F goes to second one */
		    dst->jf = extrajmps;
		    extrajmps++;
		    dst[extrajmps].code = BPF_JMP|BPF_JA;
		    dst[extrajmps].k = off - extrajmps;
		}
		else
		    dst->jf = off;
	}
	return (1);
}


/*
 * Convert flowgraph intermediate representation to the
 * BPF array representation.  Set *lenp to the number of instructions.
 */
struct bpf_insn *
icode_to_fcode(root, lenp)
	struct block *root;
	int *lenp;
{
	int n;
	struct bpf_insn *fp;

	/*
	 * Loop doing convert_code_r() until no branches remain
	 * with too-large offsets.
	 */
	while (1) {
	    unMarkAll();
	    n = *lenp = count_stmts(root);

	    fp = (struct bpf_insn *)malloc(sizeof(*fp) * n);
	    memset((char *)fp, 0, sizeof(*fp) * n);
	    fstart = fp;
	    ftail = fp + n;

	    unMarkAll();
	    if (convert_code_r(root))
		break;
	    free(fp);
	}

	return fp;
}

/*
 * Make a copy of a BPF program and put it in the "fcode" member of
 * a "pcap_t".
 *
 * If we fail to allocate memory for the copy, fill in the "errbuf"
 * member of the "pcap_t" with an error message, and return -1;
 * otherwise, return 0.
 */
int
install_bpf_program(pcap_t *p, struct bpf_program *fp)
{
	size_t prog_size;

	/*
	 * Free up any already installed program.
	 */
	pcap_freecode(&p->fcode);

	prog_size = sizeof(*fp->bf_insns) * fp->bf_len;
	p->fcode.bf_len = fp->bf_len;
	p->fcode.bf_insns = (struct bpf_insn *)malloc(prog_size);
	if (p->fcode.bf_insns == NULL) {
		snprintf(p->errbuf, sizeof(p->errbuf),
			 "malloc: %s", pcap_strerror(errno));
		return (-1);
	}
	memcpy(p->fcode.bf_insns, fp->bf_insns, prog_size);
	return (0);
}
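
/*
 * Usage sketch (illustrative, with a hypothetical function name): a
 * capture module that filters in userland can implement its setfilter
 * entry point as
 *
 *	static int
 *	my_setfilter(pcap_t *p, struct bpf_program *fp)
 *	{
 *		return (install_bpf_program(p, fp));
 *	}
 *
 * The copy installed in p->fcode is then typically run on each packet
 * with bpf_filter().
 */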

#ifdef BDEBUG
static void
opt_dump(root)
	struct block *root;
{
	struct bpf_program f;

	memset(bids, 0, sizeof bids);
	f.bf_insns = icode_to_fcode(root, &f.bf_len);
	bpf_dump(&f, 1);
	putchar('\n');
	free((char *)f.bf_insns);
}
#endif