simplify-rtx.c revision 117395
/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */


#include "config.h"
#include "system.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "flags.h"
#include "real.h"
#include "insn-config.h"
#include "recog.h"
#include "function.h"
#include "expr.h"
#include "toplev.h"
#include "output.h"
#include "ggc.h"

/* Simplification and canonicalization of RTL.  */

/* Nonzero if X has the form (PLUS frame-pointer integer).  We check for
   virtual regs here because the simplify_*_operation routines are called
   by integrate.c, which is called before virtual register instantiation.

   ?!? NONZERO_BASE_PLUS_P needs to move into a header file so that
   its definition can be shared with cse.c.  Until then, do not
   change this macro without also changing the copy in cse.c.  */

/* Allows reference to the stack pointer.

   This used to include FIXED_BASE_PLUS_P; however, we can't assume that
   arg_pointer_rtx by itself is nonzero, because on at least one machine,
   the i960, the arg pointer is zero when it is unused.  */

#define NONZERO_BASE_PLUS_P(X)					\
  ((X) == frame_pointer_rtx || (X) == hard_frame_pointer_rtx	\
   || (X) == virtual_stack_vars_rtx				\
   || (X) == virtual_incoming_args_rtx				\
   || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
       && (XEXP (X, 0) == frame_pointer_rtx			\
	   || XEXP (X, 0) == hard_frame_pointer_rtx		\
	   || (XEXP (X, 0) == arg_pointer_rtx			\
	       && fixed_regs[ARG_POINTER_REGNUM])		\
	   || XEXP (X, 0) == virtual_stack_vars_rtx		\
	   || XEXP (X, 0) == virtual_incoming_args_rtx))	\
   || (X) == stack_pointer_rtx					\
   || (X) == virtual_stack_dynamic_rtx				\
   || (X) == virtual_outgoing_args_rtx				\
   || (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == CONST_INT \
       && (XEXP (X, 0) == stack_pointer_rtx			\
	   || XEXP (X, 0) == virtual_stack_dynamic_rtx		\
	   || XEXP (X, 0) == virtual_outgoing_args_rtx))	\
   || GET_CODE (X) == ADDRESSOF)
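
/* For example, NONZERO_BASE_PLUS_P accepts (reg frame_pointer) and
   (plus (reg frame_pointer) (const_int 8)), both of which are
   addresses known to be nonzero.  */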

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
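
/* For example, a LOW value with its high bit set, such as
   (HOST_WIDE_INT) -5, extends to a high word of -1, so the
   (low, high) pair then represents the double-width value -5.  */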

static rtx neg_const_int PARAMS ((enum machine_mode, rtx));
static int simplify_plus_minus_op_data_cmp PARAMS ((const void *,
						    const void *));
static rtx simplify_plus_minus		PARAMS ((enum rtx_code,
						 enum machine_mode, rtx,
						 rtx, int));

/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */
static rtx
neg_const_int (mode, i)
     enum machine_mode mode;
     rtx i;
{
  return gen_int_mode (- INTVAL (i), mode);
}


/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (code, mode, op0, op1)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op0, op1;
{
  rtx tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == 'c'
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Handle addition and subtraction specially.  Otherwise, just form
     the operation.  */

  if (code == PLUS || code == MINUS)
    {
      tem = simplify_plus_minus (code, mode, op0, op1, 1);
      if (tem)
	return tem;
    }

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
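
/* For example, simplify_gen_binary (PLUS, SImode, x, const0_rtx)
   folds to X itself, while a sum of two distinct pseudo registers
   that cannot be folded comes back as a fresh (plus:SI ...) rtx.  */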

/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (x)
     rtx x;
{
  rtx c, addr;
  enum machine_mode cmode;

  if (GET_CODE (x) != MEM)
    return x;
  addr = XEXP (x, 0);

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  if (GET_CODE (addr) != SYMBOL_REF
      || ! CONSTANT_POOL_ADDRESS_P (addr))
    return x;

  c = get_pool_constant (addr);
  cmode = get_pool_mode (addr);

  /* If we're accessing the constant in a different mode than it was
     originally stored, attempt to fix that up via subreg simplifications.
     If that fails we have no choice but to return the original memory.  */
  if (cmode != GET_MODE (x))
    {
      c = simplify_subreg (GET_MODE (x), c, cmode, 0);
      return c ? c : x;
    }

  return c;
}
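
/* For example, given a (mem:DF ...) whose address is a constant pool
   SYMBOL_REF for the DFmode constant 2.0, this returns the
   CONST_DOUBLE for 2.0 so the constant folders can see the value.  */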

/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (code, mode, op, op_mode)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op;
     enum machine_mode op_mode;
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}

/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (code, mode, op0_mode, op0, op1, op2)
     enum rtx_code code;
     enum machine_mode mode, op0_mode;
     rtx op0, op1, op2;
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
					      op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}

/* Likewise, for relational operations.
   CMP_MODE specifies the mode in which the comparison is done.  */

rtx
simplify_gen_relational (code, mode, cmp_mode, op0, op1)
     enum rtx_code code;
     enum machine_mode mode;
     enum machine_mode cmp_mode;
     rtx op0, op1;
{
  rtx tem;

  if ((tem = simplify_relational_operation (code, cmp_mode, op0, op1)) != 0)
    return tem;

  /* For the following tests, ensure const0_rtx is op1.  */
  if (op0 == const0_rtx && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);

  /* If op0 is itself a comparison, testing it against zero is either
     op0 itself (NE) or its logical reverse (EQ).  */
  if (code == NE && op1 == const0_rtx
      && GET_RTX_CLASS (GET_CODE (op0)) == '<')
    return op0;
  else if (code == EQ && op1 == const0_rtx)
    {
      /* The following tests GET_RTX_CLASS (GET_CODE (op0)) == '<'.  */
      enum rtx_code new = reversed_comparison_code (op0, NULL_RTX);
      if (new != UNKNOWN)
        {
	  code = new;
	  mode = cmp_mode;
	  op1 = XEXP (op0, 1);
	  op0 = XEXP (op0, 0);
        }
    }

  /* Put complex operands first and constants second.  */
  if (swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
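
/* For example, (eq (ltu x y) (const_int 0)) simplifies here to
   (geu x y), provided the reverse of LTU can be determined.  */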

/* Replace all occurrences of OLD in X with NEW and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (x, old, new)
     rtx x;
     rtx old;
     rtx new;
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);

  /* If X is OLD, return NEW.  Otherwise, if this is an expression, try
     to build a new expression substituting recursively.  If we can't do
     anything, return our input.  */

  if (x == old)
    return new;

  switch (GET_RTX_CLASS (code))
    {
    case '1':
      {
	enum machine_mode op_mode = GET_MODE (XEXP (x, 0));
	rtx op = (XEXP (x, 0) == old
		  ? new : simplify_replace_rtx (XEXP (x, 0), old, new));

	return simplify_gen_unary (code, mode, op, op_mode);
      }

    case '2':
    case 'c':
      return
	simplify_gen_binary (code, mode,
			     simplify_replace_rtx (XEXP (x, 0), old, new),
			     simplify_replace_rtx (XEXP (x, 1), old, new));
    case '<':
      {
	enum machine_mode op_mode = (GET_MODE (XEXP (x, 0)) != VOIDmode
				     ? GET_MODE (XEXP (x, 0))
				     : GET_MODE (XEXP (x, 1)));
	rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
	rtx op1 = simplify_replace_rtx (XEXP (x, 1), old, new);

	return
	  simplify_gen_relational (code, mode,
				   (op_mode != VOIDmode
				    ? op_mode
				    : GET_MODE (op0) != VOIDmode
				    ? GET_MODE (op0)
				    : GET_MODE (op1)),
				   op0, op1);
      }

    case '3':
    case 'b':
      {
	enum machine_mode op_mode = GET_MODE (XEXP (x, 0));
	rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);

	return
	  simplify_gen_ternary (code, mode,
				(op_mode != VOIDmode
				 ? op_mode
				 : GET_MODE (op0)),
				op0,
				simplify_replace_rtx (XEXP (x, 1), old, new),
				simplify_replace_rtx (XEXP (x, 2), old, new));
      }

    case 'x':
      /* The only case we try to handle is a SUBREG.  */
      if (code == SUBREG)
	{
	  rtx exp;
	  exp = simplify_gen_subreg (GET_MODE (x),
				     simplify_replace_rtx (SUBREG_REG (x),
							   old, new),
				     GET_MODE (SUBREG_REG (x)),
				     SUBREG_BYTE (x));
	  if (exp)
	    x = exp;
	}
      return x;

    case 'o':
      if (code == MEM)
	return replace_equiv_address_nv (x,
					 simplify_replace_rtx (XEXP (x, 0),
							       old, new));
      else if (code == LO_SUM)
	{
	  rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
	  rtx op1 = simplify_replace_rtx (XEXP (x, 1), old, new);

	  /* (lo_sum (high x) x) -> x  */
	  if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
	    return op1;

	  return gen_rtx_LO_SUM (mode, op0, op1);
	}
      else if (code == REG)
	{
	  if (REG_P (old) && REGNO (x) == REGNO (old))
	    return new;
	}

      return x;

    default:
      return x;
    }
  return x;
}
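
/* For example, replacing OLD = (reg 60) with NEW = (reg 61) in
   X = (plus (reg 60) (const_int 4)) rebuilds the PLUS through
   simplify_gen_binary, yielding (plus (reg 61) (const_int 4)).  */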

/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_unary_operation (code, mode, op, op_mode)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op;
     enum machine_mode op_mode;
{
  unsigned int width = GET_MODE_BITSIZE (mode);
  rtx trueop = avoid_constant_pool_reference (op);

  if (code == VEC_DUPLICATE)
    {
      if (!VECTOR_MODE_P (mode))
	abort ();
      if (GET_MODE (trueop) != VOIDmode
	  && !VECTOR_MODE_P (GET_MODE (trueop))
	  && GET_MODE_INNER (mode) != GET_MODE (trueop))
	abort ();
      if (GET_MODE (trueop) != VOIDmode
	  && VECTOR_MODE_P (GET_MODE (trueop))
	  && GET_MODE_INNER (mode) != GET_MODE_INNER (GET_MODE (trueop)))
	abort ();
      if (GET_CODE (trueop) == CONST_INT || GET_CODE (trueop) == CONST_DOUBLE
	  || GET_CODE (trueop) == CONST_VECTOR)
	{
	  int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
	  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	  rtvec v = rtvec_alloc (n_elts);
	  unsigned int i;

	  if (GET_CODE (trueop) != CONST_VECTOR)
	    for (i = 0; i < n_elts; i++)
	      RTVEC_ELT (v, i) = trueop;
	  else
	    {
	      enum machine_mode inmode = GET_MODE (trueop);
	      int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
	      unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);

	      if (in_n_elts >= n_elts || n_elts % in_n_elts)
		abort ();
	      for (i = 0; i < n_elts; i++)
		RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop, i % in_n_elts);
	    }
	  return gen_rtx_CONST_VECTOR (mode, v);
	}
    }

  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && GET_MODE (trueop) == VOIDmode
      && (GET_CODE (trueop) == CONST_DOUBLE || GET_CODE (trueop) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (trueop) == CONST_INT)
	lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
      else
	lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);

      REAL_VALUE_FROM_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && GET_MODE (trueop) == VOIDmode
	   && (GET_CODE (trueop) == CONST_DOUBLE
	       || GET_CODE (trueop) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (trueop) == CONST_INT)
	lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
      else
	lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);

      if (op_mode == VOIDmode)
	{
	  /* We don't know how to interpret negative-looking numbers in
	     this case, so don't try to fold those.  */
	  if (hv < 0)
	    return 0;
	}
      else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
	;
      else
	hv = 0, lv &= GET_MODE_MASK (op_mode);

      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }

  if (GET_CODE (trueop) == CONST_INT
      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT arg0 = INTVAL (trueop);
      HOST_WIDE_INT val;

      switch (code)
	{
	case NOT:
	  val = ~ arg0;
	  break;

	case NEG:
	  val = - arg0;
	  break;

	case ABS:
	  val = (arg0 >= 0 ? arg0 : - arg0);
	  break;

	case FFS:
	  /* Don't use ffs here.  Instead, get low order bit and then its
	     number.  If arg0 is zero, this will return 0, as desired.  */
	  arg0 &= GET_MODE_MASK (mode);
	  val = exact_log2 (arg0 & (- arg0)) + 1;
	  break;

	case TRUNCATE:
	  val = arg0;
	  break;

	case ZERO_EXTEND:
	  /* When zero-extending a CONST_INT, we need to know its
	     original mode.  */
	  if (op_mode == VOIDmode)
	    abort ();
	  if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
	    {
	      /* If we were really extending the mode,
		 we would have to distinguish between zero-extension
		 and sign-extension.  */
	      if (width != GET_MODE_BITSIZE (op_mode))
		abort ();
	      val = arg0;
	    }
	  else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
	    val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
	  else
	    return 0;
	  break;

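	/* For example, sign-extending the QImode constant 0xff gives -1:
	   the value is masked to the low 8 bits, and the copy of bit 7 is
	   then propagated into all higher bits.  */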
	case SIGN_EXTEND:
	  if (op_mode == VOIDmode)
	    op_mode = mode;
	  if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
	    {
	      /* If we were really extending the mode,
		 we would have to distinguish between zero-extension
		 and sign-extension.  */
	      if (width != GET_MODE_BITSIZE (op_mode))
		abort ();
	      val = arg0;
	    }
	  else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
	    {
	      val
		= arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
	      if (val
		  & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
		val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
	    }
	  else
	    return 0;
	  break;

	case SQRT:
	case FLOAT_EXTEND:
	case FLOAT_TRUNCATE:
	case SS_TRUNCATE:
	case US_TRUNCATE:
	  return 0;

	default:
	  abort ();
	}

      val = trunc_int_for_mode (val, mode);

      return GEN_INT (val);
    }

  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
     for a DImode operation on a CONST_INT.  */
  else if (GET_MODE (trueop) == VOIDmode
	   && width <= HOST_BITS_PER_WIDE_INT * 2
	   && (GET_CODE (trueop) == CONST_DOUBLE
	       || GET_CODE (trueop) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, lv;
      HOST_WIDE_INT h1, hv;

      if (GET_CODE (trueop) == CONST_DOUBLE)
	l1 = CONST_DOUBLE_LOW (trueop), h1 = CONST_DOUBLE_HIGH (trueop);
      else
	l1 = INTVAL (trueop), h1 = HWI_SIGN_EXTEND (l1);

      switch (code)
	{
	case NOT:
	  lv = ~ l1;
	  hv = ~ h1;
	  break;

	case NEG:
	  neg_double (l1, h1, &lv, &hv);
	  break;

	case ABS:
	  if (h1 < 0)
	    neg_double (l1, h1, &lv, &hv);
	  else
	    lv = l1, hv = h1;
	  break;

	case FFS:
	  hv = 0;
	  if (l1 == 0)
	    lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & (-h1)) + 1;
	  else
	    lv = exact_log2 (l1 & (-l1)) + 1;
	  break;

	case TRUNCATE:
	  /* This is just a change-of-mode, so do nothing.  */
	  lv = l1, hv = h1;
	  break;

	case ZERO_EXTEND:
	  if (op_mode == VOIDmode)
	    abort ();

	  if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
	    return 0;

	  hv = 0;
	  lv = l1 & GET_MODE_MASK (op_mode);
	  break;

	case SIGN_EXTEND:
	  if (op_mode == VOIDmode
	      || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
	    return 0;
	  else
	    {
	      lv = l1 & GET_MODE_MASK (op_mode);
	      if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
		  && (lv & ((HOST_WIDE_INT) 1
			    << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
		lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);

	      hv = HWI_SIGN_EXTEND (lv);
	    }
	  break;

	case SQRT:
	  return 0;

	default:
	  return 0;
	}

      return immed_double_const (lv, hv, mode);
    }

  else if (GET_CODE (trueop) == CONST_DOUBLE
	   && GET_MODE_CLASS (mode) == MODE_FLOAT)
    {
      REAL_VALUE_TYPE d;
      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop);

      switch (code)
	{
	case SQRT:
	  /* We don't attempt to optimize this.  */
	  return 0;

	case ABS:
	  d = REAL_VALUE_ABS (d);
	  break;
	case NEG:
	  d = REAL_VALUE_NEGATE (d);
	  break;
	case FLOAT_TRUNCATE:
	  d = real_value_truncate (mode, d);
	  break;
	case FLOAT_EXTEND:
	  /* All this does is change the mode.  */
	  break;
	case FIX:
	  real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
	  break;

	default:
	  abort ();
	}
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }

  else if (GET_CODE (trueop) == CONST_DOUBLE
	   && GET_MODE_CLASS (GET_MODE (trueop)) == MODE_FLOAT
	   && GET_MODE_CLASS (mode) == MODE_INT
	   && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT i;
      REAL_VALUE_TYPE d;
      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop);
      switch (code)
	{
	case FIX:		i = REAL_VALUE_FIX (d);		  break;
	case UNSIGNED_FIX:	i = REAL_VALUE_UNSIGNED_FIX (d);  break;
	default:
	  abort ();
	}
      return gen_int_mode (i, mode);
    }

  /* This was formerly used only for non-IEEE float.
     eggert@twinsun.com says it is safe for IEEE also.  */
  else
    {
      enum rtx_code reversed;
      /* There are some simplifications we can do even if the operands
	 aren't constant.  */
      switch (code)
	{
	case NOT:
	  /* (not (not X)) == X.  */
	  if (GET_CODE (op) == NOT)
	    return XEXP (op, 0);

	  /* (not (eq X Y)) == (ne X Y), etc.  */
	  if (mode == BImode && GET_RTX_CLASS (GET_CODE (op)) == '<'
	      && ((reversed = reversed_comparison_code (op, NULL_RTX))
		  != UNKNOWN))
	    return gen_rtx_fmt_ee (reversed,
				   op_mode, XEXP (op, 0), XEXP (op, 1));
	  break;

	case NEG:
	  /* (neg (neg X)) == X.  */
	  if (GET_CODE (op) == NEG)
	    return XEXP (op, 0);
	  break;

	case SIGN_EXTEND:
	  /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
	     becomes just the MINUS if its mode is MODE.  This allows
	     folding switch statements on machines using casesi (such as
	     the VAX).  */
	  if (GET_CODE (op) == TRUNCATE
	      && GET_MODE (XEXP (op, 0)) == mode
	      && GET_CODE (XEXP (op, 0)) == MINUS
	      && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
	      && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
	    return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
	  if (! POINTERS_EXTEND_UNSIGNED
	      && mode == Pmode && GET_MODE (op) == ptr_mode
	      && (CONSTANT_P (op)
		  || (GET_CODE (op) == SUBREG
		      && GET_CODE (SUBREG_REG (op)) == REG
		      && REG_POINTER (SUBREG_REG (op))
		      && GET_MODE (SUBREG_REG (op)) == Pmode)))
	    return convert_memory_address (Pmode, op);
#endif
	  break;

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
	case ZERO_EXTEND:
	  if (POINTERS_EXTEND_UNSIGNED > 0
	      && mode == Pmode && GET_MODE (op) == ptr_mode
	      && (CONSTANT_P (op)
		  || (GET_CODE (op) == SUBREG
		      && GET_CODE (SUBREG_REG (op)) == REG
		      && REG_POINTER (SUBREG_REG (op))
		      && GET_MODE (SUBREG_REG (op)) == Pmode)))
	    return convert_memory_address (Pmode, op);
	  break;
#endif

	default:
	  break;
	}

      return 0;
    }
}
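
/* For example, simplify_unary_operation (NEG, SImode, const1_rtx,
   SImode) folds to (const_int -1), and (not (not X)) comes back as X
   even when X is not constant.  */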

/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */
rtx
simplify_binary_operation (code, mode, op0, op1)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op0, op1;
{
  HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_BITSIZE (mode);
  rtx tem;
  rtx trueop0 = avoid_constant_pool_reference (op0);
  rtx trueop1 = avoid_constant_pool_reference (op1);

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */

  if (GET_RTX_CLASS (code) == '<')
    abort ();

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == 'c'
      && swap_commutative_operands_p (trueop0, trueop1))
    {
      tem = op0, op0 = op1, op1 = tem;
      tem = trueop0, trueop0 = trueop1, trueop1 = tem;
    }

  if (GET_MODE_CLASS (mode) == MODE_FLOAT
      && GET_CODE (trueop0) == CONST_DOUBLE
      && GET_CODE (trueop1) == CONST_DOUBLE
      && mode == GET_MODE (op0) && mode == GET_MODE (op1))
    {
      REAL_VALUE_TYPE f0, f1, value;

      REAL_VALUE_FROM_CONST_DOUBLE (f0, trueop0);
      REAL_VALUE_FROM_CONST_DOUBLE (f1, trueop1);
      f0 = real_value_truncate (mode, f0);
      f1 = real_value_truncate (mode, f1);

      if (code == DIV
	  && !MODE_HAS_INFINITIES (mode)
	  && REAL_VALUES_EQUAL (f1, dconst0))
	return 0;

      REAL_ARITHMETIC (value, rtx_to_tree_code (code), f0, f1);

      value = real_value_truncate (mode, value);
      return CONST_DOUBLE_FROM_REAL_VALUE (value, mode);
    }

  /* We can fold some multi-word operations.  */
  if (GET_MODE_CLASS (mode) == MODE_INT
      && width == HOST_BITS_PER_WIDE_INT * 2
      && (GET_CODE (trueop0) == CONST_DOUBLE
	  || GET_CODE (trueop0) == CONST_INT)
      && (GET_CODE (trueop1) == CONST_DOUBLE
	  || GET_CODE (trueop1) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, l2, lv;
      HOST_WIDE_INT h1, h2, hv;

      if (GET_CODE (trueop0) == CONST_DOUBLE)
	l1 = CONST_DOUBLE_LOW (trueop0), h1 = CONST_DOUBLE_HIGH (trueop0);
      else
	l1 = INTVAL (trueop0), h1 = HWI_SIGN_EXTEND (l1);

      if (GET_CODE (trueop1) == CONST_DOUBLE)
	l2 = CONST_DOUBLE_LOW (trueop1), h2 = CONST_DOUBLE_HIGH (trueop1);
      else
	l2 = INTVAL (trueop1), h2 = HWI_SIGN_EXTEND (l2);

      switch (code)
	{
	case MINUS:
	  /* A - B == A + (-B).  */
	  neg_double (l2, h2, &lv, &hv);
	  l2 = lv, h2 = hv;

	  /* ... fall through ...  */

	case PLUS:
	  add_double (l1, h1, l2, h2, &lv, &hv);
	  break;

	case MULT:
	  mul_double (l1, h1, l2, h2, &lv, &hv);
	  break;

	case DIV:  case MOD:   case UDIV:  case UMOD:
	  /* We'd need to include tree.h to do this and it doesn't seem worth
	     it.  */
	  return 0;

	case AND:
	  lv = l1 & l2, hv = h1 & h2;
	  break;

	case IOR:
	  lv = l1 | l2, hv = h1 | h2;
	  break;

	case XOR:
	  lv = l1 ^ l2, hv = h1 ^ h2;
	  break;

	case SMIN:
	  if (h1 < h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      < (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case SMAX:
	  if (h1 > h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      > (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case UMIN:
	  if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      < (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case UMAX:
	  if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      > (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case LSHIFTRT:   case ASHIFTRT:
	case ASHIFT:
	case ROTATE:     case ROTATERT:
#ifdef SHIFT_COUNT_TRUNCATED
	  if (SHIFT_COUNT_TRUNCATED)
	    l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
#endif

	  if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
	    return 0;

	  if (code == LSHIFTRT || code == ASHIFTRT)
	    rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
			   code == ASHIFTRT);
	  else if (code == ASHIFT)
	    lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
	  else if (code == ROTATE)
	    lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
	  else /* code == ROTATERT */
	    rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
	  break;

	default:
	  return 0;
	}

      return immed_double_const (lv, hv, mode);
    }

  if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
      || width > HOST_BITS_PER_WIDE_INT || width == 0)
    {
      /* Even if we can't compute a constant result,
	 there are some cases worth simplifying.  */

      switch (code)
	{
	case PLUS:
	  /* Maybe simplify x + 0 to x.  The two expressions are equivalent
	     when x is NaN, infinite, or finite and nonzero.  They aren't
	     when x is -0 and the rounding mode is not towards -infinity,
	     since (-0) + 0 is then 0.  */
	  if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
	    return op0;

	  /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
	     transformations are safe even for IEEE.  */
	  if (GET_CODE (op0) == NEG)
	    return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
	  else if (GET_CODE (op1) == NEG)
	    return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

	  /* (~a) + 1 -> -a */
	  if (INTEGRAL_MODE_P (mode)
	      && GET_CODE (op0) == NOT
	      && trueop1 == const1_rtx)
	    return gen_rtx_NEG (mode, XEXP (op0, 0));

	  /* Handle both-operands-constant cases.  We can only add
	     CONST_INTs to constants since the sum of relocatable symbols
	     can't be handled by most assemblers.  Don't add CONST_INT
	     to CONST_INT since overflow won't be computed properly if wider
	     than HOST_BITS_PER_WIDE_INT.  */

	  if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
	      && GET_CODE (op1) == CONST_INT)
	    return plus_constant (op0, INTVAL (op1));
	  else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
		   && GET_CODE (op0) == CONST_INT)
	    return plus_constant (op1, INTVAL (op0));

	  /* See if this is something like X * C + X or vice versa or
	     if the multiplication is written as a shift.  If so, we can
	     distribute and make a new multiply, shift, or maybe just
	     have X (if C is 2 in the example above).  But don't make a
	     real multiply if we didn't have one before.  */

	  if (! FLOAT_MODE_P (mode))
	    {
	      HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
	      rtx lhs = op0, rhs = op1;
	      int had_mult = 0;

	      if (GET_CODE (lhs) == NEG)
		coeff0 = -1, lhs = XEXP (lhs, 0);
	      else if (GET_CODE (lhs) == MULT
		       && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
		{
		  coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
		  had_mult = 1;
		}
	      else if (GET_CODE (lhs) == ASHIFT
		       && GET_CODE (XEXP (lhs, 1)) == CONST_INT
		       && INTVAL (XEXP (lhs, 1)) >= 0
		       && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
		{
		  coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
		  lhs = XEXP (lhs, 0);
		}

	      if (GET_CODE (rhs) == NEG)
		coeff1 = -1, rhs = XEXP (rhs, 0);
	      else if (GET_CODE (rhs) == MULT
		       && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
		{
		  coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
		  had_mult = 1;
		}
	      else if (GET_CODE (rhs) == ASHIFT
		       && GET_CODE (XEXP (rhs, 1)) == CONST_INT
		       && INTVAL (XEXP (rhs, 1)) >= 0
		       && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
		{
		  coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
		  rhs = XEXP (rhs, 0);
		}

	      if (rtx_equal_p (lhs, rhs))
		{
		  tem = simplify_gen_binary (MULT, mode, lhs,
					     GEN_INT (coeff0 + coeff1));
		  return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
		}
	    }
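
	  /* For example, (plus (mult x 4) x) becomes (mult x 5) here,
	     and (plus (ashift x 2) x) is recognized the same way; the
	     latter result is dropped again because no real MULT was
	     present in the input.  */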

	  /* If one of the operands is a PLUS or a MINUS, see if we can
	     simplify this by the associative law.
	     Don't use the associative law for floating point.
	     The inaccuracy makes it nonassociative,
	     and subtle programs can break if operations are associated.  */

	  if (INTEGRAL_MODE_P (mode)
	      && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
		  || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
		  || (GET_CODE (op0) == CONST
		      && GET_CODE (XEXP (op0, 0)) == PLUS)
		  || (GET_CODE (op1) == CONST
		      && GET_CODE (XEXP (op1, 0)) == PLUS))
	      && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
	    return tem;
	  break;

	case COMPARE:
#ifdef HAVE_cc0
	  /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
	     using cc0, in which case we want to leave it as a COMPARE
	     so we can distinguish it from a register-register-copy.

	     In IEEE floating point, x-0 is not the same as x.  */

	  if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
	       || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
	      && trueop1 == CONST0_RTX (mode))
	    return op0;
#endif

	  /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
	  if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
	       || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
	      && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
	    {
	      rtx xop00 = XEXP (op0, 0);
	      rtx xop10 = XEXP (op1, 0);

#ifdef HAVE_cc0
	      if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
#else
	      if (GET_CODE (xop00) == REG && GET_CODE (xop10) == REG
		  && GET_MODE (xop00) == GET_MODE (xop10)
		  && REGNO (xop00) == REGNO (xop10)
		  && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
		  && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
#endif
		return xop00;
	    }
	  break;

	case MINUS:
	  /* We can't assume x-x is 0 even with non-IEEE floating point,
	     but since it is zero except in very strange circumstances, we
	     will treat it as zero with -funsafe-math-optimizations.  */
	  if (rtx_equal_p (trueop0, trueop1)
	      && ! side_effects_p (op0)
	      && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
	    return CONST0_RTX (mode);

	  /* Change subtraction from zero into negation.  (0 - x) is the
	     same as -x when x is NaN, infinite, or finite and nonzero.
	     But if the mode has signed zeros, and does not round towards
	     -infinity, then 0 - 0 is 0, not -0.  */
	  if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
	    return gen_rtx_NEG (mode, op1);

	  /* (-1 - a) is ~a.  */
	  if (trueop0 == constm1_rtx)
	    return gen_rtx_NOT (mode, op1);

	  /* Subtracting 0 has no effect unless the mode has signed zeros
	     and supports rounding towards -infinity.  In such a case,
	     0 - 0 is -0.  */
	  if (!(HONOR_SIGNED_ZEROS (mode)
		&& HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	      && trueop1 == CONST0_RTX (mode))
	    return op0;

	  /* See if this is something like X * C - X or vice versa or
	     if the multiplication is written as a shift.  If so, we can
	     distribute and make a new multiply, shift, or maybe just
	     have X (if C is 2 in the example above).  But don't make a
	     real multiply if we didn't have one before.  */

	  if (! FLOAT_MODE_P (mode))
	    {
	      HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
	      rtx lhs = op0, rhs = op1;
	      int had_mult = 0;

	      if (GET_CODE (lhs) == NEG)
		coeff0 = -1, lhs = XEXP (lhs, 0);
	      else if (GET_CODE (lhs) == MULT
		       && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
		{
		  coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
		  had_mult = 1;
		}
	      else if (GET_CODE (lhs) == ASHIFT
		       && GET_CODE (XEXP (lhs, 1)) == CONST_INT
		       && INTVAL (XEXP (lhs, 1)) >= 0
		       && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
		{
		  coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
		  lhs = XEXP (lhs, 0);
		}

	      if (GET_CODE (rhs) == NEG)
		coeff1 = -1, rhs = XEXP (rhs, 0);
	      else if (GET_CODE (rhs) == MULT
		       && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
		{
		  coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
		  had_mult = 1;
		}
	      else if (GET_CODE (rhs) == ASHIFT
		       && GET_CODE (XEXP (rhs, 1)) == CONST_INT
		       && INTVAL (XEXP (rhs, 1)) >= 0
		       && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
		{
		  coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
		  rhs = XEXP (rhs, 0);
		}

	      if (rtx_equal_p (lhs, rhs))
		{
		  tem = simplify_gen_binary (MULT, mode, lhs,
					     GEN_INT (coeff0 - coeff1));
		  return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
		}
	    }

	  /* (a - (-b)) -> (a + b).  True even for IEEE.  */
	  if (GET_CODE (op1) == NEG)
	    return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));

	  /* If one of the operands is a PLUS or a MINUS, see if we can
	     simplify this by the associative law.
	     Don't use the associative law for floating point.
	     The inaccuracy makes it nonassociative,
	     and subtle programs can break if operations are associated.  */

	  if (INTEGRAL_MODE_P (mode)
	      && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
		  || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
		  || (GET_CODE (op0) == CONST
		      && GET_CODE (XEXP (op0, 0)) == PLUS)
		  || (GET_CODE (op1) == CONST
		      && GET_CODE (XEXP (op1, 0)) == PLUS))
	      && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
	    return tem;

	  /* Don't let a relocatable value get a negative coeff.  */
	  if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
	    return simplify_gen_binary (PLUS, mode,
					op0,
					neg_const_int (mode, op1));

	  /* (x - (x & y)) -> (x & ~y) */
	  if (GET_CODE (op1) == AND)
	    {
	      if (rtx_equal_p (op0, XEXP (op1, 0)))
		return simplify_gen_binary (AND, mode, op0,
					    gen_rtx_NOT (mode, XEXP (op1, 1)));
	      if (rtx_equal_p (op0, XEXP (op1, 1)))
		return simplify_gen_binary (AND, mode, op0,
					    gen_rtx_NOT (mode, XEXP (op1, 0)));
	    }
	  break;

	case MULT:
	  if (trueop1 == constm1_rtx)
	    {
	      tem = simplify_unary_operation (NEG, mode, op0, mode);

	      return tem ? tem : gen_rtx_NEG (mode, op0);
	    }

	  /* Maybe simplify x * 0 to 0.  The reduction is not valid if
	     x is NaN, since x * 0 is then also NaN.  Nor is it valid
	     when the mode has signed zeros, since multiplying a negative
	     number by 0 will give -0, not 0.  */
	  if (!HONOR_NANS (mode)
	      && !HONOR_SIGNED_ZEROS (mode)
	      && trueop1 == CONST0_RTX (mode)
	      && ! side_effects_p (op0))
	    return op1;

	  /* In IEEE floating point, x*1 is not equivalent to x for
	     signalling NaNs.  */
	  if (!HONOR_SNANS (mode)
	      && trueop1 == CONST1_RTX (mode))
	    return op0;

	  /* Convert multiply by constant power of two into shift unless
	     we are still generating RTL.  This test is a kludge.  */
	  if (GET_CODE (trueop1) == CONST_INT
	      && (val = exact_log2 (INTVAL (trueop1))) >= 0
	      /* If the mode is larger than the host word size, and the
		 uppermost bit is set, then this isn't a power of two due
		 to implicit sign extension.  */
	      && (width <= HOST_BITS_PER_WIDE_INT
		  || val != HOST_BITS_PER_WIDE_INT - 1)
	      && ! rtx_equal_function_value_matters)
	    return gen_rtx_ASHIFT (mode, op0, GEN_INT (val));

	  /* x*2 is x+x and x*(-1) is -x */
	  if (GET_CODE (trueop1) == CONST_DOUBLE
	      && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
	      && GET_MODE (op0) == mode)
	    {
	      REAL_VALUE_TYPE d;
	      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

	      if (REAL_VALUES_EQUAL (d, dconst2))
		return gen_rtx_PLUS (mode, op0, copy_rtx (op0));

	      if (REAL_VALUES_EQUAL (d, dconstm1))
		return gen_rtx_NEG (mode, op0);
	    }
	  break;

	case IOR:
	  if (trueop1 == const0_rtx)
	    return op0;
	  if (GET_CODE (trueop1) == CONST_INT
	      && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
		  == GET_MODE_MASK (mode)))
	    return op1;
	  if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	    return op0;
	  /* A | (~A) -> -1 */
	  if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
	       || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
	      && ! side_effects_p (op0)
	      && GET_MODE_CLASS (mode) != MODE_CC)
	    return constm1_rtx;
	  break;

	case XOR:
	  if (trueop1 == const0_rtx)
	    return op0;
	  if (GET_CODE (trueop1) == CONST_INT
	      && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
		  == GET_MODE_MASK (mode)))
	    return gen_rtx_NOT (mode, op0);
	  if (trueop0 == trueop1 && ! side_effects_p (op0)
	      && GET_MODE_CLASS (mode) != MODE_CC)
	    return const0_rtx;
	  break;

	case AND:
	  if (trueop1 == const0_rtx && ! side_effects_p (op0))
	    return const0_rtx;
	  if (GET_CODE (trueop1) == CONST_INT
	      && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
		  == GET_MODE_MASK (mode)))
	    return op0;
	  if (trueop0 == trueop1 && ! side_effects_p (op0)
	      && GET_MODE_CLASS (mode) != MODE_CC)
	    return op0;
	  /* A & (~A) -> 0 */
	  if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
	       || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
	      && ! side_effects_p (op0)
	      && GET_MODE_CLASS (mode) != MODE_CC)
	    return const0_rtx;
	  break;

	case UDIV:
	  /* Convert divide by power of two into shift (divide by 1 handled
	     below).  */
	  if (GET_CODE (trueop1) == CONST_INT
	      && (arg1 = exact_log2 (INTVAL (trueop1))) > 0)
	    return gen_rtx_LSHIFTRT (mode, op0, GEN_INT (arg1));

	  /* ... fall through ...  */

	case DIV:
	  if (trueop1 == CONST1_RTX (mode))
	    {
	      /* On some platforms DIV uses narrower mode than its
		 operands.  */
	      rtx x = gen_lowpart_common (mode, op0);
	      if (x)
		return x;
	      else if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
		return gen_lowpart_SUBREG (mode, op0);
	      else
		return op0;
	    }

	  /* Maybe change 0 / x to 0.  This transformation isn't safe for
	     modes with NaNs, since 0 / 0 will then be NaN rather than 0.
	     Nor is it safe for modes with signed zeros, since dividing
	     0 by a negative number gives -0, not 0.  */
	  if (!HONOR_NANS (mode)
	      && !HONOR_SIGNED_ZEROS (mode)
	      && trueop0 == CONST0_RTX (mode)
	      && ! side_effects_p (op1))
	    return op0;

	  /* Change division by a constant into multiplication.  Only do
	     this with -funsafe-math-optimizations.  */
	  else if (GET_CODE (trueop1) == CONST_DOUBLE
		   && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
		   && trueop1 != CONST0_RTX (mode)
		   && flag_unsafe_math_optimizations)
	    {
	      REAL_VALUE_TYPE d;
	      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

	      if (! REAL_VALUES_EQUAL (d, dconst0))
		{
		  REAL_ARITHMETIC (d, rtx_to_tree_code (DIV), dconst1, d);
		  return gen_rtx_MULT (mode, op0,
				       CONST_DOUBLE_FROM_REAL_VALUE (d, mode));
		}
	    }
	  break;

	case UMOD:
	  /* Handle modulus by power of two (mod with 1 handled below).  */
	  if (GET_CODE (trueop1) == CONST_INT
	      && exact_log2 (INTVAL (trueop1)) > 0)
	    return gen_rtx_AND (mode, op0, GEN_INT (INTVAL (op1) - 1));

	  /* ... fall through ...  */

	case MOD:
	  if ((trueop0 == const0_rtx || trueop1 == const1_rtx)
	      && ! side_effects_p (op0) && ! side_effects_p (op1))
	    return const0_rtx;
	  break;

	case ROTATERT:
	case ROTATE:
	  /* Rotating ~0 always results in ~0.  */
	  if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
	      && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
	      && ! side_effects_p (op1))
	    return op0;

	  /* ... fall through ...  */

	case ASHIFT:
	case ASHIFTRT:
	case LSHIFTRT:
	  if (trueop1 == const0_rtx)
	    return op0;
	  if (trueop0 == const0_rtx && ! side_effects_p (op1))
	    return op0;
	  break;

	case SMIN:
	  if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (trueop1) == CONST_INT
	      && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width - 1)
	      && ! side_effects_p (op0))
	    return op1;
	  else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	    return op0;
	  break;

	case SMAX:
	  if (width <= HOST_BITS_PER_WIDE_INT && GET_CODE (trueop1) == CONST_INT
	      && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
		  == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
	      && ! side_effects_p (op0))
	    return op1;
	  else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	    return op0;
	  break;

	case UMIN:
	  if (trueop1 == const0_rtx && ! side_effects_p (op0))
	    return op1;
	  else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	    return op0;
	  break;

	case UMAX:
	  if (trueop1 == constm1_rtx && ! side_effects_p (op0))
	    return op1;
	  else if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	    return op0;
	  break;

	case SS_PLUS:
	case US_PLUS:
	case SS_MINUS:
	case US_MINUS:
	  /* ??? There are simplifications that can be done.  */
	  return 0;

	case VEC_SELECT:
	case VEC_CONCAT:
	  return 0;

	default:
	  abort ();
	}

      return 0;
    }

  /* Get the integer argument values in two forms:
     zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S.  */

  arg0 = INTVAL (trueop0);
  arg1 = INTVAL (trueop1);

  if (width < HOST_BITS_PER_WIDE_INT)
    {
      arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
      arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;

      arg0s = arg0;
      if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
	arg0s |= ((HOST_WIDE_INT) (-1) << width);

      arg1s = arg1;
      if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
	arg1s |= ((HOST_WIDE_INT) (-1) << width);
    }
  else
    {
      arg0s = arg0;
      arg1s = arg1;
    }
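
  /* For example, with width == 8 and ARG0 == 0xff, ARG0S becomes -1;
     ARG0 is the zero-extended view of the 8-bit value and ARG0S the
     sign-extended one.  */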

  /* Compute the value of the arithmetic.  */

  switch (code)
    {
    case PLUS:
      val = arg0s + arg1s;
      break;

    case MINUS:
      val = arg0s - arg1s;
      break;

    case MULT:
      val = arg0s * arg1s;
      break;

    case DIV:
      if (arg1s == 0
	  || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
	      && arg1s == -1))
	return 0;
      val = arg0s / arg1s;
      break;

    case MOD:
      if (arg1s == 0
	  || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
	      && arg1s == -1))
	return 0;
      val = arg0s % arg1s;
      break;

    case UDIV:
      if (arg1 == 0
	  || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
	      && arg1s == -1))
	return 0;
      val = (unsigned HOST_WIDE_INT) arg0 / arg1;
      break;

    case UMOD:
      if (arg1 == 0
	  || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
	      && arg1s == -1))
	return 0;
      val = (unsigned HOST_WIDE_INT) arg0 % arg1;
      break;

    case AND:
      val = arg0 & arg1;
      break;

    case IOR:
      val = arg0 | arg1;
      break;

    case XOR:
      val = arg0 ^ arg1;
      break;

    case LSHIFTRT:
      /* If shift count is undefined, don't fold it; let the machine do
	 what it wants.  But truncate it if the machine will do that.  */
      if (arg1 < 0)
	return 0;

#ifdef SHIFT_COUNT_TRUNCATED
      if (SHIFT_COUNT_TRUNCATED)
	arg1 %= width;
#endif

      val = ((unsigned HOST_WIDE_INT) arg0) >> arg1;
      break;

    case ASHIFT:
      if (arg1 < 0)
	return 0;

#ifdef SHIFT_COUNT_TRUNCATED
      if (SHIFT_COUNT_TRUNCATED)
	arg1 %= width;
#endif

      val = ((unsigned HOST_WIDE_INT) arg0) << arg1;
      break;

    case ASHIFTRT:
      if (arg1 < 0)
	return 0;

#ifdef SHIFT_COUNT_TRUNCATED
      if (SHIFT_COUNT_TRUNCATED)
	arg1 %= width;
#endif

      val = arg0s >> arg1;

      /* Bootstrap compiler may not have sign extended the right shift.
	 Manually extend the sign to ensure bootstrap cc matches gcc.  */
      if (arg0s < 0 && arg1 > 0)
	val |= ((HOST_WIDE_INT) -1) << (HOST_BITS_PER_WIDE_INT - arg1);

      break;

    case ROTATERT:
      if (arg1 < 0)
	return 0;

      arg1 %= width;
      val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
	     | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
      break;

    case ROTATE:
      if (arg1 < 0)
	return 0;

      arg1 %= width;
      val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
	     | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
      break;

    case COMPARE:
      /* Do nothing here.  */
      return 0;

    case SMIN:
      val = arg0s <= arg1s ? arg0s : arg1s;
      break;

    case UMIN:
      val = ((unsigned HOST_WIDE_INT) arg0
	     <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
      break;

    case SMAX:
      val = arg0s > arg1s ? arg0s : arg1s;
      break;

    case UMAX:
      val = ((unsigned HOST_WIDE_INT) arg0
	     > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
      break;

    default:
      abort ();
    }

  val = trunc_int_for_mode (val, mode);

  return GEN_INT (val);
}
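
/* For example, simplify_binary_operation (PLUS, QImode, GEN_INT (100),
   GEN_INT (100)) computes 200 and then truncates it for QImode,
   returning (const_int -56).  */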
1630
1631/* Simplify a PLUS or MINUS, at least one of whose operands may be another
1632   PLUS or MINUS.
1633
1634   Rather than test for specific case, we do this by a brute-force method
1635   and do all possible simplifications until no more changes occur.  Then
1636   we rebuild the operation.
1637
1638   If FORCE is true, then always generate the rtx.  This is used to
1639   canonicalize stuff emitted from simplify_gen_binary.  Note that this
1640   can still fail if the rtx is too complex.  It won't fail just because
1641   the result is not 'simpler' than the input, however.  */
1642
1643struct simplify_plus_minus_op_data
1644{
1645  rtx op;
1646  int neg;
1647};
1648
1649static int
1650simplify_plus_minus_op_data_cmp (p1, p2)
1651     const void *p1;
1652     const void *p2;
1653{
1654  const struct simplify_plus_minus_op_data *d1 = p1;
1655  const struct simplify_plus_minus_op_data *d2 = p2;
1656
1657  return (commutative_operand_precedence (d2->op)
1658	  - commutative_operand_precedence (d1->op));
1659}
1660
1661static rtx
1662simplify_plus_minus (code, mode, op0, op1, force)
1663     enum rtx_code code;
1664     enum machine_mode mode;
1665     rtx op0, op1;
1666     int force;
1667{
1668  struct simplify_plus_minus_op_data ops[8];
1669  rtx result, tem;
1670  int n_ops = 2, input_ops = 2, input_consts = 0, n_consts;
1671  int first, negate, changed;
1672  int i, j;
1673
1674  memset ((char *) ops, 0, sizeof ops);
1675
1676  /* Set up the two operands and then expand them until nothing has been
1677     changed.  If we run out of room in our array, give up; this should
1678     almost never happen.  */
1679
1680  ops[0].op = op0;
1681  ops[0].neg = 0;
1682  ops[1].op = op1;
1683  ops[1].neg = (code == MINUS);
1684
1685  do
1686    {
1687      changed = 0;
1688
1689      for (i = 0; i < n_ops; i++)
1690	{
1691	  rtx this_op = ops[i].op;
1692	  int this_neg = ops[i].neg;
1693	  enum rtx_code this_code = GET_CODE (this_op);
1694
1695	  switch (this_code)
1696	    {
1697	    case PLUS:
1698	    case MINUS:
1699	      if (n_ops == 7)
1700		return NULL_RTX;
1701
1702	      ops[n_ops].op = XEXP (this_op, 1);
1703	      ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
1704	      n_ops++;
1705
1706	      ops[i].op = XEXP (this_op, 0);
1707	      input_ops++;
1708	      changed = 1;
1709	      break;
1710
1711	    case NEG:
1712	      ops[i].op = XEXP (this_op, 0);
1713	      ops[i].neg = ! this_neg;
1714	      changed = 1;
1715	      break;
1716
1717	    case CONST:
1718	      if (n_ops < 7
1719		  && GET_CODE (XEXP (this_op, 0)) == PLUS
1720		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
1721		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
1722		{
1723		  ops[i].op = XEXP (XEXP (this_op, 0), 0);
1724		  ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
1725		  ops[n_ops].neg = this_neg;
1726		  n_ops++;
1727		  input_consts++;
1728		  changed = 1;
1729		}
1730	      break;
1731
1732	    case NOT:
1733	      /* ~a -> (-a - 1) */
1734	      if (n_ops != 7)
1735		{
1736		  ops[n_ops].op = constm1_rtx;
1737		  ops[n_ops++].neg = this_neg;
1738		  ops[i].op = XEXP (this_op, 0);
1739		  ops[i].neg = !this_neg;
1740		  changed = 1;
1741		}
1742	      break;
1743
1744	    case CONST_INT:
1745	      if (this_neg)
1746		{
1747		  ops[i].op = neg_const_int (mode, this_op);
1748		  ops[i].neg = 0;
1749		  changed = 1;
1750		}
1751	      break;
1752
1753	    default:
1754	      break;
1755	    }
1756	}
1757    }
1758  while (changed);
1759
1760  /* If we only have two operands, we can't do anything.  */
1761  if (n_ops <= 2 && !force)
1762    return NULL_RTX;
1763
1764  /* Count the number of CONSTs we didn't split above.  */
1765  for (i = 0; i < n_ops; i++)
1766    if (GET_CODE (ops[i].op) == CONST)
1767      input_consts++;
1768
1769  /* Now simplify each pair of operands until nothing changes.  The first
1770     time through just simplify constants against each other.  */
1771
  first = 1;
  do
    {
      changed = first;

      for (i = 0; i < n_ops - 1; i++)
	for (j = i + 1; j < n_ops; j++)
	  {
	    rtx lhs = ops[i].op, rhs = ops[j].op;
	    int lneg = ops[i].neg, rneg = ops[j].neg;

	    if (lhs != 0 && rhs != 0
		&& (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs))))
	      {
		enum rtx_code ncode = PLUS;

		if (lneg != rneg)
		  {
		    ncode = MINUS;
		    if (lneg)
		      tem = lhs, lhs = rhs, rhs = tem;
		  }
		else if (swap_commutative_operands_p (lhs, rhs))
		  tem = lhs, lhs = rhs, rhs = tem;

		tem = simplify_binary_operation (ncode, mode, lhs, rhs);

		/* Reject "simplifications" that just wrap the two
		   arguments in a CONST.  Failure to do so can result
		   in infinite recursion with simplify_binary_operation
		   when it calls us to simplify CONST operations.  */
		if (tem
		    && ! (GET_CODE (tem) == CONST
			  && GET_CODE (XEXP (tem, 0)) == ncode
			  && XEXP (XEXP (tem, 0), 0) == lhs
			  && XEXP (XEXP (tem, 0), 1) == rhs)
		    /* Don't allow -x + -1 -> ~x simplifications in the
		       first pass.  This gives us a chance to combine
		       the -1 with other constants.  */
		    && ! (first
			  && GET_CODE (tem) == NOT
			  && XEXP (tem, 0) == rhs))
		  {
		    lneg &= rneg;
		    if (GET_CODE (tem) == NEG)
		      tem = XEXP (tem, 0), lneg = !lneg;
		    if (GET_CODE (tem) == CONST_INT && lneg)
		      tem = neg_const_int (mode, tem), lneg = 0;

		    ops[i].op = tem;
		    ops[i].neg = lneg;
		    ops[j].op = NULL_RTX;
		    changed = 1;
		  }
	      }
	  }

      first = 0;
    }
  while (changed);

  /* Pack all the operands to the lower-numbered entries.  */
  for (i = 0, j = 0; j < n_ops; j++)
    if (ops[j].op)
      ops[i++] = ops[j];
  n_ops = i;

  /* Sort the operations based on swap_commutative_operands_p.  */
  qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp);

  /* We suppressed creation of trivial CONST expressions in the
     combination loop to avoid recursion.  Create one manually now.
     The combination loop should have ensured that there is exactly
     one CONST_INT, and the sort will have ensured that it is last
     in the array and that any other constant will be next-to-last.  */

  if (n_ops > 1
      && GET_CODE (ops[n_ops - 1].op) == CONST_INT
      && CONSTANT_P (ops[n_ops - 2].op))
    {
      rtx value = ops[n_ops - 1].op;
      if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
	value = neg_const_int (mode, value);
      ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
      n_ops--;
    }
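  /* E.g. with ops = { (symbol_ref X), (const_int 4) }, this folds the
     pair into the canonical form
     (const (plus (symbol_ref X) (const_int 4))) via plus_constant.  */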

  /* Count the number of CONSTs that we generated.  */
  n_consts = 0;
  for (i = 0; i < n_ops; i++)
    if (GET_CODE (ops[i].op) == CONST)
      n_consts++;

  /* Give up if we didn't reduce the number of operands we had.  Make
     sure we count a CONST as two operands.  If we have the same
     number of operands, but have made more CONSTs than before, this
     is also an improvement, so accept it.  */
  if (!force
      && (n_ops + n_consts > input_ops
	  || (n_ops + n_consts == input_ops && n_consts <= input_consts)))
    return NULL_RTX;

  /* Put a non-negated operand first.  If there aren't any, make all
     operands positive and negate the whole thing later.  */

  negate = 0;
  for (i = 0; i < n_ops && ops[i].neg; i++)
    continue;
  if (i == n_ops)
    {
      for (i = 0; i < n_ops; i++)
	ops[i].neg = 0;
      negate = 1;
    }
  else if (i != 0)
    {
      tem = ops[0].op;
      ops[0] = ops[i];
      ops[i].op = tem;
      ops[i].neg = 1;
    }

  /* Now make the result by performing the requested operations.  */
  result = ops[0].op;
  for (i = 1; i < n_ops; i++)
    result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
			     mode, result, ops[i].op);

  return negate ? gen_rtx_NEG (mode, result) : result;
}

/* Like simplify_binary_operation except used for relational operators.
   MODE is the mode of the operands, not that of the result.  If MODE
   is VOIDmode, both operands must also be VOIDmode and we compare the
   operands in "infinite precision".

   If no simplification is possible, this function returns zero.  Otherwise,
   it returns either const_true_rtx or const0_rtx.  */

rtx
simplify_relational_operation (code, mode, op0, op1)
     enum rtx_code code;
     enum machine_mode mode;
     rtx op0, op1;
{
  int equal, op0lt, op0ltu, op1lt, op1ltu;
  rtx tem;
  rtx trueop0;
  rtx trueop1;

  if (mode == VOIDmode
      && (GET_MODE (op0) != VOIDmode
	  || GET_MODE (op1) != VOIDmode))
    abort ();

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  /* We can't simplify MODE_CC values since we don't know what the
     actual comparison is.  */
  if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC
#ifdef HAVE_cc0
      || op0 == cc0_rtx
#endif
      )
    return 0;

  /* Make sure the constant is second.  */
  if (swap_commutative_operands_p (trueop0, trueop1))
    {
      tem = op0, op0 = op1, op1 = tem;
      tem = trueop0, trueop0 = trueop1, trueop1 = tem;
      code = swap_condition (code);
    }

  /* For integer comparisons of A and B, maybe we can simplify A - B
     and then simplify a comparison of that with zero.  If A and B are
     both either a register or a CONST_INT, this can't help; testing for
     these cases will prevent infinite recursion here and speed things up.

     If CODE is an unsigned comparison, we can never do this optimization,
     because it gives an incorrect result if the subtraction wraps around
     zero.  ANSI C defines unsigned operations such that they never
     overflow, and thus such cases cannot be ignored.  */

  if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
      && ! ((GET_CODE (op0) == REG || GET_CODE (trueop0) == CONST_INT)
	    && (GET_CODE (op1) == REG || GET_CODE (trueop1) == CONST_INT))
      && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
      && code != GTU && code != GEU && code != LTU && code != LEU)
    return simplify_relational_operation (signed_condition (code),
					  mode, tem, const0_rtx);
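  /* E.g. for (gt:SI (plus (reg A) (const_int 1)) (reg A)), A - B
     folds to (const_int 1), so we recurse on a comparison of
     (const_int 1) with zero and return const_true_rtx.  */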

  if (flag_unsafe_math_optimizations && code == ORDERED)
    return const_true_rtx;

  if (flag_unsafe_math_optimizations && code == UNORDERED)
    return const0_rtx;

  /* For modes without NaNs, if the two operands are equal, we know the
     result.  Nevertheless, don't discard them if they have side-effects.  */
  if (!HONOR_NANS (GET_MODE (trueop0))
      && rtx_equal_p (trueop0, trueop1)
      && ! side_effects_p (trueop0))
    equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;

  /* If the operands are floating-point constants, see if we can fold
     the result.  */
  else if (GET_CODE (trueop0) == CONST_DOUBLE
	   && GET_CODE (trueop1) == CONST_DOUBLE
	   && GET_MODE_CLASS (GET_MODE (trueop0)) == MODE_FLOAT)
    {
      REAL_VALUE_TYPE d0, d1;

      REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
      REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);

      /* Comparisons are unordered iff at least one of the values is NaN.  */
      if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
	switch (code)
	  {
	  case UNEQ:
	  case UNLT:
	  case UNGT:
	  case UNLE:
	  case UNGE:
	  case NE:
	  case UNORDERED:
	    return const_true_rtx;
	  case EQ:
	  case LT:
	  case GT:
	  case LE:
	  case GE:
	  case LTGT:
	  case ORDERED:
	    return const0_rtx;
	  default:
	    return 0;
	  }

      equal = REAL_VALUES_EQUAL (d0, d1);
      op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
      op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
    }

  /* Otherwise, see if the operands are both integers.  */
  else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
	   && (GET_CODE (trueop0) == CONST_DOUBLE
	       || GET_CODE (trueop0) == CONST_INT)
	   && (GET_CODE (trueop1) == CONST_DOUBLE
	       || GET_CODE (trueop1) == CONST_INT))
    {
      int width = GET_MODE_BITSIZE (mode);
      HOST_WIDE_INT l0s, h0s, l1s, h1s;
      unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;

      /* Get the two words comprising each integer constant.  */
      if (GET_CODE (trueop0) == CONST_DOUBLE)
	{
	  l0u = l0s = CONST_DOUBLE_LOW (trueop0);
	  h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
	}
      else
	{
	  l0u = l0s = INTVAL (trueop0);
	  h0u = h0s = HWI_SIGN_EXTEND (l0s);
	}

      if (GET_CODE (trueop1) == CONST_DOUBLE)
	{
	  l1u = l1s = CONST_DOUBLE_LOW (trueop1);
	  h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
	}
      else
	{
	  l1u = l1s = INTVAL (trueop1);
	  h1u = h1s = HWI_SIGN_EXTEND (l1s);
	}

      /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
	 we have to sign or zero-extend the values.  */
      if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
	{
	  l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
	  l1u &= ((HOST_WIDE_INT) 1 << width) - 1;

	  if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
	    l0s |= ((HOST_WIDE_INT) (-1) << width);

	  if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
	    l1s |= ((HOST_WIDE_INT) (-1) << width);
	}
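      /* E.g. in QImode, (const_int -1) masks to l0u == 0xff but keeps
	 l0s == -1 after sign-extension, so it compares above
	 (const_int 1) unsigned and below it signed.  */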
      if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
	h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);

      equal = (h0u == h1u && l0u == l1u);
      op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
      op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
      op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
      op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
    }

  /* Otherwise, there are some code-specific tests we can make.  */
  else
    {
      switch (code)
	{
	case EQ:
	  /* References to the frame plus a constant or labels cannot
	     be zero, but a SYMBOL_REF can due to #pragma weak.  */
	  if (((NONZERO_BASE_PLUS_P (op0) && trueop1 == const0_rtx)
	       || GET_CODE (trueop0) == LABEL_REF)
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
	      /* On some machines, the ap reg can be 0 sometimes.  */
	      && op0 != arg_pointer_rtx
#endif
	      )
	    return const0_rtx;
	  break;

	case NE:
	  if (((NONZERO_BASE_PLUS_P (op0) && trueop1 == const0_rtx)
	       || GET_CODE (trueop0) == LABEL_REF)
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
	      && op0 != arg_pointer_rtx
#endif
	      )
	    return const_true_rtx;
	  break;

	case GEU:
	  /* Unsigned values are never negative.  */
	  if (trueop1 == const0_rtx)
	    return const_true_rtx;
	  break;

	case LTU:
	  if (trueop1 == const0_rtx)
	    return const0_rtx;
	  break;

	case LEU:
	  /* Unsigned values are never greater than the largest
	     unsigned value.  */
	  if (GET_CODE (trueop1) == CONST_INT
	      && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
	      && INTEGRAL_MODE_P (mode))
	    return const_true_rtx;
	  break;

	case GTU:
	  if (GET_CODE (trueop1) == CONST_INT
	      && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
	      && INTEGRAL_MODE_P (mode))
	    return const0_rtx;
	  break;

	case LT:
	  /* Optimize abs(x) < 0.0.  */
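	  /* abs(x) < 0.0 is false for every non-NaN value, and a quiet
	     NaN makes the comparison unordered, hence also false; only
	     signaling NaNs, which may trap, block the fold.  */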
	  if (trueop1 == CONST0_RTX (mode) && !HONOR_SNANS (mode))
	    {
	      tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
						       : trueop0;
	      if (GET_CODE (tem) == ABS)
		return const0_rtx;
	    }
	  break;

	case GE:
	  /* Optimize abs(x) >= 0.0.  */
	  if (trueop1 == CONST0_RTX (mode) && !HONOR_NANS (mode))
	    {
	      tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
						       : trueop0;
	      if (GET_CODE (tem) == ABS)
		return const1_rtx;
	    }
	  break;

	default:
	  break;
	}

      return 0;
    }

  /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
     as appropriate.  */
  switch (code)
    {
    case EQ:
    case UNEQ:
      return equal ? const_true_rtx : const0_rtx;
    case NE:
    case LTGT:
      return ! equal ? const_true_rtx : const0_rtx;
    case LT:
    case UNLT:
      return op0lt ? const_true_rtx : const0_rtx;
    case GT:
    case UNGT:
      return op1lt ? const_true_rtx : const0_rtx;
    case LTU:
      return op0ltu ? const_true_rtx : const0_rtx;
    case GTU:
      return op1ltu ? const_true_rtx : const0_rtx;
    case LE:
    case UNLE:
      return equal || op0lt ? const_true_rtx : const0_rtx;
    case GE:
    case UNGE:
      return equal || op1lt ? const_true_rtx : const0_rtx;
    case LEU:
      return equal || op0ltu ? const_true_rtx : const0_rtx;
    case GEU:
      return equal || op1ltu ? const_true_rtx : const0_rtx;
    case ORDERED:
      return const_true_rtx;
    case UNORDERED:
      return const0_rtx;
    default:
      abort ();
    }
}

/* Simplify CODE, an operation with result mode MODE and three operands,
   OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
   a constant.  Return 0 if no simplification is possible.  */

rtx
simplify_ternary_operation (code, mode, op0_mode, op0, op1, op2)
     enum rtx_code code;
     enum machine_mode mode, op0_mode;
     rtx op0, op1, op2;
{
  unsigned int width = GET_MODE_BITSIZE (mode);

  /* VOIDmode means "infinite" precision.  */
  if (width == 0)
    width = HOST_BITS_PER_WIDE_INT;

  switch (code)
    {
    case SIGN_EXTRACT:
    case ZERO_EXTRACT:
      if (GET_CODE (op0) == CONST_INT
	  && GET_CODE (op1) == CONST_INT
	  && GET_CODE (op2) == CONST_INT
	  && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
	  && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
	{
	  /* Extracting a bit-field from a constant.  */
	  HOST_WIDE_INT val = INTVAL (op0);

	  if (BITS_BIG_ENDIAN)
	    val >>= (GET_MODE_BITSIZE (op0_mode)
		     - INTVAL (op2) - INTVAL (op1));
	  else
	    val >>= INTVAL (op2);

	  if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
	    {
	      /* First zero-extend.  */
	      val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
	      /* If desired, propagate sign bit.  */
	      if (code == SIGN_EXTRACT
		  && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
		val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
	    }
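	  /* E.g. (zero_extract (const_int 0xab) (const_int 4)
	     (const_int 4)) with !BITS_BIG_ENDIAN extracts the high
	     nibble: VAL is shifted right by 4 and masked to 0xa.  */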

	  /* Clear the bits that don't belong in our mode,
	     unless they and our sign bit are all one.
	     So we get either a reasonable negative value or a reasonable
	     unsigned value for this mode.  */
	  if (width < HOST_BITS_PER_WIDE_INT
	      && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
		  != ((HOST_WIDE_INT) (-1) << (width - 1))))
	    val &= ((HOST_WIDE_INT) 1 << width) - 1;

	  return GEN_INT (val);
	}
      break;

    case IF_THEN_ELSE:
      if (GET_CODE (op0) == CONST_INT)
	return op0 != const0_rtx ? op1 : op2;

      /* Convert "a != b ? a : b" and "a == b ? b : a" to "a".  */
      if (GET_CODE (op0) == NE && ! side_effects_p (op0)
	  && !HONOR_NANS (mode)
	  && rtx_equal_p (XEXP (op0, 0), op1)
	  && rtx_equal_p (XEXP (op0, 1), op2))
	return op1;
      else if (GET_CODE (op0) == EQ && ! side_effects_p (op0)
	  && !HONOR_NANS (mode)
	  && rtx_equal_p (XEXP (op0, 1), op1)
	  && rtx_equal_p (XEXP (op0, 0), op2))
	return op2;
      else if (GET_RTX_CLASS (GET_CODE (op0)) == '<' && ! side_effects_p (op0))
	{
	  enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
					? GET_MODE (XEXP (op0, 1))
					: GET_MODE (XEXP (op0, 0)));
	  rtx temp;
	  if (cmp_mode == VOIDmode)
	    cmp_mode = op0_mode;
	  temp = simplify_relational_operation (GET_CODE (op0), cmp_mode,
					        XEXP (op0, 0), XEXP (op0, 1));

	  /* See if any simplifications were possible.  */
	  if (temp == const0_rtx)
	    return op2;
	  else if (temp == const1_rtx)
	    return op1;
	  else if (temp)
	    op0 = temp;

	  /* If OP1 and OP2 are the constants STORE_FLAG_VALUE and zero,
	     the IF_THEN_ELSE collapses to the comparison itself (or its
	     reverse).  */
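	  /* E.g. with STORE_FLAG_VALUE == 1,
	     (if_then_else (lt A B) (const_int 1) (const_int 0))
	     becomes (lt A B), and with the constant arms swapped it
	     becomes the reversed comparison, (ge A B) for an
	     integer LT.  */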
	  if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
	    {
	      HOST_WIDE_INT t = INTVAL (op1);
	      HOST_WIDE_INT f = INTVAL (op2);

	      if (t == STORE_FLAG_VALUE && f == 0)
		code = GET_CODE (op0);
	      else if (t == 0 && f == STORE_FLAG_VALUE)
		{
		  enum rtx_code tmp;
		  tmp = reversed_comparison_code (op0, NULL_RTX);
		  if (tmp == UNKNOWN)
		    break;
		  code = tmp;
		}
	      else
		break;

	      return gen_rtx_fmt_ee (code, mode, XEXP (op0, 0), XEXP (op0, 1));
	    }
	}
      break;
    case VEC_MERGE:
      if (GET_MODE (op0) != mode
	  || GET_MODE (op1) != mode
	  || !VECTOR_MODE_P (mode))
	abort ();
      op0 = avoid_constant_pool_reference (op0);
      op1 = avoid_constant_pool_reference (op1);
      op2 = avoid_constant_pool_reference (op2);
      if (GET_CODE (op0) == CONST_VECTOR
	  && GET_CODE (op1) == CONST_VECTOR
	  && GET_CODE (op2) == CONST_INT)
	{
	  int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
	  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	  rtvec v = rtvec_alloc (n_elts);
	  unsigned int i;

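	  /* Bit I of the CONST_INT mask selects element I from OP0
	     when set and from OP1 when clear; e.g. a mask of
	     (const_int 1) takes element 0 from OP0 and all others
	     from OP1.  */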
	  for (i = 0; i < n_elts; i++)
	    RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
				? CONST_VECTOR_ELT (op0, i)
				: CONST_VECTOR_ELT (op1, i));
	  return gen_rtx_CONST_VECTOR (mode, v);
	}
      break;

    default:
      abort ();
    }

  return 0;
}

/* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
   Return 0 if no simplification is possible.  */
rtx
simplify_subreg (outermode, op, innermode, byte)
     rtx op;
     unsigned int byte;
     enum machine_mode outermode, innermode;
{
  /* Little bit of sanity checking.  */
  if (innermode == VOIDmode || outermode == VOIDmode
      || innermode == BLKmode || outermode == BLKmode)
    abort ();

  if (GET_MODE (op) != innermode
      && GET_MODE (op) != VOIDmode)
    abort ();

  if (byte % GET_MODE_SIZE (outermode)
      || byte >= GET_MODE_SIZE (innermode))
    abort ();

  if (outermode == innermode && !byte)
    return op;

  /* Simplify subregs of vector constants.  */
  if (GET_CODE (op) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (innermode));
      const unsigned int offset = byte / elt_size;
      rtx elt;

      if (GET_MODE_INNER (innermode) == outermode)
	{
	  elt = CONST_VECTOR_ELT (op, offset);

	  /* ?? We probably don't need this copy_rtx because constants
	     can be shared.  ?? */

	  return copy_rtx (elt);
	}
      else if (GET_MODE_INNER (innermode) == GET_MODE_INNER (outermode)
	       && GET_MODE_SIZE (innermode) > GET_MODE_SIZE (outermode))
	{
	  return (gen_rtx_CONST_VECTOR
		  (outermode,
		   gen_rtvec_v (GET_MODE_NUNITS (outermode),
				&CONST_VECTOR_ELT (op, offset))));
	}
      else if (GET_MODE_CLASS (outermode) == MODE_INT
	       && (GET_MODE_SIZE (outermode) % elt_size == 0))
	{
	  /* This happens when the target register size is smaller than
	     the vector mode, and we synthesize operations with vectors
	     of elements that are smaller than the register size.  */
	  HOST_WIDE_INT sum = 0, high = 0;
	  unsigned n_elts = (GET_MODE_SIZE (outermode) / elt_size);
	  unsigned i = BYTES_BIG_ENDIAN ? offset : offset + n_elts - 1;
	  unsigned step = BYTES_BIG_ENDIAN ? 1 : -1;
	  int shift = BITS_PER_UNIT * elt_size;

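	  /* Accumulate the elements into the double-word value
	     (HIGH, SUM), most-significant element first as chosen by
	     BYTES_BIG_ENDIAN; e.g. an SImode subreg of a V4HImode
	     constant packs two adjacent HImode elements into one
	     CONST_INT.  */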
	  for (; n_elts--; i += step)
	    {
	      elt = CONST_VECTOR_ELT (op, i);
	      if (GET_CODE (elt) == CONST_DOUBLE
		  && GET_MODE_CLASS (GET_MODE (elt)) == MODE_FLOAT)
		{
		  elt = gen_lowpart_common (int_mode_for_mode (GET_MODE (elt)),
					    elt);
		  if (! elt)
		    return NULL_RTX;
		}
	      if (GET_CODE (elt) != CONST_INT)
		return NULL_RTX;
	      /* Avoid overflow.  */
	      if (high >> (HOST_BITS_PER_WIDE_INT - shift))
		return NULL_RTX;
	      high = high << shift | sum >> (HOST_BITS_PER_WIDE_INT - shift);
	      sum = (sum << shift) + INTVAL (elt);
	    }
	  if (GET_MODE_BITSIZE (outermode) <= HOST_BITS_PER_WIDE_INT)
	    return GEN_INT (trunc_int_for_mode (sum, outermode));
	  else if (GET_MODE_BITSIZE (outermode) == 2 * HOST_BITS_PER_WIDE_INT)
	    return immed_double_const (sum, high, outermode);
	  else
	    return NULL_RTX;
	}
      else if (GET_MODE_CLASS (outermode) == MODE_INT
	       && (elt_size % GET_MODE_SIZE (outermode) == 0))
	{
	  enum machine_mode new_mode
	    = int_mode_for_mode (GET_MODE_INNER (innermode));
	  int subbyte = byte % elt_size;

	  op = simplify_subreg (new_mode, op, innermode, byte - subbyte);
	  if (! op)
	    return NULL_RTX;
	  return simplify_subreg (outermode, op, new_mode, subbyte);
	}
      else if (GET_MODE_CLASS (outermode) == MODE_INT)
	/* This shouldn't happen, but let's not do anything stupid.  */
	return NULL_RTX;
    }

  /* Attempt to simplify constant to non-SUBREG expression.  */
  if (CONSTANT_P (op))
    {
      int offset, part;
      unsigned HOST_WIDE_INT val = 0;

      if (GET_MODE_CLASS (outermode) == MODE_VECTOR_INT
	  || GET_MODE_CLASS (outermode) == MODE_VECTOR_FLOAT)
	{
	  /* Construct a CONST_VECTOR from individual subregs.  */
	  enum machine_mode submode = GET_MODE_INNER (outermode);
	  int subsize = GET_MODE_UNIT_SIZE (outermode);
	  int i, elts = GET_MODE_NUNITS (outermode);
	  rtvec v = rtvec_alloc (elts);
	  rtx elt;

	  for (i = 0; i < elts; i++, byte += subsize)
	    {
	      /* This might fail, e.g. if taking a subreg from a SYMBOL_REF.  */
	      /* ??? It would be nice if we could actually make such subregs
		 on targets that allow such relocations.  */
	      if (byte >= GET_MODE_UNIT_SIZE (innermode))
		elt = CONST0_RTX (submode);
	      else
		elt = simplify_subreg (submode, op, innermode, byte);
	      if (! elt)
		return NULL_RTX;
	      RTVEC_ELT (v, i) = elt;
	    }
	  return gen_rtx_CONST_VECTOR (outermode, v);
	}

      /* ??? This code is partly redundant with code below, but can handle
	 the subregs of floats and similar corner cases.
	 Later we should move all simplification code here and rewrite
	 GEN_LOWPART_IF_POSSIBLE, GEN_HIGHPART, OPERAND_SUBWORD and friends
	 using SIMPLIFY_SUBREG.  */
      if (subreg_lowpart_offset (outermode, innermode) == byte
	  && GET_CODE (op) != CONST_VECTOR)
	{
	  rtx new = gen_lowpart_if_possible (outermode, op);
	  if (new)
	    return new;
	}

      /* Similar comments as above apply here.  */
      if (GET_MODE_SIZE (outermode) == UNITS_PER_WORD
	  && GET_MODE_SIZE (innermode) > UNITS_PER_WORD
	  && GET_MODE_CLASS (outermode) == MODE_INT)
	{
	  rtx new = constant_subword (op,
				      (byte / UNITS_PER_WORD),
				      innermode);
	  if (new)
	    return new;
	}

      if (GET_MODE_CLASS (outermode) != MODE_INT
	  && GET_MODE_CLASS (outermode) != MODE_CC)
	{
	  enum machine_mode new_mode = int_mode_for_mode (outermode);

	  if (new_mode != innermode || byte != 0)
	    {
	      op = simplify_subreg (new_mode, op, innermode, byte);
	      if (! op)
		return NULL_RTX;
	      return simplify_subreg (outermode, op, new_mode, 0);
	    }
	}

      offset = byte * BITS_PER_UNIT;
      switch (GET_CODE (op))
	{
	case CONST_DOUBLE:
	  if (GET_MODE (op) != VOIDmode)
	    break;

	  /* We can't handle this case yet.  */
	  if (GET_MODE_BITSIZE (outermode) >= HOST_BITS_PER_WIDE_INT)
	    return NULL_RTX;

	  part = offset >= HOST_BITS_PER_WIDE_INT;
	  if ((BITS_PER_WORD > HOST_BITS_PER_WIDE_INT
	       && BYTES_BIG_ENDIAN)
	      || (BITS_PER_WORD <= HOST_BITS_PER_WIDE_INT
		  && WORDS_BIG_ENDIAN))
	    part = !part;
	  val = part ? CONST_DOUBLE_HIGH (op) : CONST_DOUBLE_LOW (op);
	  offset %= HOST_BITS_PER_WIDE_INT;

	  /* We've already picked the word we want from a double, so
	     pretend this is actually an integer.  */
	  innermode = mode_for_size (HOST_BITS_PER_WIDE_INT, MODE_INT, 0);

	  /* FALLTHROUGH */
	case CONST_INT:
	  if (GET_CODE (op) == CONST_INT)
	    val = INTVAL (op);

	  /* We don't handle synthesizing non-integral constants yet.  */
	  if (GET_MODE_CLASS (outermode) != MODE_INT)
	    return NULL_RTX;

	  if (BYTES_BIG_ENDIAN || WORDS_BIG_ENDIAN)
	    {
	      if (WORDS_BIG_ENDIAN)
		offset = (GET_MODE_BITSIZE (innermode)
			  - GET_MODE_BITSIZE (outermode) - offset);
	      if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN
		  && GET_MODE_SIZE (outermode) < UNITS_PER_WORD)
		offset = (offset + BITS_PER_WORD - GET_MODE_BITSIZE (outermode)
			  - 2 * (offset % BITS_PER_WORD));
	    }

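	  /* E.g. on a little-endian target, the QImode subreg at
	     byte 1 of (const_int 0x1234) shifts VAL right by 8 bits
	     and truncates, yielding (const_int 0x12).  */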
	  if (offset >= HOST_BITS_PER_WIDE_INT)
	    return ((HOST_WIDE_INT) val < 0) ? constm1_rtx : const0_rtx;
	  else
	    {
	      val >>= offset;
	      if (GET_MODE_BITSIZE (outermode) < HOST_BITS_PER_WIDE_INT)
		val = trunc_int_for_mode (val, outermode);
	      return GEN_INT (val);
	    }
	default:
	  break;
	}
    }

  /* Changing mode twice with SUBREG => just change it once,
     or not at all if changing back to OP's starting mode.  */
  if (GET_CODE (op) == SUBREG)
    {
      enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
      int final_offset = byte + SUBREG_BYTE (op);
      rtx new;

      if (outermode == innermostmode
	  && byte == 0 && SUBREG_BYTE (op) == 0)
	return SUBREG_REG (op);

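      /* E.g. on a little-endian target,
	 (subreg:HI (subreg:SI (reg:DI R) 4) 2) composes the two byte
	 offsets into the single (subreg:HI (reg:DI R) 6).  */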
      /* The SUBREG_BYTE represents the offset, as if the value were
	 stored in memory.  An irritating exception is a paradoxical
	 subreg, where we define SUBREG_BYTE to be 0.  On big-endian
	 machines, this value should be negative.  For a moment, undo
	 this exception.  */
      if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
	{
	  int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
	  if (WORDS_BIG_ENDIAN)
	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    final_offset += difference % UNITS_PER_WORD;
	}
      if (SUBREG_BYTE (op) == 0
	  && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
	{
	  int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
	  if (WORDS_BIG_ENDIAN)
	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    final_offset += difference % UNITS_PER_WORD;
	}

      /* See whether the resulting subreg will be paradoxical.  */
      if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
	{
	  /* In nonparadoxical subregs we can't handle negative offsets.  */
	  if (final_offset < 0)
	    return NULL_RTX;
	  /* Bail out in case the resulting subreg would be incorrect.  */
	  if (final_offset % GET_MODE_SIZE (outermode)
	      || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
	    return NULL_RTX;
	}
      else
	{
	  int offset = 0;
	  int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));

	  /* In a paradoxical subreg, see if we are still looking at the
	     lower part.  If so, our SUBREG_BYTE will be 0.  */
	  if (WORDS_BIG_ENDIAN)
	    offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    offset += difference % UNITS_PER_WORD;
	  if (offset == final_offset)
	    final_offset = 0;
	  else
	    return NULL_RTX;
	}

      /* Recurse for further possible simplifications.  */
      new = simplify_subreg (outermode, SUBREG_REG (op),
			     GET_MODE (SUBREG_REG (op)),
			     final_offset);
      if (new)
	return new;
      return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
    }

  /* SUBREG of a hard register => just change the register number
     and/or mode.  If the hard register is not valid in that mode,
     suppress this simplification.  If the hard register is the stack,
     frame, or argument pointer, leave this as a SUBREG.  */

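  /* E.g. (subreg:SI (reg:DI 2) 4) can fold directly to (reg:SI 3) on
     a 32-bit target where DImode occupies two consecutive hard
     registers, provided SImode is valid for that register.  */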
  if (REG_P (op)
      && (! REG_FUNCTION_VALUE_P (op)
	  || ! rtx_equal_function_value_matters)
      && REGNO (op) < FIRST_PSEUDO_REGISTER
#ifdef CANNOT_CHANGE_MODE_CLASS
      && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
	    && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
	    && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
#endif
      && ((reload_completed && !frame_pointer_needed)
	  || (REGNO (op) != FRAME_POINTER_REGNUM
#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
	      && REGNO (op) != HARD_FRAME_POINTER_REGNUM
#endif
	     ))
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
      && REGNO (op) != ARG_POINTER_REGNUM
#endif
      && REGNO (op) != STACK_POINTER_REGNUM)
    {
      int final_regno = subreg_hard_regno (gen_rtx_SUBREG (outermode, op, byte),
					   0);

      /* ??? We do allow it if the current REG is not valid for
	 its mode.  This is a kludge to work around how float/complex
	 arguments are passed on 32-bit SPARC and should be fixed.  */
      if (HARD_REGNO_MODE_OK (final_regno, outermode)
	  || ! HARD_REGNO_MODE_OK (REGNO (op), innermode))
	{
	  rtx x = gen_rtx_REG (outermode, final_regno);

	  /* Propagate the original regno.  We don't have any way to
	     specify the offset inside the original regno, so do so only
	     for the lowpart.  The information is used only by alias
	     analysis, which cannot grok partial registers anyway.  */

	  if (subreg_lowpart_offset (outermode, innermode) == byte)
	    ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
	  return x;
	}
    }

  /* If we have a SUBREG of a register that we are replacing and we are
     replacing it with a MEM, make a new MEM and try replacing the
     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
     or if we would be widening it.  */

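  /* E.g. (subreg:QI (mem:SI ADDR) 3) becomes (mem:QI (plus ADDR 3)),
     with the offset folded into the address by adjust_address_nv.  */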
  if (GET_CODE (op) == MEM
      && ! mode_dependent_address_p (XEXP (op, 0))
      /* Allow splitting of volatile memory references in case we don't
	 have an instruction to move the whole thing.  */
      && (! MEM_VOLATILE_P (op)
	  || ! have_insn_for (SET, innermode))
      && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
    return adjust_address_nv (op, outermode, byte);

  /* Handle complex values represented as CONCAT
     of real and imaginary part.  */
  if (GET_CODE (op) == CONCAT)
    {
      int is_realpart = byte < GET_MODE_UNIT_SIZE (innermode);
      rtx part = is_realpart ? XEXP (op, 0) : XEXP (op, 1);
      unsigned int final_offset;
      rtx res;

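      /* E.g. (subreg:SF (concat:SC A B) 4) selects the imaginary
	 part and folds to B outright.  */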
      final_offset = byte % (GET_MODE_UNIT_SIZE (innermode));
      res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
      if (res)
	return res;
      /* We can at least simplify it by referring directly to the
	 relevant part.  */
      return gen_rtx_SUBREG (outermode, part, final_offset);
    }

  return NULL_RTX;
}
/* Make a SUBREG operation or equivalent if it folds.  */

rtx
simplify_gen_subreg (outermode, op, innermode, byte)
     rtx op;
     unsigned int byte;
     enum machine_mode outermode, innermode;
{
  rtx new;
  /* Little bit of sanity checking.  */
  if (innermode == VOIDmode || outermode == VOIDmode
      || innermode == BLKmode || outermode == BLKmode)
    abort ();

  if (GET_MODE (op) != innermode
      && GET_MODE (op) != VOIDmode)
    abort ();

  if (byte % GET_MODE_SIZE (outermode)
      || byte >= GET_MODE_SIZE (innermode))
    abort ();

  if (GET_CODE (op) == QUEUED)
    return NULL_RTX;

  new = simplify_subreg (outermode, op, innermode, byte);
  if (new)
    return new;

  if (GET_CODE (op) == SUBREG || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  return gen_rtx_SUBREG (outermode, op, byte);
}
/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

	1. fold_rtx in cse.c.  This code uses various CSE specific
	   information to aid in RTL simplification.

	2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
	   it uses combine specific information to aid in RTL
	   simplification.

	3. The routines in this file.


   Long term we want to only have one body of simplification code; to
   get to that state I recommend the following steps:

	1. Pore over fold_rtx & simplify_rtx and move any simplifications
	   that do not depend on pass-specific state into these routines.

	2. As code is moved by #1, change fold_rtx & simplify_rtx to
	   use this routine whenever possible.

	3. Allow for pass dependent state to be provided to these
	   routines and add simplifications based on the pass dependent
	   state.  Remove code from cse.c & combine.c that becomes
	   redundant/dead.

    It will take time, but ultimately the compiler will be easier to
    maintain and improve.  It's totally silly that when we add a
    simplification it needs to be added to 4 places (3 for RTL
    simplification and 1 for tree simplification).  */

rtx
simplify_rtx (x)
     rtx x;
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);

  switch (GET_RTX_CLASS (code))
    {
    case '1':
      return simplify_unary_operation (code, mode,
				       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case 'c':
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
	{
	  rtx tem;

	  tem = XEXP (x, 0);
	  XEXP (x, 0) = XEXP (x, 1);
	  XEXP (x, 1) = tem;
	  return simplify_binary_operation (code, mode,
					    XEXP (x, 0), XEXP (x, 1));
	}

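      /* FALLTHROUGH: once the operands are in canonical order, a
	 commutative operation is handled like any other binary
	 operation.  */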
    case '2':
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case '3':
    case 'b':
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
					 XEXP (x, 0), XEXP (x, 1),
					 XEXP (x, 2));

    case '<':
      return simplify_relational_operation (code,
					    ((GET_MODE (XEXP (x, 0))
					      != VOIDmode)
					     ? GET_MODE (XEXP (x, 0))
					     : GET_MODE (XEXP (x, 1))),
					    XEXP (x, 0), XEXP (x, 1));
    case 'x':
      /* The only case we try to handle is a SUBREG.  */
      if (code == SUBREG)
	return simplify_gen_subreg (mode, SUBREG_REG (x),
				    GET_MODE (SUBREG_REG (x)),
				    SUBREG_BYTE (x));
      return NULL;
    default:
      return NULL;
    }
}
