simplify-rtx.c revision 132718
/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */


#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "flags.h"
#include "real.h"
#include "insn-config.h"
#include "recog.h"
#include "function.h"
#include "expr.h"
#include "toplev.h"
#include "output.h"
#include "ggc.h"
#include "target.h"

/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
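
/* For example, on a host with 32-bit HOST_WIDE_INT the DImode value -5
   is the pair (low = 0xfffffffb, high = -1): the low word looks
   negative when read as signed, so HWI_SIGN_EXTEND (0xfffffffb)
   produces the -1 needed for the high word.  */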

static rtx neg_const_int (enum machine_mode, rtx);
static int simplify_plus_minus_op_data_cmp (const void *, const void *);
static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx,
				rtx, int);
static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
				  unsigned int);
static bool associative_constant_p (rtx);
static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
					   rtx, rtx);

/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */
static rtx
neg_const_int (enum machine_mode mode, rtx i)
{
  return gen_int_mode (- INTVAL (i), mode);
}
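
/* For example, on a host with 64-bit HOST_WIDE_INT, negating the SImode
   constant -2147483648 yields 2147483648, which does not fit in SImode;
   gen_int_mode truncates it back, so the result is -2147483648 again.
   The truncation keeps the value well-defined instead of overflowing.  */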


/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
		     rtx op1)
{
  rtx tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == 'c'
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Handle addition and subtraction specially.  Otherwise, just form
     the operation.  */

  if (code == PLUS || code == MINUS)
    {
      tem = simplify_plus_minus (code, mode, op0, op1, 1);
      if (tem)
	return tem;
    }

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
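
/* For example, simplify_gen_binary (PLUS, SImode, x, const0_rtx) returns
   X itself, and a commutative call such as (plus (const_int 4) (reg X))
   is reordered to put the constant second before folding is tried.  */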

/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  enum machine_mode cmode;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
	{
	  REAL_VALUE_TYPE d;

	  REAL_VALUE_FROM_CONST_DOUBLE (d, c);
	  return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
	}
      return x;

    default:
      return x;
    }

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = (*targetm.delegitimize_address) (addr);

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  if (GET_CODE (addr) != SYMBOL_REF
      || ! CONSTANT_POOL_ADDRESS_P (addr))
    return x;

  c = get_pool_constant (addr);
  cmode = get_pool_mode (addr);

  /* If we're accessing the constant in a different mode than it was
     originally stored, attempt to fix that up via subreg simplifications.
     If that fails we have no choice but to return the original memory.  */
  if (cmode != GET_MODE (x))
    {
      c = simplify_subreg (GET_MODE (x), c, cmode, 0);
      return c ? c : x;
    }

  return c;
}
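
/* For example, if X is (mem:SF (symbol_ref ...)) addressing a constant
   pool entry that holds the SFmode value 1.5, the CONST_DOUBLE for 1.5
   is returned in place of the memory reference; any X that does not
   reference the pool comes back unchanged.  */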

/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
		    enum machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}

/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
		      enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
					      op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}

/* Likewise, for relational operations.
   CMP_MODE specifies the mode in which the comparison is done.  */

rtx
simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
			 enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op0);
  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op1);

  if (cmp_mode != VOIDmode)
    {
      tem = simplify_relational_operation (code, cmp_mode, op0, op1);

      if (tem)
	{
#ifdef FLOAT_STORE_FLAG_VALUE
	  if (GET_MODE_CLASS (mode) == MODE_FLOAT)
	    {
	      REAL_VALUE_TYPE val;
	      if (tem == const0_rtx)
		return CONST0_RTX (mode);
	      if (tem != const_true_rtx)
		abort ();
	      val = FLOAT_STORE_FLAG_VALUE (mode);
	      return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
	    }
#endif
	  return tem;
	}
    }

  /* For the following tests, ensure const0_rtx is op1.  */
  if (swap_commutative_operands_p (op0, op1)
      || (op0 == const0_rtx && op1 != const0_rtx))
    tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    return simplify_gen_relational (code, mode, VOIDmode,
				    XEXP (op0, 0), XEXP (op0, 1));

  /* If op0 is a comparison, extract the comparison arguments from it.  */
  if (GET_RTX_CLASS (GET_CODE (op0)) == '<' && op1 == const0_rtx)
    {
      if (code == NE)
	{
	  if (GET_MODE (op0) == mode)
	    return op0;
	  return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
					  XEXP (op0, 0), XEXP (op0, 1));
	}
      else if (code == EQ)
	{
	  enum rtx_code new = reversed_comparison_code (op0, NULL_RTX);
	  if (new != UNKNOWN)
	    return simplify_gen_relational (new, mode, VOIDmode,
					    XEXP (op0, 0), XEXP (op0, 1));
        }
    }

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
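
/* For example, (ne (lt a b) 0) simplifies to (lt a b) when the modes
   agree, and (eq (lt a b) 0) becomes (ge a b) whenever reversing the
   comparison code is known to be valid.  */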

/* Replace all occurrences of OLD in X with NEW and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, rtx old, rtx new)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);
  enum machine_mode op_mode;
  rtx op0, op1, op2;

  /* If X is OLD, return NEW.  Otherwise, if this is an expression, try
     to build a new expression substituting recursively.  If we can't do
     anything, return our input.  */

  if (x == old)
    return new;

  switch (GET_RTX_CLASS (code))
    {
    case '1':
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old, new);
      if (op0 == XEXP (x, 0))
	return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case '2':
    case 'c':
      op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
      op1 = simplify_replace_rtx (XEXP (x, 1), old, new);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case '<':
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_rtx (op0, old, new);
      op1 = simplify_replace_rtx (op1, old, new);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case '3':
    case 'b':
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old, new);
      op1 = simplify_replace_rtx (XEXP (x, 1), old, new);
      op2 = simplify_replace_rtx (XEXP (x, 2), old, new);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
	return x;
      if (op_mode == VOIDmode)
	op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case 'x':
      /* The only case we try to handle is a SUBREG.  */
      if (code == SUBREG)
	{
	  op0 = simplify_replace_rtx (SUBREG_REG (x), old, new);
	  if (op0 == SUBREG_REG (x))
	    return x;
	  op0 = simplify_gen_subreg (GET_MODE (x), op0,
				     GET_MODE (SUBREG_REG (x)),
				     SUBREG_BYTE (x));
	  return op0 ? op0 : x;
	}
      break;

    case 'o':
      if (code == MEM)
	{
	  op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
	  if (op0 == XEXP (x, 0))
	    return x;
	  return replace_equiv_address_nv (x, op0);
	}
      else if (code == LO_SUM)
	{
	  op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
	  op1 = simplify_replace_rtx (XEXP (x, 1), old, new);

	  /* (lo_sum (high x) x) -> x  */
	  if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
	    return op1;

	  if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	    return x;
	  return gen_rtx_LO_SUM (mode, op0, op1);
	}
      else if (code == REG)
	{
	  if (REG_P (old) && REGNO (x) == REGNO (old))
	    return new;
	}
      break;

    default:
      break;
    }
  return x;
}
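
/* For example, replacing (reg 100) with (const_int 4) in
   (plus:SI (reg 100) (const_int 3)) rebuilds the PLUS via
   simplify_gen_binary, which folds it to (const_int 7).  */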

/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
			  rtx op, enum machine_mode op_mode)
{
  unsigned int width = GET_MODE_BITSIZE (mode);
  rtx trueop = avoid_constant_pool_reference (op);

  if (code == VEC_DUPLICATE)
    {
      if (!VECTOR_MODE_P (mode))
	abort ();
      if (GET_MODE (trueop) != VOIDmode
	  && !VECTOR_MODE_P (GET_MODE (trueop))
	  && GET_MODE_INNER (mode) != GET_MODE (trueop))
	abort ();
      if (GET_MODE (trueop) != VOIDmode
	  && VECTOR_MODE_P (GET_MODE (trueop))
	  && GET_MODE_INNER (mode) != GET_MODE_INNER (GET_MODE (trueop)))
	abort ();
      if (GET_CODE (trueop) == CONST_INT || GET_CODE (trueop) == CONST_DOUBLE
	  || GET_CODE (trueop) == CONST_VECTOR)
	{
          int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
          unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	  rtvec v = rtvec_alloc (n_elts);
	  unsigned int i;

	  if (GET_CODE (trueop) != CONST_VECTOR)
	    for (i = 0; i < n_elts; i++)
	      RTVEC_ELT (v, i) = trueop;
	  else
	    {
	      enum machine_mode inmode = GET_MODE (trueop);
              int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
              unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);

	      if (in_n_elts >= n_elts || n_elts % in_n_elts)
		abort ();
	      for (i = 0; i < n_elts; i++)
	        RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop, i % in_n_elts);
	    }
	  return gen_rtx_CONST_VECTOR (mode, v);
	}
    }
  else if (GET_CODE (op) == CONST)
    return simplify_unary_operation (code, mode, XEXP (op, 0), op_mode);

  if (VECTOR_MODE_P (mode) && GET_CODE (trueop) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode opmode = GET_MODE (trueop);
      int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
      unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      if (op_n_elts != n_elts)
	abort ();

      for (i = 0; i < n_elts; i++)
	{
	  rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
					    CONST_VECTOR_ELT (trueop, i),
					    GET_MODE_INNER (opmode));
	  if (!x)
	    return 0;
	  RTVEC_ELT (v, i) = x;
	}
      return gen_rtx_CONST_VECTOR (mode, v);
    }

  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && GET_MODE (trueop) == VOIDmode
      && (GET_CODE (trueop) == CONST_DOUBLE || GET_CODE (trueop) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (trueop) == CONST_INT)
	lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
      else
	lv = CONST_DOUBLE_LOW (trueop),  hv = CONST_DOUBLE_HIGH (trueop);

      REAL_VALUE_FROM_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && GET_MODE (trueop) == VOIDmode
	   && (GET_CODE (trueop) == CONST_DOUBLE
	       || GET_CODE (trueop) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (trueop) == CONST_INT)
	lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
      else
	lv = CONST_DOUBLE_LOW (trueop),  hv = CONST_DOUBLE_HIGH (trueop);

      if (op_mode == VOIDmode)
	{
	  /* We don't know how to interpret negative-looking numbers in
	     this case, so don't try to fold those.  */
	  if (hv < 0)
	    return 0;
	}
      else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
	;
      else
	hv = 0, lv &= GET_MODE_MASK (op_mode);

      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }

  if (GET_CODE (trueop) == CONST_INT
      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT arg0 = INTVAL (trueop);
      HOST_WIDE_INT val;

      switch (code)
	{
	case NOT:
	  val = ~ arg0;
	  break;

	case NEG:
	  val = - arg0;
	  break;

	case ABS:
	  val = (arg0 >= 0 ? arg0 : - arg0);
	  break;

	case FFS:
	  /* Don't use ffs here.  Instead, get low order bit and then its
	     number.  If arg0 is zero, this will return 0, as desired.  */
	  arg0 &= GET_MODE_MASK (mode);
	  val = exact_log2 (arg0 & (- arg0)) + 1;
	  break;

	case CLZ:
	  arg0 &= GET_MODE_MASK (mode);
	  if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
	    ;
	  else
	    val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
	  break;

	case CTZ:
	  arg0 &= GET_MODE_MASK (mode);
	  if (arg0 == 0)
	    {
	      /* Even if the value at zero is undefined, we have to come
		 up with some replacement.  Seems good enough.  */
	      if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
		val = GET_MODE_BITSIZE (mode);
	    }
	  else
	    val = exact_log2 (arg0 & -arg0);
	  break;

	case POPCOUNT:
	  arg0 &= GET_MODE_MASK (mode);
	  val = 0;
	  while (arg0)
	    val++, arg0 &= arg0 - 1;
	  break;

	case PARITY:
	  arg0 &= GET_MODE_MASK (mode);
	  val = 0;
	  while (arg0)
	    val++, arg0 &= arg0 - 1;
	  val &= 1;
	  break;

	case TRUNCATE:
	  val = arg0;
	  break;

	case ZERO_EXTEND:
	  /* When zero-extending a CONST_INT, we need to know its
             original mode.  */
	  if (op_mode == VOIDmode)
	    abort ();
	  if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
	    {
	      /* If we were really extending the mode,
		 we would have to distinguish between zero-extension
		 and sign-extension.  */
	      if (width != GET_MODE_BITSIZE (op_mode))
		abort ();
	      val = arg0;
	    }
	  else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
	    val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
	  else
	    return 0;
	  break;

	case SIGN_EXTEND:
	  if (op_mode == VOIDmode)
	    op_mode = mode;
	  if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
	    {
	      /* If we were really extending the mode,
		 we would have to distinguish between zero-extension
		 and sign-extension.  */
	      if (width != GET_MODE_BITSIZE (op_mode))
		abort ();
	      val = arg0;
	    }
	  else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
	    {
	      val
		= arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
	      if (val
		  & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
		val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
	    }
	  else
	    return 0;
	  break;

	case SQRT:
	case FLOAT_EXTEND:
	case FLOAT_TRUNCATE:
	case SS_TRUNCATE:
	case US_TRUNCATE:
	  return 0;

	default:
	  abort ();
	}

      val = trunc_int_for_mode (val, mode);

      return GEN_INT (val);
    }
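
  /* For example, (zero_extend:SI X) where X is the QImode constant
     (const_int -1) folds here to (const_int 255): the low eight bits
     are masked in, and trunc_int_for_mode canonicalizes the result
     for SImode.  */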

  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
     for a DImode operation on a CONST_INT.  */
  else if (GET_MODE (trueop) == VOIDmode
	   && width <= HOST_BITS_PER_WIDE_INT * 2
	   && (GET_CODE (trueop) == CONST_DOUBLE
	       || GET_CODE (trueop) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, lv;
      HOST_WIDE_INT h1, hv;

      if (GET_CODE (trueop) == CONST_DOUBLE)
	l1 = CONST_DOUBLE_LOW (trueop), h1 = CONST_DOUBLE_HIGH (trueop);
      else
	l1 = INTVAL (trueop), h1 = HWI_SIGN_EXTEND (l1);

      switch (code)
	{
	case NOT:
	  lv = ~ l1;
	  hv = ~ h1;
	  break;

	case NEG:
	  neg_double (l1, h1, &lv, &hv);
	  break;

	case ABS:
	  if (h1 < 0)
	    neg_double (l1, h1, &lv, &hv);
	  else
	    lv = l1, hv = h1;
	  break;

	case FFS:
	  hv = 0;
	  if (l1 == 0)
	    {
	      if (h1 == 0)
		lv = 0;
	      else
		lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
	    }
	  else
	    lv = exact_log2 (l1 & -l1) + 1;
	  break;

	case CLZ:
	  hv = 0;
	  if (h1 != 0)
	    lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
	      - HOST_BITS_PER_WIDE_INT;
	  else if (l1 != 0)
	    lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
	  else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
	    lv = GET_MODE_BITSIZE (mode);
	  break;

	case CTZ:
	  hv = 0;
	  if (l1 != 0)
	    lv = exact_log2 (l1 & -l1);
	  else if (h1 != 0)
	    lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
	  else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
	    lv = GET_MODE_BITSIZE (mode);
	  break;

	case POPCOUNT:
	  hv = 0;
	  lv = 0;
	  while (l1)
	    lv++, l1 &= l1 - 1;
	  while (h1)
	    lv++, h1 &= h1 - 1;
	  break;

	case PARITY:
	  hv = 0;
	  lv = 0;
	  while (l1)
	    lv++, l1 &= l1 - 1;
	  while (h1)
	    lv++, h1 &= h1 - 1;
	  lv &= 1;
	  break;

	case TRUNCATE:
	  /* This is just a change-of-mode, so do nothing.  */
	  lv = l1, hv = h1;
	  break;

	case ZERO_EXTEND:
	  if (op_mode == VOIDmode)
	    abort ();

	  if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
	    return 0;

	  hv = 0;
	  lv = l1 & GET_MODE_MASK (op_mode);
	  break;

	case SIGN_EXTEND:
	  if (op_mode == VOIDmode
	      || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
	    return 0;
	  else
	    {
	      lv = l1 & GET_MODE_MASK (op_mode);
	      if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
		  && (lv & ((HOST_WIDE_INT) 1
			    << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
		lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);

	      hv = HWI_SIGN_EXTEND (lv);
	    }
	  break;

	case SQRT:
	  return 0;

	default:
	  return 0;
	}

      return immed_double_const (lv, hv, mode);
    }
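
  /* For example, on a host with 32-bit HOST_WIDE_INT a DImode constant
     is a (low, high) word pair, and (not:DI X) is folded by
     complementing both words: ~(0, 0) gives (0xffffffff, -1), which
     immed_double_const returns as the DImode value -1.  */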

  else if (GET_CODE (trueop) == CONST_DOUBLE
	   && GET_MODE_CLASS (mode) == MODE_FLOAT)
    {
      REAL_VALUE_TYPE d, t;
      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop);

      switch (code)
	{
	case SQRT:
	  if (HONOR_SNANS (mode) && real_isnan (&d))
	    return 0;
	  real_sqrt (&t, mode, &d);
	  d = t;
	  break;
	case ABS:
	  d = REAL_VALUE_ABS (d);
	  break;
	case NEG:
	  d = REAL_VALUE_NEGATE (d);
	  break;
	case FLOAT_TRUNCATE:
	  d = real_value_truncate (mode, d);
	  break;
	case FLOAT_EXTEND:
	  /* All this does is change the mode.  */
	  break;
	case FIX:
	  real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
	  break;

	default:
	  abort ();
	}
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }

  else if (GET_CODE (trueop) == CONST_DOUBLE
	   && GET_MODE_CLASS (GET_MODE (trueop)) == MODE_FLOAT
	   && GET_MODE_CLASS (mode) == MODE_INT
	   && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
    {
      /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
	 operators are intentionally left unspecified (to ease implementation
	 by target backends), for consistency, this routine implements the
	 same semantics for constant folding as used by the middle-end.  */

      HOST_WIDE_INT xh, xl, th, tl;
      REAL_VALUE_TYPE x, t;
      REAL_VALUE_FROM_CONST_DOUBLE (x, trueop);
      switch (code)
	{
	case FIX:
	  if (REAL_VALUE_ISNAN (x))
	    return const0_rtx;

	  /* Test against the signed upper bound.  */
	  if (width > HOST_BITS_PER_WIDE_INT)
	    {
	      th = ((unsigned HOST_WIDE_INT) 1
		    << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
	      tl = -1;
	    }
	  else
	    {
	      th = 0;
	      tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
	    }
	  real_from_integer (&t, VOIDmode, tl, th, 0);
	  if (REAL_VALUES_LESS (t, x))
	    {
	      xh = th;
	      xl = tl;
	      break;
	    }

	  /* Test against the signed lower bound.  */
	  if (width > HOST_BITS_PER_WIDE_INT)
	    {
	      th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
	      tl = 0;
	    }
	  else
	    {
	      th = -1;
	      tl = (HOST_WIDE_INT) -1 << (width - 1);
	    }
	  real_from_integer (&t, VOIDmode, tl, th, 0);
	  if (REAL_VALUES_LESS (x, t))
	    {
	      xh = th;
	      xl = tl;
	      break;
	    }
	  REAL_VALUE_TO_INT (&xl, &xh, x);
	  break;

	case UNSIGNED_FIX:
	  if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
	    return const0_rtx;

	  /* Test against the unsigned upper bound.  */
	  if (width == 2*HOST_BITS_PER_WIDE_INT)
	    {
	      th = -1;
	      tl = -1;
	    }
	  else if (width >= HOST_BITS_PER_WIDE_INT)
	    {
	      th = ((unsigned HOST_WIDE_INT) 1
		    << (width - HOST_BITS_PER_WIDE_INT)) - 1;
	      tl = -1;
	    }
	  else
	    {
	      th = 0;
	      tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
	    }
	  real_from_integer (&t, VOIDmode, tl, th, 1);
	  if (REAL_VALUES_LESS (t, x))
	    {
	      xh = th;
	      xl = tl;
	      break;
	    }

	  REAL_VALUE_TO_INT (&xl, &xh, x);
	  break;

	default:
	  abort ();
	}
      return immed_double_const (xl, xh, mode);
    }

  /* This was formerly used only for non-IEEE float.
     eggert@twinsun.com says it is safe for IEEE also.  */
  else
    {
      enum rtx_code reversed;
      rtx temp;

      /* There are some simplifications we can do even if the operands
	 aren't constant.  */
      switch (code)
	{
	case NOT:
	  /* (not (not X)) == X.  */
	  if (GET_CODE (op) == NOT)
	    return XEXP (op, 0);

	  /* (not (eq X Y)) == (ne X Y), etc.  */
	  if (GET_RTX_CLASS (GET_CODE (op)) == '<'
	      && (mode == BImode || STORE_FLAG_VALUE == -1)
	      && ((reversed = reversed_comparison_code (op, NULL_RTX))
		  != UNKNOWN))
	    return simplify_gen_relational (reversed, mode, VOIDmode,
					    XEXP (op, 0), XEXP (op, 1));

          /* (not (plus X -1)) can become (neg X).  */
          if (GET_CODE (op) == PLUS
	      && XEXP (op, 1) == constm1_rtx)
	    return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

	  /* Similarly, (not (neg X)) is (plus X -1).  */
	  if (GET_CODE (op) == NEG)
	    return plus_constant (XEXP (op, 0), -1);

	  /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
	  if (GET_CODE (op) == XOR
	      && GET_CODE (XEXP (op, 1)) == CONST_INT
	      && (temp = simplify_unary_operation (NOT, mode,
						   XEXP (op, 1),
						   mode)) != 0)
	    return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);


	  /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
	     operands other than 1, but that is not valid.  We could do a
	     similar simplification for (not (lshiftrt C X)) where C is
	     just the sign bit, but this doesn't seem common enough to
	     bother with.  */
	  if (GET_CODE (op) == ASHIFT
	      && XEXP (op, 0) == const1_rtx)
	    {
	      temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
	      return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
	    }

	  /* If STORE_FLAG_VALUE is -1, (not (comparison X Y)) can be done
	     by reversing the comparison code if valid.  */
	  if (STORE_FLAG_VALUE == -1
	      && GET_RTX_CLASS (GET_CODE (op)) == '<'
	      && (reversed = reversed_comparison_code (op, NULL_RTX))
		 != UNKNOWN)
	    return simplify_gen_relational (reversed, mode, VOIDmode,
					    XEXP (op, 0), XEXP (op, 1));

	  /* (not (ashiftrt foo C)) where C is the number of bits in FOO
	     minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
	     so we can perform the above simplification.  */

	  if (STORE_FLAG_VALUE == -1
	      && GET_CODE (op) == ASHIFTRT
	      && GET_CODE (XEXP (op, 1)) == CONST_INT
	      && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
	    return simplify_gen_relational (GE, mode, VOIDmode,
					    XEXP (op, 0), const0_rtx);

	  break;

	case NEG:
	  /* (neg (neg X)) == X.  */
	  if (GET_CODE (op) == NEG)
	    return XEXP (op, 0);

	  /* (neg (plus X 1)) can become (not X).  */
	  if (GET_CODE (op) == PLUS
	      && XEXP (op, 1) == const1_rtx)
	    return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

	  /* Similarly, (neg (not X)) is (plus X 1).  */
	  if (GET_CODE (op) == NOT)
	    return plus_constant (XEXP (op, 0), 1);

	  /* (neg (minus X Y)) can become (minus Y X).  This transformation
	     isn't safe for modes with signed zeros, since if X and Y are
	     both +0, (minus Y X) is the same as (minus X Y).  If the
	     rounding mode is towards +infinity (or -infinity) then the two
	     expressions will be rounded differently.  */
	  if (GET_CODE (op) == MINUS
	      && !HONOR_SIGNED_ZEROS (mode)
	      && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	    return simplify_gen_binary (MINUS, mode, XEXP (op, 1),
					XEXP (op, 0));

	  if (GET_CODE (op) == PLUS
	      && !HONOR_SIGNED_ZEROS (mode)
	      && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	    {
	      /* (neg (plus A C)) is simplified to (minus -C A).  */
	      if (GET_CODE (XEXP (op, 1)) == CONST_INT
		  || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
		{
		  temp = simplify_unary_operation (NEG, mode, XEXP (op, 1),
						   mode);
		  if (temp)
		    return simplify_gen_binary (MINUS, mode, temp,
						XEXP (op, 0));
		}

	      /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
	      temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
	      return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
	    }

	  /* (neg (mult A B)) becomes (mult (neg A) B).
	     This works even for floating-point values.  */
	  if (GET_CODE (op) == MULT
	      && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	    {
	      temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
	      return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
	    }

	  /* NEG commutes with ASHIFT since it is multiplication.  Only do
	     this if we can then eliminate the NEG (e.g., if the operand
	     is a constant).  */
	  if (GET_CODE (op) == ASHIFT)
	    {
	      temp = simplify_unary_operation (NEG, mode, XEXP (op, 0),
					       mode);
	      if (temp)
		return simplify_gen_binary (ASHIFT, mode, temp,
					    XEXP (op, 1));
	    }

	  break;

	case SIGN_EXTEND:
	  /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
	     becomes just the MINUS if its mode is MODE.  This allows
	     folding switch statements on machines using casesi (such as
	     the VAX).  */
	  if (GET_CODE (op) == TRUNCATE
	      && GET_MODE (XEXP (op, 0)) == mode
	      && GET_CODE (XEXP (op, 0)) == MINUS
	      && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
	      && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
	    return XEXP (op, 0);

	  /* Check for a sign extension of a subreg of a promoted
	     variable, where the promotion is sign-extended, and the
	     target mode is the same as the variable's promotion.  */
	  if (GET_CODE (op) == SUBREG
	      && SUBREG_PROMOTED_VAR_P (op)
	      && ! SUBREG_PROMOTED_UNSIGNED_P (op)
	      && GET_MODE (XEXP (op, 0)) == mode)
	    return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
	  if (! POINTERS_EXTEND_UNSIGNED
	      && mode == Pmode && GET_MODE (op) == ptr_mode
	      && (CONSTANT_P (op)
		  || (GET_CODE (op) == SUBREG
		      && GET_CODE (SUBREG_REG (op)) == REG
		      && REG_POINTER (SUBREG_REG (op))
		      && GET_MODE (SUBREG_REG (op)) == Pmode)))
	    return convert_memory_address (Pmode, op);
#endif
	  break;

	case ZERO_EXTEND:
	  /* Check for a zero extension of a subreg of a promoted
	     variable, where the promotion is zero-extended, and the
	     target mode is the same as the variable's promotion.  */
	  if (GET_CODE (op) == SUBREG
	      && SUBREG_PROMOTED_VAR_P (op)
	      && SUBREG_PROMOTED_UNSIGNED_P (op)
	      && GET_MODE (XEXP (op, 0)) == mode)
	    return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
	  if (POINTERS_EXTEND_UNSIGNED > 0
	      && mode == Pmode && GET_MODE (op) == ptr_mode
	      && (CONSTANT_P (op)
		  || (GET_CODE (op) == SUBREG
		      && GET_CODE (SUBREG_REG (op)) == REG
		      && REG_POINTER (SUBREG_REG (op))
		      && GET_MODE (SUBREG_REG (op)) == Pmode)))
	    return convert_memory_address (Pmode, op);
#endif
	  break;

	default:
	  break;
	}

      return 0;
    }
}

/* Subroutine of simplify_associative_operation.  Return true if rtx OP
   is a suitable integer or floating point immediate constant.  */
static bool
associative_constant_p (rtx op)
{
  if (GET_CODE (op) == CONST_INT
      || GET_CODE (op) == CONST_DOUBLE)
    return true;
  op = avoid_constant_pool_reference (op);
  return GET_CODE (op) == CONST_INT
	 || GET_CODE (op) == CONST_DOUBLE;
}

/* Subroutine of simplify_binary_operation to simplify an associative
   binary operation CODE with result mode MODE, operating on OP0 and OP1.
   Return 0 if no simplification is possible.  */
static rtx
simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
				rtx op0, rtx op1)
{
  rtx tem;

  /* Simplify (x op c1) op c2 as x op (c1 op c2).  */
  if (GET_CODE (op0) == code
      && associative_constant_p (op1)
      && associative_constant_p (XEXP (op0, 1)))
    {
      tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (! tem)
	return tem;
      return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
    }

  /* Simplify (x op c1) op (y op c2) as (x op y) op (c1 op c2).  */
  if (GET_CODE (op0) == code
      && GET_CODE (op1) == code
      && associative_constant_p (XEXP (op0, 1))
      && associative_constant_p (XEXP (op1, 1)))
    {
      rtx c = simplify_binary_operation (code, mode,
					 XEXP (op0, 1), XEXP (op1, 1));
      if (! c)
	return 0;
      tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
      return simplify_gen_binary (code, mode, tem, c);
    }

  /* Canonicalize (x op c) op y as (x op y) op c.  */
  if (GET_CODE (op0) == code
      && associative_constant_p (XEXP (op0, 1)))
    {
      tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
      return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  /* Canonicalize x op (y op c) as (x op y) op c.  */
  if (GET_CODE (op1) == code
      && associative_constant_p (XEXP (op1, 1)))
    {
      tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
      return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
    }

  return 0;
}
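
/* For example, the first rule above rewrites
   (plus (plus x (const_int 1)) (const_int 2)) as (plus x (const_int 3)),
   and the canonicalization rules float constants outward, so
   (plus (plus x (const_int 1)) y) becomes (plus (plus x y) (const_int 1)).  */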

/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */
rtx
simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
			   rtx op0, rtx op1)
{
  HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_BITSIZE (mode);
  rtx tem;
  rtx trueop0 = avoid_constant_pool_reference (op0);
  rtx trueop1 = avoid_constant_pool_reference (op1);

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */

  if (GET_RTX_CLASS (code) == '<')
    abort ();

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == 'c'
      && swap_commutative_operands_p (trueop0, trueop1))
    {
      tem = op0, op0 = op1, op1 = tem;
      tem = trueop0, trueop0 = trueop1, trueop1 = tem;
    }

  if (VECTOR_MODE_P (mode)
      && GET_CODE (trueop0) == CONST_VECTOR
      && GET_CODE (trueop1) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode op0mode = GET_MODE (trueop0);
      int op0_elt_size = GET_MODE_SIZE (GET_MODE_INNER (op0mode));
      unsigned op0_n_elts = (GET_MODE_SIZE (op0mode) / op0_elt_size);
      enum machine_mode op1mode = GET_MODE (trueop1);
      int op1_elt_size = GET_MODE_SIZE (GET_MODE_INNER (op1mode));
      unsigned op1_n_elts = (GET_MODE_SIZE (op1mode) / op1_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      if (op0_n_elts != n_elts || op1_n_elts != n_elts)
	abort ();

      for (i = 0; i < n_elts; i++)
	{
	  rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
					     CONST_VECTOR_ELT (trueop0, i),
					     CONST_VECTOR_ELT (trueop1, i));
	  if (!x)
	    return 0;
	  RTVEC_ELT (v, i) = x;
	}

      return gen_rtx_CONST_VECTOR (mode, v);
    }

  if (GET_MODE_CLASS (mode) == MODE_FLOAT
      && GET_CODE (trueop0) == CONST_DOUBLE
      && GET_CODE (trueop1) == CONST_DOUBLE
      && mode == GET_MODE (op0) && mode == GET_MODE (op1))
    {
      REAL_VALUE_TYPE f0, f1, value;

      REAL_VALUE_FROM_CONST_DOUBLE (f0, trueop0);
      REAL_VALUE_FROM_CONST_DOUBLE (f1, trueop1);
      f0 = real_value_truncate (mode, f0);
      f1 = real_value_truncate (mode, f1);

      if (HONOR_SNANS (mode)
	  && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
	return 0;

      if (code == DIV
	  && REAL_VALUES_EQUAL (f1, dconst0)
	  && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
	return 0;

      if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
	  && flag_trapping_math
	  && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
	{
	  int s0 = REAL_VALUE_NEGATIVE (f0);
	  int s1 = REAL_VALUE_NEGATIVE (f1);

	  switch (code)
	    {
	    case PLUS:
	      /* Inf + -Inf = NaN plus exception.  */
	      if (s0 != s1)
	        return 0;
	      break;
	    case MINUS:
	      /* Inf - Inf = NaN plus exception.  */
	      if (s0 == s1)
	        return 0;
	      break;
	    case DIV:
	      /* Inf / Inf = NaN plus exception.  */
	      return 0;
	    default:
	      break;
	    }
	}

      if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
	  && flag_trapping_math
	  && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
	      || (REAL_VALUE_ISINF (f1) && REAL_VALUES_EQUAL (f0, dconst0))))
	/* Inf * 0 = NaN plus exception.  */
	return 0;

      REAL_ARITHMETIC (value, rtx_to_tree_code (code), f0, f1);

      value = real_value_truncate (mode, value);
      return CONST_DOUBLE_FROM_REAL_VALUE (value, mode);
    }

  /* We can fold some multi-word operations.  */
  if (GET_MODE_CLASS (mode) == MODE_INT
      && width == HOST_BITS_PER_WIDE_INT * 2
      && (GET_CODE (trueop0) == CONST_DOUBLE
	  || GET_CODE (trueop0) == CONST_INT)
      && (GET_CODE (trueop1) == CONST_DOUBLE
	  || GET_CODE (trueop1) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, l2, lv;
      HOST_WIDE_INT h1, h2, hv;

      if (GET_CODE (trueop0) == CONST_DOUBLE)
	l1 = CONST_DOUBLE_LOW (trueop0), h1 = CONST_DOUBLE_HIGH (trueop0);
      else
	l1 = INTVAL (trueop0), h1 = HWI_SIGN_EXTEND (l1);

      if (GET_CODE (trueop1) == CONST_DOUBLE)
	l2 = CONST_DOUBLE_LOW (trueop1), h2 = CONST_DOUBLE_HIGH (trueop1);
      else
	l2 = INTVAL (trueop1), h2 = HWI_SIGN_EXTEND (l2);

      switch (code)
	{
	case MINUS:
	  /* A - B == A + (-B).  */
	  neg_double (l2, h2, &lv, &hv);
	  l2 = lv, h2 = hv;

	  /* Fall through....  */

	case PLUS:
	  add_double (l1, h1, l2, h2, &lv, &hv);
	  break;

	case MULT:
	  mul_double (l1, h1, l2, h2, &lv, &hv);
	  break;

	case DIV:  case MOD:   case UDIV:  case UMOD:
	  /* We'd need to include tree.h to do this and it doesn't seem worth
	     it.  */
	  return 0;

	case AND:
	  lv = l1 & l2, hv = h1 & h2;
	  break;

	case IOR:
	  lv = l1 | l2, hv = h1 | h2;
	  break;

	case XOR:
	  lv = l1 ^ l2, hv = h1 ^ h2;
	  break;

	case SMIN:
	  if (h1 < h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      < (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case SMAX:
	  if (h1 > h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      > (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case UMIN:
	  if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      < (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case UMAX:
	  if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      > (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case LSHIFTRT:   case ASHIFTRT:
	case ASHIFT:
	case ROTATE:     case ROTATERT:
#ifdef SHIFT_COUNT_TRUNCATED
	  if (SHIFT_COUNT_TRUNCATED)
	    l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
#endif

	  if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
	    return 0;

	  if (code == LSHIFTRT || code == ASHIFTRT)
	    rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
			   code == ASHIFTRT);
	  else if (code == ASHIFT)
	    lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
	  else if (code == ROTATE)
	    lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
	  else /* code == ROTATERT */
	    rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
	  break;

	default:
	  return 0;
	}

      return immed_double_const (lv, hv, mode);
    }
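
  /* For example, on a host with 32-bit HOST_WIDE_INT, adding the DImode
     constants 0x1ffffffff and 1 goes through add_double on the word
     pairs (0xffffffff, 1) and (1, 0), giving (0, 2), i.e. the DImode
     value 0x200000000.  */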
1400
1401  if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
1402      || width > HOST_BITS_PER_WIDE_INT || width == 0)
1403    {
1404      /* Even if we can't compute a constant result,
1405	 there are some cases worth simplifying.  */
1406
1407      switch (code)
1408	{
1409	case PLUS:
1410	  /* Maybe simplify x + 0 to x.  The two expressions are equivalent
1411	     when x is NaN, infinite, or finite and nonzero.  They aren't
1412	     when x is -0 and the rounding mode is not towards -infinity,
1413	     since (-0) + 0 is then 0.  */
1414	  if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
1415	    return op0;
1416
1417	  /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
1418	     transformations are safe even for IEEE.  */
1419	  if (GET_CODE (op0) == NEG)
1420	    return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
1421	  else if (GET_CODE (op1) == NEG)
1422	    return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
1423
1424	  /* (~a) + 1 -> -a */
1425	  if (INTEGRAL_MODE_P (mode)
1426	      && GET_CODE (op0) == NOT
1427	      && trueop1 == const1_rtx)
1428	    return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
1429
1430	  /* Handle both-operands-constant cases.  We can only add
1431	     CONST_INTs to constants since the sum of relocatable symbols
1432	     can't be handled by most assemblers.  Don't add CONST_INT
1433	     to CONST_INT since overflow won't be computed properly if wider
1434	     than HOST_BITS_PER_WIDE_INT.  */
1435
1436	  if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
1437	      && GET_CODE (op1) == CONST_INT)
1438	    return plus_constant (op0, INTVAL (op1));
1439	  else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
1440		   && GET_CODE (op0) == CONST_INT)
1441	    return plus_constant (op1, INTVAL (op0));
1442
1443	  /* See if this is something like X * C - X or vice versa or
1444	     if the multiplication is written as a shift.  If so, we can
1445	     distribute and make a new multiply, shift, or maybe just
1446	     have X (if C is 2 in the example above).  But don't make
1447	     real multiply if we didn't have one before.  */
1448
1449	  if (! FLOAT_MODE_P (mode))
1450	    {
1451	      HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
1452	      rtx lhs = op0, rhs = op1;
1453	      int had_mult = 0;
1454
1455	      if (GET_CODE (lhs) == NEG)
1456		coeff0 = -1, lhs = XEXP (lhs, 0);
1457	      else if (GET_CODE (lhs) == MULT
1458		       && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1459		{
1460		  coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
1461		  had_mult = 1;
1462		}
1463	      else if (GET_CODE (lhs) == ASHIFT
1464		       && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1465		       && INTVAL (XEXP (lhs, 1)) >= 0
1466		       && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1467		{
1468		  coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1469		  lhs = XEXP (lhs, 0);
1470		}
1471
1472	      if (GET_CODE (rhs) == NEG)
1473		coeff1 = -1, rhs = XEXP (rhs, 0);
1474	      else if (GET_CODE (rhs) == MULT
1475		       && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1476		{
1477		  coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
1478		  had_mult = 1;
1479		}
1480	      else if (GET_CODE (rhs) == ASHIFT
1481		       && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1482		       && INTVAL (XEXP (rhs, 1)) >= 0
1483		       && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1484		{
1485		  coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1486		  rhs = XEXP (rhs, 0);
1487		}
1488
1489	      if (rtx_equal_p (lhs, rhs))
1490		{
1491		  tem = simplify_gen_binary (MULT, mode, lhs,
1492					GEN_INT (coeff0 + coeff1));
1493		  return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
1494		}
1495	    }
1496
1497	  /* If one of the operands is a PLUS or a MINUS, see if we can
1498	     simplify this by the associative law.
1499	     Don't use the associative law for floating point.
1500	     The inaccuracy makes it nonassociative,
1501	     and subtle programs can break if operations are associated.  */
1502
1503	  if (INTEGRAL_MODE_P (mode)
1504	      && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
1505		  || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
1506		  || (GET_CODE (op0) == CONST
1507		      && GET_CODE (XEXP (op0, 0)) == PLUS)
1508		  || (GET_CODE (op1) == CONST
1509		      && GET_CODE (XEXP (op1, 0)) == PLUS))
1510	      && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
1511	    return tem;
1512
1513	  /* Reassociate floating point addition only when the user
1514	     specifies unsafe math optimizations.  */
1515	  if (FLOAT_MODE_P (mode)
1516	      && flag_unsafe_math_optimizations)
1517	    {
1518	      tem = simplify_associative_operation (code, mode, op0, op1);
1519	      if (tem)
1520		return tem;
1521	    }
1522	  break;
1523
1524	case COMPARE:
1525#ifdef HAVE_cc0
1526	  /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
1527	     using cc0, in which case we want to leave it as a COMPARE
1528	     so we can distinguish it from a register-register-copy.
1529
1530	     In IEEE floating point, x-0 is not the same as x.  */
1531
1532	  if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1533	       || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
1534	      && trueop1 == CONST0_RTX (mode))
1535	    return op0;
1536#endif
1537
1538	  /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
1539	  if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
1540	       || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
1541	      && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
1542	    {
1543	      rtx xop00 = XEXP (op0, 0);
1544	      rtx xop10 = XEXP (op1, 0);
1545
1546#ifdef HAVE_cc0
1547	      if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
1548#else
1549	      if (GET_CODE (xop00) == REG && GET_CODE (xop10) == REG
1550		  && GET_MODE (xop00) == GET_MODE (xop10)
1551		  && REGNO (xop00) == REGNO (xop10)
1552		  && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
1553		  && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
1554#endif
1555		return xop00;
1556	    }
1557	  break;
1558
1559	case MINUS:
1560	  /* We can't assume x-x is 0 even with non-IEEE floating point,
1561	     but since it is zero except in very strange circumstances, we
1562	     will treat it as zero with -funsafe-math-optimizations.  */
1563	  if (rtx_equal_p (trueop0, trueop1)
1564	      && ! side_effects_p (op0)
1565	      && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
1566	    return CONST0_RTX (mode);
1567
1568	  /* Change subtraction from zero into negation.  (0 - x) is the
1569	     same as -x when x is NaN, infinite, or finite and nonzero.
1570	     But if the mode has signed zeros, and does not round towards
1571	     -infinity, then 0 - 0 is 0, not -0.  */
1572	  if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
1573	    return simplify_gen_unary (NEG, mode, op1, mode);
1574
1575	  /* (-1 - a) is ~a.  */
1576	  if (trueop0 == constm1_rtx)
1577	    return simplify_gen_unary (NOT, mode, op1, mode);
1578
1579	  /* Subtracting 0 has no effect unless the mode has signed zeros
1580	     and supports rounding towards -infinity.  In such a case,
1581	     0 - 0 is -0.  */
1582	  if (!(HONOR_SIGNED_ZEROS (mode)
1583		&& HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1584	      && trueop1 == CONST0_RTX (mode))
1585	    return op0;
1586
1587	  /* See if this is something like X * C - X or vice versa or
1588	     if the multiplication is written as a shift.  If so, we can
1589	     distribute and make a new multiply, shift, or maybe just
1590	     have X (if C is 2 in the example above).  But don't make
1591	     real multiply if we didn't have one before.  */
1592
1593	  if (! FLOAT_MODE_P (mode))
1594	    {
1595	      HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
1596	      rtx lhs = op0, rhs = op1;
1597	      int had_mult = 0;
1598
1599	      if (GET_CODE (lhs) == NEG)
1600		coeff0 = -1, lhs = XEXP (lhs, 0);
1601	      else if (GET_CODE (lhs) == MULT
1602		       && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1603		{
1604		  coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
1605		  had_mult = 1;
1606		}
1607	      else if (GET_CODE (lhs) == ASHIFT
1608		       && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1609		       && INTVAL (XEXP (lhs, 1)) >= 0
1610		       && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1611		{
1612		  coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1613		  lhs = XEXP (lhs, 0);
1614		}
1615
1616	      if (GET_CODE (rhs) == NEG)
1617		coeff1 = - 1, rhs = XEXP (rhs, 0);
1618	      else if (GET_CODE (rhs) == MULT
1619		       && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1620		{
1621		  coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
1622		  had_mult = 1;
1623		}
1624	      else if (GET_CODE (rhs) == ASHIFT
1625		       && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1626		       && INTVAL (XEXP (rhs, 1)) >= 0
1627		       && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1628		{
1629		  coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1630		  rhs = XEXP (rhs, 0);
1631		}
1632
1633	      if (rtx_equal_p (lhs, rhs))
1634		{
1635		  tem = simplify_gen_binary (MULT, mode, lhs,
1636					     GEN_INT (coeff0 - coeff1));
1637		  return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
1638		}
1639	    }
1640
1641	  /* (a - (-b)) -> (a + b).  True even for IEEE.  */
1642	  if (GET_CODE (op1) == NEG)
1643	    return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
1644
1645	  /* (-x - c) may be simplified as (-c - x).  */
1646	  if (GET_CODE (op0) == NEG
1647	      && (GET_CODE (op1) == CONST_INT
1648		  || GET_CODE (op1) == CONST_DOUBLE))
1649	    {
1650	      tem = simplify_unary_operation (NEG, mode, op1, mode);
1651	      if (tem)
1652		return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
1653	    }
1654
1655	  /* If one of the operands is a PLUS or a MINUS, see if we can
1656	     simplify this by the associative law.
1657	     Don't use the associative law for floating point.
1658	     The inaccuracy makes it nonassociative,
1659	     and subtle programs can break if operations are associated.  */
1660
1661	  if (INTEGRAL_MODE_P (mode)
1662	      && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
1663		  || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
1664		  || (GET_CODE (op0) == CONST
1665		      && GET_CODE (XEXP (op0, 0)) == PLUS)
1666		  || (GET_CODE (op1) == CONST
1667		      && GET_CODE (XEXP (op1, 0)) == PLUS))
1668	      && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
1669	    return tem;
1670
1671	  /* Don't let a relocatable value get a negative coeff.  */
1672	  if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
1673	    return simplify_gen_binary (PLUS, mode,
1674					op0,
1675					neg_const_int (mode, op1));
1676
1677	  /* (x - (x & y)) -> (x & ~y) */
1678	  if (GET_CODE (op1) == AND)
1679	    {
1680	      if (rtx_equal_p (op0, XEXP (op1, 0)))
1681		{
1682		  tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
1683					    GET_MODE (XEXP (op1, 1)));
1684		  return simplify_gen_binary (AND, mode, op0, tem);
1685		}
1686	      if (rtx_equal_p (op0, XEXP (op1, 1)))
1687		{
1688		  tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
1689					    GET_MODE (XEXP (op1, 0)));
1690		  return simplify_gen_binary (AND, mode, op0, tem);
1691		}
1692	    }
1693	  break;
1694
1695	case MULT:
1696	  if (trueop1 == constm1_rtx)
1697	    return simplify_gen_unary (NEG, mode, op0, mode);
1698
1699	  /* Maybe simplify x * 0 to 0.  The reduction is not valid if
1700	     x is NaN, since x * 0 is then also NaN.  Nor is it valid
1701	     when the mode has signed zeros, since multiplying a negative
1702	     number by 0 will give -0, not 0.  */
1703	  if (!HONOR_NANS (mode)
1704	      && !HONOR_SIGNED_ZEROS (mode)
1705	      && trueop1 == CONST0_RTX (mode)
1706	      && ! side_effects_p (op0))
1707	    return op1;
1708
1709	  /* In IEEE floating point, x*1 is not equivalent to x for
1710	     signalling NaNs.  */
1711	  if (!HONOR_SNANS (mode)
1712	      && trueop1 == CONST1_RTX (mode))
1713	    return op0;
1714
1715	  /* Convert multiply by constant power of two into shift unless
1716	     we are still generating RTL.  This test is a kludge.  */
1717	  if (GET_CODE (trueop1) == CONST_INT
1718	      && (val = exact_log2 (INTVAL (trueop1))) >= 0
1719	      /* If the mode is larger than the host word size, and the
1720		 uppermost bit is set, then this isn't a power of two due
1721		 to implicit sign extension.  */
1722	      && (width <= HOST_BITS_PER_WIDE_INT
1723		  || val != HOST_BITS_PER_WIDE_INT - 1)
1724	      && ! rtx_equal_function_value_matters)
1725	    return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
1726
1727	  /* x*2 is x+x and x*(-1) is -x */
1728	  if (GET_CODE (trueop1) == CONST_DOUBLE
1729	      && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
1730	      && GET_MODE (op0) == mode)
1731	    {
1732	      REAL_VALUE_TYPE d;
1733	      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1734
1735	      if (REAL_VALUES_EQUAL (d, dconst2))
1736		return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
1737
1738	      if (REAL_VALUES_EQUAL (d, dconstm1))
1739		return simplify_gen_unary (NEG, mode, op0, mode);
1740	    }
1741
1742	  /* Reassociate multiplication, but for floating point MULTs
1743	     only when the user specifies unsafe math optimizations.  */
1744	  if (! FLOAT_MODE_P (mode)
1745	      || flag_unsafe_math_optimizations)
1746	    {
1747	      tem = simplify_associative_operation (code, mode, op0, op1);
1748	      if (tem)
1749		return tem;
1750	    }
1751	  break;
1752
1753	case IOR:
1754	  if (trueop1 == const0_rtx)
1755	    return op0;
1756	  if (GET_CODE (trueop1) == CONST_INT
1757	      && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1758	          == GET_MODE_MASK (mode)))
1759	    return op1;
1760	  if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1761	    return op0;
1762	  /* A | (~A) -> -1 */
1763	  if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1764	       || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1765	      && ! side_effects_p (op0)
1766	      && GET_MODE_CLASS (mode) != MODE_CC)
1767	    return constm1_rtx;
1768	  tem = simplify_associative_operation (code, mode, op0, op1);
1769	  if (tem)
1770	    return tem;
1771	  break;
1772
1773	case XOR:
1774	  if (trueop1 == const0_rtx)
1775	    return op0;
1776	  if (GET_CODE (trueop1) == CONST_INT
1777	      && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1778		  == GET_MODE_MASK (mode)))
1779	    return simplify_gen_unary (NOT, mode, op0, mode);
1780	  if (trueop0 == trueop1 && ! side_effects_p (op0)
1781	      && GET_MODE_CLASS (mode) != MODE_CC)
1782	    return const0_rtx;
1783	  tem = simplify_associative_operation (code, mode, op0, op1);
1784	  if (tem)
1785	    return tem;
1786	  break;
1787
1788	case AND:
1789	  if (trueop1 == const0_rtx && ! side_effects_p (op0))
1790	    return const0_rtx;
1791	  if (GET_CODE (trueop1) == CONST_INT
1792	      && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1793		  == GET_MODE_MASK (mode)))
1794	    return op0;
1795	  if (trueop0 == trueop1 && ! side_effects_p (op0)
1796	      && GET_MODE_CLASS (mode) != MODE_CC)
1797	    return op0;
1798	  /* A & (~A) -> 0 */
1799	  if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1800	       || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1801	      && ! side_effects_p (op0)
1802	      && GET_MODE_CLASS (mode) != MODE_CC)
1803	    return const0_rtx;
1804	  tem = simplify_associative_operation (code, mode, op0, op1);
1805	  if (tem)
1806	    return tem;
1807	  break;
1808
1809	case UDIV:
1810	  /* Convert divide by power of two into shift (divide by 1 handled
1811	     below).  */
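	  /* E.g. (udiv:SI X (const_int 4)) becomes
	     (lshiftrt:SI X (const_int 2)).  This is only valid for UDIV;
	     signed division truncates toward zero, while an arithmetic
	     right shift rounds toward minus infinity for negative
	     values.  */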
1812	  if (GET_CODE (trueop1) == CONST_INT
1813	      && (arg1 = exact_log2 (INTVAL (trueop1))) > 0)
1814	    return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (arg1));
1815
1816	  /* Fall through....  */
1817
1818	case DIV:
1819	  if (trueop1 == CONST1_RTX (mode))
1820	    {
1821	      /* On some platforms DIV uses a narrower mode than its
1822		 operands.  */
1823	      rtx x = gen_lowpart_common (mode, op0);
1824	      if (x)
1825		return x;
1826	      else if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
1827		return gen_lowpart_SUBREG (mode, op0);
1828	      else
1829		return op0;
1830	    }
1831
1832	  /* Maybe change 0 / x to 0.  This transformation isn't safe for
1833	     modes with NaNs, since 0 / 0 will then be NaN rather than 0.
1834	     Nor is it safe for modes with signed zeros, since dividing
1835	     0 by a negative number gives -0, not 0.  */
1836	  if (!HONOR_NANS (mode)
1837	      && !HONOR_SIGNED_ZEROS (mode)
1838	      && trueop0 == CONST0_RTX (mode)
1839	      && ! side_effects_p (op1))
1840	    return op0;
1841
1842	  /* Change division by a constant into multiplication.  Only do
1843	     this with -funsafe-math-optimizations.  */
1844	  else if (GET_CODE (trueop1) == CONST_DOUBLE
1845		   && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
1846		   && trueop1 != CONST0_RTX (mode)
1847		   && flag_unsafe_math_optimizations)
1848	    {
1849	      REAL_VALUE_TYPE d;
1850	      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1851
1852	      if (! REAL_VALUES_EQUAL (d, dconst0))
1853		{
1854		  REAL_ARITHMETIC (d, rtx_to_tree_code (DIV), dconst1, d);
1855		  tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1856		  return simplify_gen_binary (MULT, mode, op0, tem);
1857		}
1858	    }
1859	  break;
1860
1861	case UMOD:
1862	  /* Handle modulus by power of two (mod with 1 handled below).  */
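	  /* For an unsigned value, x % 8 equals x & 7, so
	     (umod:SI X (const_int 8)) becomes (and:SI X (const_int 7)).  */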
1863	  if (GET_CODE (trueop1) == CONST_INT
1864	      && exact_log2 (INTVAL (trueop1)) > 0)
1865	    return simplify_gen_binary (AND, mode, op0,
1866					GEN_INT (INTVAL (op1) - 1));
1867
1868	  /* Fall through....  */
1869
1870	case MOD:
1871	  if ((trueop0 == const0_rtx || trueop1 == const1_rtx)
1872	      && ! side_effects_p (op0) && ! side_effects_p (op1))
1873	    return const0_rtx;
1874	  break;
1875
1876	case ROTATERT:
1877	case ROTATE:
1878	case ASHIFTRT:
1879	  /* Rotating ~0 always results in ~0.  */
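	  /* E.g. in QImode, 0xff rotated by any amount is still 0xff, and
	     an arithmetic right shift of 0xff replicates the sign bit,
	     also yielding 0xff.  */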
1880	  if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
1881	      && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
1882	      && ! side_effects_p (op1))
1883	    return op0;
1884
1885	  /* Fall through....  */
1886
1887	case ASHIFT:
1888	case LSHIFTRT:
1889	  if (trueop1 == const0_rtx)
1890	    return op0;
1891	  if (trueop0 == const0_rtx && ! side_effects_p (op1))
1892	    return op0;
1893	  break;
1894
1895	case SMIN:
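	  /* (smin X C) folds to C when C is the minimum signed value of
	     the mode, since no value compares below it.  */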
1896	  if (width <= HOST_BITS_PER_WIDE_INT
1897	      && GET_CODE (trueop1) == CONST_INT
1898	      && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width - 1)
1899	      && ! side_effects_p (op0))
1900	    return op1;
1901	  if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1902	    return op0;
1903	  tem = simplify_associative_operation (code, mode, op0, op1);
1904	  if (tem)
1905	    return tem;
1906	  break;
1907
1908	case SMAX:
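	  /* (smax X C) folds to C when C is the maximum signed value of
	     the mode, GET_MODE_MASK (mode) >> 1, e.g. 0x7fffffff in
	     SImode.  */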
1909	  if (width <= HOST_BITS_PER_WIDE_INT
1910	      && GET_CODE (trueop1) == CONST_INT
1911	      && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
1912		  == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
1913	      && ! side_effects_p (op0))
1914	    return op1;
1915	  if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1916	    return op0;
1917	  tem = simplify_associative_operation (code, mode, op0, op1);
1918	  if (tem)
1919	    return tem;
1920	  break;
1921
1922	case UMIN:
1923	  if (trueop1 == const0_rtx && ! side_effects_p (op0))
1924	    return op1;
1925	  if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1926	    return op0;
1927	  tem = simplify_associative_operation (code, mode, op0, op1);
1928	  if (tem)
1929	    return tem;
1930	  break;
1931
1932	case UMAX:
1933	  if (trueop1 == constm1_rtx && ! side_effects_p (op0))
1934	    return op1;
1935	  if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1936	    return op0;
1937	  tem = simplify_associative_operation (code, mode, op0, op1);
1938	  if (tem)
1939	    return tem;
1940	  break;
1941
1942	case SS_PLUS:
1943	case US_PLUS:
1944	case SS_MINUS:
1945	case US_MINUS:
1946	  /* ??? There are simplifications that can be done.  */
1947	  return 0;
1948
1949	case VEC_SELECT:
1950	  if (!VECTOR_MODE_P (mode))
1951	    {
1952	      if (!VECTOR_MODE_P (GET_MODE (trueop0))
1953		  || (mode
1954		      != GET_MODE_INNER (GET_MODE (trueop0)))
1955		  || GET_CODE (trueop1) != PARALLEL
1956		  || XVECLEN (trueop1, 0) != 1
1957		  || GET_CODE (XVECEXP (trueop1, 0, 0)) != CONST_INT)
1958		abort ();
1959
1960	      if (GET_CODE (trueop0) == CONST_VECTOR)
1961		return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP (trueop1, 0, 0)));
1962	    }
1963	  else
1964	    {
1965	      if (!VECTOR_MODE_P (GET_MODE (trueop0))
1966		  || (GET_MODE_INNER (mode)
1967		      != GET_MODE_INNER (GET_MODE (trueop0)))
1968		  || GET_CODE (trueop1) != PARALLEL)
1969		abort ();
1970
1971	      if (GET_CODE (trueop0) == CONST_VECTOR)
1972		{
1973		  int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
1974		  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
1975		  rtvec v = rtvec_alloc (n_elts);
1976		  unsigned int i;
1977
1978		  if (XVECLEN (trueop1, 0) != (int) n_elts)
1979		    abort ();
1980		  for (i = 0; i < n_elts; i++)
1981		    {
1982		      rtx x = XVECEXP (trueop1, 0, i);
1983
1984		      if (GET_CODE (x) != CONST_INT)
1985			abort ();
1986		      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, INTVAL (x));
1987		    }
1988
1989		  return gen_rtx_CONST_VECTOR (mode, v);
1990		}
1991	    }
1992	  return 0;
1993	case VEC_CONCAT:
1994	  {
1995	    enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
1996					  ? GET_MODE (trueop0)
1997					  : GET_MODE_INNER (mode));
1998	    enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
1999					  ? GET_MODE (trueop1)
2000					  : GET_MODE_INNER (mode));
2001
2002	    if (!VECTOR_MODE_P (mode)
2003		|| (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
2004		    != GET_MODE_SIZE (mode)))
2005	      abort ();
2006
2007	    if ((VECTOR_MODE_P (op0_mode)
2008		 && (GET_MODE_INNER (mode)
2009		     != GET_MODE_INNER (op0_mode)))
2010		|| (!VECTOR_MODE_P (op0_mode)
2011		    && GET_MODE_INNER (mode) != op0_mode))
2012	      abort ();
2013
2014	    if ((VECTOR_MODE_P (op1_mode)
2015		 && (GET_MODE_INNER (mode)
2016		     != GET_MODE_INNER (op1_mode)))
2017		|| (!VECTOR_MODE_P (op1_mode)
2018		    && GET_MODE_INNER (mode) != op1_mode))
2019	      abort ();
2020
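	    /* When both operands are constant, fold the concatenation to
	       a CONST_VECTOR; e.g. concatenating the V2SI constants
	       {1, 2} and {3, 4} gives the V4SI constant {1, 2, 3, 4}.  */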
2021	    if ((GET_CODE (trueop0) == CONST_VECTOR
2022		 || GET_CODE (trueop0) == CONST_INT
2023		 || GET_CODE (trueop0) == CONST_DOUBLE)
2024		&& (GET_CODE (trueop1) == CONST_VECTOR
2025		    || GET_CODE (trueop1) == CONST_INT
2026		    || GET_CODE (trueop1) == CONST_DOUBLE))
2027	      {
2028		int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2029		unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2030		rtvec v = rtvec_alloc (n_elts);
2031		unsigned int i;
2032		unsigned in_n_elts = 1;
2033
2034		if (VECTOR_MODE_P (op0_mode))
2035		  in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
2036		for (i = 0; i < n_elts; i++)
2037		  {
2038		    if (i < in_n_elts)
2039		      {
2040			if (!VECTOR_MODE_P (op0_mode))
2041			  RTVEC_ELT (v, i) = trueop0;
2042			else
2043			  RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
2044		      }
2045		    else
2046		      {
2047			if (!VECTOR_MODE_P (op1_mode))
2048			  RTVEC_ELT (v, i) = trueop1;
2049			else
2050			  RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
2051							       i - in_n_elts);
2052		      }
2053		  }
2054
2055		return gen_rtx_CONST_VECTOR (mode, v);
2056	      }
2057	  }
2058	  return 0;
2059
2060	default:
2061	  abort ();
2062	}
2063
2064      return 0;
2065    }
2066
2067  /* Get the integer argument values in two forms:
2068     zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S.  */
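  /* For example, with WIDTH == 8 and the constant 0xff, ARG0 is 255
     while ARG0S is -1; the signed cases below operate on ARG0S/ARG1S
     and the unsigned cases on ARG0/ARG1.  */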
2069
2070  arg0 = INTVAL (trueop0);
2071  arg1 = INTVAL (trueop1);
2072
2073  if (width < HOST_BITS_PER_WIDE_INT)
2074    {
2075      arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
2076      arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
2077
2078      arg0s = arg0;
2079      if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
2080	arg0s |= ((HOST_WIDE_INT) (-1) << width);
2081
2082      arg1s = arg1;
2083      if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
2084	arg1s |= ((HOST_WIDE_INT) (-1) << width);
2085    }
2086  else
2087    {
2088      arg0s = arg0;
2089      arg1s = arg1;
2090    }
2091
2092  /* Compute the value of the arithmetic.  */
2093
2094  switch (code)
2095    {
2096    case PLUS:
2097      val = arg0s + arg1s;
2098      break;
2099
2100    case MINUS:
2101      val = arg0s - arg1s;
2102      break;
2103
2104    case MULT:
2105      val = arg0s * arg1s;
2106      break;
2107
2108    case DIV:
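      /* Punt on division by zero, and on dividing the most negative
	 value by -1, whose true result 2**(W-1) is not representable
	 in W bits.  */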
2109      if (arg1s == 0
2110	  || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2111	      && arg1s == -1))
2112	return 0;
2113      val = arg0s / arg1s;
2114      break;
2115
2116    case MOD:
2117      if (arg1s == 0
2118	  || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2119	      && arg1s == -1))
2120	return 0;
2121      val = arg0s % arg1s;
2122      break;
2123
2124    case UDIV:
2125      if (arg1 == 0
2126	  || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2127	      && arg1s == -1))
2128	return 0;
2129      val = (unsigned HOST_WIDE_INT) arg0 / arg1;
2130      break;
2131
2132    case UMOD:
2133      if (arg1 == 0
2134	  || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2135	      && arg1s == -1))
2136	return 0;
2137      val = (unsigned HOST_WIDE_INT) arg0 % arg1;
2138      break;
2139
2140    case AND:
2141      val = arg0 & arg1;
2142      break;
2143
2144    case IOR:
2145      val = arg0 | arg1;
2146      break;
2147
2148    case XOR:
2149      val = arg0 ^ arg1;
2150      break;
2151
2152    case LSHIFTRT:
2153      /* If shift count is undefined, don't fold it; let the machine do
2154	 what it wants.  But truncate it if the machine will do that.  */
2155      if (arg1 < 0)
2156	return 0;
2157
2158#ifdef SHIFT_COUNT_TRUNCATED
2159      if (SHIFT_COUNT_TRUNCATED)
2160	arg1 %= width;
2161#endif
2162
2163      val = ((unsigned HOST_WIDE_INT) arg0) >> arg1;
2164      break;
2165
2166    case ASHIFT:
2167      if (arg1 < 0)
2168	return 0;
2169
2170#ifdef SHIFT_COUNT_TRUNCATED
2171      if (SHIFT_COUNT_TRUNCATED)
2172	arg1 %= width;
2173#endif
2174
2175      val = ((unsigned HOST_WIDE_INT) arg0) << arg1;
2176      break;
2177
2178    case ASHIFTRT:
2179      if (arg1 < 0)
2180	return 0;
2181
2182#ifdef SHIFT_COUNT_TRUNCATED
2183      if (SHIFT_COUNT_TRUNCATED)
2184	arg1 %= width;
2185#endif
2186
2187      val = arg0s >> arg1;
2188
2189      /* Bootstrap compiler may not have sign extended the right shift.
2190	 Manually extend the sign to ensure bootstrap cc matches gcc.  */
2191      if (arg0s < 0 && arg1 > 0)
2192	val |= ((HOST_WIDE_INT) -1) << (HOST_BITS_PER_WIDE_INT - arg1);
2193
2194      break;
2195
2196    case ROTATERT:
2197      if (arg1 < 0)
2198	return 0;
2199
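      /* A right rotation by N ORs the low N bits, shifted up by
	 width - N, with the remaining bits shifted down by N; e.g.
	 rotating 0x01 right by 1 in QImode gives 0x80.  */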
2200      arg1 %= width;
2201      val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
2202	     | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
2203      break;
2204
2205    case ROTATE:
2206      if (arg1 < 0)
2207	return 0;
2208
2209      arg1 %= width;
2210      val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
2211	     | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
2212      break;
2213
2214    case COMPARE:
2215      /* Do nothing here.  */
2216      return 0;
2217
2218    case SMIN:
2219      val = arg0s <= arg1s ? arg0s : arg1s;
2220      break;
2221
2222    case UMIN:
2223      val = ((unsigned HOST_WIDE_INT) arg0
2224	     <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
2225      break;
2226
2227    case SMAX:
2228      val = arg0s > arg1s ? arg0s : arg1s;
2229      break;
2230
2231    case UMAX:
2232      val = ((unsigned HOST_WIDE_INT) arg0
2233	     > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
2234      break;
2235
2236    case SS_PLUS:
2237    case US_PLUS:
2238    case SS_MINUS:
2239    case US_MINUS:
2240      /* ??? There are simplifications that can be done.  */
2241      return 0;
2242
2243    default:
2244      abort ();
2245    }
2246
2247  val = trunc_int_for_mode (val, mode);
2248
2249  return GEN_INT (val);
2250}
2251
2252/* Simplify a PLUS or MINUS, at least one of whose operands may be another
2253   PLUS or MINUS.
2254
2255   Rather than test for specific cases, we do this by a brute-force method
2256   and do all possible simplifications until no more changes occur.  Then
2257   we rebuild the operation.
2258
2259   If FORCE is true, then always generate the rtx.  This is used to
2260   canonicalize stuff emitted from simplify_gen_binary.  Note that this
2261   can still fail if the rtx is too complex.  It won't fail just because
2262   the result is not 'simpler' than the input, however.  */
2263
2264struct simplify_plus_minus_op_data
2265{
2266  rtx op;
2267  int neg;
2268};
2269
2270static int
2271simplify_plus_minus_op_data_cmp (const void *p1, const void *p2)
2272{
2273  const struct simplify_plus_minus_op_data *d1 = p1;
2274  const struct simplify_plus_minus_op_data *d2 = p2;
2275
2276  return (commutative_operand_precedence (d2->op)
2277	  - commutative_operand_precedence (d1->op));
2278}
2279
2280static rtx
2281simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
2282		     rtx op1, int force)
2283{
2284  struct simplify_plus_minus_op_data ops[8];
2285  rtx result, tem;
2286  int n_ops = 2, input_ops = 2, input_consts = 0, n_consts;
2287  int first, changed;
2288  int i, j;
2289
2290  memset (ops, 0, sizeof ops);
2291
2292  /* Set up the two operands and then expand them until nothing has been
2293     changed.  If we run out of room in our array, give up; this should
2294     almost never happen.  */
2295
2296  ops[0].op = op0;
2297  ops[0].neg = 0;
2298  ops[1].op = op1;
2299  ops[1].neg = (code == MINUS);
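  /* E.g. simplifying (a - b) + (c - d) expands the two operands here
     into the entries {a, +}, {b, -}, {c, +}, {d, -}; the pairwise loop
     further below can then combine any two entries whose sum or
     difference simplifies.  */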
2300
2301  do
2302    {
2303      changed = 0;
2304
2305      for (i = 0; i < n_ops; i++)
2306	{
2307	  rtx this_op = ops[i].op;
2308	  int this_neg = ops[i].neg;
2309	  enum rtx_code this_code = GET_CODE (this_op);
2310
2311	  switch (this_code)
2312	    {
2313	    case PLUS:
2314	    case MINUS:
2315	      if (n_ops == 7)
2316		return NULL_RTX;
2317
2318	      ops[n_ops].op = XEXP (this_op, 1);
2319	      ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
2320	      n_ops++;
2321
2322	      ops[i].op = XEXP (this_op, 0);
2323	      input_ops++;
2324	      changed = 1;
2325	      break;
2326
2327	    case NEG:
2328	      ops[i].op = XEXP (this_op, 0);
2329	      ops[i].neg = ! this_neg;
2330	      changed = 1;
2331	      break;
2332
2333	    case CONST:
2334	      if (n_ops < 7
2335		  && GET_CODE (XEXP (this_op, 0)) == PLUS
2336		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
2337		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
2338		{
2339		  ops[i].op = XEXP (XEXP (this_op, 0), 0);
2340		  ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
2341		  ops[n_ops].neg = this_neg;
2342		  n_ops++;
2343		  input_consts++;
2344		  changed = 1;
2345		}
2346	      break;
2347
2348	    case NOT:
2349	      /* ~a -> (-a - 1) */
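	      /* In two's complement ~a equals -a - 1, e.g. ~5 is -6.
		 Recording the pair {-a, -1} lets the -1 combine with
		 other constants in the second pass.  */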
2350	      if (n_ops != 7)
2351		{
2352		  ops[n_ops].op = constm1_rtx;
2353		  ops[n_ops++].neg = this_neg;
2354		  ops[i].op = XEXP (this_op, 0);
2355		  ops[i].neg = !this_neg;
2356		  changed = 1;
2357		}
2358	      break;
2359
2360	    case CONST_INT:
2361	      if (this_neg)
2362		{
2363		  ops[i].op = neg_const_int (mode, this_op);
2364		  ops[i].neg = 0;
2365		  changed = 1;
2366		}
2367	      break;
2368
2369	    default:
2370	      break;
2371	    }
2372	}
2373    }
2374  while (changed);
2375
2376  /* If we only have two operands, we can't do anything.  */
2377  if (n_ops <= 2 && !force)
2378    return NULL_RTX;
2379
2380  /* Count the number of CONSTs we didn't split above.  */
2381  for (i = 0; i < n_ops; i++)
2382    if (GET_CODE (ops[i].op) == CONST)
2383      input_consts++;
2384
2385  /* Now simplify each pair of operands until nothing changes.  The first
2386     time through just simplify constants against each other.  */
2387
2388  first = 1;
2389  do
2390    {
2391      changed = first;
2392
2393      for (i = 0; i < n_ops - 1; i++)
2394	for (j = i + 1; j < n_ops; j++)
2395	  {
2396	    rtx lhs = ops[i].op, rhs = ops[j].op;
2397	    int lneg = ops[i].neg, rneg = ops[j].neg;
2398
2399	    if (lhs != 0 && rhs != 0
2400		&& (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs))))
2401	      {
2402		enum rtx_code ncode = PLUS;
2403
2404		if (lneg != rneg)
2405		  {
2406		    ncode = MINUS;
2407		    if (lneg)
2408		      tem = lhs, lhs = rhs, rhs = tem;
2409		  }
2410		else if (swap_commutative_operands_p (lhs, rhs))
2411		  tem = lhs, lhs = rhs, rhs = tem;
2412
2413		tem = simplify_binary_operation (ncode, mode, lhs, rhs);
2414
2415		/* Reject "simplifications" that just wrap the two
2416		   arguments in a CONST.  Failure to do so can result
2417		   in infinite recursion with simplify_binary_operation
2418		   when it calls us to simplify CONST operations.  */
2419		if (tem
2420		    && ! (GET_CODE (tem) == CONST
2421			  && GET_CODE (XEXP (tem, 0)) == ncode
2422			  && XEXP (XEXP (tem, 0), 0) == lhs
2423			  && XEXP (XEXP (tem, 0), 1) == rhs)
2424		    /* Don't allow -x + -1 -> ~x simplifications in the
2425		       first pass.  This allows us the chance to combine
2426		       the -1 with other constants.  */
2427		    && ! (first
2428			  && GET_CODE (tem) == NOT
2429			  && XEXP (tem, 0) == rhs))
2430		  {
2431		    lneg &= rneg;
2432		    if (GET_CODE (tem) == NEG)
2433		      tem = XEXP (tem, 0), lneg = !lneg;
2434		    if (GET_CODE (tem) == CONST_INT && lneg)
2435		      tem = neg_const_int (mode, tem), lneg = 0;
2436
2437		    ops[i].op = tem;
2438		    ops[i].neg = lneg;
2439		    ops[j].op = NULL_RTX;
2440		    changed = 1;
2441		  }
2442	      }
2443	  }
2444
2445      first = 0;
2446    }
2447  while (changed);
2448
2449  /* Pack all the operands to the lower-numbered entries.  */
2450  for (i = 0, j = 0; j < n_ops; j++)
2451    if (ops[j].op)
2452      ops[i++] = ops[j];
2453  n_ops = i;
2454
2455  /* Sort the operations based on swap_commutative_operands_p.  */
2456  qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp);
2457
2458  /* Create (minus -C X) instead of (neg (const (plus X C))).  */
2459  if (n_ops == 2
2460      && GET_CODE (ops[1].op) == CONST_INT
2461      && CONSTANT_P (ops[0].op)
2462      && ops[0].neg)
2463    return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
2464
2465  /* We suppressed creation of trivial CONST expressions in the
2466     combination loop to avoid recursion.  Create one manually now.
2467     The combination loop should have ensured that there is exactly
2468     one CONST_INT, and the sort will have ensured that it is last
2469     in the array and that any other constant will be next-to-last.  */
2470
2471  if (n_ops > 1
2472      && GET_CODE (ops[n_ops - 1].op) == CONST_INT
2473      && CONSTANT_P (ops[n_ops - 2].op))
2474    {
2475      rtx value = ops[n_ops - 1].op;
2476      if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
2477	value = neg_const_int (mode, value);
2478      ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
2479      n_ops--;
2480    }
2481
2482  /* Count the number of CONSTs that we generated.  */
2483  n_consts = 0;
2484  for (i = 0; i < n_ops; i++)
2485    if (GET_CODE (ops[i].op) == CONST)
2486      n_consts++;
2487
2488  /* Give up if we didn't reduce the number of operands we had.  Make
2489     sure we count a CONST as two operands.  If we have the same
2490     number of operands, but have made more CONSTs than before, this
2491     is also an improvement, so accept it.  */
2492  if (!force
2493      && (n_ops + n_consts > input_ops
2494	  || (n_ops + n_consts == input_ops && n_consts <= input_consts)))
2495    return NULL_RTX;
2496
2497  /* Put a non-negated operand first, if possible.  */
2498
2499  for (i = 0; i < n_ops && ops[i].neg; i++)
2500    continue;
2501  if (i == n_ops)
2502    ops[0].op = gen_rtx_NEG (mode, ops[0].op);
2503  else if (i != 0)
2504    {
2505      tem = ops[0].op;
2506      ops[0] = ops[i];
2507      ops[i].op = tem;
2508      ops[i].neg = 1;
2509    }
2510
2511  /* Now make the result by performing the requested operations.  */
2512  result = ops[0].op;
2513  for (i = 1; i < n_ops; i++)
2514    result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
2515			     mode, result, ops[i].op);
2516
2517  return result;
2518}
2519
2520/* Like simplify_binary_operation except used for relational operators.
2521   MODE is the mode of the operands, not that of the result.  If MODE
2522   is VOIDmode, both operands must also be VOIDmode and we compare the
2523   operands in "infinite precision".
2524
2525   If no simplification is possible, this function returns zero.  Otherwise,
2526   it returns either const_true_rtx or const0_rtx.  */
2527
2528rtx
2529simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
2530			       rtx op0, rtx op1)
2531{
2532  int equal, op0lt, op0ltu, op1lt, op1ltu;
2533  rtx tem;
2534  rtx trueop0;
2535  rtx trueop1;
2536
2537  if (mode == VOIDmode
2538      && (GET_MODE (op0) != VOIDmode
2539	  || GET_MODE (op1) != VOIDmode))
2540    abort ();
2541
2542  /* If op0 is a compare, extract the comparison arguments from it.  */
2543  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
2544    op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
2545
2546  trueop0 = avoid_constant_pool_reference (op0);
2547  trueop1 = avoid_constant_pool_reference (op1);
2548
2549  /* We can't simplify MODE_CC values since we don't know what the
2550     actual comparison is.  */
2551  if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
2552    return 0;
2553
2554  /* Make sure the constant is second.  */
2555  if (swap_commutative_operands_p (trueop0, trueop1))
2556    {
2557      tem = op0, op0 = op1, op1 = tem;
2558      tem = trueop0, trueop0 = trueop1, trueop1 = tem;
2559      code = swap_condition (code);
2560    }
2561
2562  /* For integer comparisons of A and B maybe we can simplify A - B and can
2563     then simplify a comparison of that with zero.  If A and B are both either
2564     a register or a CONST_INT, this can't help; testing for these cases will
2565     prevent infinite recursion here and speed things up.
2566
2567     If CODE is an unsigned comparison, then we can never do this optimization,
2568     because it gives an incorrect result if the subtraction wraps around zero.
2569     ANSI C defines unsigned operations such that they never overflow, and
2570     thus such cases cannot be ignored.  */
2571
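  /* E.g. in SImode, 0xffffffff LTU 1 is false, but the difference
     0xfffffffe is negative when viewed as signed, so testing it
     against zero with LT would wrongly yield true.  */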
2572  if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
2573      && ! ((GET_CODE (op0) == REG || GET_CODE (trueop0) == CONST_INT)
2574	    && (GET_CODE (op1) == REG || GET_CODE (trueop1) == CONST_INT))
2575      && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
2576      /* We cannot do this for == or != if tem is a nonzero address.  */
2577      && ((code != EQ && code != NE) || ! nonzero_address_p (tem))
2578      && code != GTU && code != GEU && code != LTU && code != LEU)
2579    return simplify_relational_operation (signed_condition (code),
2580					  mode, tem, const0_rtx);
2581
2582  if (flag_unsafe_math_optimizations && code == ORDERED)
2583    return const_true_rtx;
2584
2585  if (flag_unsafe_math_optimizations && code == UNORDERED)
2586    return const0_rtx;
2587
2588  /* For modes without NaNs, if the two operands are equal, we know the
2589     result except if they have side-effects.  */
2590  if (! HONOR_NANS (GET_MODE (trueop0))
2591      && rtx_equal_p (trueop0, trueop1)
2592      && ! side_effects_p (trueop0))
2593    equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
2594
2595  /* If the operands are floating-point constants, see if we can fold
2596     the result.  */
2597  else if (GET_CODE (trueop0) == CONST_DOUBLE
2598	   && GET_CODE (trueop1) == CONST_DOUBLE
2599	   && GET_MODE_CLASS (GET_MODE (trueop0)) == MODE_FLOAT)
2600    {
2601      REAL_VALUE_TYPE d0, d1;
2602
2603      REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
2604      REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
2605
2606      /* Comparisons are unordered iff at least one of the values is NaN.  */
2607      if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
2608	switch (code)
2609	  {
2610	  case UNEQ:
2611	  case UNLT:
2612	  case UNGT:
2613	  case UNLE:
2614	  case UNGE:
2615	  case NE:
2616	  case UNORDERED:
2617	    return const_true_rtx;
2618	  case EQ:
2619	  case LT:
2620	  case GT:
2621	  case LE:
2622	  case GE:
2623	  case LTGT:
2624	  case ORDERED:
2625	    return const0_rtx;
2626	  default:
2627	    return 0;
2628	  }
2629
2630      equal = REAL_VALUES_EQUAL (d0, d1);
2631      op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
2632      op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
2633    }
2634
2635  /* Otherwise, see if the operands are both integers.  */
2636  else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
2637	   && (GET_CODE (trueop0) == CONST_DOUBLE
2638	       || GET_CODE (trueop0) == CONST_INT)
2639	   && (GET_CODE (trueop1) == CONST_DOUBLE
2640	       || GET_CODE (trueop1) == CONST_INT))
2641    {
2642      int width = GET_MODE_BITSIZE (mode);
2643      HOST_WIDE_INT l0s, h0s, l1s, h1s;
2644      unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
2645
2646      /* Get the two words comprising each integer constant.  */
2647      if (GET_CODE (trueop0) == CONST_DOUBLE)
2648	{
2649	  l0u = l0s = CONST_DOUBLE_LOW (trueop0);
2650	  h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
2651	}
2652      else
2653	{
2654	  l0u = l0s = INTVAL (trueop0);
2655	  h0u = h0s = HWI_SIGN_EXTEND (l0s);
2656	}
2657
2658      if (GET_CODE (trueop1) == CONST_DOUBLE)
2659	{
2660	  l1u = l1s = CONST_DOUBLE_LOW (trueop1);
2661	  h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
2662	}
2663      else
2664	{
2665	  l1u = l1s = INTVAL (trueop1);
2666	  h1u = h1s = HWI_SIGN_EXTEND (l1s);
2667	}
2668
2669      /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
2670	 we have to sign or zero-extend the values.  */
2671      if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
2672	{
2673	  l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
2674	  l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
2675
2676	  if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
2677	    l0s |= ((HOST_WIDE_INT) (-1) << width);
2678
2679	  if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
2680	    l1s |= ((HOST_WIDE_INT) (-1) << width);
2681	}
2682      if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
2683	h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
2684
2685      equal = (h0u == h1u && l0u == l1u);
2686      op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
2687      op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
2688      op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
2689      op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
2690    }
2691
2692  /* Otherwise, there are some code-specific tests we can make.  */
2693  else
2694    {
2695      switch (code)
2696	{
2697	case EQ:
2698	  if (trueop1 == const0_rtx && nonzero_address_p (op0))
2699	    return const0_rtx;
2700	  break;
2701
2702	case NE:
2703	  if (trueop1 == const0_rtx && nonzero_address_p (op0))
2704	    return const_true_rtx;
2705	  break;
2706
2707	case GEU:
2708	  /* Unsigned values are never negative.  */
2709	  if (trueop1 == const0_rtx)
2710	    return const_true_rtx;
2711	  break;
2712
2713	case LTU:
2714	  if (trueop1 == const0_rtx)
2715	    return const0_rtx;
2716	  break;
2717
2718	case LEU:
2719	  /* Unsigned values are never greater than the largest
2720	     unsigned value.  */
2721	  if (GET_CODE (trueop1) == CONST_INT
2722	      && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
2723	      && INTEGRAL_MODE_P (mode))
2724	    return const_true_rtx;
2725	  break;
2726
2727	case GTU:
2728	  if (GET_CODE (trueop1) == CONST_INT
2729	      && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
2730	      && INTEGRAL_MODE_P (mode))
2731	    return const0_rtx;
2732	  break;
2733
2734	case LT:
2735	  /* Optimize abs(x) < 0.0.  */
2736	  if (trueop1 == CONST0_RTX (mode) && !HONOR_SNANS (mode))
2737	    {
2738	      tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
2739						       : trueop0;
2740	      if (GET_CODE (tem) == ABS)
2741		return const0_rtx;
2742	    }
2743	  break;
2744
2745	case GE:
2746	  /* Optimize abs(x) >= 0.0.  */
2747	  if (trueop1 == CONST0_RTX (mode) && !HONOR_NANS (mode))
2748	    {
2749	      tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
2750						       : trueop0;
2751	      if (GET_CODE (tem) == ABS)
2752		return const_true_rtx;
2753	    }
2754	  break;
2755
2756	case UNGE:
2757	  /* Optimize ! (abs(x) < 0.0).  */
2758	  if (trueop1 == CONST0_RTX (mode))
2759	    {
2760	      tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
2761						       : trueop0;
2762	      if (GET_CODE (tem) == ABS)
2763		return const_true_rtx;
2764	    }
2765	  break;
2766
2767	default:
2768	  break;
2769	}
2770
2771      return 0;
2772    }
2773
2774  /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
2775     as appropriate.  */
2776  switch (code)
2777    {
2778    case EQ:
2779    case UNEQ:
2780      return equal ? const_true_rtx : const0_rtx;
2781    case NE:
2782    case LTGT:
2783      return ! equal ? const_true_rtx : const0_rtx;
2784    case LT:
2785    case UNLT:
2786      return op0lt ? const_true_rtx : const0_rtx;
2787    case GT:
2788    case UNGT:
2789      return op1lt ? const_true_rtx : const0_rtx;
2790    case LTU:
2791      return op0ltu ? const_true_rtx : const0_rtx;
2792    case GTU:
2793      return op1ltu ? const_true_rtx : const0_rtx;
2794    case LE:
2795    case UNLE:
2796      return equal || op0lt ? const_true_rtx : const0_rtx;
2797    case GE:
2798    case UNGE:
2799      return equal || op1lt ? const_true_rtx : const0_rtx;
2800    case LEU:
2801      return equal || op0ltu ? const_true_rtx : const0_rtx;
2802    case GEU:
2803      return equal || op1ltu ? const_true_rtx : const0_rtx;
2804    case ORDERED:
2805      return const_true_rtx;
2806    case UNORDERED:
2807      return const0_rtx;
2808    default:
2809      abort ();
2810    }
2811}
2812
2813/* Simplify CODE, an operation with result mode MODE and three operands,
2814   OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
2815   a constant.  Return 0 if no simplification is possible.  */
2816
2817rtx
2818simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
2819			    enum machine_mode op0_mode, rtx op0, rtx op1,
2820			    rtx op2)
2821{
2822  unsigned int width = GET_MODE_BITSIZE (mode);
2823
2824  /* VOIDmode means "infinite" precision.  */
2825  if (width == 0)
2826    width = HOST_BITS_PER_WIDE_INT;
2827
2828  switch (code)
2829    {
2830    case SIGN_EXTRACT:
2831    case ZERO_EXTRACT:
2832      if (GET_CODE (op0) == CONST_INT
2833	  && GET_CODE (op1) == CONST_INT
2834	  && GET_CODE (op2) == CONST_INT
2835	  && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
2836	  && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
2837	{
2838	  /* Extracting a bit-field from a constant.  */
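	  /* E.g. a ZERO_EXTRACT of 4 bits starting at bit 4 (with
	     little-endian bit numbering) from the constant 0xabcd
	     shifts right by 4 and masks with 0xf, yielding 0xc.  */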
2839	  HOST_WIDE_INT val = INTVAL (op0);
2840
2841	  if (BITS_BIG_ENDIAN)
2842	    val >>= (GET_MODE_BITSIZE (op0_mode)
2843		     - INTVAL (op2) - INTVAL (op1));
2844	  else
2845	    val >>= INTVAL (op2);
2846
2847	  if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
2848	    {
2849	      /* First zero-extend.  */
2850	      val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
2851	      /* If desired, propagate sign bit.  */
2852	      if (code == SIGN_EXTRACT
2853		  && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
2854		val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
2855	    }
2856
2857	  /* Clear the bits that don't belong in our mode,
2858	     unless they and our sign bit are all one.
2859	     So we get either a reasonable negative value or a reasonable
2860	     unsigned value for this mode.  */
2861	  if (width < HOST_BITS_PER_WIDE_INT
2862	      && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
2863		  != ((HOST_WIDE_INT) (-1) << (width - 1))))
2864	    val &= ((HOST_WIDE_INT) 1 << width) - 1;
2865
2866	  return GEN_INT (val);
2867	}
2868      break;
2869
2870    case IF_THEN_ELSE:
2871      if (GET_CODE (op0) == CONST_INT)
2872	return op0 != const0_rtx ? op1 : op2;
2873
2874      /* Convert c ? a : a into "a".  */
2875      if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
2876	return op1;
2877
2878      /* Convert a != b ? a : b into "a".  */
2879      if (GET_CODE (op0) == NE
2880	  && ! side_effects_p (op0)
2881	  && ! HONOR_NANS (mode)
2882	  && ! HONOR_SIGNED_ZEROS (mode)
2883	  && ((rtx_equal_p (XEXP (op0, 0), op1)
2884	       && rtx_equal_p (XEXP (op0, 1), op2))
2885	      || (rtx_equal_p (XEXP (op0, 0), op2)
2886		  && rtx_equal_p (XEXP (op0, 1), op1))))
2887	return op1;
2888
2889      /* Convert a == b ? a : b into "b".  */
2890      if (GET_CODE (op0) == EQ
2891	  && ! side_effects_p (op0)
2892	  && ! HONOR_NANS (mode)
2893	  && ! HONOR_SIGNED_ZEROS (mode)
2894	  && ((rtx_equal_p (XEXP (op0, 0), op1)
2895	       && rtx_equal_p (XEXP (op0, 1), op2))
2896	      || (rtx_equal_p (XEXP (op0, 0), op2)
2897		  && rtx_equal_p (XEXP (op0, 1), op1))))
2898	return op2;
2899
2900      if (GET_RTX_CLASS (GET_CODE (op0)) == '<' && ! side_effects_p (op0))
2901	{
2902	  enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
2903					? GET_MODE (XEXP (op0, 1))
2904					: GET_MODE (XEXP (op0, 0)));
2905	  rtx temp;
2906	  if (cmp_mode == VOIDmode)
2907	    cmp_mode = op0_mode;
2908	  temp = simplify_relational_operation (GET_CODE (op0), cmp_mode,
2909					        XEXP (op0, 0), XEXP (op0, 1));
2910
2911	  /* See if any simplifications were possible.  */
2912	  if (temp == const0_rtx)
2913	    return op2;
2914	  else if (temp == const_true_rtx)
2915	    return op1;
2916	  else if (temp)
2917	    abort ();
2918
2919	  /* Look for happy constants in op1 and op2.  */
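	  /* When the arms are STORE_FLAG_VALUE and zero, the IF_THEN_ELSE
	     merely materializes the comparison result, so it collapses to
	     the comparison itself, reversed when the arms are swapped.  */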
2920	  if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
2921	    {
2922	      HOST_WIDE_INT t = INTVAL (op1);
2923	      HOST_WIDE_INT f = INTVAL (op2);
2924
2925	      if (t == STORE_FLAG_VALUE && f == 0)
2926	        code = GET_CODE (op0);
2927	      else if (t == 0 && f == STORE_FLAG_VALUE)
2928		{
2929		  enum rtx_code tmp;
2930		  tmp = reversed_comparison_code (op0, NULL_RTX);
2931		  if (tmp == UNKNOWN)
2932		    break;
2933		  code = tmp;
2934		}
2935	      else
2936		break;
2937
2938	      return gen_rtx_fmt_ee (code, mode, XEXP (op0, 0), XEXP (op0, 1));
2939	    }
2940	}
2941      break;
2942
2943    case VEC_MERGE:
2944      if (GET_MODE (op0) != mode
2945	  || GET_MODE (op1) != mode
2946	  || !VECTOR_MODE_P (mode))
2947	abort ();
2948      op2 = avoid_constant_pool_reference (op2);
2949      if (GET_CODE (op2) == CONST_INT)
2950	{
2951          int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2952	  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2953	  int mask = (1 << n_elts) - 1;
2954
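	  /* Bit I of the mask selects element I from op0 when set and
	     from op1 when clear, so an all-zero mask yields op1 and an
	     all-ones mask yields op0.  */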
2955	  if (!(INTVAL (op2) & mask))
2956	    return op1;
2957	  if ((INTVAL (op2) & mask) == mask)
2958	    return op0;
2959
2960	  op0 = avoid_constant_pool_reference (op0);
2961	  op1 = avoid_constant_pool_reference (op1);
2962	  if (GET_CODE (op0) == CONST_VECTOR
2963	      && GET_CODE (op1) == CONST_VECTOR)
2964	    {
2965	      rtvec v = rtvec_alloc (n_elts);
2966	      unsigned int i;
2967
2968	      for (i = 0; i < n_elts; i++)
2969		RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
2970				    ? CONST_VECTOR_ELT (op0, i)
2971				    : CONST_VECTOR_ELT (op1, i));
2972	      return gen_rtx_CONST_VECTOR (mode, v);
2973	    }
2974	}
2975      break;
2976
2977    default:
2978      abort ();
2979    }
2980
2981  return 0;
2982}
2983
2984/* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_VECTOR,
2985   returning another CONST_INT or CONST_DOUBLE or CONST_VECTOR.
2986
2987   Works by unpacking OP into a collection of 8-bit values
2988   represented as a little-endian array of 'unsigned char', selecting by BYTE,
2989   and then repacking them again for OUTERMODE.  */
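/* For example, taking the HImode subreg at byte 0 of the SImode
   constant 0x12345678 on a little-endian target: the value unpacks
   into the bytes {0x78, 0x56, 0x34, 0x12}, BYTE selects offset 0, and
   the first two bytes repack into the HImode constant 0x5678.  */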
2990
2991static rtx
2992simplify_immed_subreg (enum machine_mode outermode, rtx op,
2993		       enum machine_mode innermode, unsigned int byte)
2994{
2995  /* We support up to 512-bit values (for V8DFmode).  */
2996  enum {
2997    max_bitsize = 512,
2998    value_bit = 8,
2999    value_mask = (1 << value_bit) - 1
3000  };
3001  unsigned char value[max_bitsize / value_bit];
3002  int value_start;
3003  int i;
3004  int elem;
3005
3006  int num_elem;
3007  rtx * elems;
3008  int elem_bitsize;
3009  rtx result_s;
3010  rtvec result_v = NULL;
3011  enum mode_class outer_class;
3012  enum machine_mode outer_submode;
3013
3014  /* Some ports misuse CCmode.  */
3015  if (GET_MODE_CLASS (outermode) == MODE_CC && GET_CODE (op) == CONST_INT)
3016    return op;
3017
3018  /* Unpack the value.  */
3019
3020  if (GET_CODE (op) == CONST_VECTOR)
3021    {
3022      num_elem = CONST_VECTOR_NUNITS (op);
3023      elems = &CONST_VECTOR_ELT (op, 0);
3024      elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
3025    }
3026  else
3027    {
3028      num_elem = 1;
3029      elems = &op;
3030      elem_bitsize = max_bitsize;
3031    }
3032
3033  if (BITS_PER_UNIT % value_bit != 0)
3034    abort ();  /* Too complicated; reducing value_bit may help.  */
3035  if (elem_bitsize % BITS_PER_UNIT != 0)
3036    abort ();  /* I don't know how to handle endianness of sub-units.  */
3037
3038  for (elem = 0; elem < num_elem; elem++)
3039    {
3040      unsigned char * vp;
3041      rtx el = elems[elem];
3042
3043      /* Vectors are kept in target memory order.  (This is probably
3044	 a mistake.)  */
3045      {
3046	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
3047	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
3048			  / BITS_PER_UNIT);
3049	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3050	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3051	unsigned bytele = (subword_byte % UNITS_PER_WORD
3052			 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3053	vp = value + (bytele * BITS_PER_UNIT) / value_bit;
3054      }
3055
3056      switch (GET_CODE (el))
3057	{
3058	case CONST_INT:
3059	  for (i = 0;
3060	       i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
3061	       i += value_bit)
3062	    *vp++ = INTVAL (el) >> i;
3063	  /* CONST_INTs are always logically sign-extended.  */
3064	  for (; i < elem_bitsize; i += value_bit)
3065	    *vp++ = INTVAL (el) < 0 ? -1 : 0;
3066	  break;
3067
3068	case CONST_DOUBLE:
3069	  if (GET_MODE (el) == VOIDmode)
3070	    {
3071	      /* If this triggers, someone should have generated a
3072		 CONST_INT instead.  */
3073	      if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
3074		abort ();
3075
3076	      for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
3077		*vp++ = CONST_DOUBLE_LOW (el) >> i;
3078	      while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
3079		{
3080		  *vp++
3081		    = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
3082		  i += value_bit;
3083		}
3084	      /* It shouldn't matter what's done here, so fill it with
3085		 zero.  */
3086	      for (; i < max_bitsize; i += value_bit)
3087		*vp++ = 0;
3088	    }
3089	  else if (GET_MODE_CLASS (GET_MODE (el)) == MODE_FLOAT)
3090	    {
3091	      long tmp[max_bitsize / 32];
3092	      int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
3093
3094	      if (bitsize > elem_bitsize)
3095		abort ();
3096	      if (bitsize % value_bit != 0)
3097		abort ();
3098
3099	      real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
3100			      GET_MODE (el));
3101
3102	      /* real_to_target produces its result in words affected by
3103		 FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
3104		 and use WORDS_BIG_ENDIAN instead; see the documentation
3105	         of SUBREG in rtl.texi.  */
3106	      for (i = 0; i < bitsize; i += value_bit)
3107		{
3108		  int ibase;
3109		  if (WORDS_BIG_ENDIAN)
3110		    ibase = bitsize - 1 - i;
3111		  else
3112		    ibase = i;
3113		  *vp++ = tmp[ibase / 32] >> i % 32;
3114		}
3115
3116	      /* It shouldn't matter what's done here, so fill it with
3117		 zero.  */
3118	      for (; i < elem_bitsize; i += value_bit)
3119		*vp++ = 0;
3120	    }
3121	  else
3122	    abort ();
3123	  break;
3124
3125	default:
3126	  abort ();
3127	}
3128    }
3129
3130  /* Now, pick the right byte to start with.  */
3131  /* Renumber BYTE so that the least-significant byte is byte 0.  A special
3132     case is paradoxical SUBREGs, which shouldn't be adjusted since they
3133     will already have offset 0.  */
3134  if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
3135    {
3136      unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
3137			- byte);
3138      unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3139      unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3140      byte = (subword_byte % UNITS_PER_WORD
3141	      + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3142    }
3143
3144  /* BYTE should still be inside OP.  (Note that BYTE is unsigned,
3145     so if it's become negative it will instead be very large.)  */
3146  if (byte >= GET_MODE_SIZE (innermode))
3147    abort ();
3148
3149  /* Convert from bytes to chunks of size value_bit.  */
3150  value_start = byte * (BITS_PER_UNIT / value_bit);
3151
3152  /* Re-pack the value.  */
3153
3154  if (VECTOR_MODE_P (outermode))
3155    {
3156      num_elem = GET_MODE_NUNITS (outermode);
3157      result_v = rtvec_alloc (num_elem);
3158      elems = &RTVEC_ELT (result_v, 0);
3159      outer_submode = GET_MODE_INNER (outermode);
3160    }
3161  else
3162    {
3163      num_elem = 1;
3164      elems = &result_s;
3165      outer_submode = outermode;
3166    }
3167
3168  outer_class = GET_MODE_CLASS (outer_submode);
3169  elem_bitsize = GET_MODE_BITSIZE (outer_submode);
3170
3171  if (elem_bitsize % value_bit != 0)
3172    abort ();
3173  if (elem_bitsize + value_start * value_bit > max_bitsize)
3174    abort ();
3175
3176  for (elem = 0; elem < num_elem; elem++)
3177    {
3178      unsigned char *vp;
3179
3180      /* Vectors are stored in target memory order.  (This is probably
3181	 a mistake.)  */
3182      {
3183	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
3184	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
3185			  / BITS_PER_UNIT);
3186	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3187	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3188	unsigned bytele = (subword_byte % UNITS_PER_WORD
3189			 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3190	vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
3191      }
3192
3193      switch (outer_class)
3194	{
3195	case MODE_INT:
3196	case MODE_PARTIAL_INT:
3197	  {
3198	    unsigned HOST_WIDE_INT hi = 0, lo = 0;
3199
3200	    for (i = 0;
3201		 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
3202		 i += value_bit)
3203	      lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
3204	    for (; i < elem_bitsize; i += value_bit)
3205	      hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
3206		     << (i - HOST_BITS_PER_WIDE_INT));
3207
3208	    /* immed_double_const doesn't call trunc_int_for_mode.  I don't
3209	       know why.  */
3210	    if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
3211	      elems[elem] = gen_int_mode (lo, outer_submode);
3212	    else
3213	      elems[elem] = immed_double_const (lo, hi, outer_submode);
3214	  }
3215	  break;
3216
3217	case MODE_FLOAT:
3218	  {
3219	    REAL_VALUE_TYPE r;
3220	    long tmp[max_bitsize / 32];
3221
3222	    /* real_from_target wants its input in words affected by
3223	       FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
3224	       and use WORDS_BIG_ENDIAN instead; see the documentation
3225	       of SUBREG in rtl.texi.  */
3226	    for (i = 0; i < max_bitsize / 32; i++)
3227	      tmp[i] = 0;
3228	    for (i = 0; i < elem_bitsize; i += value_bit)
3229	      {
3230		int ibase;
3231		if (WORDS_BIG_ENDIAN)
3232		  ibase = elem_bitsize - 1 - i;
3233		else
3234		  ibase = i;
3235		tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
3236	      }
3237
3238	    real_from_target (&r, tmp, outer_submode);
3239	    elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
3240	  }
3241	  break;
3242
3243	default:
3244	  abort ();
3245	}
3246    }
3247  if (VECTOR_MODE_P (outermode))
3248    return gen_rtx_CONST_VECTOR (outermode, result_v);
3249  else
3250    return result_s;
3251}
3252
3253/* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
3254   Return 0 if no simplifications are possible.  */
3255rtx
3256simplify_subreg (enum machine_mode outermode, rtx op,
3257		 enum machine_mode innermode, unsigned int byte)
3258{
3259  /* Little bit of sanity checking.  */
3260  if (innermode == VOIDmode || outermode == VOIDmode
3261      || innermode == BLKmode || outermode == BLKmode)
3262    abort ();
3263
3264  if (GET_MODE (op) != innermode
3265      && GET_MODE (op) != VOIDmode)
3266    abort ();
3267
3268  if (byte % GET_MODE_SIZE (outermode)
3269      || byte >= GET_MODE_SIZE (innermode))
3270    abort ();
3271
3272  if (outermode == innermode && !byte)
3273    return op;
3274
3275  if (GET_CODE (op) == CONST_INT
3276      || GET_CODE (op) == CONST_DOUBLE
3277      || GET_CODE (op) == CONST_VECTOR)
3278    return simplify_immed_subreg (outermode, op, innermode, byte);
3279
3280  /* Changing mode twice with SUBREG => just change it once,
3281     or not at all if changing back to op's starting mode.  */
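  /* E.g. (subreg:QI (subreg:HI (reg:SI R) 0) 0) becomes
     (subreg:QI (reg:SI R) 0), for some register number R.  */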
3282  if (GET_CODE (op) == SUBREG)
3283    {
3284      enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
3285      int final_offset = byte + SUBREG_BYTE (op);
3286      rtx new;
3287
3288      if (outermode == innermostmode
3289	  && byte == 0 && SUBREG_BYTE (op) == 0)
3290	return SUBREG_REG (op);
3291
3292      /* The SUBREG_BYTE represents the offset, as if the value were stored
3293	 in memory.  An irritating exception is the paradoxical subreg, where
3294	 we define SUBREG_BYTE to be 0.  On big-endian machines, this
3295	 value should be negative.  For a moment, undo this exception.  */
3296      if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
3297	{
3298	  int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
3299	  if (WORDS_BIG_ENDIAN)
3300	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3301	  if (BYTES_BIG_ENDIAN)
3302	    final_offset += difference % UNITS_PER_WORD;
3303	}
3304      if (SUBREG_BYTE (op) == 0
3305	  && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
3306	{
3307	  int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
3308	  if (WORDS_BIG_ENDIAN)
3309	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3310	  if (BYTES_BIG_ENDIAN)
3311	    final_offset += difference % UNITS_PER_WORD;
3312	}
3313
3314      /* See whether resulting subreg will be paradoxical.  */
3315      if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
3316	{
3317	  /* In nonparadoxical subregs we can't handle negative offsets.  */
3318	  if (final_offset < 0)
3319	    return NULL_RTX;
3320	  /* Bail out in case resulting subreg would be incorrect.  */
3321	  if (final_offset % GET_MODE_SIZE (outermode)
3322	      || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
3323	    return NULL_RTX;
3324	}
3325      else
3326	{
3327	  int offset = 0;
3328	  int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
3329
3330	  /* In a paradoxical subreg, see if we are still looking at the lower
3331	     part.  If so, our SUBREG_BYTE will be 0.  */
3332	  if (WORDS_BIG_ENDIAN)
3333	    offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3334	  if (BYTES_BIG_ENDIAN)
3335	    offset += difference % UNITS_PER_WORD;
3336	  if (offset == final_offset)
3337	    final_offset = 0;
3338	  else
3339	    return NULL_RTX;
3340	}
3341
3342      /* Recurse for further possible simplifications.  */
3343      new = simplify_subreg (outermode, SUBREG_REG (op),
3344			     GET_MODE (SUBREG_REG (op)),
3345			     final_offset);
3346      if (new)
3347	return new;
3348      return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
3349    }
3350
3351  /* SUBREG of a hard register => just change the register number
3352     and/or mode.  If the hard register is not valid in that mode,
3353     suppress this simplification.  If the hard register is the stack,
3354     frame, or argument pointer, leave this as a SUBREG.  */
3355
3356  if (REG_P (op)
3357      && (! REG_FUNCTION_VALUE_P (op)
3358	  || ! rtx_equal_function_value_matters)
3359      && REGNO (op) < FIRST_PSEUDO_REGISTER
3360#ifdef CANNOT_CHANGE_MODE_CLASS
3361      && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
3362	    && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
3363	    && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
3364#endif
3365      && ((reload_completed && !frame_pointer_needed)
3366	  || (REGNO (op) != FRAME_POINTER_REGNUM
3367#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
3368	      && REGNO (op) != HARD_FRAME_POINTER_REGNUM
3369#endif
3370	     ))
3371#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
3372      && REGNO (op) != ARG_POINTER_REGNUM
3373#endif
3374      && REGNO (op) != STACK_POINTER_REGNUM
3375      && subreg_offset_representable_p (REGNO (op), innermode,
3376					byte, outermode))
3377    {
3378      rtx tem = gen_rtx_SUBREG (outermode, op, byte);
3379      int final_regno = subreg_hard_regno (tem, 0);
3380
3381      /* ??? We do allow it if the current REG is not valid for
3382	 its mode.  This is a kludge to work around how float/complex
3383	 arguments are passed on 32-bit SPARC and should be fixed.  */
3384      if (HARD_REGNO_MODE_OK (final_regno, outermode)
3385	  || ! HARD_REGNO_MODE_OK (REGNO (op), innermode))
3386	{
3387	  rtx x = gen_rtx_REG_offset (op, outermode, final_regno, byte);
3388
3389	  /* Propagate the original regno.  We don't have any way to specify
3390	     the offset inside the original regno, so do so only for the
3391	     lowpart.  The information is used only by alias analysis, which
3392	     cannot grok partial registers anyway.  */
3393
3394	  if (subreg_lowpart_offset (outermode, innermode) == byte)
3395	    ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
3396	  return x;
3397	}
3398    }
3399
3400  /* If we have a SUBREG of a register that we are replacing and we are
3401     replacing it with a MEM, make a new MEM and try replacing the
3402     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
3403     or if we would be widening it.  */
3404
3405  if (GET_CODE (op) == MEM
3406      && ! mode_dependent_address_p (XEXP (op, 0))
3407      /* Allow splitting of volatile memory references in case we don't
3408         have an instruction to move the whole thing.  */
3409      && (! MEM_VOLATILE_P (op)
3410	  || ! have_insn_for (SET, innermode))
3411      && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
3412    return adjust_address_nv (op, outermode, byte);
3413
3414  /* Handle complex values represented as CONCAT
3415     of real and imaginary part.  */
3416  if (GET_CODE (op) == CONCAT)
3417    {
3418      int is_realpart = byte < (unsigned int) GET_MODE_UNIT_SIZE (innermode);
3419      rtx part = is_realpart ? XEXP (op, 0) : XEXP (op, 1);
3420      unsigned int final_offset;
3421      rtx res;
3422
3423      final_offset = byte % (GET_MODE_UNIT_SIZE (innermode));
3424      res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
3425      if (res)
3426	return res;
3427      /* We can at least simplify it by referring directly to the relevant part.  */
3428      return gen_rtx_SUBREG (outermode, part, final_offset);
3429    }
3430
3431  return NULL_RTX;
3432}
3433
3434/* Make a SUBREG operation or equivalent if it folds.  */
3435
3436rtx
3437simplify_gen_subreg (enum machine_mode outermode, rtx op,
3438		     enum machine_mode innermode, unsigned int byte)
3439{
3440  rtx new;
3441  /* Little bit of sanity checking.  */
3442  if (innermode == VOIDmode || outermode == VOIDmode
3443      || innermode == BLKmode || outermode == BLKmode)
3444    abort ();
3445
3446  if (GET_MODE (op) != innermode
3447      && GET_MODE (op) != VOIDmode)
3448    abort ();
3449
3450  if (byte % GET_MODE_SIZE (outermode)
3451      || byte >= GET_MODE_SIZE (innermode))
3452    abort ();
3453
3454  if (GET_CODE (op) == QUEUED)
3455    return NULL_RTX;
3456
3457  new = simplify_subreg (outermode, op, innermode, byte);
3458  if (new)
3459    return new;
3460
3461  if (GET_CODE (op) == SUBREG || GET_MODE (op) == VOIDmode)
3462    return NULL_RTX;
3463
3464  return gen_rtx_SUBREG (outermode, op, byte);
3465}

3466/* Simplify X, an rtx expression.
3467
3468   Return the simplified expression or NULL if no simplifications
3469   were possible.
3470
3471   This is the preferred entry point into the simplification routines;
3472   however, we still allow passes to call the more specific routines.
3473
3474   Right now GCC has three (yes, three) major bodies of RTL simplification
3475   code that need to be unified.
3476
3477	1. fold_rtx in cse.c.  This code uses various CSE specific
3478	   information to aid in RTL simplification.
3479
3480	2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
3481	   it uses combine specific information to aid in RTL
3482	   simplification.
3483
3484	3. The routines in this file.
3485
3486
3487   Long term we want to only have one body of simplification code; to
3488   get to that state I recommend the following steps:
3489
3490	1. Pore over fold_rtx & simplify_rtx and move any simplifications
3491	   which do not depend on pass-specific state into these routines.
3492
3493	2. As code is moved by #1, change fold_rtx & simplify_rtx to
3494	   use this routine whenever possible.
3495
3496	3. Allow for pass dependent state to be provided to these
3497	   routines and add simplifications based on the pass dependent
3498	   state.  Remove code from cse.c & combine.c that becomes
3499	   redundant/dead.
3500
3501    It will take time, but ultimately the compiler will be easier to
3502    maintain and improve.  It's totally silly that when we add a
3503    simplification it needs to be added to 4 places (3 for RTL
3504    simplification and 1 for tree simplification).  */
3505
3506rtx
3507simplify_rtx (rtx x)
3508{
3509  enum rtx_code code = GET_CODE (x);
3510  enum machine_mode mode = GET_MODE (x);
3511  rtx temp;
3512
3513  switch (GET_RTX_CLASS (code))
3514    {
3515    case '1':
3516      return simplify_unary_operation (code, mode,
3517				       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
3518    case 'c':
3519      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
3520	return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
3521
3522      /* Fall through....  */
3523
3524    case '2':
3525      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
3526
3527    case '3':
3528    case 'b':
3529      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
3530					 XEXP (x, 0), XEXP (x, 1),
3531					 XEXP (x, 2));
3532
3533    case '<':
3534      temp = simplify_relational_operation (code,
3535					    ((GET_MODE (XEXP (x, 0))
3536					      != VOIDmode)
3537					     ? GET_MODE (XEXP (x, 0))
3538					     : GET_MODE (XEXP (x, 1))),
3539					    XEXP (x, 0), XEXP (x, 1));
3540#ifdef FLOAT_STORE_FLAG_VALUE
3541      if (temp != 0 && GET_MODE_CLASS (mode) == MODE_FLOAT)
3542	{
3543	  if (temp == const0_rtx)
3544	    temp = CONST0_RTX (mode);
3545	  else
3546	    temp = CONST_DOUBLE_FROM_REAL_VALUE (FLOAT_STORE_FLAG_VALUE (mode),
3547						 mode);
3548	}
3549#endif
3550      return temp;
3551
3552    case 'x':
3553      if (code == SUBREG)
3554	return simplify_gen_subreg (mode, SUBREG_REG (x),
3555				    GET_MODE (SUBREG_REG (x)),
3556				    SUBREG_BYTE (x));
3557      if (code == CONSTANT_P_RTX)
3558	{
3559	  if (CONSTANT_P (XEXP (x, 0)))
3560	    return const1_rtx;
3561	}
3562      break;
3563
3564    case 'o':
3565      if (code == LO_SUM)
3566	{
3567	  /* Convert (lo_sum (high FOO) FOO) to FOO.  */
3568	  if (GET_CODE (XEXP (x, 0)) == HIGH
3569	      && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
3570	    return XEXP (x, 1);
3571	}
3572      break;
3573
3574    default:
3575      break;
3576    }
3577  return NULL;
3578}
3579