simplify-rtx.c revision 146895
/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */


#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "flags.h"
#include "real.h"
#include "insn-config.h"
#include "recog.h"
#include "function.h"
#include "expr.h"
#include "toplev.h"
#include "output.h"
#include "ggc.h"
#include "target.h"

/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
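/* For illustration: with low = (unsigned HOST_WIDE_INT) -5, the cast to
   HOST_WIDE_INT is negative, so HWI_SIGN_EXTEND (low) is -1 and the pair
   (low, high) denotes -5; with low = 5 the macro yields 0 and the pair
   denotes 5.  */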

static rtx neg_const_int (enum machine_mode, rtx);
static int simplify_plus_minus_op_data_cmp (const void *, const void *);
static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx,
				rtx, int);
static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
				  unsigned int);
static bool associative_constant_p (rtx);
static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
					   rtx, rtx);

/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */
static rtx
neg_const_int (enum machine_mode mode, rtx i)
{
  return gen_int_mode (- INTVAL (i), mode);
}
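/* For example, in QImode the most negative value is -128; its negation,
   +128, does not fit in the mode, so gen_int_mode truncates it back:
   neg_const_int (QImode, GEN_INT (-128)) yields (const_int -128).  */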


/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
		     rtx op1)
{
  rtx tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == 'c'
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Handle addition and subtraction specially.  Otherwise, just form
     the operation.  */

  if (code == PLUS || code == MINUS)
    {
      tem = simplify_plus_minus (code, mode, op0, op1, 1);
      if (tem)
	return tem;
    }

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
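/* For example, a commutative operation is canonicalized with the constant
   second, and constant operands fold outright (assuming SImode values fit
   in a HOST_WIDE_INT):

     simplify_gen_binary (PLUS, SImode, const1_rtx, x)
       yields (plus x (const_int 1)) for a register rtx X, and
     simplify_gen_binary (PLUS, SImode, GEN_INT (2), GEN_INT (3))
       yields (const_int 5).  */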

/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  enum machine_mode cmode;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
	{
	  REAL_VALUE_TYPE d;

	  REAL_VALUE_FROM_CONST_DOUBLE (d, c);
	  return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
	}
      return x;

    default:
      return x;
    }

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = (*targetm.delegitimize_address) (addr);

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  if (GET_CODE (addr) != SYMBOL_REF
      || ! CONSTANT_POOL_ADDRESS_P (addr))
    return x;

  c = get_pool_constant (addr);
  cmode = get_pool_mode (addr);

  /* If we're accessing the constant in a different mode than it was
     originally stored, attempt to fix that up via subreg simplifications.
     If that fails we have no choice but to return the original memory.  */
  if (cmode != GET_MODE (x))
    {
      c = simplify_subreg (GET_MODE (x), c, cmode, 0);
      return c ? c : x;
    }

  return c;
}
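/* For example, if X is a MEM whose address is a SYMBOL_REF into the
   constant pool and the pool entry holds the DFmode constant 1.0, the
   result is that (const_double 1.0) itself; if X reads the pool entry in
   a different mode, a simplified subreg of the pooled constant is
   returned instead, or X itself when even that fails.  */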

/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
		    enum machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}

/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
		      enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
					      op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}

/* Likewise, for relational operations.
   CMP_MODE specifies the mode in which the comparison is done.  */

rtx
simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
			 enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op0);
  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op1);

  if (cmp_mode != VOIDmode
      && ! VECTOR_MODE_P (mode))
    {
      tem = simplify_relational_operation (code, cmp_mode, op0, op1);

      if (tem)
	{
#ifdef FLOAT_STORE_FLAG_VALUE
	  if (GET_MODE_CLASS (mode) == MODE_FLOAT)
	    {
	      REAL_VALUE_TYPE val;
	      if (tem == const0_rtx)
		return CONST0_RTX (mode);
	      if (tem != const_true_rtx)
		abort ();
	      val = FLOAT_STORE_FLAG_VALUE (mode);
	      return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
	    }
#endif
	  return tem;
	}
    }

  /* For the following tests, ensure const0_rtx is op1.  */
  if (swap_commutative_operands_p (op0, op1)
      || (op0 == const0_rtx && op1 != const0_rtx))
    tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    return simplify_gen_relational (code, mode, VOIDmode,
				    XEXP (op0, 0), XEXP (op0, 1));

  /* If op0 is a comparison, extract the comparison arguments from it.  */
  if (GET_RTX_CLASS (GET_CODE (op0)) == '<' && op1 == const0_rtx)
    {
      if (code == NE)
	{
	  if (GET_MODE (op0) == mode)
	    return op0;
	  return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
					  XEXP (op0, 0), XEXP (op0, 1));
	}
      else if (code == EQ)
	{
	  enum rtx_code new = reversed_comparison_code (op0, NULL_RTX);
	  if (new != UNKNOWN)
	    return simplify_gen_relational (new, mode, VOIDmode,
					    XEXP (op0, 0), XEXP (op0, 1));
	}
    }

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
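/* For example, a test of a comparison result against zero reuses or
   reverses the inner comparison instead of nesting two comparisons;
   assuming CMP is an SImode (eq X Y) whose reverse is known,

     simplify_gen_relational (NE, SImode, VOIDmode, CMP, const0_rtx)
       yields CMP itself, and
     simplify_gen_relational (EQ, SImode, VOIDmode, CMP, const0_rtx)
       yields (ne X Y).  */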

/* Replace all occurrences of OLD in X with NEW and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, rtx old, rtx new)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);
  enum machine_mode op_mode;
  rtx op0, op1, op2;

  /* If X is OLD, return NEW.  Otherwise, if this is an expression, try
     to build a new expression substituting recursively.  If we can't do
     anything, return our input.  */

  if (x == old)
    return new;

  switch (GET_RTX_CLASS (code))
    {
    case '1':
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old, new);
      if (op0 == XEXP (x, 0))
	return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case '2':
    case 'c':
      op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
      op1 = simplify_replace_rtx (XEXP (x, 1), old, new);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case '<':
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_rtx (op0, old, new);
      op1 = simplify_replace_rtx (op1, old, new);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case '3':
    case 'b':
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old, new);
      op1 = simplify_replace_rtx (XEXP (x, 1), old, new);
      op2 = simplify_replace_rtx (XEXP (x, 2), old, new);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
	return x;
      if (op_mode == VOIDmode)
	op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case 'x':
      /* The only case we try to handle is a SUBREG.  */
      if (code == SUBREG)
	{
	  op0 = simplify_replace_rtx (SUBREG_REG (x), old, new);
	  if (op0 == SUBREG_REG (x))
	    return x;
	  op0 = simplify_gen_subreg (GET_MODE (x), op0,
				     GET_MODE (SUBREG_REG (x)),
				     SUBREG_BYTE (x));
	  return op0 ? op0 : x;
	}
      break;

    case 'o':
      if (code == MEM)
	{
	  op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
	  if (op0 == XEXP (x, 0))
	    return x;
	  return replace_equiv_address_nv (x, op0);
	}
      else if (code == LO_SUM)
	{
	  op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
	  op1 = simplify_replace_rtx (XEXP (x, 1), old, new);

	  /* (lo_sum (high x) x) -> x  */
	  if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
	    return op1;

	  if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	    return x;
	  return gen_rtx_LO_SUM (mode, op0, op1);
	}
      else if (code == REG)
	{
	  if (REG_P (old) && REGNO (x) == REGNO (old))
	    return new;
	}
      break;

    default:
      break;
    }
  return x;
}
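/* For example, substituting a constant for a register re-simplifies the
   enclosing expression bottom-up; with X = (plus:SI (reg:SI 60)
   (const_int 4)) and register 60 chosen only for illustration,

     simplify_replace_rtx (X, (reg:SI 60), GEN_INT (-4))

   folds all the way down to (const_int 0).  */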

/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
			  rtx op, enum machine_mode op_mode)
{
  unsigned int width = GET_MODE_BITSIZE (mode);
  rtx trueop = avoid_constant_pool_reference (op);

  if (code == VEC_DUPLICATE)
    {
      if (!VECTOR_MODE_P (mode))
	abort ();
      if (GET_MODE (trueop) != VOIDmode
	  && !VECTOR_MODE_P (GET_MODE (trueop))
	  && GET_MODE_INNER (mode) != GET_MODE (trueop))
	abort ();
      if (GET_MODE (trueop) != VOIDmode
	  && VECTOR_MODE_P (GET_MODE (trueop))
	  && GET_MODE_INNER (mode) != GET_MODE_INNER (GET_MODE (trueop)))
	abort ();
      if (GET_CODE (trueop) == CONST_INT || GET_CODE (trueop) == CONST_DOUBLE
	  || GET_CODE (trueop) == CONST_VECTOR)
	{
	  int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
	  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	  rtvec v = rtvec_alloc (n_elts);
	  unsigned int i;

	  if (GET_CODE (trueop) != CONST_VECTOR)
	    for (i = 0; i < n_elts; i++)
	      RTVEC_ELT (v, i) = trueop;
	  else
	    {
	      enum machine_mode inmode = GET_MODE (trueop);
	      int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
	      unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);

	      if (in_n_elts >= n_elts || n_elts % in_n_elts)
		abort ();
	      for (i = 0; i < n_elts; i++)
	        RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop, i % in_n_elts);
	    }
	  return gen_rtx_CONST_VECTOR (mode, v);
	}
    }
  else if (GET_CODE (op) == CONST)
    return simplify_unary_operation (code, mode, XEXP (op, 0), op_mode);

  if (VECTOR_MODE_P (mode) && GET_CODE (trueop) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode opmode = GET_MODE (trueop);
      int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
      unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      if (op_n_elts != n_elts)
	abort ();

      for (i = 0; i < n_elts; i++)
	{
	  rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
					    CONST_VECTOR_ELT (trueop, i),
					    GET_MODE_INNER (opmode));
	  if (!x)
	    return 0;
	  RTVEC_ELT (v, i) = x;
	}
      return gen_rtx_CONST_VECTOR (mode, v);
    }

  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && GET_MODE (trueop) == VOIDmode
      && (GET_CODE (trueop) == CONST_DOUBLE || GET_CODE (trueop) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (trueop) == CONST_INT)
	lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
      else
	lv = CONST_DOUBLE_LOW (trueop),  hv = CONST_DOUBLE_HIGH (trueop);

      REAL_VALUE_FROM_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && GET_MODE (trueop) == VOIDmode
	   && (GET_CODE (trueop) == CONST_DOUBLE
	       || GET_CODE (trueop) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (trueop) == CONST_INT)
	lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
      else
	lv = CONST_DOUBLE_LOW (trueop),  hv = CONST_DOUBLE_HIGH (trueop);

      if (op_mode == VOIDmode)
	{
	  /* We don't know how to interpret negative-looking numbers in
	     this case, so don't try to fold those.  */
	  if (hv < 0)
	    return 0;
	}
      else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
	;
      else
	hv = 0, lv &= GET_MODE_MASK (op_mode);

      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }

  if (GET_CODE (trueop) == CONST_INT
      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT arg0 = INTVAL (trueop);
      HOST_WIDE_INT val;

      switch (code)
	{
	case NOT:
	  val = ~ arg0;
	  break;

	case NEG:
	  val = - arg0;
	  break;

	case ABS:
	  val = (arg0 >= 0 ? arg0 : - arg0);
	  break;

	case FFS:
	  /* Don't use ffs here.  Instead, get low order bit and then its
	     number.  If arg0 is zero, this will return 0, as desired.  */
	  arg0 &= GET_MODE_MASK (mode);
	  val = exact_log2 (arg0 & (- arg0)) + 1;
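	  /* For example, arg0 == 12 (binary 1100) gives 12 & -12 == 4,
	     exact_log2 (4) == 2, and so FFS == 3.  For arg0 == 0 the
	     expression is exact_log2 (0) + 1 == -1 + 1 == 0.  */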
	  break;

	case CLZ:
	  arg0 &= GET_MODE_MASK (mode);
	  if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
	    ;
	  else
	    val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
	  break;

	case CTZ:
	  arg0 &= GET_MODE_MASK (mode);
	  if (arg0 == 0)
	    {
	      /* Even if the value at zero is undefined, we have to come
		 up with some replacement.  Seems good enough.  */
	      if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
		val = GET_MODE_BITSIZE (mode);
	    }
	  else
	    val = exact_log2 (arg0 & -arg0);
	  break;

	case POPCOUNT:
	  arg0 &= GET_MODE_MASK (mode);
	  val = 0;
	  while (arg0)
	    val++, arg0 &= arg0 - 1;
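	  /* For example, arg0 == 11 (binary 1011) is reduced in three
	     steps, 1011 -> 1010 -> 1000 -> 0, since each iteration of
	     arg0 &= arg0 - 1 clears the lowest set bit; val == 3.  */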
	  break;

	case PARITY:
	  arg0 &= GET_MODE_MASK (mode);
	  val = 0;
	  while (arg0)
	    val++, arg0 &= arg0 - 1;
	  val &= 1;
	  break;

	case TRUNCATE:
	  val = arg0;
	  break;

	case ZERO_EXTEND:
	  /* When zero-extending a CONST_INT, we need to know its
	     original mode.  */
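	  /* For example, zero-extending the QImode value (const_int -1)
	     into SImode masks it down to the low eight bits, giving
	     (const_int 255).  */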
	  if (op_mode == VOIDmode)
	    abort ();
	  if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
	    {
	      /* If we were really extending the mode,
		 we would have to distinguish between zero-extension
		 and sign-extension.  */
	      if (width != GET_MODE_BITSIZE (op_mode))
		abort ();
	      val = arg0;
	    }
	  else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
	    val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
	  else
	    return 0;
	  break;

	case SIGN_EXTEND:
	  if (op_mode == VOIDmode)
	    op_mode = mode;
	  if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
	    {
	      /* If we were really extending the mode,
		 we would have to distinguish between zero-extension
		 and sign-extension.  */
	      if (width != GET_MODE_BITSIZE (op_mode))
		abort ();
	      val = arg0;
	    }
	  else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
	    {
	      val
		= arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
	      if (val
		  & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
		val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
	    }
	  else
	    return 0;
	  break;

	case SQRT:
	case FLOAT_EXTEND:
	case FLOAT_TRUNCATE:
	case SS_TRUNCATE:
	case US_TRUNCATE:
	  return 0;

	default:
	  abort ();
	}

      val = trunc_int_for_mode (val, mode);

      return GEN_INT (val);
    }

  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
     for a DImode operation on a CONST_INT.  */
  else if (GET_MODE (trueop) == VOIDmode
	   && width <= HOST_BITS_PER_WIDE_INT * 2
	   && (GET_CODE (trueop) == CONST_DOUBLE
	       || GET_CODE (trueop) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, lv;
      HOST_WIDE_INT h1, hv;

      if (GET_CODE (trueop) == CONST_DOUBLE)
	l1 = CONST_DOUBLE_LOW (trueop), h1 = CONST_DOUBLE_HIGH (trueop);
      else
	l1 = INTVAL (trueop), h1 = HWI_SIGN_EXTEND (l1);

      switch (code)
	{
	case NOT:
	  lv = ~ l1;
	  hv = ~ h1;
	  break;

	case NEG:
	  neg_double (l1, h1, &lv, &hv);
	  break;

	case ABS:
	  if (h1 < 0)
	    neg_double (l1, h1, &lv, &hv);
	  else
	    lv = l1, hv = h1;
	  break;

	case FFS:
	  hv = 0;
	  if (l1 == 0)
	    {
	      if (h1 == 0)
		lv = 0;
	      else
		lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
	    }
	  else
	    lv = exact_log2 (l1 & -l1) + 1;
	  break;

	case CLZ:
	  hv = 0;
	  if (h1 != 0)
	    lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
	      - HOST_BITS_PER_WIDE_INT;
	  else if (l1 != 0)
	    lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
	  else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
	    lv = GET_MODE_BITSIZE (mode);
	  break;

	case CTZ:
	  hv = 0;
	  if (l1 != 0)
	    lv = exact_log2 (l1 & -l1);
	  else if (h1 != 0)
	    lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
	  else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
	    lv = GET_MODE_BITSIZE (mode);
	  break;

	case POPCOUNT:
	  hv = 0;
	  lv = 0;
	  while (l1)
	    lv++, l1 &= l1 - 1;
	  while (h1)
	    lv++, h1 &= h1 - 1;
	  break;

	case PARITY:
	  hv = 0;
	  lv = 0;
	  while (l1)
	    lv++, l1 &= l1 - 1;
	  while (h1)
	    lv++, h1 &= h1 - 1;
	  lv &= 1;
	  break;

	case TRUNCATE:
	  /* This is just a change-of-mode, so do nothing.  */
	  lv = l1, hv = h1;
	  break;

	case ZERO_EXTEND:
	  if (op_mode == VOIDmode)
	    abort ();

	  if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
	    return 0;

	  hv = 0;
	  lv = l1 & GET_MODE_MASK (op_mode);
	  break;

	case SIGN_EXTEND:
	  if (op_mode == VOIDmode
	      || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
	    return 0;
	  else
	    {
	      lv = l1 & GET_MODE_MASK (op_mode);
	      if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
		  && (lv & ((HOST_WIDE_INT) 1
			    << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
		lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);

	      hv = HWI_SIGN_EXTEND (lv);
	    }
	  break;

	case SQRT:
	  return 0;

	default:
	  return 0;
	}

      return immed_double_const (lv, hv, mode);
    }

  else if (GET_CODE (trueop) == CONST_DOUBLE
	   && GET_MODE_CLASS (mode) == MODE_FLOAT)
    {
      REAL_VALUE_TYPE d, t;
      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop);

      switch (code)
	{
	case SQRT:
	  if (HONOR_SNANS (mode) && real_isnan (&d))
	    return 0;
	  real_sqrt (&t, mode, &d);
	  d = t;
	  break;
	case ABS:
	  d = REAL_VALUE_ABS (d);
	  break;
	case NEG:
	  d = REAL_VALUE_NEGATE (d);
	  break;
	case FLOAT_TRUNCATE:
	  d = real_value_truncate (mode, d);
	  break;
	case FLOAT_EXTEND:
	  /* All this does is change the mode.  */
	  break;
	case FIX:
	  real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
	  break;
	case NOT:
	  {
	    long tmp[4];
	    int i;

	    real_to_target (tmp, &d, GET_MODE (trueop));
	    for (i = 0; i < 4; i++)
	      tmp[i] = ~tmp[i];
	    real_from_target (&d, tmp, mode);
	    break;
	  }
	default:
	  abort ();
	}
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }

  else if (GET_CODE (trueop) == CONST_DOUBLE
	   && GET_MODE_CLASS (GET_MODE (trueop)) == MODE_FLOAT
	   && GET_MODE_CLASS (mode) == MODE_INT
	   && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
    {
      /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
	 operators are intentionally left unspecified (to ease implementation
	 by target backends), for consistency, this routine implements the
	 same semantics for constant folding as used by the middle-end.  */

      HOST_WIDE_INT xh, xl, th, tl;
      REAL_VALUE_TYPE x, t;
      REAL_VALUE_FROM_CONST_DOUBLE (x, trueop);
      switch (code)
	{
	case FIX:
	  if (REAL_VALUE_ISNAN (x))
	    return const0_rtx;

	  /* Test against the signed upper bound.  */
	  if (width > HOST_BITS_PER_WIDE_INT)
	    {
	      th = ((unsigned HOST_WIDE_INT) 1
		    << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
	      tl = -1;
	    }
	  else
	    {
	      th = 0;
	      tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
	    }
	  real_from_integer (&t, VOIDmode, tl, th, 0);
	  if (REAL_VALUES_LESS (t, x))
	    {
	      xh = th;
	      xl = tl;
	      break;
	    }

	  /* Test against the signed lower bound.  */
	  if (width > HOST_BITS_PER_WIDE_INT)
	    {
	      th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
	      tl = 0;
	    }
	  else
	    {
	      th = -1;
	      tl = (HOST_WIDE_INT) -1 << (width - 1);
	    }
	  real_from_integer (&t, VOIDmode, tl, th, 0);
	  if (REAL_VALUES_LESS (x, t))
	    {
	      xh = th;
	      xl = tl;
	      break;
	    }
	  REAL_VALUE_TO_INT (&xl, &xh, x);
	  break;

	case UNSIGNED_FIX:
	  if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
	    return const0_rtx;

	  /* Test against the unsigned upper bound.  */
	  if (width == 2*HOST_BITS_PER_WIDE_INT)
	    {
	      th = -1;
	      tl = -1;
	    }
	  else if (width >= HOST_BITS_PER_WIDE_INT)
	    {
	      th = ((unsigned HOST_WIDE_INT) 1
		    << (width - HOST_BITS_PER_WIDE_INT)) - 1;
	      tl = -1;
	    }
	  else
	    {
	      th = 0;
	      tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
	    }
	  real_from_integer (&t, VOIDmode, tl, th, 1);
	  if (REAL_VALUES_LESS (t, x))
	    {
	      xh = th;
	      xl = tl;
	      break;
	    }

	  REAL_VALUE_TO_INT (&xl, &xh, x);
	  break;

	default:
	  abort ();
	}
      return immed_double_const (xl, xh, mode);
    }

  /* This was formerly used only for non-IEEE float.
     eggert@twinsun.com says it is safe for IEEE also.  */
  else
    {
      enum rtx_code reversed;
      rtx temp;

      /* There are some simplifications we can do even if the operands
	 aren't constant.  */
      switch (code)
	{
	case NOT:
	  /* (not (not X)) == X.  */
	  if (GET_CODE (op) == NOT)
	    return XEXP (op, 0);

	  /* (not (eq X Y)) == (ne X Y), etc.  */
	  if (GET_RTX_CLASS (GET_CODE (op)) == '<'
	      && (mode == BImode || STORE_FLAG_VALUE == -1)
	      && ((reversed = reversed_comparison_code (op, NULL_RTX))
		  != UNKNOWN))
	    return simplify_gen_relational (reversed, mode, VOIDmode,
					    XEXP (op, 0), XEXP (op, 1));

	  /* (not (plus X -1)) can become (neg X).  */
	  if (GET_CODE (op) == PLUS
	      && XEXP (op, 1) == constm1_rtx)
	    return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

	  /* Similarly, (not (neg X)) is (plus X -1).  */
	  if (GET_CODE (op) == NEG)
	    return plus_constant (XEXP (op, 0), -1);

	  /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
	  if (GET_CODE (op) == XOR
	      && GET_CODE (XEXP (op, 1)) == CONST_INT
	      && (temp = simplify_unary_operation (NOT, mode,
						   XEXP (op, 1),
						   mode)) != 0)
	    return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);


	  /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
	     operands other than 1, but that is not valid.  We could do a
	     similar simplification for (not (lshiftrt C X)) where C is
	     just the sign bit, but this doesn't seem common enough to
	     bother with.  */
	  if (GET_CODE (op) == ASHIFT
	      && XEXP (op, 0) == const1_rtx)
	    {
	      temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
	      return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
	    }

	  /* If STORE_FLAG_VALUE is -1, (not (comparison X Y)) can be done
	     by reversing the comparison code if valid.  */
	  if (STORE_FLAG_VALUE == -1
	      && GET_RTX_CLASS (GET_CODE (op)) == '<'
	      && (reversed = reversed_comparison_code (op, NULL_RTX))
		 != UNKNOWN)
	    return simplify_gen_relational (reversed, mode, VOIDmode,
					    XEXP (op, 0), XEXP (op, 1));

	  /* (not (ashiftrt foo C)) where C is the number of bits in FOO
	     minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
	     so we can perform the above simplification.  */

	  if (STORE_FLAG_VALUE == -1
	      && GET_CODE (op) == ASHIFTRT
	      && GET_CODE (XEXP (op, 1)) == CONST_INT
	      && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
	    return simplify_gen_relational (GE, mode, VOIDmode,
					    XEXP (op, 0), const0_rtx);
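	  /* For example, in SImode with STORE_FLAG_VALUE == -1,
	     (ashiftrt FOO 31) is -1 when FOO is negative and 0 otherwise,
	     so (not (ashiftrt FOO 31)) is (ge FOO (const_int 0)).  */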

	  break;

	case NEG:
	  /* (neg (neg X)) == X.  */
	  if (GET_CODE (op) == NEG)
	    return XEXP (op, 0);

	  /* (neg (plus X 1)) can become (not X).  */
	  if (GET_CODE (op) == PLUS
	      && XEXP (op, 1) == const1_rtx)
	    return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

	  /* Similarly, (neg (not X)) is (plus X 1).  */
	  if (GET_CODE (op) == NOT)
	    return plus_constant (XEXP (op, 0), 1);

	  /* (neg (minus X Y)) can become (minus Y X).  This transformation
	     isn't safe for modes with signed zeros, since if X and Y are
	     both +0, (minus Y X) is the same as (minus X Y).  If the
	     rounding mode is towards +infinity (or -infinity) then the two
	     expressions will be rounded differently.  */
	  if (GET_CODE (op) == MINUS
	      && !HONOR_SIGNED_ZEROS (mode)
	      && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	    return simplify_gen_binary (MINUS, mode, XEXP (op, 1),
					XEXP (op, 0));

	  if (GET_CODE (op) == PLUS
	      && !HONOR_SIGNED_ZEROS (mode)
	      && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	    {
	      /* (neg (plus A C)) is simplified to (minus -C A).  */
	      if (GET_CODE (XEXP (op, 1)) == CONST_INT
		  || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
		{
		  temp = simplify_unary_operation (NEG, mode, XEXP (op, 1),
						   mode);
		  if (temp)
		    return simplify_gen_binary (MINUS, mode, temp,
						XEXP (op, 0));
		}

	      /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
	      temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
	      return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
	    }

	  /* (neg (mult A B)) becomes (mult (neg A) B).
	     This works even for floating-point values.  */
	  if (GET_CODE (op) == MULT
	      && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	    {
	      temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
	      return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
	    }

	  /* NEG commutes with ASHIFT since it is multiplication.  Only do
	     this if we can then eliminate the NEG (e.g., if the operand
	     is a constant).  */
	  if (GET_CODE (op) == ASHIFT)
	    {
	      temp = simplify_unary_operation (NEG, mode, XEXP (op, 0),
					       mode);
	      if (temp)
		return simplify_gen_binary (ASHIFT, mode, temp,
					    XEXP (op, 1));
	    }

	  break;

	case SIGN_EXTEND:
	  /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
	     becomes just the MINUS if its mode is MODE.  This allows
	     folding switch statements on machines using casesi (such as
	     the VAX).  */
	  if (GET_CODE (op) == TRUNCATE
	      && GET_MODE (XEXP (op, 0)) == mode
	      && GET_CODE (XEXP (op, 0)) == MINUS
	      && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
	      && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
	    return XEXP (op, 0);

	  /* Check for a sign extension of a subreg of a promoted
	     variable, where the promotion is sign-extended, and the
	     target mode is the same as the variable's promotion.  */
	  if (GET_CODE (op) == SUBREG
	      && SUBREG_PROMOTED_VAR_P (op)
	      && ! SUBREG_PROMOTED_UNSIGNED_P (op)
	      && GET_MODE (XEXP (op, 0)) == mode)
	    return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
	  if (! POINTERS_EXTEND_UNSIGNED
	      && mode == Pmode && GET_MODE (op) == ptr_mode
	      && (CONSTANT_P (op)
		  || (GET_CODE (op) == SUBREG
		      && GET_CODE (SUBREG_REG (op)) == REG
		      && REG_POINTER (SUBREG_REG (op))
		      && GET_MODE (SUBREG_REG (op)) == Pmode)))
	    return convert_memory_address (Pmode, op);
#endif
	  break;

	case ZERO_EXTEND:
	  /* Check for a zero extension of a subreg of a promoted
	     variable, where the promotion is zero-extended, and the
	     target mode is the same as the variable's promotion.  */
	  if (GET_CODE (op) == SUBREG
	      && SUBREG_PROMOTED_VAR_P (op)
	      && SUBREG_PROMOTED_UNSIGNED_P (op)
	      && GET_MODE (XEXP (op, 0)) == mode)
	    return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
	  if (POINTERS_EXTEND_UNSIGNED > 0
	      && mode == Pmode && GET_MODE (op) == ptr_mode
	      && (CONSTANT_P (op)
		  || (GET_CODE (op) == SUBREG
		      && GET_CODE (SUBREG_REG (op)) == REG
		      && REG_POINTER (SUBREG_REG (op))
		      && GET_MODE (SUBREG_REG (op)) == Pmode)))
	    return convert_memory_address (Pmode, op);
#endif
	  break;

	default:
	  break;
	}

      return 0;
    }
}

/* Subroutine of simplify_associative_operation.  Return true if rtx OP
   is a suitable integer or floating point immediate constant.  */
static bool
associative_constant_p (rtx op)
{
  if (GET_CODE (op) == CONST_INT
      || GET_CODE (op) == CONST_DOUBLE)
    return true;
  op = avoid_constant_pool_reference (op);
  return GET_CODE (op) == CONST_INT
	 || GET_CODE (op) == CONST_DOUBLE;
}

/* Subroutine of simplify_binary_operation to simplify an associative
   binary operation CODE with result mode MODE, operating on OP0 and OP1.
   Return 0 if no simplification is possible.  */
static rtx
simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
				rtx op0, rtx op1)
{
  rtx tem;

  /* Simplify (x op c1) op c2 as x op (c1 op c2).  */
  if (GET_CODE (op0) == code
      && associative_constant_p (op1)
      && associative_constant_p (XEXP (op0, 1)))
    {
      tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (! tem)
	return tem;
      return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
    }

  /* Simplify (x op c1) op (y op c2) as (x op y) op (c1 op c2).  */
  if (GET_CODE (op0) == code
      && GET_CODE (op1) == code
      && associative_constant_p (XEXP (op0, 1))
      && associative_constant_p (XEXP (op1, 1)))
    {
      rtx c = simplify_binary_operation (code, mode,
					 XEXP (op0, 1), XEXP (op1, 1));
      if (! c)
	return 0;
      tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
      return simplify_gen_binary (code, mode, tem, c);
    }

  /* Canonicalize (x op c) op y as (x op y) op c.  */
  if (GET_CODE (op0) == code
      && associative_constant_p (XEXP (op0, 1)))
    {
      tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
      return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  /* Canonicalize x op (y op c) as (x op y) op c.  */
  if (GET_CODE (op1) == code
      && associative_constant_p (XEXP (op1, 1)))
    {
      tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
      return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
    }

  return 0;
}
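/* For example, in an integer mode these rules fold the constants of
   nested operations:

     (plus (plus X (const_int 4)) (const_int 8))
       becomes (plus X (const_int 12)), and
     (plus (plus X (const_int 4)) (plus Y (const_int 8)))
       becomes (plus (plus X Y) (const_int 12)).  */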

/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */
rtx
simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
			   rtx op0, rtx op1)
{
  HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_BITSIZE (mode);
  rtx tem;
  rtx trueop0 = avoid_constant_pool_reference (op0);
  rtx trueop1 = avoid_constant_pool_reference (op1);

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */

  if (GET_RTX_CLASS (code) == '<')
    abort ();

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == 'c'
      && swap_commutative_operands_p (trueop0, trueop1))
    {
      tem = op0, op0 = op1, op1 = tem;
      tem = trueop0, trueop0 = trueop1, trueop1 = tem;
    }

  if (VECTOR_MODE_P (mode)
      && code != VEC_CONCAT
      && GET_CODE (trueop0) == CONST_VECTOR
      && GET_CODE (trueop1) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode op0mode = GET_MODE (trueop0);
      int op0_elt_size = GET_MODE_SIZE (GET_MODE_INNER (op0mode));
      unsigned op0_n_elts = (GET_MODE_SIZE (op0mode) / op0_elt_size);
      enum machine_mode op1mode = GET_MODE (trueop1);
      int op1_elt_size = GET_MODE_SIZE (GET_MODE_INNER (op1mode));
      unsigned op1_n_elts = (GET_MODE_SIZE (op1mode) / op1_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      if (op0_n_elts != n_elts || op1_n_elts != n_elts)
	abort ();

      for (i = 0; i < n_elts; i++)
	{
	  rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
					     CONST_VECTOR_ELT (trueop0, i),
					     CONST_VECTOR_ELT (trueop1, i));
	  if (!x)
	    return 0;
	  RTVEC_ELT (v, i) = x;
	}

      return gen_rtx_CONST_VECTOR (mode, v);
    }

  if (GET_MODE_CLASS (mode) == MODE_FLOAT
      && GET_CODE (trueop0) == CONST_DOUBLE
      && GET_CODE (trueop1) == CONST_DOUBLE
      && mode == GET_MODE (op0) && mode == GET_MODE (op1))
    {
      if (code == AND
	  || code == IOR
	  || code == XOR)
	{
	  long tmp0[4];
	  long tmp1[4];
	  REAL_VALUE_TYPE r;
	  int i;

	  real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
			  GET_MODE (op0));
	  real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
			  GET_MODE (op1));
	  for (i = 0; i < 4; i++)
	    {
	      if (code == AND)
		tmp0[i] &= tmp1[i];
	      else if (code == IOR)
		tmp0[i] |= tmp1[i];
	      else if (code == XOR)
		tmp0[i] ^= tmp1[i];
	      else
		abort ();
	    }
	   real_from_target (&r, tmp0, mode);
	   return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
	}
      else
	{
	  REAL_VALUE_TYPE f0, f1, value;

	  REAL_VALUE_FROM_CONST_DOUBLE (f0, trueop0);
	  REAL_VALUE_FROM_CONST_DOUBLE (f1, trueop1);
	  f0 = real_value_truncate (mode, f0);
	  f1 = real_value_truncate (mode, f1);

	  if (HONOR_SNANS (mode)
	      && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
	    return 0;

	  if (code == DIV
	      && REAL_VALUES_EQUAL (f1, dconst0)
	      && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
	    return 0;

	  if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
	      && flag_trapping_math
	      && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
	    {
	      int s0 = REAL_VALUE_NEGATIVE (f0);
	      int s1 = REAL_VALUE_NEGATIVE (f1);

	      switch (code)
		{
		case PLUS:
		  /* Inf + -Inf = NaN plus exception.  */
		  if (s0 != s1)
		    return 0;
		  break;
		case MINUS:
		  /* Inf - Inf = NaN plus exception.  */
		  if (s0 == s1)
		    return 0;
		  break;
		case DIV:
		  /* Inf / Inf = NaN plus exception.  */
		  return 0;
		default:
		  break;
		}
	    }

	  if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
	      && flag_trapping_math
	      && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
		  || (REAL_VALUE_ISINF (f1)
		      && REAL_VALUES_EQUAL (f0, dconst0))))
	    /* Inf * 0 = NaN plus exception.  */
	    return 0;

	  REAL_ARITHMETIC (value, rtx_to_tree_code (code), f0, f1);

	  value = real_value_truncate (mode, value);
	  return CONST_DOUBLE_FROM_REAL_VALUE (value, mode);
	}
    }

  /* We can fold some multi-word operations.  */
  if (GET_MODE_CLASS (mode) == MODE_INT
      && width == HOST_BITS_PER_WIDE_INT * 2
      && (GET_CODE (trueop0) == CONST_DOUBLE
	  || GET_CODE (trueop0) == CONST_INT)
      && (GET_CODE (trueop1) == CONST_DOUBLE
	  || GET_CODE (trueop1) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, l2, lv;
      HOST_WIDE_INT h1, h2, hv;

      if (GET_CODE (trueop0) == CONST_DOUBLE)
	l1 = CONST_DOUBLE_LOW (trueop0), h1 = CONST_DOUBLE_HIGH (trueop0);
      else
	l1 = INTVAL (trueop0), h1 = HWI_SIGN_EXTEND (l1);

      if (GET_CODE (trueop1) == CONST_DOUBLE)
	l2 = CONST_DOUBLE_LOW (trueop1), h2 = CONST_DOUBLE_HIGH (trueop1);
      else
	l2 = INTVAL (trueop1), h2 = HWI_SIGN_EXTEND (l2);

      switch (code)
	{
	case MINUS:
	  /* A - B == A + (-B).  */
	  neg_double (l2, h2, &lv, &hv);
	  l2 = lv, h2 = hv;

	  /* Fall through....  */

	case PLUS:
	  add_double (l1, h1, l2, h2, &lv, &hv);
	  break;

	case MULT:
	  mul_double (l1, h1, l2, h2, &lv, &hv);
	  break;

	case DIV:  case MOD:   case UDIV:  case UMOD:
	  /* We'd need to include tree.h to do this and it doesn't seem worth
	     it.  */
	  return 0;

	case AND:
	  lv = l1 & l2, hv = h1 & h2;
	  break;

	case IOR:
	  lv = l1 | l2, hv = h1 | h2;
	  break;

	case XOR:
	  lv = l1 ^ l2, hv = h1 ^ h2;
	  break;

	case SMIN:
	  if (h1 < h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      < (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case SMAX:
	  if (h1 > h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      > (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case UMIN:
	  if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      < (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case UMAX:
	  if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      > (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case LSHIFTRT:   case ASHIFTRT:
	case ASHIFT:
	case ROTATE:     case ROTATERT:
#ifdef SHIFT_COUNT_TRUNCATED
	  if (SHIFT_COUNT_TRUNCATED)
	    l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
#endif

	  if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
	    return 0;

	  if (code == LSHIFTRT || code == ASHIFTRT)
	    rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
			   code == ASHIFTRT);
	  else if (code == ASHIFT)
	    lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
	  else if (code == ROTATE)
	    lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
	  else /* code == ROTATERT */
	    rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
	  break;

	default:
	  return 0;
	}

      return immed_double_const (lv, hv, mode);
    }

  if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
      || width > HOST_BITS_PER_WIDE_INT || width == 0)
    {
      /* Even if we can't compute a constant result,
	 there are some cases worth simplifying.  */

      switch (code)
	{
	case PLUS:
	  /* Maybe simplify x + 0 to x.  The two expressions are equivalent
	     when x is NaN, infinite, or finite and nonzero.  They aren't
	     when x is -0 and the rounding mode is not towards -infinity,
	     since (-0) + 0 is then 0.  */
	  if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
	    return op0;

	  /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
	     transformations are safe even for IEEE.  */
	  if (GET_CODE (op0) == NEG)
	    return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
	  else if (GET_CODE (op1) == NEG)
	    return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

	  /* (~a) + 1 -> -a */
	  if (INTEGRAL_MODE_P (mode)
	      && GET_CODE (op0) == NOT
	      && trueop1 == const1_rtx)
	    return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);

	  /* Handle both-operands-constant cases.  We can only add
	     CONST_INTs to constants since the sum of relocatable symbols
	     can't be handled by most assemblers.  Don't add CONST_INT
	     to CONST_INT since overflow won't be computed properly if wider
	     than HOST_BITS_PER_WIDE_INT.  */

	  if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
	      && GET_CODE (op1) == CONST_INT)
	    return plus_constant (op0, INTVAL (op1));
	  else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
		   && GET_CODE (op0) == CONST_INT)
	    return plus_constant (op1, INTVAL (op0));

	  /* See if this is something like X * C + X or vice versa or
	     if the multiplication is written as a shift.  If so, we can
	     distribute and make a new multiply or shift, or maybe just
	     have X (if the coefficients sum to 1, as in X * 2 + (-X)).
	     But don't make a real multiply if we didn't have one before.  */

	  if (! FLOAT_MODE_P (mode))
	    {
	      HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
	      rtx lhs = op0, rhs = op1;
	      int had_mult = 0;

	      if (GET_CODE (lhs) == NEG)
		coeff0 = -1, lhs = XEXP (lhs, 0);
	      else if (GET_CODE (lhs) == MULT
		       && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
		{
		  coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
		  had_mult = 1;
		}
	      else if (GET_CODE (lhs) == ASHIFT
		       && GET_CODE (XEXP (lhs, 1)) == CONST_INT
		       && INTVAL (XEXP (lhs, 1)) >= 0
		       && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
		{
		  coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
		  lhs = XEXP (lhs, 0);
		}

	      if (GET_CODE (rhs) == NEG)
		coeff1 = -1, rhs = XEXP (rhs, 0);
	      else if (GET_CODE (rhs) == MULT
		       && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
		{
		  coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
		  had_mult = 1;
		}
	      else if (GET_CODE (rhs) == ASHIFT
		       && GET_CODE (XEXP (rhs, 1)) == CONST_INT
		       && INTVAL (XEXP (rhs, 1)) >= 0
		       && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
		{
		  coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
		  rhs = XEXP (rhs, 0);
		}

	      if (rtx_equal_p (lhs, rhs))
		{
		  tem = simplify_gen_binary (MULT, mode, lhs,
					GEN_INT (coeff0 + coeff1));
		  return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
		}
	    }
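	  /* For example, (plus (mult X (const_int 2)) X) has coeff0 == 2
	     and coeff1 == 1 and distributes to (mult X (const_int 3)),
	     whereas (plus (ashift X (const_int 2)) X) would produce
	     (mult X (const_int 5)) and is rejected, because no real MULT
	     was present in the input.  */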
1539
1540	  /* If one of the operands is a PLUS or a MINUS, see if we can
1541	     simplify this by the associative law.
1542	     Don't use the associative law for floating point.
1543	     The inaccuracy makes it nonassociative,
1544	     and subtle programs can break if operations are associated.  */
1545
1546	  if (INTEGRAL_MODE_P (mode)
1547	      && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
1548		  || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
1549		  || (GET_CODE (op0) == CONST
1550		      && GET_CODE (XEXP (op0, 0)) == PLUS)
1551		  || (GET_CODE (op1) == CONST
1552		      && GET_CODE (XEXP (op1, 0)) == PLUS))
1553	      && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
1554	    return tem;
1555
1556	  /* Reassociate floating point addition only when the user
1557	     specifies unsafe math optimizations.  */
1558	  if (FLOAT_MODE_P (mode)
1559	      && flag_unsafe_math_optimizations)
1560	    {
1561	      tem = simplify_associative_operation (code, mode, op0, op1);
1562	      if (tem)
1563		return tem;
1564	    }
1565	  break;
1566
1567	case COMPARE:
1568#ifdef HAVE_cc0
1569	  /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
1570	     using cc0, in which case we want to leave it as a COMPARE
1571	     so we can distinguish it from a register-register-copy.
1572
1573	     In IEEE floating point, x-0 is not the same as x.  */
1574
1575	  if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1576	       || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
1577	      && trueop1 == CONST0_RTX (mode))
1578	    return op0;
1579#endif
1580
1581	  /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
1582	  if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
1583	       || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
1584	      && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
1585	    {
1586	      rtx xop00 = XEXP (op0, 0);
1587	      rtx xop10 = XEXP (op1, 0);
1588
1589#ifdef HAVE_cc0
1590	      if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
1591#else
1592	      if (GET_CODE (xop00) == REG && GET_CODE (xop10) == REG
1593		  && GET_MODE (xop00) == GET_MODE (xop10)
1594		  && REGNO (xop00) == REGNO (xop10)
1595		  && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
1596		  && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
1597#endif
1598		return xop00;
1599	    }
1600	  break;
1601
1602	case MINUS:
1603	  /* We can't assume x-x is 0 even with non-IEEE floating point,
1604	     but since it is zero except in very strange circumstances, we
1605	     will treat it as zero with -funsafe-math-optimizations.  */
1606	  if (rtx_equal_p (trueop0, trueop1)
1607	      && ! side_effects_p (op0)
1608	      && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
1609	    return CONST0_RTX (mode);
1610
1611	  /* Change subtraction from zero into negation.  (0 - x) is the
1612	     same as -x when x is NaN, infinite, or finite and nonzero.
1613	     But if the mode has signed zeros, and does not round towards
1614	     -infinity, then 0 - 0 is 0, not -0.  */
1615	  if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
1616	    return simplify_gen_unary (NEG, mode, op1, mode);
1617
1618	  /* (-1 - a) is ~a.  */
1619	  if (trueop0 == constm1_rtx)
1620	    return simplify_gen_unary (NOT, mode, op1, mode);
1621
1622	  /* Subtracting 0 has no effect unless the mode has signed zeros
1623	     and supports rounding towards -infinity.  In such a case,
1624	     0 - 0 is -0.  */
1625	  if (!(HONOR_SIGNED_ZEROS (mode)
1626		&& HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1627	      && trueop1 == CONST0_RTX (mode))
1628	    return op0;
1629
1630	  /* See if this is something like X * C - X or vice versa or
1631	     if the multiplication is written as a shift.  If so, we can
1632	     distribute and make a new multiply, shift, or maybe just
1633	     have X (if C is 2 in the example above).  But don't make
1634	     real multiply if we didn't have one before.  */
1635
1636	  if (! FLOAT_MODE_P (mode))
1637	    {
1638	      HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
1639	      rtx lhs = op0, rhs = op1;
1640	      int had_mult = 0;
1641
1642	      if (GET_CODE (lhs) == NEG)
1643		coeff0 = -1, lhs = XEXP (lhs, 0);
1644	      else if (GET_CODE (lhs) == MULT
1645		       && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1646		{
1647		  coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
1648		  had_mult = 1;
1649		}
1650	      else if (GET_CODE (lhs) == ASHIFT
1651		       && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1652		       && INTVAL (XEXP (lhs, 1)) >= 0
1653		       && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1654		{
1655		  coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1656		  lhs = XEXP (lhs, 0);
1657		}
1658
1659	      if (GET_CODE (rhs) == NEG)
1660		coeff1 = - 1, rhs = XEXP (rhs, 0);
1661	      else if (GET_CODE (rhs) == MULT
1662		       && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1663		{
1664		  coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
1665		  had_mult = 1;
1666		}
1667	      else if (GET_CODE (rhs) == ASHIFT
1668		       && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1669		       && INTVAL (XEXP (rhs, 1)) >= 0
1670		       && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1671		{
1672		  coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1673		  rhs = XEXP (rhs, 0);
1674		}
1675
1676	      if (rtx_equal_p (lhs, rhs))
1677		{
1678		  tem = simplify_gen_binary (MULT, mode, lhs,
1679					     GEN_INT (coeff0 - coeff1));
1680		  return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
1681		}
1682	    }
1683
1684	  /* (a - (-b)) -> (a + b).  True even for IEEE.  */
1685	  if (GET_CODE (op1) == NEG)
1686	    return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
1687
1688	  /* (-x - c) may be simplified as (-c - x).  */
1689	  if (GET_CODE (op0) == NEG
1690	      && (GET_CODE (op1) == CONST_INT
1691		  || GET_CODE (op1) == CONST_DOUBLE))
1692	    {
1693	      tem = simplify_unary_operation (NEG, mode, op1, mode);
1694	      if (tem)
1695		return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
1696	    }
1697
1698	  /* If one of the operands is a PLUS or a MINUS, see if we can
1699	     simplify this by the associative law.
1700	     Don't use the associative law for floating point.
1701	     The inaccuracy makes it nonassociative,
1702	     and subtle programs can break if operations are associated.  */
1703
1704	  if (INTEGRAL_MODE_P (mode)
1705	      && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
1706		  || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
1707		  || (GET_CODE (op0) == CONST
1708		      && GET_CODE (XEXP (op0, 0)) == PLUS)
1709		  || (GET_CODE (op1) == CONST
1710		      && GET_CODE (XEXP (op1, 0)) == PLUS))
1711	      && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
1712	    return tem;
1713
1714	  /* Don't let a relocatable value get a negative coeff.  */
1715	  if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
1716	    return simplify_gen_binary (PLUS, mode,
1717					op0,
1718					neg_const_int (mode, op1));
1719
1720	  /* (x - (x & y)) -> (x & ~y) */
1721	  if (GET_CODE (op1) == AND)
1722	    {
1723	      if (rtx_equal_p (op0, XEXP (op1, 0)))
1724		{
1725		  tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
1726					    GET_MODE (XEXP (op1, 1)));
1727		  return simplify_gen_binary (AND, mode, op0, tem);
1728		}
1729	      if (rtx_equal_p (op0, XEXP (op1, 1)))
1730		{
1731		  tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
1732					    GET_MODE (XEXP (op1, 0)));
1733		  return simplify_gen_binary (AND, mode, op0, tem);
1734		}
1735	    }
1736	  break;
1737
1738	case MULT:
1739	  if (trueop1 == constm1_rtx)
1740	    return simplify_gen_unary (NEG, mode, op0, mode);
1741
1742	  /* Maybe simplify x * 0 to 0.  The reduction is not valid if
1743	     x is NaN, since x * 0 is then also NaN.  Nor is it valid
1744	     when the mode has signed zeros, since multiplying a negative
1745	     number by 0 will give -0, not 0.  */
1746	  if (!HONOR_NANS (mode)
1747	      && !HONOR_SIGNED_ZEROS (mode)
1748	      && trueop1 == CONST0_RTX (mode)
1749	      && ! side_effects_p (op0))
1750	    return op1;
1751
1752	  /* In IEEE floating point, x*1 is not equivalent to x for
1753	     signalling NaNs.  */
1754	  if (!HONOR_SNANS (mode)
1755	      && trueop1 == CONST1_RTX (mode))
1756	    return op0;
1757
1758	  /* Convert multiply by constant power of two into shift unless
1759	     we are still generating RTL.  This test is a kludge.  */
1760	  if (GET_CODE (trueop1) == CONST_INT
1761	      && (val = exact_log2 (INTVAL (trueop1))) >= 0
1762	      /* If the mode is larger than the host word size, and the
1763		 uppermost bit is set, then this isn't a power of two due
1764		 to implicit sign extension.  */
1765	      && (width <= HOST_BITS_PER_WIDE_INT
1766		  || val != HOST_BITS_PER_WIDE_INT - 1)
1767	      && ! rtx_equal_function_value_matters)
1768	    return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
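	  /* For instance (illustrative only):
	       (mult:SI (reg:SI 60) (const_int 8))
	       => (ashift:SI (reg:SI 60) (const_int 3))
	     since exact_log2 (8) == 3.  */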
1769
1770	  /* x*2 is x+x and x*(-1) is -x.  */
1771	  if (GET_CODE (trueop1) == CONST_DOUBLE
1772	      && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
1773	      && GET_MODE (op0) == mode)
1774	    {
1775	      REAL_VALUE_TYPE d;
1776	      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1777
1778	      if (REAL_VALUES_EQUAL (d, dconst2))
1779		return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
1780
1781	      if (REAL_VALUES_EQUAL (d, dconstm1))
1782		return simplify_gen_unary (NEG, mode, op0, mode);
1783	    }
1784
1785	  /* Reassociate multiplication, but for floating point MULTs
1786	     only when the user specifies unsafe math optimizations.  */
1787	  if (! FLOAT_MODE_P (mode)
1788	      || flag_unsafe_math_optimizations)
1789	    {
1790	      tem = simplify_associative_operation (code, mode, op0, op1);
1791	      if (tem)
1792		return tem;
1793	    }
1794	  break;
1795
1796	case IOR:
1797	  if (trueop1 == const0_rtx)
1798	    return op0;
1799	  if (GET_CODE (trueop1) == CONST_INT
1800	      && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1801	          == GET_MODE_MASK (mode)))
1802	    return op1;
1803	  if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1804	    return op0;
1805	  /* A | (~A) -> -1 */
1806	  if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1807	       || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1808	      && ! side_effects_p (op0)
1809	      && GET_MODE_CLASS (mode) != MODE_CC)
1810	    return constm1_rtx;
1811	  tem = simplify_associative_operation (code, mode, op0, op1);
1812	  if (tem)
1813	    return tem;
1814	  break;
1815
1816	case XOR:
1817	  if (trueop1 == const0_rtx)
1818	    return op0;
1819	  if (GET_CODE (trueop1) == CONST_INT
1820	      && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1821		  == GET_MODE_MASK (mode)))
1822	    return simplify_gen_unary (NOT, mode, op0, mode);
1823	  if (trueop0 == trueop1 && ! side_effects_p (op0)
1824	      && GET_MODE_CLASS (mode) != MODE_CC)
1825	    return const0_rtx;
1826	  tem = simplify_associative_operation (code, mode, op0, op1);
1827	  if (tem)
1828	    return tem;
1829	  break;
1830
1831	case AND:
1832	  if (trueop1 == const0_rtx && ! side_effects_p (op0))
1833	    return const0_rtx;
1834	  if (GET_CODE (trueop1) == CONST_INT
1835	      && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1836		  == GET_MODE_MASK (mode)))
1837	    return op0;
1838	  if (trueop0 == trueop1 && ! side_effects_p (op0)
1839	      && GET_MODE_CLASS (mode) != MODE_CC)
1840	    return op0;
1841	  /* A & (~A) -> 0 */
1842	  if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1843	       || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1844	      && ! side_effects_p (op0)
1845	      && GET_MODE_CLASS (mode) != MODE_CC)
1846	    return const0_rtx;
1847	  tem = simplify_associative_operation (code, mode, op0, op1);
1848	  if (tem)
1849	    return tem;
1850	  break;
1851
1852	case UDIV:
1853	  /* Convert divide by power of two into shift (divide by 1 handled
1854	     below).  */
1855	  if (GET_CODE (trueop1) == CONST_INT
1856	      && (arg1 = exact_log2 (INTVAL (trueop1))) > 0)
1857	    return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (arg1));
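	  /* E.g. (illustrative): (udiv:SI (reg:SI 60) (const_int 4))
	     => (lshiftrt:SI (reg:SI 60) (const_int 2)).  */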
1858
1859	  /* Fall through....  */
1860
1861	case DIV:
1862	  if (trueop1 == CONST1_RTX (mode))
1863	    {
1864	      /* On some platforms DIV uses narrower mode than its
1865		 operands.  */
1866	      rtx x = gen_lowpart_common (mode, op0);
1867	      if (x)
1868		return x;
1869	      else if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
1870		return gen_lowpart_SUBREG (mode, op0);
1871	      else
1872		return op0;
1873	    }
1874
1875	  /* Maybe change 0 / x to 0.  This transformation isn't safe for
1876	     modes with NaNs, since 0 / 0 will then be NaN rather than 0.
1877	     Nor is it safe for modes with signed zeros, since dividing
1878	     0 by a negative number gives -0, not 0.  */
1879	  if (!HONOR_NANS (mode)
1880	      && !HONOR_SIGNED_ZEROS (mode)
1881	      && trueop0 == CONST0_RTX (mode)
1882	      && ! side_effects_p (op1))
1883	    return op0;
1884
1885	  /* Change division by a constant into multiplication.  Only do
1886	     this with -funsafe-math-optimizations.  */
1887	  else if (GET_CODE (trueop1) == CONST_DOUBLE
1888		   && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
1889		   && trueop1 != CONST0_RTX (mode)
1890		   && flag_unsafe_math_optimizations)
1891	    {
1892	      REAL_VALUE_TYPE d;
1893	      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1894
1895	      if (! REAL_VALUES_EQUAL (d, dconst0))
1896		{
1897		  REAL_ARITHMETIC (d, rtx_to_tree_code (DIV), dconst1, d);
1898		  tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
1899		  return simplify_gen_binary (MULT, mode, op0, tem);
1900		}
1901	    }
1902	  break;
1903
1904	case UMOD:
1905	  /* Handle modulus by power of two (mod with 1 handled below).  */
1906	  if (GET_CODE (trueop1) == CONST_INT
1907	      && exact_log2 (INTVAL (trueop1)) > 0)
1908	    return simplify_gen_binary (AND, mode, op0,
1909					GEN_INT (INTVAL (op1) - 1));
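	  /* E.g. (illustrative): (umod:SI (reg:SI 60) (const_int 8))
	     => (and:SI (reg:SI 60) (const_int 7)).  */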
1910
1911	  /* Fall through....  */
1912
1913	case MOD:
1914	  if ((trueop0 == const0_rtx || trueop1 == const1_rtx)
1915	      && ! side_effects_p (op0) && ! side_effects_p (op1))
1916	    return const0_rtx;
1917	  break;
1918
1919	case ROTATERT:
1920	case ROTATE:
1921	case ASHIFTRT:
1922	  /* Rotating ~0 always results in ~0.  */
1923	  if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
1924	      && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
1925	      && ! side_effects_p (op1))
1926	    return op0;
1927
1928	  /* Fall through....  */
1929
1930	case ASHIFT:
1931	case LSHIFTRT:
1932	  if (trueop1 == const0_rtx)
1933	    return op0;
1934	  if (trueop0 == const0_rtx && ! side_effects_p (op1))
1935	    return op0;
1936	  break;
1937
1938	case SMIN:
1939	  if (width <= HOST_BITS_PER_WIDE_INT
1940	      && GET_CODE (trueop1) == CONST_INT
1941	      && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width - 1)
1942	      && ! side_effects_p (op0))
1943	    return op1;
1944	  if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1945	    return op0;
1946	  tem = simplify_associative_operation (code, mode, op0, op1);
1947	  if (tem)
1948	    return tem;
1949	  break;
1950
1951	case SMAX:
1952	  if (width <= HOST_BITS_PER_WIDE_INT
1953	      && GET_CODE (trueop1) == CONST_INT
1954	      && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
1955		  == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
1956	      && ! side_effects_p (op0))
1957	    return op1;
1958	  if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1959	    return op0;
1960	  tem = simplify_associative_operation (code, mode, op0, op1);
1961	  if (tem)
1962	    return tem;
1963	  break;
1964
1965	case UMIN:
1966	  if (trueop1 == const0_rtx && ! side_effects_p (op0))
1967	    return op1;
1968	  if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1969	    return op0;
1970	  tem = simplify_associative_operation (code, mode, op0, op1);
1971	  if (tem)
1972	    return tem;
1973	  break;
1974
1975	case UMAX:
1976	  if (trueop1 == constm1_rtx && ! side_effects_p (op0))
1977	    return op1;
1978	  if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1979	    return op0;
1980	  tem = simplify_associative_operation (code, mode, op0, op1);
1981	  if (tem)
1982	    return tem;
1983	  break;
1984
1985	case SS_PLUS:
1986	case US_PLUS:
1987	case SS_MINUS:
1988	case US_MINUS:
1989	  /* ??? There are simplifications that can be done.  */
1990	  return 0;
1991
1992	case VEC_SELECT:
1993	  if (!VECTOR_MODE_P (mode))
1994	    {
1995	      if (!VECTOR_MODE_P (GET_MODE (trueop0))
1996		  || (mode
1997		      != GET_MODE_INNER (GET_MODE (trueop0)))
1998		  || GET_CODE (trueop1) != PARALLEL
1999		  || XVECLEN (trueop1, 0) != 1
2000		  || GET_CODE (XVECEXP (trueop1, 0, 0)) != CONST_INT)
2001		abort ();
2002
2003	      if (GET_CODE (trueop0) == CONST_VECTOR)
2004		return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP (trueop1, 0, 0)));
2005	    }
2006	  else
2007	    {
2008	      if (!VECTOR_MODE_P (GET_MODE (trueop0))
2009		  || (GET_MODE_INNER (mode)
2010		      != GET_MODE_INNER (GET_MODE (trueop0)))
2011		  || GET_CODE (trueop1) != PARALLEL)
2012		abort ();
2013
2014	      if (GET_CODE (trueop0) == CONST_VECTOR)
2015		{
2016		  int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2017		  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2018		  rtvec v = rtvec_alloc (n_elts);
2019		  unsigned int i;
2020
2021		  if (XVECLEN (trueop1, 0) != (int) n_elts)
2022		    abort ();
2023		  for (i = 0; i < n_elts; i++)
2024		    {
2025		      rtx x = XVECEXP (trueop1, 0, i);
2026
2027		      if (GET_CODE (x) != CONST_INT)
2028			abort ();
2029		      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, INTVAL (x));
2030		    }
2031
2032		  return gen_rtx_CONST_VECTOR (mode, v);
2033		}
2034	    }
2035	  return 0;

2036	case VEC_CONCAT:
2037	  {
2038	    enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
2039					  ? GET_MODE (trueop0)
2040					  : GET_MODE_INNER (mode));
2041	    enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
2042					  ? GET_MODE (trueop1)
2043					  : GET_MODE_INNER (mode));
2044
2045	    if (!VECTOR_MODE_P (mode)
2046		|| (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
2047		    != GET_MODE_SIZE (mode)))
2048	      abort ();
2049
2050	    if ((VECTOR_MODE_P (op0_mode)
2051		 && (GET_MODE_INNER (mode)
2052		     != GET_MODE_INNER (op0_mode)))
2053		|| (!VECTOR_MODE_P (op0_mode)
2054		    && GET_MODE_INNER (mode) != op0_mode))
2055	      abort ();
2056
2057	    if ((VECTOR_MODE_P (op1_mode)
2058		 && (GET_MODE_INNER (mode)
2059		     != GET_MODE_INNER (op1_mode)))
2060		|| (!VECTOR_MODE_P (op1_mode)
2061		    && GET_MODE_INNER (mode) != op1_mode))
2062	      abort ();
2063
2064	    if ((GET_CODE (trueop0) == CONST_VECTOR
2065		 || GET_CODE (trueop0) == CONST_INT
2066		 || GET_CODE (trueop0) == CONST_DOUBLE)
2067		&& (GET_CODE (trueop1) == CONST_VECTOR
2068		    || GET_CODE (trueop1) == CONST_INT
2069		    || GET_CODE (trueop1) == CONST_DOUBLE))
2070	      {
2071		int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2072		unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2073		rtvec v = rtvec_alloc (n_elts);
2074		unsigned int i;
2075		unsigned in_n_elts = 1;
2076
2077		if (VECTOR_MODE_P (op0_mode))
2078		  in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
2079		for (i = 0; i < n_elts; i++)
2080		  {
2081		    if (i < in_n_elts)
2082		      {
2083			if (!VECTOR_MODE_P (op0_mode))
2084			  RTVEC_ELT (v, i) = trueop0;
2085			else
2086			  RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
2087		      }
2088		    else
2089		      {
2090			if (!VECTOR_MODE_P (op1_mode))
2091			  RTVEC_ELT (v, i) = trueop1;
2092			else
2093			  RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
2094							       i - in_n_elts);
2095		      }
2096		  }
2097
2098		return gen_rtx_CONST_VECTOR (mode, v);
2099	      }
2100	  }
2101	  return 0;
2102
2103	default:
2104	  abort ();
2105	}
2106
2107      return 0;
2108    }
2109
2110  /* Get the integer argument values in two forms:
2111     zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S.  */
2112
2113  arg0 = INTVAL (trueop0);
2114  arg1 = INTVAL (trueop1);
2115
2116  if (width < HOST_BITS_PER_WIDE_INT)
2117    {
2118      arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
2119      arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
2120
2121      arg0s = arg0;
2122      if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
2123	arg0s |= ((HOST_WIDE_INT) (-1) << width);
2124
2125      arg1s = arg1;
2126      if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
2127	arg1s |= ((HOST_WIDE_INT) (-1) << width);
2128    }
2129  else
2130    {
2131      arg0s = arg0;
2132      arg1s = arg1;
2133    }
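  /* Worked example (illustrative): for QImode, width == 8, so a
     CONST_INT of 0xff gives arg0 == 0xff (zero-extended) but
     arg0s == -1 (sign-extended); the signed folds below use the
     ARG0S/ARG1S forms, the unsigned ones ARG0/ARG1.  */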
2134
2135  /* Compute the value of the arithmetic.  */
2136
2137  switch (code)
2138    {
2139    case PLUS:
2140      val = arg0s + arg1s;
2141      break;
2142
2143    case MINUS:
2144      val = arg0s - arg1s;
2145      break;
2146
2147    case MULT:
2148      val = arg0s * arg1s;
2149      break;
2150
2151    case DIV:
2152      if (arg1s == 0
2153	  || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2154	      && arg1s == -1))
2155	return 0;
2156      val = arg0s / arg1s;
2157      break;
2158
2159    case MOD:
2160      if (arg1s == 0
2161	  || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2162	      && arg1s == -1))
2163	return 0;
2164      val = arg0s % arg1s;
2165      break;
2166
2167    case UDIV:
2168      if (arg1 == 0
2169	  || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2170	      && arg1s == -1))
2171	return 0;
2172      val = (unsigned HOST_WIDE_INT) arg0 / arg1;
2173      break;
2174
2175    case UMOD:
2176      if (arg1 == 0
2177	  || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
2178	      && arg1s == -1))
2179	return 0;
2180      val = (unsigned HOST_WIDE_INT) arg0 % arg1;
2181      break;
2182
2183    case AND:
2184      val = arg0 & arg1;
2185      break;
2186
2187    case IOR:
2188      val = arg0 | arg1;
2189      break;
2190
2191    case XOR:
2192      val = arg0 ^ arg1;
2193      break;
2194
2195    case LSHIFTRT:
2196      /* If shift count is undefined, don't fold it; let the machine do
2197	 what it wants.  But truncate it if the machine will do that.  */
2198      if (arg1 < 0)
2199	return 0;
2200
2201#ifdef SHIFT_COUNT_TRUNCATED
2202      if (SHIFT_COUNT_TRUNCATED)
2203	arg1 %= width;
2204#endif
2205
2206      val = ((unsigned HOST_WIDE_INT) arg0) >> arg1;
2207      break;
2208
2209    case ASHIFT:
2210      if (arg1 < 0)
2211	return 0;
2212
2213#ifdef SHIFT_COUNT_TRUNCATED
2214      if (SHIFT_COUNT_TRUNCATED)
2215	arg1 %= width;
2216#endif
2217
2218      val = ((unsigned HOST_WIDE_INT) arg0) << arg1;
2219      break;
2220
2221    case ASHIFTRT:
2222      if (arg1 < 0)
2223	return 0;
2224
2225#ifdef SHIFT_COUNT_TRUNCATED
2226      if (SHIFT_COUNT_TRUNCATED)
2227	arg1 %= width;
2228#endif
2229
2230      val = arg0s >> arg1;
2231
2232      /* The bootstrap compiler may not have sign-extended the right shift.
2233	 Manually extend the sign to ensure the bootstrap cc matches gcc.  */
2234      if (arg0s < 0 && arg1 > 0)
2235	val |= ((HOST_WIDE_INT) -1) << (HOST_BITS_PER_WIDE_INT - arg1);
2236
2237      break;
2238
2239    case ROTATERT:
2240      if (arg1 < 0)
2241	return 0;
2242
2243      arg1 %= width;
2244      val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
2245	     | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
2246      break;
2247
2248    case ROTATE:
2249      if (arg1 < 0)
2250	return 0;
2251
2252      arg1 %= width;
2253      val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
2254	     | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
2255      break;
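      /* Worked example (illustrative): for QImode, arg0 == 0x96 and
	 arg1 == 4 give val == 0x960 | 0x9 == 0x969, which
	 trunc_int_for_mode below reduces to the expected 0x69.  */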
2256
2257    case COMPARE:
2258      /* Do nothing here.  */
2259      return 0;
2260
2261    case SMIN:
2262      val = arg0s <= arg1s ? arg0s : arg1s;
2263      break;
2264
2265    case UMIN:
2266      val = ((unsigned HOST_WIDE_INT) arg0
2267	     <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
2268      break;
2269
2270    case SMAX:
2271      val = arg0s > arg1s ? arg0s : arg1s;
2272      break;
2273
2274    case UMAX:
2275      val = ((unsigned HOST_WIDE_INT) arg0
2276	     > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
2277      break;
2278
2279    case SS_PLUS:
2280    case US_PLUS:
2281    case SS_MINUS:
2282    case US_MINUS:
2283      /* ??? There are simplifications that can be done.  */
2284      return 0;
2285
2286    default:
2287      abort ();
2288    }
2289
2290  val = trunc_int_for_mode (val, mode);
2291
2292  return GEN_INT (val);
2293}
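
/* A standalone sketch (illustrative only, guarded out of the build) of
   the width-limited constant arithmetic performed above, with plain
   'long long' standing in for HOST_WIDE_INT.  */
#if 0
#include <stdio.h>

static long long
fold_plus_in_width (long long a, long long b, int width)
{
  unsigned long long mask = ((unsigned long long) 1 << width) - 1;
  long long val = (a + b) & mask;

  /* Sign-extend from WIDTH bits, as trunc_int_for_mode does.  */
  if (val & ((unsigned long long) 1 << (width - 1)))
    val |= (long long) ~mask;
  return val;
}

int
main (void)
{
  /* QImode-style example: 100 + 100 wraps to -56.  */
  printf ("%lld\n", fold_plus_in_width (100, 100, 8));
  return 0;
}
#endif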
2294
2295/* Simplify a PLUS or MINUS, at least one of whose operands may be another
2296   PLUS or MINUS.
2297
2298   Rather than testing for specific cases, we do this by a brute-force method
2299   and do all possible simplifications until no more changes occur.  Then
2300   we rebuild the operation.
2301
2302   If FORCE is true, then always generate the rtx.  This is used to
2303   canonicalize expressions emitted from simplify_gen_binary.  Note that this
2304   can still fail if the rtx is too complex.  It won't fail just because
2305   the result is not 'simpler' than the input, however.  */
2306
2307struct simplify_plus_minus_op_data
2308{
2309  rtx op;
2310  int neg;
2311};
2312
2313static int
2314simplify_plus_minus_op_data_cmp (const void *p1, const void *p2)
2315{
2316  const struct simplify_plus_minus_op_data *d1 = p1;
2317  const struct simplify_plus_minus_op_data *d2 = p2;
2318
2319  return (commutative_operand_precedence (d2->op)
2320	  - commutative_operand_precedence (d1->op));
2321}
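
/* Note: since the comparison above subtracts D1's precedence from D2's,
   qsort orders the array by decreasing commutative_operand_precedence,
   which moves constants (the lowest-precedence operands) to the end.  */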
2322
2323static rtx
2324simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
2325		     rtx op1, int force)
2326{
2327  struct simplify_plus_minus_op_data ops[8];
2328  rtx result, tem;
2329  int n_ops = 2, input_ops = 2, input_consts = 0, n_consts;
2330  int first, changed;
2331  int i, j;
2332
2333  memset (ops, 0, sizeof ops);
2334
2335  /* Set up the two operands and then expand them until nothing has been
2336     changed.  If we run out of room in our array, give up; this should
2337     almost never happen.  */
2338
2339  ops[0].op = op0;
2340  ops[0].neg = 0;
2341  ops[1].op = op1;
2342  ops[1].neg = (code == MINUS);
2343
2344  do
2345    {
2346      changed = 0;
2347
2348      for (i = 0; i < n_ops; i++)
2349	{
2350	  rtx this_op = ops[i].op;
2351	  int this_neg = ops[i].neg;
2352	  enum rtx_code this_code = GET_CODE (this_op);
2353
2354	  switch (this_code)
2355	    {
2356	    case PLUS:
2357	    case MINUS:
2358	      if (n_ops == 7)
2359		return NULL_RTX;
2360
2361	      ops[n_ops].op = XEXP (this_op, 1);
2362	      ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
2363	      n_ops++;
2364
2365	      ops[i].op = XEXP (this_op, 0);
2366	      input_ops++;
2367	      changed = 1;
2368	      break;
2369
2370	    case NEG:
2371	      ops[i].op = XEXP (this_op, 0);
2372	      ops[i].neg = ! this_neg;
2373	      changed = 1;
2374	      break;
2375
2376	    case CONST:
2377	      if (n_ops < 7
2378		  && GET_CODE (XEXP (this_op, 0)) == PLUS
2379		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
2380		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
2381		{
2382		  ops[i].op = XEXP (XEXP (this_op, 0), 0);
2383		  ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
2384		  ops[n_ops].neg = this_neg;
2385		  n_ops++;
2386		  input_consts++;
2387		  changed = 1;
2388		}
2389	      break;
2390
2391	    case NOT:
2392	      /* ~a -> (-a - 1) */
2393	      if (n_ops != 7)
2394		{
2395		  ops[n_ops].op = constm1_rtx;
2396		  ops[n_ops++].neg = this_neg;
2397		  ops[i].op = XEXP (this_op, 0);
2398		  ops[i].neg = !this_neg;
2399		  changed = 1;
2400		}
2401	      break;
2402
2403	    case CONST_INT:
2404	      if (this_neg)
2405		{
2406		  ops[i].op = neg_const_int (mode, this_op);
2407		  ops[i].neg = 0;
2408		  changed = 1;
2409		}
2410	      break;
2411
2412	    default:
2413	      break;
2414	    }
2415	}
2416    }
2417  while (changed);
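
  /* For example (illustrative), starting from (minus a (minus b c)):
     the initial entries are {a, +} and {(minus b c), -}; the inner
     MINUS is then split into {b, -} and {c, +}, leaving the flat
     list a - b + c for the combination loop below.  */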
2418
2419	  /* If we only have two operands, we can't do anything, unless FORCE.  */
2420  if (n_ops <= 2 && !force)
2421    return NULL_RTX;
2422
2423  /* Count the number of CONSTs we didn't split above.  */
2424  for (i = 0; i < n_ops; i++)
2425    if (GET_CODE (ops[i].op) == CONST)
2426      input_consts++;
2427
2428  /* Now simplify each pair of operands until nothing changes.  The first
2429     time through just simplify constants against each other.  */
2430
2431  first = 1;
2432  do
2433    {
2434      changed = first;
2435
2436      for (i = 0; i < n_ops - 1; i++)
2437	for (j = i + 1; j < n_ops; j++)
2438	  {
2439	    rtx lhs = ops[i].op, rhs = ops[j].op;
2440	    int lneg = ops[i].neg, rneg = ops[j].neg;
2441
2442	    if (lhs != 0 && rhs != 0
2443		&& (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs))))
2444	      {
2445		enum rtx_code ncode = PLUS;
2446
2447		if (lneg != rneg)
2448		  {
2449		    ncode = MINUS;
2450		    if (lneg)
2451		      tem = lhs, lhs = rhs, rhs = tem;
2452		  }
2453		else if (swap_commutative_operands_p (lhs, rhs))
2454		  tem = lhs, lhs = rhs, rhs = tem;
2455
2456		tem = simplify_binary_operation (ncode, mode, lhs, rhs);
2457
2458		/* Reject "simplifications" that just wrap the two
2459		   arguments in a CONST.  Failure to do so can result
2460		   in infinite recursion with simplify_binary_operation
2461		   when it calls us to simplify CONST operations.  */
2462		if (tem
2463		    && ! (GET_CODE (tem) == CONST
2464			  && GET_CODE (XEXP (tem, 0)) == ncode
2465			  && XEXP (XEXP (tem, 0), 0) == lhs
2466			  && XEXP (XEXP (tem, 0), 1) == rhs)
2467		    /* Don't allow -x + -1 -> ~x simplifications in the
2468		       first pass.  This allows us the chance to combine
2469		       the -1 with other constants.  */
2470		    && ! (first
2471			  && GET_CODE (tem) == NOT
2472			  && XEXP (tem, 0) == rhs))
2473		  {
2474		    lneg &= rneg;
2475		    if (GET_CODE (tem) == NEG)
2476		      tem = XEXP (tem, 0), lneg = !lneg;
2477		    if (GET_CODE (tem) == CONST_INT && lneg)
2478		      tem = neg_const_int (mode, tem), lneg = 0;
2479
2480		    ops[i].op = tem;
2481		    ops[i].neg = lneg;
2482		    ops[j].op = NULL_RTX;
2483		    changed = 1;
2484		  }
2485	      }
2486	  }
2487
2488      first = 0;
2489    }
2490  while (changed);
2491
2492  /* Pack all the operands to the lower-numbered entries.  */
2493  for (i = 0, j = 0; j < n_ops; j++)
2494    if (ops[j].op)
2495      ops[i++] = ops[j];
2496  n_ops = i;
2497
2498  /* Sort the operations based on swap_commutative_operands_p.  */
2499  qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp);
2500
2501  /* Create (minus -C X) instead of (neg (const (plus X C))).  */
2502  if (n_ops == 2
2503      && GET_CODE (ops[1].op) == CONST_INT
2504      && CONSTANT_P (ops[0].op)
2505      && ops[0].neg)
2506    return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
2507
2508  /* We suppressed creation of trivial CONST expressions in the
2509     combination loop to avoid recursion.  Create one manually now.
2510     The combination loop should have ensured that there is exactly
2511     one CONST_INT, and the sort will have ensured that it is last
2512     in the array and that any other constant will be next-to-last.  */
2513
2514  if (n_ops > 1
2515      && GET_CODE (ops[n_ops - 1].op) == CONST_INT
2516      && CONSTANT_P (ops[n_ops - 2].op))
2517    {
2518      rtx value = ops[n_ops - 1].op;
2519      if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
2520	value = neg_const_int (mode, value);
2521      ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
2522      n_ops--;
2523    }
2524
2525  /* Count the number of CONSTs that we generated.  */
2526  n_consts = 0;
2527  for (i = 0; i < n_ops; i++)
2528    if (GET_CODE (ops[i].op) == CONST)
2529      n_consts++;
2530
2531  /* Give up if we didn't reduce the number of operands we had.  Make
2532     sure we count a CONST as two operands.  If we have the same
2533     number of operands, but have made more CONSTs than before, this
2534     is also an improvement, so accept it.  */
2535  if (!force
2536      && (n_ops + n_consts > input_ops
2537	  || (n_ops + n_consts == input_ops && n_consts <= input_consts)))
2538    return NULL_RTX;
2539
2540  /* Put a non-negated operand first, if possible.  */
2541
2542  for (i = 0; i < n_ops && ops[i].neg; i++)
2543    continue;
2544  if (i == n_ops)
2545    ops[0].op = gen_rtx_NEG (mode, ops[0].op);
2546  else if (i != 0)
2547    {
2548      tem = ops[0].op;
2549      ops[0] = ops[i];
2550      ops[i].op = tem;
2551      ops[i].neg = 1;
2552    }
2553
2554  /* Now make the result by performing the requested operations.  */
2555  result = ops[0].op;
2556  for (i = 1; i < n_ops; i++)
2557    result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
2558			     mode, result, ops[i].op);
2559
2560  return result;
2561}
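
/* For example (illustrative): given (plus (minus (reg A) (reg B)) (reg B)),
   the expansion loop above produces {A, +B, -B}; the pairwise pass folds
   +B and -B to (const_int 0), a second pass folds A + 0 to A, and the
   final result is just (reg A).  */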
2562
2563/* Like simplify_binary_operation except used for relational operators.
2564   MODE is the mode of the operands, not that of the result.  If MODE
2565   is VOIDmode, both operands must also be VOIDmode and we compare the
2566   operands in "infinite precision".
2567
2568   If no simplification is possible, this function returns zero.  Otherwise,
2569   it returns either const_true_rtx or const0_rtx.  */
2570
2571rtx
2572simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
2573			       rtx op0, rtx op1)
2574{
2575  int equal, op0lt, op0ltu, op1lt, op1ltu;
2576  rtx tem;
2577  rtx trueop0;
2578  rtx trueop1;
2579
2580  if (mode == VOIDmode
2581      && (GET_MODE (op0) != VOIDmode
2582	  || GET_MODE (op1) != VOIDmode))
2583    abort ();
2584
2585  /* If op0 is a compare, extract the comparison arguments from it.  */
2586  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
2587    op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
2588
2589  trueop0 = avoid_constant_pool_reference (op0);
2590  trueop1 = avoid_constant_pool_reference (op1);
2591
2592  /* We can't simplify MODE_CC values since we don't know what the
2593     actual comparison is.  */
2594  if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
2595    return 0;
2596
2597  /* Make sure the constant is second.  */
2598  if (swap_commutative_operands_p (trueop0, trueop1))
2599    {
2600      tem = op0, op0 = op1, op1 = tem;
2601      tem = trueop0, trueop0 = trueop1, trueop1 = tem;
2602      code = swap_condition (code);
2603    }
2604
2605  /* For integer comparisons of A and B maybe we can simplify A - B and can
2606     then simplify a comparison of that with zero.  If A and B are both either
2607     a register or a CONST_INT, this can't help; testing for these cases will
2608     prevent infinite recursion here and speed things up.
2609
2610     If CODE is an unsigned comparison, then we can never do this optimization,
2611     because it gives an incorrect result if the subtraction wraps around zero.
2612     ANSI C defines unsigned operations such that they never overflow, and
2613	     thus such cases cannot be ignored.  */
2614
2615  if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
2616      && ! ((GET_CODE (op0) == REG || GET_CODE (trueop0) == CONST_INT)
2617	    && (GET_CODE (op1) == REG || GET_CODE (trueop1) == CONST_INT))
2618      && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
2619      /* We cannot do this for == or != if tem is a nonzero address.  */
2620      && ((code != EQ && code != NE) || ! nonzero_address_p (tem))
2621      && code != GTU && code != GEU && code != LTU && code != LEU)
2622    return simplify_relational_operation (signed_condition (code),
2623					  mode, tem, const0_rtx);
2624
2625  if (flag_unsafe_math_optimizations && code == ORDERED)
2626    return const_true_rtx;
2627
2628  if (flag_unsafe_math_optimizations && code == UNORDERED)
2629    return const0_rtx;
2630
2631  /* For modes without NaNs, if the two operands are equal, we know the
2632     result except if they have side-effects.  */
2633  if (! HONOR_NANS (GET_MODE (trueop0))
2634      && rtx_equal_p (trueop0, trueop1)
2635      && ! side_effects_p (trueop0))
2636    equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
2637
2638  /* If the operands are floating-point constants, see if we can fold
2639     the result.  */
2640  else if (GET_CODE (trueop0) == CONST_DOUBLE
2641	   && GET_CODE (trueop1) == CONST_DOUBLE
2642	   && GET_MODE_CLASS (GET_MODE (trueop0)) == MODE_FLOAT)
2643    {
2644      REAL_VALUE_TYPE d0, d1;
2645
2646      REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
2647      REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
2648
2649      /* Comparisons are unordered iff at least one of the values is NaN.  */
2650      if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
2651	switch (code)
2652	  {
2653	  case UNEQ:
2654	  case UNLT:
2655	  case UNGT:
2656	  case UNLE:
2657	  case UNGE:
2658	  case NE:
2659	  case UNORDERED:
2660	    return const_true_rtx;
2661	  case EQ:
2662	  case LT:
2663	  case GT:
2664	  case LE:
2665	  case GE:
2666	  case LTGT:
2667	  case ORDERED:
2668	    return const0_rtx;
2669	  default:
2670	    return 0;
2671	  }
2672
2673      equal = REAL_VALUES_EQUAL (d0, d1);
2674      op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
2675      op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
2676    }
2677
2678  /* Otherwise, see if the operands are both integers.  */
2679  else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
2680	   && (GET_CODE (trueop0) == CONST_DOUBLE
2681	       || GET_CODE (trueop0) == CONST_INT)
2682	   && (GET_CODE (trueop1) == CONST_DOUBLE
2683	       || GET_CODE (trueop1) == CONST_INT))
2684    {
2685      int width = GET_MODE_BITSIZE (mode);
2686      HOST_WIDE_INT l0s, h0s, l1s, h1s;
2687      unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
2688
2689      /* Get the two words comprising each integer constant.  */
2690      if (GET_CODE (trueop0) == CONST_DOUBLE)
2691	{
2692	  l0u = l0s = CONST_DOUBLE_LOW (trueop0);
2693	  h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
2694	}
2695      else
2696	{
2697	  l0u = l0s = INTVAL (trueop0);
2698	  h0u = h0s = HWI_SIGN_EXTEND (l0s);
2699	}
2700
2701      if (GET_CODE (trueop1) == CONST_DOUBLE)
2702	{
2703	  l1u = l1s = CONST_DOUBLE_LOW (trueop1);
2704	  h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
2705	}
2706      else
2707	{
2708	  l1u = l1s = INTVAL (trueop1);
2709	  h1u = h1s = HWI_SIGN_EXTEND (l1s);
2710	}
2711
2712      /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
2713	 we have to sign or zero-extend the values.  */
2714      if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
2715	{
2716	  l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
2717	  l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
2718
2719	  if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
2720	    l0s |= ((HOST_WIDE_INT) (-1) << width);
2721
2722	  if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
2723	    l1s |= ((HOST_WIDE_INT) (-1) << width);
2724	}
2725      if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
2726	h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
2727
2728      equal = (h0u == h1u && l0u == l1u);
2729      op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
2730      op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
2731      op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
2732      op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
2733    }
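
  /* E.g. (illustrative): comparing constm1_rtx with const1_rtx, the
     setup above makes op0lt == 1 (signed, -1 < 1) but op0ltu == 0,
     since viewed unsigned the all-ones value is the largest value the
     mode can hold.  */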
2734
2735  /* Otherwise, there are some code-specific tests we can make.  */
2736  else
2737    {
2738      switch (code)
2739	{
2740	case EQ:
2741	  if (trueop1 == const0_rtx && nonzero_address_p (op0))
2742	    return const0_rtx;
2743	  break;
2744
2745	case NE:
2746	  if (trueop1 == const0_rtx && nonzero_address_p (op0))
2747	    return const_true_rtx;
2748	  break;
2749
2750	case GEU:
2751	  /* Unsigned values are never negative.  */
2752	  if (trueop1 == const0_rtx)
2753	    return const_true_rtx;
2754	  break;
2755
2756	case LTU:
2757	  if (trueop1 == const0_rtx)
2758	    return const0_rtx;
2759	  break;
2760
2761	case LEU:
2762	  /* Unsigned values are never greater than the largest
2763	     unsigned value.  */
2764	  if (GET_CODE (trueop1) == CONST_INT
2765	      && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
2766	      && INTEGRAL_MODE_P (mode))
2767	    return const_true_rtx;
2768	  break;
2769
2770	case GTU:
2771	  if (GET_CODE (trueop1) == CONST_INT
2772	      && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
2773	      && INTEGRAL_MODE_P (mode))
2774	    return const0_rtx;
2775	  break;
2776
2777	case LT:
2778	  /* Optimize abs(x) < 0.0.  */
2779	  if (trueop1 == CONST0_RTX (mode) && !HONOR_SNANS (mode))
2780	    {
2781	      tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
2782						       : trueop0;
2783	      if (GET_CODE (tem) == ABS)
2784		return const0_rtx;
2785	    }
2786	  break;
2787
2788	case GE:
2789	  /* Optimize abs(x) >= 0.0.  */
2790	  if (trueop1 == CONST0_RTX (mode) && !HONOR_NANS (mode))
2791	    {
2792	      tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
2793						       : trueop0;
2794	      if (GET_CODE (tem) == ABS)
2795		return const_true_rtx;
2796	    }
2797	  break;
2798
2799	case UNGE:
2800	  /* Optimize ! (abs(x) < 0.0).  */
2801	  if (trueop1 == CONST0_RTX (mode))
2802	    {
2803	      tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
2804						       : trueop0;
2805	      if (GET_CODE (tem) == ABS)
2806		return const_true_rtx;
2807	    }
2808	  break;
2809
2810	default:
2811	  break;
2812	}
2813
2814      return 0;
2815    }
2816
2817  /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
2818     as appropriate.  */
2819  switch (code)
2820    {
2821    case EQ:
2822    case UNEQ:
2823      return equal ? const_true_rtx : const0_rtx;
2824    case NE:
2825    case LTGT:
2826      return ! equal ? const_true_rtx : const0_rtx;
2827    case LT:
2828    case UNLT:
2829      return op0lt ? const_true_rtx : const0_rtx;
2830    case GT:
2831    case UNGT:
2832      return op1lt ? const_true_rtx : const0_rtx;
2833    case LTU:
2834      return op0ltu ? const_true_rtx : const0_rtx;
2835    case GTU:
2836      return op1ltu ? const_true_rtx : const0_rtx;
2837    case LE:
2838    case UNLE:
2839      return equal || op0lt ? const_true_rtx : const0_rtx;
2840    case GE:
2841    case UNGE:
2842      return equal || op1lt ? const_true_rtx : const0_rtx;
2843    case LEU:
2844      return equal || op0ltu ? const_true_rtx : const0_rtx;
2845    case GEU:
2846      return equal || op1ltu ? const_true_rtx : const0_rtx;
2847    case ORDERED:
2848      return const_true_rtx;
2849    case UNORDERED:
2850      return const0_rtx;
2851    default:
2852      abort ();
2853    }
2854}
2855
2856/* Simplify CODE, an operation with result mode MODE and three operands,
2857   OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
2858   a constant.  Return 0 if no simplifications is possible.  */
2859	   a constant.  Return 0 if no simplification is possible.  */
2860rtx
2861simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
2862			    enum machine_mode op0_mode, rtx op0, rtx op1,
2863			    rtx op2)
2864{
2865  unsigned int width = GET_MODE_BITSIZE (mode);
2866
2867  /* VOIDmode means "infinite" precision.  */
2868  if (width == 0)
2869    width = HOST_BITS_PER_WIDE_INT;
2870
2871  switch (code)
2872    {
2873    case SIGN_EXTRACT:
2874    case ZERO_EXTRACT:
2875      if (GET_CODE (op0) == CONST_INT
2876	  && GET_CODE (op1) == CONST_INT
2877	  && GET_CODE (op2) == CONST_INT
2878	  && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
2879	  && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
2880	{
2881	  /* Extracting a bit-field from a constant.  */
2882	  HOST_WIDE_INT val = INTVAL (op0);
2883
2884	  if (BITS_BIG_ENDIAN)
2885	    val >>= (GET_MODE_BITSIZE (op0_mode)
2886		     - INTVAL (op2) - INTVAL (op1));
2887	  else
2888	    val >>= INTVAL (op2);
2889
2890	  if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
2891	    {
2892	      /* First zero-extend.  */
2893	      val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
2894	      /* If desired, propagate sign bit.  */
2895	      if (code == SIGN_EXTRACT
2896		  && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
2897		val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
2898	    }
2899
2900	  /* Clear the bits that don't belong in our mode,
2901	     unless they and our sign bit are all one.
2902	     So we get either a reasonable negative value or a reasonable
2903	     unsigned value for this mode.  */
2904	  if (width < HOST_BITS_PER_WIDE_INT
2905	      && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
2906		  != ((HOST_WIDE_INT) (-1) << (width - 1))))
2907	    val &= ((HOST_WIDE_INT) 1 << width) - 1;
2908
2909	  return GEN_INT (val);
2910	}
2911      break;
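
      /* Worked example (illustrative): extracting 4 bits at position 4
	 from op0 == 0xa5 (with !BITS_BIG_ENDIAN) shifts down to 0xa;
	 ZERO_EXTRACT yields 10, while SIGN_EXTRACT sees bit 3 set and
	 yields -6.  */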
2912
2913    case IF_THEN_ELSE:
2914      if (GET_CODE (op0) == CONST_INT)
2915	return op0 != const0_rtx ? op1 : op2;
2916
2917      /* Convert c ? a : a into "a".  */
2918      if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
2919	return op1;
2920
2921      /* Convert a != b ? a : b into "a".  */
2922      if (GET_CODE (op0) == NE
2923	  && ! side_effects_p (op0)
2924	  && ! HONOR_NANS (mode)
2925	  && ! HONOR_SIGNED_ZEROS (mode)
2926	  && ((rtx_equal_p (XEXP (op0, 0), op1)
2927	       && rtx_equal_p (XEXP (op0, 1), op2))
2928	      || (rtx_equal_p (XEXP (op0, 0), op2)
2929		  && rtx_equal_p (XEXP (op0, 1), op1))))
2930	return op1;
2931
2932      /* Convert a == b ? a : b into "b".  */
2933      if (GET_CODE (op0) == EQ
2934	  && ! side_effects_p (op0)
2935	  && ! HONOR_NANS (mode)
2936	  && ! HONOR_SIGNED_ZEROS (mode)
2937	  && ((rtx_equal_p (XEXP (op0, 0), op1)
2938	       && rtx_equal_p (XEXP (op0, 1), op2))
2939	      || (rtx_equal_p (XEXP (op0, 0), op2)
2940		  && rtx_equal_p (XEXP (op0, 1), op1))))
2941	return op2;
2942
2943      if (GET_RTX_CLASS (GET_CODE (op0)) == '<' && ! side_effects_p (op0))
2944	{
2945	  enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
2946					? GET_MODE (XEXP (op0, 1))
2947					: GET_MODE (XEXP (op0, 0)));
2948	  rtx temp;
2949	  if (cmp_mode == VOIDmode)
2950	    cmp_mode = op0_mode;
2951	  temp = simplify_relational_operation (GET_CODE (op0), cmp_mode,
2952					        XEXP (op0, 0), XEXP (op0, 1));
2953
2954	  /* See if any simplifications were possible.  */
2955	  if (temp == const0_rtx)
2956	    return op2;
2957	  else if (temp == const_true_rtx)
2958	    return op1;
2959	  else if (temp)
2960	    abort ();
2961
2962	  /* Look for constants in op1 and op2 that collapse this to a comparison.  */
2963	  if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
2964	    {
2965	      HOST_WIDE_INT t = INTVAL (op1);
2966	      HOST_WIDE_INT f = INTVAL (op2);
2967
2968	      if (t == STORE_FLAG_VALUE && f == 0)
2969	        code = GET_CODE (op0);
2970	      else if (t == 0 && f == STORE_FLAG_VALUE)
2971		{
2972		  enum rtx_code tmp;
2973		  tmp = reversed_comparison_code (op0, NULL_RTX);
2974		  if (tmp == UNKNOWN)
2975		    break;
2976		  code = tmp;
2977		}
2978	      else
2979		break;
2980
2981	      return gen_rtx_fmt_ee (code, mode, XEXP (op0, 0), XEXP (op0, 1));
2982	    }
2983	}
2984      break;
2985
2986    case VEC_MERGE:
2987      if (GET_MODE (op0) != mode
2988	  || GET_MODE (op1) != mode
2989	  || !VECTOR_MODE_P (mode))
2990	abort ();
2991      op2 = avoid_constant_pool_reference (op2);
2992      if (GET_CODE (op2) == CONST_INT)
2993	{
2994          int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2995	  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2996	  int mask = (1 << n_elts) - 1;
2997
2998	  if (!(INTVAL (op2) & mask))
2999	    return op1;
3000	  if ((INTVAL (op2) & mask) == mask)
3001	    return op0;
3002
3003	  op0 = avoid_constant_pool_reference (op0);
3004	  op1 = avoid_constant_pool_reference (op1);
3005	  if (GET_CODE (op0) == CONST_VECTOR
3006	      && GET_CODE (op1) == CONST_VECTOR)
3007	    {
3008	      rtvec v = rtvec_alloc (n_elts);
3009	      unsigned int i;
3010
3011	      for (i = 0; i < n_elts; i++)
3012		RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
3013				    ? CONST_VECTOR_ELT (op0, i)
3014				    : CONST_VECTOR_ELT (op1, i));
3015	      return gen_rtx_CONST_VECTOR (mode, v);
3016	    }
3017	}
3018      break;
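
      /* E.g. (illustrative): for V4SImode with INTVAL (op2) == 5
	 (binary 0101), elements 0 and 2 of the result come from op0
	 and elements 1 and 3 from op1.  */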
3019
3020    default:
3021      abort ();
3022    }
3023
3024  return 0;
3025}
3026
3027/* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_VECTOR,
3028   returning another CONST_INT or CONST_DOUBLE or CONST_VECTOR.
3029
3030   Works by unpacking OP into a collection of 8-bit values
3031   represented as a little-endian array of 'unsigned char', selecting by BYTE,
3032   and then repacking them again for OUTERMODE.  */
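
/* For example (illustrative): on a little-endian target, taking the
   SImode lowpart (BYTE == 0) of the DImode constant 0x0123456789abcdef
   unpacks the byte array ef cd ab 89 67 45 23 01 and repacks its low
   four bytes, yielding (modulo sign extension by gen_int_mode) the
   value 0x89abcdef.  */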
3033
3034static rtx
3035simplify_immed_subreg (enum machine_mode outermode, rtx op,
3036		       enum machine_mode innermode, unsigned int byte)
3037{
3038  /* We support up to 512-bit values (for V8DFmode).  */
3039  enum {
3040    max_bitsize = 512,
3041    value_bit = 8,
3042    value_mask = (1 << value_bit) - 1
3043  };
3044  unsigned char value[max_bitsize / value_bit];
3045  int value_start;
3046  int i;
3047  int elem;
3048
3049  int num_elem;
3050  rtx * elems;
3051  int elem_bitsize;
3052  rtx result_s;
3053  rtvec result_v = NULL;
3054  enum mode_class outer_class;
3055  enum machine_mode outer_submode;
3056
3057  /* Some ports misuse CCmode.  */
3058  if (GET_MODE_CLASS (outermode) == MODE_CC && GET_CODE (op) == CONST_INT)
3059    return op;
3060
3061  /* Unpack the value.  */
3062
3063  if (GET_CODE (op) == CONST_VECTOR)
3064    {
3065      num_elem = CONST_VECTOR_NUNITS (op);
3066      elems = &CONST_VECTOR_ELT (op, 0);
3067      elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
3068    }
3069  else
3070    {
3071      num_elem = 1;
3072      elems = &op;
3073      elem_bitsize = max_bitsize;
3074    }
3075
3076  if (BITS_PER_UNIT % value_bit != 0)
3077    abort ();  /* Too complicated; reducing value_bit may help.  */
3078  if (elem_bitsize % BITS_PER_UNIT != 0)
3079    abort ();  /* I don't know how to handle endianness of sub-units.  */
3080
3081  for (elem = 0; elem < num_elem; elem++)
3082    {
3083      unsigned char * vp;
3084      rtx el = elems[elem];
3085
3086      /* Vectors are kept in target memory order.  (This is probably
3087	 a mistake.)  */
3088      {
3089	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
3090	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
3091			  / BITS_PER_UNIT);
3092	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3093	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3094	unsigned bytele = (subword_byte % UNITS_PER_WORD
3095			 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3096	vp = value + (bytele * BITS_PER_UNIT) / value_bit;
3097      }
3098
3099      switch (GET_CODE (el))
3100	{
3101	case CONST_INT:
3102	  for (i = 0;
3103	       i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
3104	       i += value_bit)
3105	    *vp++ = INTVAL (el) >> i;
3106	  /* CONST_INTs are always logically sign-extended.  */
3107	  for (; i < elem_bitsize; i += value_bit)
3108	    *vp++ = INTVAL (el) < 0 ? -1 : 0;
3109	  break;
3110
3111	case CONST_DOUBLE:
3112	  if (GET_MODE (el) == VOIDmode)
3113	    {
3114	      /* If this triggers, someone should have generated a
3115		 CONST_INT instead.  */
3116	      if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
3117		abort ();
3118
3119	      for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
3120		*vp++ = CONST_DOUBLE_LOW (el) >> i;
3121	      while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
3122		{
3123		  *vp++
3124		    = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
3125		  i += value_bit;
3126		}
3127	      /* It shouldn't matter what's done here, so fill it with
3128		 zero.  */
3129	      for (; i < max_bitsize; i += value_bit)
3130		*vp++ = 0;
3131	    }
3132	  else if (GET_MODE_CLASS (GET_MODE (el)) == MODE_FLOAT)
3133	    {
3134	      long tmp[max_bitsize / 32];
3135	      int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
3136
3137	      if (bitsize > elem_bitsize)
3138		abort ();
3139	      if (bitsize % value_bit != 0)
3140		abort ();
3141
3142	      real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
3143			      GET_MODE (el));
3144
3145	      /* real_to_target produces its result in words affected by
3146		 FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
3147		 and use WORDS_BIG_ENDIAN instead; see the documentation
3148	         of SUBREG in rtl.texi.  */
3149	      for (i = 0; i < bitsize; i += value_bit)
3150		{
3151		  int ibase;
3152		  if (WORDS_BIG_ENDIAN)
3153		    ibase = bitsize - 1 - i;
3154		  else
3155		    ibase = i;
3156		  *vp++ = tmp[ibase / 32] >> i % 32;
3157		}
3158
3159	      /* It shouldn't matter what's done here, so fill it with
3160		 zero.  */
3161	      for (; i < elem_bitsize; i += value_bit)
3162		*vp++ = 0;
3163	    }
3164	  else
3165	    abort ();
3166	  break;
3167
3168	default:
3169	  abort ();
3170	}
3171    }
3172
3173  /* Now, pick the right byte to start with.  */
3174  /* Renumber BYTE so that the least-significant byte is byte 0.  A special
3175     case is paradoxical SUBREGs, which shouldn't be adjusted since they
3176     will already have offset 0.  */
3177  if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
3178    {
3179      unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
3180			- byte);
3181      unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3182      unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3183      byte = (subword_byte % UNITS_PER_WORD
3184	      + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3185    }
3186
3187  /* BYTE should still be inside OP.  (Note that BYTE is unsigned,
3188     so if it's become negative it will instead be very large.)  */
3189  if (byte >= GET_MODE_SIZE (innermode))
3190    abort ();
3191
3192  /* Convert from bytes to chunks of size value_bit.  */
3193  value_start = byte * (BITS_PER_UNIT / value_bit);
3194
3195  /* Re-pack the value.  */
3196
3197  if (VECTOR_MODE_P (outermode))
3198    {
3199      num_elem = GET_MODE_NUNITS (outermode);
3200      result_v = rtvec_alloc (num_elem);
3201      elems = &RTVEC_ELT (result_v, 0);
3202      outer_submode = GET_MODE_INNER (outermode);
3203    }
3204  else
3205    {
3206      num_elem = 1;
3207      elems = &result_s;
3208      outer_submode = outermode;
3209    }
3210
3211  outer_class = GET_MODE_CLASS (outer_submode);
3212  elem_bitsize = GET_MODE_BITSIZE (outer_submode);
3213
3214  if (elem_bitsize % value_bit != 0)
3215    abort ();
3216  if (elem_bitsize + value_start * value_bit > max_bitsize)
3217    abort ();
3218
3219  for (elem = 0; elem < num_elem; elem++)
3220    {
3221      unsigned char *vp;
3222
3223      /* Vectors are stored in target memory order.  (This is probably
3224	 a mistake.)  */
3225      {
3226	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
3227	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
3228			  / BITS_PER_UNIT);
3229	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
3230	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
3231	unsigned bytele = (subword_byte % UNITS_PER_WORD
3232			 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
3233	vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
3234      }
3235
3236      switch (outer_class)
3237	{
3238	case MODE_INT:
3239	case MODE_PARTIAL_INT:
3240	  {
3241	    unsigned HOST_WIDE_INT hi = 0, lo = 0;
3242
3243	    for (i = 0;
3244		 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
3245		 i += value_bit)
3246	      lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
3247	    for (; i < elem_bitsize; i += value_bit)
3248	      hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
3249		     << (i - HOST_BITS_PER_WIDE_INT));
3250
3251	    /* immed_double_const doesn't call trunc_int_for_mode.  I don't
3252	       know why.  */
3253	    if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
3254	      elems[elem] = gen_int_mode (lo, outer_submode);
3255	    else
3256	      elems[elem] = immed_double_const (lo, hi, outer_submode);
3257	  }
3258	  break;
3259
3260	case MODE_FLOAT:
3261	  {
3262	    REAL_VALUE_TYPE r;
3263	    long tmp[max_bitsize / 32];
3264
3265	    /* real_from_target wants its input in words affected by
3266	       FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
3267	       and use WORDS_BIG_ENDIAN instead; see the documentation
3268	       of SUBREG in rtl.texi.  */
3269	    for (i = 0; i < max_bitsize / 32; i++)
3270	      tmp[i] = 0;
3271	    for (i = 0; i < elem_bitsize; i += value_bit)
3272	      {
3273		int ibase;
3274		if (WORDS_BIG_ENDIAN)
3275		  ibase = elem_bitsize - 1 - i;
3276		else
3277		  ibase = i;
3278		tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
3279	      }
3280
3281	    real_from_target (&r, tmp, outer_submode);
3282	    elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
3283	  }
3284	  break;
3285
3286	default:
3287	  abort ();
3288	}
3289    }
3290  if (VECTOR_MODE_P (outermode))
3291    return gen_rtx_CONST_VECTOR (outermode, result_v);
3292  else
3293    return result_s;
3294}
3295
3296/* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
3297   Return 0 if no simplifications are possible.  */
3298rtx
3299simplify_subreg (enum machine_mode outermode, rtx op,
3300		 enum machine_mode innermode, unsigned int byte)
3301{
3302  /* Little bit of sanity checking.  */
3303  if (innermode == VOIDmode || outermode == VOIDmode
3304      || innermode == BLKmode || outermode == BLKmode)
3305    abort ();
3306
3307  if (GET_MODE (op) != innermode
3308      && GET_MODE (op) != VOIDmode)
3309    abort ();
3310
3311  if (byte % GET_MODE_SIZE (outermode)
3312      || byte >= GET_MODE_SIZE (innermode))
3313    abort ();
3314
3315  if (outermode == innermode && !byte)
3316    return op;
3317
3318  if (GET_CODE (op) == CONST_INT
3319      || GET_CODE (op) == CONST_DOUBLE
3320      || GET_CODE (op) == CONST_VECTOR)
3321    return simplify_immed_subreg (outermode, op, innermode, byte);
3322
3323  /* Changing mode twice with SUBREG => just change it once,
3324	     or not at all if changing back to the starting mode.  */
3325  if (GET_CODE (op) == SUBREG)
3326    {
3327      enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
3328      int final_offset = byte + SUBREG_BYTE (op);
3329      rtx new;
3330
3331      if (outermode == innermostmode
3332	  && byte == 0 && SUBREG_BYTE (op) == 0)
3333	return SUBREG_REG (op);
3334
3335	      /* The SUBREG_BYTE represents the offset, as if the value were stored
3336	 in memory.  An irritating exception is a paradoxical subreg, where
3337	 we define SUBREG_BYTE to be 0.  On big-endian machines, this
3338	 value should be negative.  For a moment, undo this exception.  */
3339      if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
3340	{
3341	  int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
3342	  if (WORDS_BIG_ENDIAN)
3343	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3344	  if (BYTES_BIG_ENDIAN)
3345	    final_offset += difference % UNITS_PER_WORD;
3346	}
3347      if (SUBREG_BYTE (op) == 0
3348	  && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
3349	{
3350	  int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
3351	  if (WORDS_BIG_ENDIAN)
3352	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3353	  if (BYTES_BIG_ENDIAN)
3354	    final_offset += difference % UNITS_PER_WORD;
3355	}
3356
3357      /* See whether resulting subreg will be paradoxical.  */
3358      if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
3359	{
3360	  /* In nonparadoxical subregs we can't handle negative offsets.  */
3361	  if (final_offset < 0)
3362	    return NULL_RTX;
3363	  /* Bail out in case resulting subreg would be incorrect.  */
3364	  if (final_offset % GET_MODE_SIZE (outermode)
3365	      || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
3366	    return NULL_RTX;
3367	}
3368      else
3369	{
3370	  int offset = 0;
3371	  int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
3372
3373	  /* In a paradoxical subreg, see if we are still looking at the lower
3374	     part.  If so, our SUBREG_BYTE will be 0.  */
3375	  if (WORDS_BIG_ENDIAN)
3376	    offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
3377	  if (BYTES_BIG_ENDIAN)
3378	    offset += difference % UNITS_PER_WORD;
3379	  if (offset == final_offset)
3380	    final_offset = 0;
3381	  else
3382	    return NULL_RTX;
3383	}
3384
3385      /* Recurse for further possible simplifications.  */
3386      new = simplify_subreg (outermode, SUBREG_REG (op),
3387			     GET_MODE (SUBREG_REG (op)),
3388			     final_offset);
3389      if (new)
3390	return new;
3391      return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
3392    }
3393
3394  /* SUBREG of a hard register => just change the register number
3395     and/or mode.  If the hard register is not valid in that mode,
3396     suppress this simplification.  If the hard register is the stack,
3397     frame, or argument pointer, leave this as a SUBREG.  */
3398
3399  if (REG_P (op)
3400      && (! REG_FUNCTION_VALUE_P (op)
3401	  || ! rtx_equal_function_value_matters)
3402      && REGNO (op) < FIRST_PSEUDO_REGISTER
3403#ifdef CANNOT_CHANGE_MODE_CLASS
3404      && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
3405	    && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
3406	    && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
3407#endif
3408      && ((reload_completed && !frame_pointer_needed)
3409	  || (REGNO (op) != FRAME_POINTER_REGNUM
3410#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
3411	      && REGNO (op) != HARD_FRAME_POINTER_REGNUM
3412#endif
3413	     ))
3414#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
3415      && REGNO (op) != ARG_POINTER_REGNUM
3416#endif
3417      && REGNO (op) != STACK_POINTER_REGNUM
3418      && subreg_offset_representable_p (REGNO (op), innermode,
3419					byte, outermode))
3420    {
3421      rtx tem = gen_rtx_SUBREG (outermode, op, byte);
3422      int final_regno = subreg_hard_regno (tem, 0);
3423
3424      /* ??? We do allow it if the current REG is not valid for
3425	 its mode.  This is a kludge to work around how float/complex
3426	 arguments are passed on 32-bit SPARC and should be fixed.  */
3427      if (HARD_REGNO_MODE_OK (final_regno, outermode)
3428	  || ! HARD_REGNO_MODE_OK (REGNO (op), innermode))
3429	{
3430	  rtx x = gen_rtx_REG_offset (op, outermode, final_regno, byte);
3431
3432	  /* Propagate the original regno.  We don't have any way to specify
3433	     the offset inside the original regno, so do so only for the
3434	     lowpart.  The information is used only by alias analysis, which
3435	     cannot grok a partial register anyway.  */
3436
3437	  if (subreg_lowpart_offset (outermode, innermode) == byte)
3438	    ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
3439	  return x;
3440	}
3441    }
3442
3443  /* If we have a SUBREG of a register that we are replacing and we are
3444     replacing it with a MEM, make a new MEM and try replacing the
3445     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
3446     or if we would be widening it.  */
3447
3448  if (GET_CODE (op) == MEM
3449      && ! mode_dependent_address_p (XEXP (op, 0))
3450      /* Allow splitting of volatile memory references in case we don't
3451         have an instruction to move the whole thing.  */
3452      && (! MEM_VOLATILE_P (op)
3453	  || ! have_insn_for (SET, innermode))
3454      && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
3455    return adjust_address_nv (op, outermode, byte);
3456
3457  /* Handle complex values represented as CONCAT
3458     of real and imaginary part.  */
3459  if (GET_CODE (op) == CONCAT)
3460    {
3461      int is_realpart = byte < (unsigned int) GET_MODE_UNIT_SIZE (innermode);
3462      rtx part = is_realpart ? XEXP (op, 0) : XEXP (op, 1);
3463      unsigned int final_offset;
3464      rtx res;
3465
3466      final_offset = byte % (GET_MODE_UNIT_SIZE (innermode));
3467      res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
3468      if (res)
3469	return res;
3470      /* We can at least simplify it by referring directly to the relevant part.  */
3471      return gen_rtx_SUBREG (outermode, part, final_offset);
3472    }
3473
3474  return NULL_RTX;
3475}
3476
3477/* Make a SUBREG operation or equivalent if it folds.  */
3478
3479rtx
3480simplify_gen_subreg (enum machine_mode outermode, rtx op,
3481		     enum machine_mode innermode, unsigned int byte)
3482{
3483  rtx new;
3484  /* Little bit of sanity checking.  */
3485  if (innermode == VOIDmode || outermode == VOIDmode
3486      || innermode == BLKmode || outermode == BLKmode)
3487    abort ();
3488
3489  if (GET_MODE (op) != innermode
3490      && GET_MODE (op) != VOIDmode)
3491    abort ();
3492
3493  if (byte % GET_MODE_SIZE (outermode)
3494      || byte >= GET_MODE_SIZE (innermode))
3495    abort ();
3496
3497  if (GET_CODE (op) == QUEUED)
3498    return NULL_RTX;
3499
3500  new = simplify_subreg (outermode, op, innermode, byte);
3501  if (new)
3502    return new;
3503
3504  if (GET_CODE (op) == SUBREG || GET_MODE (op) == VOIDmode)
3505    return NULL_RTX;
3506
3507  return gen_rtx_SUBREG (outermode, op, byte);
3508}

3509/* Simplify X, an rtx expression.
3510
3511   Return the simplified expression or NULL if no simplifications
3512   were possible.
3513
3514   This is the preferred entry point into the simplification routines;
3515   however, we still allow passes to call the more specific routines.
3516
3517   Right now GCC has three (yes, three) major bodies of RTL simplification
3518   code that need to be unified.
3519
3520	1. fold_rtx in cse.c.  This code uses various CSE specific
3521	   information to aid in RTL simplification.
3522
3523	2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
3524	   it uses combine specific information to aid in RTL
3525	   simplification.
3526
3527	3. The routines in this file.
3528
3529
3530   Long term we want to only have one body of simplification code; to
3531   get to that state I recommend the following steps:
3532
3533	1. Pore over fold_rtx & simplify_rtx and move any simplifications
3534	   which do not depend on pass-specific state into these routines.
3535
3536	2. As code is moved by #1, change fold_rtx & simplify_rtx to
3537	   use this routine whenever possible.
3538
3539	3. Allow for pass dependent state to be provided to these
3540	   routines and add simplifications based on the pass dependent
3541	   state.  Remove code from cse.c & combine.c that becomes
3542	   redundant/dead.
3543
3544    It will take time, but ultimately the compiler will be easier to
3545    maintain and improve.  It's totally silly that when we add a
3546    simplification it needs to be added to 4 places (3 for RTL
3547    simplification and 1 for tree simplification).  */
3548
3549rtx
3550simplify_rtx (rtx x)
3551{
3552  enum rtx_code code = GET_CODE (x);
3553  enum machine_mode mode = GET_MODE (x);
3554  rtx temp;
3555
3556  switch (GET_RTX_CLASS (code))
3557    {
3558    case '1':
3559      return simplify_unary_operation (code, mode,
3560				       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
3561    case 'c':
3562      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
3563	return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
3564
3565      /* Fall through....  */
3566
3567    case '2':
3568      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
3569
3570    case '3':
3571    case 'b':
3572      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
3573					 XEXP (x, 0), XEXP (x, 1),
3574					 XEXP (x, 2));
3575
3576    case '<':
3577      if (VECTOR_MODE_P (mode))
3578	return NULL_RTX;
3579      temp = simplify_relational_operation (code,
3580					    ((GET_MODE (XEXP (x, 0))
3581					      != VOIDmode)
3582					     ? GET_MODE (XEXP (x, 0))
3583					     : GET_MODE (XEXP (x, 1))),
3584					    XEXP (x, 0), XEXP (x, 1));
3585#ifdef FLOAT_STORE_FLAG_VALUE
3586      if (temp != 0 && GET_MODE_CLASS (mode) == MODE_FLOAT)
3587	{
3588	  if (temp == const0_rtx)
3589	    temp = CONST0_RTX (mode);
3590	  else
3591	    temp = CONST_DOUBLE_FROM_REAL_VALUE (FLOAT_STORE_FLAG_VALUE (mode),
3592						 mode);
3593	}
3594#endif
3595      return temp;
3596
3597    case 'x':
3598      if (code == SUBREG)
3599	return simplify_gen_subreg (mode, SUBREG_REG (x),
3600				    GET_MODE (SUBREG_REG (x)),
3601				    SUBREG_BYTE (x));
3602      if (code == CONSTANT_P_RTX)
3603	{
3604	  if (CONSTANT_P (XEXP (x, 0)))
3605	    return const1_rtx;
3606	}
3607      break;
3608
3609    case 'o':
3610      if (code == LO_SUM)
3611	{
3612	  /* Convert (lo_sum (high FOO) FOO) to FOO.  */
3613	  if (GET_CODE (XEXP (x, 0)) == HIGH
3614	      && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
3615	    return XEXP (x, 1);
3616	}
3617      break;
3618
3619    default:
3620      break;
3621    }
3622  return NULL;
3623}
3624