simplify-rtx.c revision 220150
/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.  */


#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "flags.h"
#include "real.h"
#include "insn-config.h"
#include "recog.h"
#include "function.h"
#include "expr.h"
#include "toplev.h"
#include "output.h"
#include "ggc.h"
#include "target.h"

/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
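/* For example, with a 64-bit HOST_WIDE_INT the value -2 is the pair
   (0xfffffffffffffffe, -1): the high word is HWI_SIGN_EXTEND of the
   (negative) low word.  */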

static rtx neg_const_int (enum machine_mode, rtx);
static bool plus_minus_operand_p (rtx);
static int simplify_plus_minus_op_data_cmp (const void *, const void *);
static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
				  unsigned int);
static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
					   rtx, rtx);
static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
					    enum machine_mode, rtx, rtx);
static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
					rtx, rtx, rtx, rtx);

/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */
static rtx
neg_const_int (enum machine_mode mode, rtx i)
{
  return gen_int_mode (- INTVAL (i), mode);
}
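/* For example, in QImode the most negative value is -128; negating it
   gives +128, which QImode cannot represent, so gen_int_mode truncates
   the result back to -128 rather than producing an out-of-range
   CONST_INT.  */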

/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

bool
mode_signbit_p (enum machine_mode mode, rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_BITSIZE (mode);
  if (width == 0)
    return false;

  if (width <= HOST_BITS_PER_WIDE_INT
      && GET_CODE (x) == CONST_INT)
    val = INTVAL (x);
  else if (width <= 2 * HOST_BITS_PER_WIDE_INT
	   && GET_CODE (x) == CONST_DOUBLE
	   && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
  else
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
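/* For example, in SImode only the constant with bit 31 set and all
   other bits clear (0x80000000) satisfies this predicate.  */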

/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
		     rtx op1)
{
  rtx tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
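/* For example, calling this with code PLUS, a CONST_INT and a REG
   yields the canonical form (plus (reg) (const_int ...)), with the
   constant second, whichever order the operands were passed in.  */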

/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  enum machine_mode cmode;
  HOST_WIDE_INT offset = 0;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
	{
	  REAL_VALUE_TYPE d;

	  REAL_VALUE_FROM_CONST_DOUBLE (d, c);
	  return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
	}
      return x;

    default:
      return x;
    }

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  /* Split the address into a base and integer offset.  */
  if (GET_CODE (addr) == CONST
      && GET_CODE (XEXP (addr, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
    {
      offset = INTVAL (XEXP (XEXP (addr, 0), 1));
      addr = XEXP (XEXP (addr, 0), 0);
    }

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  /* If this is a constant pool reference, we can turn it into its
     constant and hope that simplifications happen.  */
  if (GET_CODE (addr) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (addr))
    {
      c = get_pool_constant (addr);
      cmode = get_pool_mode (addr);

      /* If we're accessing the constant in a different mode than it was
         originally stored, attempt to fix that up via subreg simplifications.
         If that fails we have no choice but to return the original memory.  */
      if (offset != 0 || cmode != GET_MODE (x))
        {
          rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
          if (tem && CONSTANT_P (tem))
            return tem;
        }
      else
        return c;
    }

  return x;
}

/* Return true if X is a MEM referencing the constant pool.  */

bool
constant_pool_reference_p (rtx x)
{
  return avoid_constant_pool_reference (x) != x;
}

/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
		    enum machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}

/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
		      enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
					      op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}

/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
			 enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
						 op0, op1)))
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}

/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, rtx old_rtx, rtx new_rtx)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);
  enum machine_mode op_mode;
  rtx op0, op1, op2;

  /* If X is OLD_RTX, return NEW_RTX.  Otherwise, if this is an expression, try
     to build a new expression substituting recursively.  If we can't do
     anything, return our input.  */

  if (x == old_rtx)
    return new_rtx;

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      if (op0 == XEXP (x, 0))
	return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
      op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      op1 = simplify_replace_rtx (op1, old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
      op2 = simplify_replace_rtx (XEXP (x, 2), old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
	return x;
      if (op_mode == VOIDmode)
	op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case RTX_EXTRA:
      /* The only case we try to handle is a SUBREG.  */
      if (code == SUBREG)
	{
	  op0 = simplify_replace_rtx (SUBREG_REG (x), old_rtx, new_rtx);
	  if (op0 == SUBREG_REG (x))
	    return x;
	  op0 = simplify_gen_subreg (GET_MODE (x), op0,
				     GET_MODE (SUBREG_REG (x)),
				     SUBREG_BYTE (x));
	  return op0 ? op0 : x;
	}
      break;

    case RTX_OBJ:
      if (code == MEM)
	{
	  op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
	  if (op0 == XEXP (x, 0))
	    return x;
	  return replace_equiv_address_nv (x, op0);
	}
      else if (code == LO_SUM)
	{
	  op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
	  op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);

	  /* (lo_sum (high x) x) -> x  */
	  if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
	    return op1;

	  if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
	    return x;
	  return gen_rtx_LO_SUM (mode, op0, op1);
	}
      else if (code == REG)
	{
	  if (rtx_equal_p (x, old_rtx))
	    return new_rtx;
	}
      break;

    default:
      break;
    }
  return x;
}
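/* For example, replacing R with (const_int 0) in (plus X R) first
   substitutes recursively and then folds, so the result is simply X.  */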

/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
			  rtx op, enum machine_mode op_mode)
{
  rtx trueop, tem;

  if (GET_CODE (op) == CONST)
    op = XEXP (op, 0);

  trueop = avoid_constant_pool_reference (op);

  tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
  if (tem)
    return tem;

  return simplify_unary_operation_1 (code, mode, op);
}

/* Perform some simplifications we can do even if the operands
   aren't constant.  */
static rtx
simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
{
  enum rtx_code reversed;
  rtx temp;

  switch (code)
    {
    case NOT:
      /* (not (not X)) == X.  */
      if (GET_CODE (op) == NOT)
	return XEXP (op, 0);

      /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
	 comparison is all ones.  */
      if (COMPARISON_P (op)
	  && (mode == BImode || STORE_FLAG_VALUE == -1)
	  && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
	return simplify_gen_relational (reversed, mode, VOIDmode,
					XEXP (op, 0), XEXP (op, 1));

      /* (not (plus X -1)) can become (neg X).  */
      if (GET_CODE (op) == PLUS
	  && XEXP (op, 1) == constm1_rtx)
	return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

      /* Similarly, (not (neg X)) is (plus X -1).  */
      if (GET_CODE (op) == NEG)
	return plus_constant (XEXP (op, 0), -1);

      /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == XOR
	  && GET_CODE (XEXP (op, 1)) == CONST_INT
	  && (temp = simplify_unary_operation (NOT, mode,
					       XEXP (op, 1), mode)) != 0)
	return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == PLUS
	  && GET_CODE (XEXP (op, 1)) == CONST_INT
	  && mode_signbit_p (mode, XEXP (op, 1))
	  && (temp = simplify_unary_operation (NOT, mode,
					       XEXP (op, 1), mode)) != 0)
	return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);


      /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
	 operands other than 1, but that is not valid.  We could do a
	 similar simplification for (not (lshiftrt C X)) where C is
	 just the sign bit, but this doesn't seem common enough to
	 bother with.  */
      if (GET_CODE (op) == ASHIFT
	  && XEXP (op, 0) == const1_rtx)
	{
	  temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
	  return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
	}

      /* (not (ashiftrt foo C)) where C is the number of bits in FOO
	 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
	 so we can perform the above simplification.  */

      if (STORE_FLAG_VALUE == -1
	  && GET_CODE (op) == ASHIFTRT
	  && GET_CODE (XEXP (op, 1)) == CONST_INT
	  && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
	return simplify_gen_relational (GE, mode, VOIDmode,
					XEXP (op, 0), const0_rtx);

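      /* Likewise, (not (subreg (ashift 1 X) 0)) is
	 (subreg (rotate ~1 X) 0): the rotate is done in the wider inner
	 mode and the lowpart is taken afterwards.  */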
      if (GET_CODE (op) == SUBREG
	  && subreg_lowpart_p (op)
	  && (GET_MODE_SIZE (GET_MODE (op))
	      < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
	  && GET_CODE (SUBREG_REG (op)) == ASHIFT
	  && XEXP (SUBREG_REG (op), 0) == const1_rtx)
	{
	  enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
	  rtx x;

	  x = gen_rtx_ROTATE (inner_mode,
			      simplify_gen_unary (NOT, inner_mode, const1_rtx,
						  inner_mode),
			      XEXP (SUBREG_REG (op), 1));
	  return rtl_hooks.gen_lowpart_no_emit (mode, x);
	}

      /* Apply De Morgan's laws to reduce number of patterns for machines
	 with negating logical insns (and-not, nand, etc.).  If result has
	 only one NOT, put it first, since that is how the patterns are
	 coded.  */

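      /* For example, (not (ior X Y)) becomes (and (not X) (not Y)),
	 and (not (and X (not Y))) becomes (ior (not X) Y).  */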
      if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
	{
	  rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
	  enum machine_mode op_mode;

	  op_mode = GET_MODE (in1);
	  in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);

	  op_mode = GET_MODE (in2);
	  if (op_mode == VOIDmode)
	    op_mode = mode;
	  in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);

	  if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
	    {
	      rtx tem = in2;
	      in2 = in1; in1 = tem;
	    }

	  return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
				 mode, in1, in2);
	}
      break;

    case NEG:
      /* (neg (neg X)) == X.  */
      if (GET_CODE (op) == NEG)
	return XEXP (op, 0);

      /* (neg (plus X 1)) can become (not X).  */
      if (GET_CODE (op) == PLUS
	  && XEXP (op, 1) == const1_rtx)
	return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

      /* Similarly, (neg (not X)) is (plus X 1).  */
      if (GET_CODE (op) == NOT)
	return plus_constant (XEXP (op, 0), 1);

      /* (neg (minus X Y)) can become (minus Y X).  This transformation
	 isn't safe for modes with signed zeros, since if X and Y are
	 both +0, (minus Y X) is the same as (minus X Y).  If the
	 rounding mode is towards +infinity (or -infinity) then the two
	 expressions will be rounded differently.  */
      if (GET_CODE (op) == MINUS
	  && !HONOR_SIGNED_ZEROS (mode)
	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));

      if (GET_CODE (op) == PLUS
	  && !HONOR_SIGNED_ZEROS (mode)
	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	{
	  /* (neg (plus A C)) is simplified to (minus -C A).  */
	  if (GET_CODE (XEXP (op, 1)) == CONST_INT
	      || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
	    {
	      temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
	      if (temp)
		return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
	    }

	  /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
	  temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
	  return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
	}

      /* (neg (mult A B)) becomes (mult (neg A) B).
	 This works even for floating-point values.  */
      if (GET_CODE (op) == MULT
	  && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	{
	  temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
	  return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
	}

      /* NEG commutes with ASHIFT since it is multiplication.  Only do
	 this if we can then eliminate the NEG (e.g., if the operand
	 is a constant).  */
      if (GET_CODE (op) == ASHIFT)
	{
	  temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
	  if (temp)
	    return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
	}

      /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
	 C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == ASHIFTRT
	  && GET_CODE (XEXP (op, 1)) == CONST_INT
	  && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
	return simplify_gen_binary (LSHIFTRT, mode,
				    XEXP (op, 0), XEXP (op, 1));

      /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
	 C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == LSHIFTRT
	  && GET_CODE (XEXP (op, 1)) == CONST_INT
	  && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
	return simplify_gen_binary (ASHIFTRT, mode,
				    XEXP (op, 0), XEXP (op, 1));

      /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1.  */
      if (GET_CODE (op) == XOR
	  && XEXP (op, 1) == const1_rtx
	  && nonzero_bits (XEXP (op, 0), mode) == 1)
	return plus_constant (XEXP (op, 0), -1);

      /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1.  */
      /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1.  */
      if (GET_CODE (op) == LT
	  && XEXP (op, 1) == const0_rtx
	  && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
	{
	  enum machine_mode inner = GET_MODE (XEXP (op, 0));
	  int isize = GET_MODE_BITSIZE (inner);
	  if (STORE_FLAG_VALUE == 1)
	    {
	      temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
					  GEN_INT (isize - 1));
	      if (mode == inner)
		return temp;
	      if (GET_MODE_BITSIZE (mode) > isize)
		return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
	      return simplify_gen_unary (TRUNCATE, mode, temp, inner);
	    }
	  else if (STORE_FLAG_VALUE == -1)
	    {
	      temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
					  GEN_INT (isize - 1));
	      if (mode == inner)
		return temp;
	      if (GET_MODE_BITSIZE (mode) > isize)
		return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
	      return simplify_gen_unary (TRUNCATE, mode, temp, inner);
	    }
	}
      break;

    case TRUNCATE:
      /* We can't handle truncation to a partial integer mode here
         because we don't know the real bitsize of the partial
         integer mode.  */
      if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
        break;

      /* (truncate:SI ({sign,zero}_extend:DI foo:SI)) == foo:SI.  */
      if ((GET_CODE (op) == SIGN_EXTEND
	   || GET_CODE (op) == ZERO_EXTEND)
	  && GET_MODE (XEXP (op, 0)) == mode)
	return XEXP (op, 0);

      /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
	 (OP:SI foo:SI) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
	   || GET_CODE (op) == NEG)
	  && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
	      || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
	  && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
	return simplify_gen_unary (GET_CODE (op), mode,
				   XEXP (XEXP (op, 0), 0), mode);

      /* (truncate:A (subreg:B (truncate:C X) 0)) is
	 (truncate:A X).  */
      if (GET_CODE (op) == SUBREG
	  && GET_CODE (SUBREG_REG (op)) == TRUNCATE
	  && subreg_lowpart_p (op))
	return simplify_gen_unary (TRUNCATE, mode, XEXP (SUBREG_REG (op), 0),
				   GET_MODE (XEXP (SUBREG_REG (op), 0)));

      /* If we know that the value is already truncated, we can
         replace the TRUNCATE with a SUBREG.  Note that this is also
         valid if TRULY_NOOP_TRUNCATION is false for the corresponding
         modes; we just have to apply a different definition of
         truncation.  But don't do this for an (LSHIFTRT (MULT ...))
         since this will cause problems with the umulXi3_highpart
         patterns.  */
      if ((TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
				 GET_MODE_BITSIZE (GET_MODE (op)))
	   ? (num_sign_bit_copies (op, GET_MODE (op))
	      > (unsigned int) (GET_MODE_BITSIZE (GET_MODE (op))
				- GET_MODE_BITSIZE (mode)))
	   : truncated_to_mode (mode, op))
	  && ! (GET_CODE (op) == LSHIFTRT
		&& GET_CODE (XEXP (op, 0)) == MULT))
	return rtl_hooks.gen_lowpart_no_emit (mode, op);

      /* A truncate of a comparison can be replaced with a subreg if
         STORE_FLAG_VALUE permits.  This is like the previous test,
         but it works even if the comparison is done in a mode larger
         than HOST_BITS_PER_WIDE_INT.  */
      if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
	  && COMPARISON_P (op)
	  && ((HOST_WIDE_INT) STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
	return rtl_hooks.gen_lowpart_no_emit (mode, op);
      break;

    case FLOAT_TRUNCATE:
      if (DECIMAL_FLOAT_MODE_P (mode))
	break;

      /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF.  */
      if (GET_CODE (op) == FLOAT_EXTEND
	  && GET_MODE (XEXP (op, 0)) == mode)
	return XEXP (op, 0);

      /* (float_truncate:SF (float_truncate:DF foo:XF))
         = (float_truncate:SF foo:XF).
	 This may eliminate double rounding, so it is unsafe.

         (float_truncate:SF (float_extend:XF foo:DF))
         = (float_truncate:SF foo:DF).

         (float_truncate:DF (float_extend:XF foo:SF))
         = (float_extend:DF foo:SF).  */
      if ((GET_CODE (op) == FLOAT_TRUNCATE
	   && flag_unsafe_math_optimizations)
	  || GET_CODE (op) == FLOAT_EXTEND)
	return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
							    0)))
				   > GET_MODE_SIZE (mode)
				   ? FLOAT_TRUNCATE : FLOAT_EXTEND,
				   mode,
				   XEXP (op, 0), mode);

      /*  (float_truncate (float x)) is (float x)  */
      if (GET_CODE (op) == FLOAT
	  && (flag_unsafe_math_optimizations
	      || ((unsigned)significand_size (GET_MODE (op))
		  >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
		      - num_sign_bit_copies (XEXP (op, 0),
					     GET_MODE (XEXP (op, 0)))))))
	return simplify_gen_unary (FLOAT, mode,
				   XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));

      /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
	 (OP:SF foo:SF) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
	   || GET_CODE (op) == NEG)
	  && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
	  && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
	return simplify_gen_unary (GET_CODE (op), mode,
				   XEXP (XEXP (op, 0), 0), mode);

      /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
	 is (float_truncate:SF x).  */
      if (GET_CODE (op) == SUBREG
	  && subreg_lowpart_p (op)
	  && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
	return SUBREG_REG (op);
      break;

    case FLOAT_EXTEND:
      if (DECIMAL_FLOAT_MODE_P (mode))
	break;

      /*  (float_extend (float_extend x)) is (float_extend x)

	  (float_extend (float x)) is (float x) assuming that double
	  rounding can't happen.
          */
      if (GET_CODE (op) == FLOAT_EXTEND
	  || (GET_CODE (op) == FLOAT
	      && ((unsigned)significand_size (GET_MODE (op))
		  >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
		      - num_sign_bit_copies (XEXP (op, 0),
					     GET_MODE (XEXP (op, 0)))))))
	return simplify_gen_unary (GET_CODE (op), mode,
				   XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));

      break;

    case ABS:
      /* (abs (neg <foo>)) -> (abs <foo>) */
      if (GET_CODE (op) == NEG)
	return simplify_gen_unary (ABS, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));

      /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
         do nothing.  */
      if (GET_MODE (op) == VOIDmode)
	break;

      /* If operand is something known to be positive, ignore the ABS.  */
      if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
	  || ((GET_MODE_BITSIZE (GET_MODE (op))
	       <= HOST_BITS_PER_WIDE_INT)
	      && ((nonzero_bits (op, GET_MODE (op))
		   & ((HOST_WIDE_INT) 1
		      << (GET_MODE_BITSIZE (GET_MODE (op)) - 1)))
		  == 0)))
	return op;

      /* If operand is known to be only -1 or 0, convert ABS to NEG.  */
      if (num_sign_bit_copies (op, mode) == GET_MODE_BITSIZE (mode))
	return gen_rtx_NEG (mode, op);

      break;

    case FFS:
      /* (ffs (*_extend <X>)) = (ffs <X>) */
      if (GET_CODE (op) == SIGN_EXTEND
	  || GET_CODE (op) == ZERO_EXTEND)
	return simplify_gen_unary (FFS, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));
      break;

    case POPCOUNT:
    case PARITY:
      /* (pop* (zero_extend <X>)) = (pop* <X>) */
      if (GET_CODE (op) == ZERO_EXTEND)
	return simplify_gen_unary (code, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));
      break;

    case FLOAT:
      /* (float (sign_extend <X>)) = (float <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND)
	return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
				   GET_MODE (XEXP (op, 0)));
      break;

    case SIGN_EXTEND:
      /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
	 becomes just the MINUS if its mode is MODE.  This allows
	 folding switch statements on machines using casesi (such as
	 the VAX).  */
      if (GET_CODE (op) == TRUNCATE
	  && GET_MODE (XEXP (op, 0)) == mode
	  && GET_CODE (XEXP (op, 0)) == MINUS
	  && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
	  && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
	return XEXP (op, 0);

      /* Check for a sign extension of a subreg of a promoted
	 variable, where the promotion is sign-extended, and the
	 target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
	  && SUBREG_PROMOTED_VAR_P (op)
	  && ! SUBREG_PROMOTED_UNSIGNED_P (op)
	  && GET_MODE (XEXP (op, 0)) == mode)
	return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      if (! POINTERS_EXTEND_UNSIGNED
	  && mode == Pmode && GET_MODE (op) == ptr_mode
	  && (CONSTANT_P (op)
	      || (GET_CODE (op) == SUBREG
		  && REG_P (SUBREG_REG (op))
		  && REG_POINTER (SUBREG_REG (op))
		  && GET_MODE (SUBREG_REG (op)) == Pmode)))
	return convert_memory_address (Pmode, op);
#endif
      break;

    case ZERO_EXTEND:
      /* Check for a zero extension of a subreg of a promoted
	 variable, where the promotion is zero-extended, and the
	 target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
	  && SUBREG_PROMOTED_VAR_P (op)
	  && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
	  && GET_MODE (XEXP (op, 0)) == mode)
	return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      if (POINTERS_EXTEND_UNSIGNED > 0
	  && mode == Pmode && GET_MODE (op) == ptr_mode
	  && (CONSTANT_P (op)
	      || (GET_CODE (op) == SUBREG
		  && REG_P (SUBREG_REG (op))
		  && REG_POINTER (SUBREG_REG (op))
		  && GET_MODE (SUBREG_REG (op)) == Pmode)))
	return convert_memory_address (Pmode, op);
#endif
      break;

    default:
      break;
    }

  return 0;
}

/* Try to compute the value of a unary operation CODE whose output mode is to
   be MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if the value cannot be computed.  */
rtx
simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
				rtx op, enum machine_mode op_mode)
{
  unsigned int width = GET_MODE_BITSIZE (mode);

  if (code == VEC_DUPLICATE)
    {
      gcc_assert (VECTOR_MODE_P (mode));
      if (GET_MODE (op) != VOIDmode)
      {
	if (!VECTOR_MODE_P (GET_MODE (op)))
	  gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
	else
	  gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
						(GET_MODE (op)));
      }
      if (GET_CODE (op) == CONST_INT || GET_CODE (op) == CONST_DOUBLE
	  || GET_CODE (op) == CONST_VECTOR)
	{
          int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
          unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	  rtvec v = rtvec_alloc (n_elts);
	  unsigned int i;

	  if (GET_CODE (op) != CONST_VECTOR)
	    for (i = 0; i < n_elts; i++)
	      RTVEC_ELT (v, i) = op;
	  else
	    {
	      enum machine_mode inmode = GET_MODE (op);
              int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
              unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);

	      gcc_assert (in_n_elts < n_elts);
	      gcc_assert ((n_elts % in_n_elts) == 0);
	      for (i = 0; i < n_elts; i++)
	        RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
	    }
	  return gen_rtx_CONST_VECTOR (mode, v);
	}
    }

  if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode opmode = GET_MODE (op);
      int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
      unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
	{
	  rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
					    CONST_VECTOR_ELT (op, i),
					    GET_MODE_INNER (opmode));
	  if (!x)
	    return 0;
	  RTVEC_ELT (v, i) = x;
	}
      return gen_rtx_CONST_VECTOR (mode, v);
    }

  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && GET_MODE (op) == VOIDmode
      && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (op) == CONST_INT)
	lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
	lv = CONST_DOUBLE_LOW (op),  hv = CONST_DOUBLE_HIGH (op);

      REAL_VALUE_FROM_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
	   && (GET_CODE (op) == CONST_DOUBLE
	       || GET_CODE (op) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (op) == CONST_INT)
	lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
	lv = CONST_DOUBLE_LOW (op),  hv = CONST_DOUBLE_HIGH (op);

      if (op_mode == VOIDmode)
	{
	  /* We don't know how to interpret negative-looking numbers in
	     this case, so don't try to fold those.  */
	  if (hv < 0)
	    return 0;
	}
      else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
	;
      else
	hv = 0, lv &= GET_MODE_MASK (op_mode);

      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }

  if (GET_CODE (op) == CONST_INT
      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT arg0 = INTVAL (op);
      HOST_WIDE_INT val;

      switch (code)
	{
	case NOT:
	  val = ~ arg0;
	  break;

	case NEG:
	  val = - arg0;
	  break;

	case ABS:
	  val = (arg0 >= 0 ? arg0 : - arg0);
	  break;

	case FFS:
	  /* Don't use ffs here.  Instead, get low order bit and then its
	     number.  If arg0 is zero, this will return 0, as desired.  */
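	  /* E.g., arg0 == 12 (binary 1100): arg0 & -arg0 == 4 and
	     exact_log2 (4) == 2, so val == 3, the 1-based index of the
	     lowest set bit.  */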
	  arg0 &= GET_MODE_MASK (mode);
	  val = exact_log2 (arg0 & (- arg0)) + 1;
	  break;

	case CLZ:
	  arg0 &= GET_MODE_MASK (mode);
	  if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
	    ;
	  else
	    val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
	  break;

	case CTZ:
	  arg0 &= GET_MODE_MASK (mode);
	  if (arg0 == 0)
	    {
	      /* Even if the value at zero is undefined, we have to come
		 up with some replacement.  Seems good enough.  */
	      if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
		val = GET_MODE_BITSIZE (mode);
	    }
	  else
	    val = exact_log2 (arg0 & -arg0);
	  break;

	case POPCOUNT:
	  arg0 &= GET_MODE_MASK (mode);
	  val = 0;
	  while (arg0)
	    val++, arg0 &= arg0 - 1;
	  break;

	case PARITY:
	  arg0 &= GET_MODE_MASK (mode);
	  val = 0;
	  while (arg0)
	    val++, arg0 &= arg0 - 1;
	  val &= 1;
	  break;

	case TRUNCATE:
	  val = arg0;
	  break;

	case ZERO_EXTEND:
	  /* When zero-extending a CONST_INT, we need to know its
             original mode.  */
	  gcc_assert (op_mode != VOIDmode);
	  if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
	    {
	      /* If we were really extending the mode,
		 we would have to distinguish between zero-extension
		 and sign-extension.  */
	      gcc_assert (width == GET_MODE_BITSIZE (op_mode));
	      val = arg0;
	    }
	  else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
	    val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
	  else
	    return 0;
	  break;

	case SIGN_EXTEND:
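	  /* Sign extension: mask ARG0 down to OP_MODE's width, then
	     subtract 2**width when the resulting sign bit is set.
	     E.g., a QImode 0xff sign-extends to -1.  */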
	  if (op_mode == VOIDmode)
	    op_mode = mode;
	  if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
	    {
	      /* If we were really extending the mode,
		 we would have to distinguish between zero-extension
		 and sign-extension.  */
	      gcc_assert (width == GET_MODE_BITSIZE (op_mode));
	      val = arg0;
	    }
	  else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
	    {
	      val
		= arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
	      if (val
		  & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
		val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
	    }
	  else
	    return 0;
	  break;

	case SQRT:
	case FLOAT_EXTEND:
	case FLOAT_TRUNCATE:
	case SS_TRUNCATE:
	case US_TRUNCATE:
	case SS_NEG:
	  return 0;

	default:
	  gcc_unreachable ();
	}

      return gen_int_mode (val, mode);
    }

  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
     for a DImode operation on a CONST_INT.  */
  else if (GET_MODE (op) == VOIDmode
	   && width <= HOST_BITS_PER_WIDE_INT * 2
	   && (GET_CODE (op) == CONST_DOUBLE
	       || GET_CODE (op) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, lv;
      HOST_WIDE_INT h1, hv;

      if (GET_CODE (op) == CONST_DOUBLE)
	l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
      else
	l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);

      switch (code)
	{
	case NOT:
	  lv = ~ l1;
	  hv = ~ h1;
	  break;

	case NEG:
	  neg_double (l1, h1, &lv, &hv);
	  break;

	case ABS:
	  if (h1 < 0)
	    neg_double (l1, h1, &lv, &hv);
	  else
	    lv = l1, hv = h1;
	  break;

	case FFS:
	  hv = 0;
	  if (l1 == 0)
	    {
	      if (h1 == 0)
		lv = 0;
	      else
		lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
	    }
	  else
	    lv = exact_log2 (l1 & -l1) + 1;
	  break;

	case CLZ:
	  hv = 0;
	  if (h1 != 0)
	    lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
	      - HOST_BITS_PER_WIDE_INT;
	  else if (l1 != 0)
	    lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
	  else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
	    lv = GET_MODE_BITSIZE (mode);
	  break;

	case CTZ:
	  hv = 0;
	  if (l1 != 0)
	    lv = exact_log2 (l1 & -l1);
	  else if (h1 != 0)
	    lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
	  else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
	    lv = GET_MODE_BITSIZE (mode);
	  break;

	case POPCOUNT:
	  hv = 0;
	  lv = 0;
	  while (l1)
	    lv++, l1 &= l1 - 1;
	  while (h1)
	    lv++, h1 &= h1 - 1;
	  break;

	case PARITY:
	  hv = 0;
	  lv = 0;
	  while (l1)
	    lv++, l1 &= l1 - 1;
	  while (h1)
	    lv++, h1 &= h1 - 1;
	  lv &= 1;
	  break;

	case TRUNCATE:
	  /* This is just a change-of-mode, so do nothing.  */
	  lv = l1, hv = h1;
	  break;

	case ZERO_EXTEND:
	  gcc_assert (op_mode != VOIDmode);

	  if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
	    return 0;

	  hv = 0;
	  lv = l1 & GET_MODE_MASK (op_mode);
	  break;

	case SIGN_EXTEND:
	  if (op_mode == VOIDmode
	      || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
	    return 0;
	  else
	    {
	      lv = l1 & GET_MODE_MASK (op_mode);
	      if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
		  && (lv & ((HOST_WIDE_INT) 1
			    << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
		lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);

	      hv = HWI_SIGN_EXTEND (lv);
	    }
	  break;

	case SQRT:
	  return 0;

	default:
	  return 0;
	}

      return immed_double_const (lv, hv, mode);
    }

  else if (GET_CODE (op) == CONST_DOUBLE
	   && SCALAR_FLOAT_MODE_P (mode))
    {
      REAL_VALUE_TYPE d, t;
      REAL_VALUE_FROM_CONST_DOUBLE (d, op);

      switch (code)
	{
	case SQRT:
	  if (HONOR_SNANS (mode) && real_isnan (&d))
	    return 0;
	  real_sqrt (&t, mode, &d);
	  d = t;
	  break;
	case ABS:
	  d = REAL_VALUE_ABS (d);
	  break;
	case NEG:
	  d = REAL_VALUE_NEGATE (d);
	  break;
	case FLOAT_TRUNCATE:
	  d = real_value_truncate (mode, d);
	  break;
	case FLOAT_EXTEND:
	  /* All this does is change the mode.  */
	  break;
	case FIX:
	  real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
	  break;
	case NOT:
	  {
	    long tmp[4];
	    int i;

	    real_to_target (tmp, &d, GET_MODE (op));
	    for (i = 0; i < 4; i++)
	      tmp[i] = ~tmp[i];
	    real_from_target (&d, tmp, mode);
	    break;
	  }
	default:
	  gcc_unreachable ();
	}
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }

  else if (GET_CODE (op) == CONST_DOUBLE
	   && SCALAR_FLOAT_MODE_P (GET_MODE (op))
	   && GET_MODE_CLASS (mode) == MODE_INT
	   && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
    {
      /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
	 operators are intentionally left unspecified (to ease implementation
	 by target backends), for consistency, this routine implements the
	 same semantics for constant folding as used by the middle-end.  */

      /* This was formerly used only for non-IEEE float.
	 eggert@twinsun.com says it is safe for IEEE also.  */
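      /* Out-of-range values saturate at the corresponding bound; e.g.,
	 (fix:SI X) with X == 1.0e10 folds to 2147483647, and NaN folds
	 to zero.  */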
      HOST_WIDE_INT xh, xl, th, tl;
      REAL_VALUE_TYPE x, t;
      REAL_VALUE_FROM_CONST_DOUBLE (x, op);
      switch (code)
	{
	case FIX:
	  if (REAL_VALUE_ISNAN (x))
	    return const0_rtx;

	  /* Test against the signed upper bound.  */
	  if (width > HOST_BITS_PER_WIDE_INT)
	    {
	      th = ((unsigned HOST_WIDE_INT) 1
		    << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
	      tl = -1;
	    }
	  else
	    {
	      th = 0;
	      tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
	    }
	  real_from_integer (&t, VOIDmode, tl, th, 0);
	  if (REAL_VALUES_LESS (t, x))
	    {
	      xh = th;
	      xl = tl;
	      break;
	    }

	  /* Test against the signed lower bound.  */
	  if (width > HOST_BITS_PER_WIDE_INT)
	    {
	      th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
	      tl = 0;
	    }
	  else
	    {
	      th = -1;
	      tl = (HOST_WIDE_INT) -1 << (width - 1);
	    }
	  real_from_integer (&t, VOIDmode, tl, th, 0);
	  if (REAL_VALUES_LESS (x, t))
	    {
	      xh = th;
	      xl = tl;
	      break;
	    }
	  REAL_VALUE_TO_INT (&xl, &xh, x);
	  break;

	case UNSIGNED_FIX:
	  if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
	    return const0_rtx;

	  /* Test against the unsigned upper bound.  */
	  if (width == 2*HOST_BITS_PER_WIDE_INT)
	    {
	      th = -1;
	      tl = -1;
	    }
	  else if (width >= HOST_BITS_PER_WIDE_INT)
	    {
	      th = ((unsigned HOST_WIDE_INT) 1
		    << (width - HOST_BITS_PER_WIDE_INT)) - 1;
	      tl = -1;
	    }
	  else
	    {
	      th = 0;
	      tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
	    }
	  real_from_integer (&t, VOIDmode, tl, th, 1);
	  if (REAL_VALUES_LESS (t, x))
	    {
	      xh = th;
	      xl = tl;
	      break;
	    }

	  REAL_VALUE_TO_INT (&xl, &xh, x);
	  break;

	default:
	  gcc_unreachable ();
	}
      return immed_double_const (xl, xh, mode);
    }

  return NULL_RTX;
}

/* Subroutine of simplify_binary_operation to simplify a commutative,
   associative binary operation CODE with result mode MODE, operating
   on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
   SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
   canonicalization is possible.  */
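/* For example, (and (and X (const_int 3)) (const_int 1)) is
   reassociated so that the two constants fold, giving
   (and X (const_int 1)).  */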

static rtx
simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
				rtx op0, rtx op1)
{
  rtx tem;

  /* Linearize the operator to the left.  */
  if (GET_CODE (op1) == code)
    {
      /* "(a op b) op (c op d)" becomes "((a op b) op c) op d".  */
      if (GET_CODE (op0) == code)
	{
	  tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
	  return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
	}

      /* "a op (b op c)" becomes "(b op c) op a".  */
      if (! swap_commutative_operands_p (op1, op0))
	return simplify_gen_binary (code, mode, op1, op0);

      tem = op0;
      op0 = op1;
      op1 = tem;
    }

  if (GET_CODE (op0) == code)
    {
      /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
      if (swap_commutative_operands_p (XEXP (op0, 1), op1))
	{
	  tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
	  return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
	}

      /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
      tem = swap_commutative_operands_p (XEXP (op0, 1), op1)
	    ? simplify_binary_operation (code, mode, op1, XEXP (op0, 1))
	    : simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);

      /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
      tem = swap_commutative_operands_p (XEXP (op0, 0), op1)
	    ? simplify_binary_operation (code, mode, op1, XEXP (op0, 0))
	    : simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  return 0;
}


/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */
rtx
simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
			   rtx op0, rtx op1)
{
  rtx trueop0, trueop1;
  rtx tem;

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    {
      tem = op0, op0 = op1, op1 = tem;
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
  if (tem)
    return tem;
  return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
}

/* Subroutine of simplify_binary_operation.  Simplify a binary operation
   CODE with result mode MODE, operating on OP0 and OP1.  If OP0 and/or
   OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
   actual constants.  */

static rtx
simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
			     rtx op0, rtx op1, rtx trueop0, rtx trueop1)
{
  rtx tem, reversed, opleft, opright;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_BITSIZE (mode);

  /* Even if we can't compute a constant result,
     there are some cases worth simplifying.  */

  switch (code)
    {
    case PLUS:
      /* Maybe simplify x + 0 to x.  The two expressions are equivalent
	 when x is NaN, infinite, or finite and nonzero.  They aren't
	 when x is -0 and the rounding mode is not towards -infinity,
	 since (-0) + 0 is then 0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
	return op0;

      /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
	 transformations are safe even for IEEE.  */
      if (GET_CODE (op0) == NEG)
	return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
      else if (GET_CODE (op1) == NEG)
	return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

      /* (~a) + 1 -> -a */
      if (INTEGRAL_MODE_P (mode)
	  && GET_CODE (op0) == NOT
	  && trueop1 == const1_rtx)
	return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);

      /* Handle both-operands-constant cases.  We can only add
	 CONST_INTs to constants since the sum of relocatable symbols
	 can't be handled by most assemblers.  Don't add CONST_INT
	 to CONST_INT since overflow won't be computed properly if wider
	 than HOST_BITS_PER_WIDE_INT.  */

      if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
	  && GET_CODE (op1) == CONST_INT)
	return plus_constant (op0, INTVAL (op1));
      else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
	       && GET_CODE (op0) == CONST_INT)
	return plus_constant (op1, INTVAL (op0));

      /* See if this is something like X * C - X or vice versa or
	 if the multiplication is written as a shift.  If so, we can
	 distribute and make a new multiply, shift, or maybe just
	 have X (if C is 2 in the example above).  But don't make
	 something more expensive than we had before.  */
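      /* For example, (plus (mult X (const_int 2)) X) can combine to
	 (mult X (const_int 3)) when the multiply is no more expensive
	 than the original expression.  */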
1534
1535      if (SCALAR_INT_MODE_P (mode))
1536	{
1537	  HOST_WIDE_INT coeff0h = 0, coeff1h = 0;
1538	  unsigned HOST_WIDE_INT coeff0l = 1, coeff1l = 1;
1539	  rtx lhs = op0, rhs = op1;
1540
1541	  if (GET_CODE (lhs) == NEG)
1542	    {
1543	      coeff0l = -1;
1544	      coeff0h = -1;
1545	      lhs = XEXP (lhs, 0);
1546	    }
1547	  else if (GET_CODE (lhs) == MULT
1548		   && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1549	    {
1550	      coeff0l = INTVAL (XEXP (lhs, 1));
1551	      coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
1552	      lhs = XEXP (lhs, 0);
1553	    }
1554	  else if (GET_CODE (lhs) == ASHIFT
1555		   && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1556		   && INTVAL (XEXP (lhs, 1)) >= 0
1557		   && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1558	    {
1559	      coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1560	      coeff0h = 0;
1561	      lhs = XEXP (lhs, 0);
1562	    }
1563
1564	  if (GET_CODE (rhs) == NEG)
1565	    {
1566	      coeff1l = -1;
1567	      coeff1h = -1;
1568	      rhs = XEXP (rhs, 0);
1569	    }
1570	  else if (GET_CODE (rhs) == MULT
1571		   && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1572	    {
1573	      coeff1l = INTVAL (XEXP (rhs, 1));
1574	      coeff1h = INTVAL (XEXP (rhs, 1)) < 0 ? -1 : 0;
1575	      rhs = XEXP (rhs, 0);
1576	    }
1577	  else if (GET_CODE (rhs) == ASHIFT
1578		   && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1579		   && INTVAL (XEXP (rhs, 1)) >= 0
1580		   && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1581	    {
1582	      coeff1l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1583	      coeff1h = 0;
1584	      rhs = XEXP (rhs, 0);
1585	    }
1586
1587	  if (rtx_equal_p (lhs, rhs))
1588	    {
1589	      rtx orig = gen_rtx_PLUS (mode, op0, op1);
1590	      rtx coeff;
1591	      unsigned HOST_WIDE_INT l;
1592	      HOST_WIDE_INT h;
1593
1594	      add_double (coeff0l, coeff0h, coeff1l, coeff1h, &l, &h);
1595	      coeff = immed_double_const (l, h, mode);
1596
1597	      tem = simplify_gen_binary (MULT, mode, lhs, coeff);
1598	      return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
1599		? tem : 0;
1600	    }
1601	}
1602
1603      /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit.  */
1604      if ((GET_CODE (op1) == CONST_INT
1605	   || GET_CODE (op1) == CONST_DOUBLE)
1606	  && GET_CODE (op0) == XOR
1607	  && (GET_CODE (XEXP (op0, 1)) == CONST_INT
1608	      || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
1609	  && mode_signbit_p (mode, op1))
1610	return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
1611				    simplify_gen_binary (XOR, mode, op1,
1612							 XEXP (op0, 1)));
1613
1614      /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)).  */
1615      if (GET_CODE (op0) == MULT
1616	  && GET_CODE (XEXP (op0, 0)) == NEG)
1617	{
1618	  rtx in1, in2;
1619
1620	  in1 = XEXP (XEXP (op0, 0), 0);
1621	  in2 = XEXP (op0, 1);
1622	  return simplify_gen_binary (MINUS, mode, op1,
1623				      simplify_gen_binary (MULT, mode,
1624							   in1, in2));
1625	}
1626
1627      /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
1628	 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
1629	 is 1.  */
1630      if (COMPARISON_P (op0)
1631	  && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
1632	      || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
1633	  && (reversed = reversed_comparison (op0, mode)))
1634	return
1635	  simplify_gen_unary (NEG, mode, reversed, mode);
1636
1637      /* If one of the operands is a PLUS or a MINUS, see if we can
1638	 simplify this by the associative law.
1639	 Don't use the associative law for floating point.
1640	 The inaccuracy makes it nonassociative,
1641	 and subtle programs can break if operations are associated.  */
1642
1643      if (INTEGRAL_MODE_P (mode)
1644	  && (plus_minus_operand_p (op0)
1645	      || plus_minus_operand_p (op1))
1646	  && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
1647	return tem;
1648
1649      /* Reassociate floating point addition only when the user
1650	 specifies unsafe math optimizations.  */
1651      if (FLOAT_MODE_P (mode)
1652	  && flag_unsafe_math_optimizations)
1653	{
1654	  tem = simplify_associative_operation (code, mode, op0, op1);
1655	  if (tem)
1656	    return tem;
1657	}
1658      break;
1659
1660    case COMPARE:
1661#ifdef HAVE_cc0
1662      /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
1663	 using cc0, in which case we want to leave it as a COMPARE
1664	 so we can distinguish it from a register-register-copy.
1665
1666	 In IEEE floating point, x-0 is not the same as x.  */
1667
1668      if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1669	   || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
1670	  && trueop1 == CONST0_RTX (mode))
1671	return op0;
1672#endif
1673
1674      /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
1675      if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
1676	   || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
1677	  && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
1678	{
1679	  rtx xop00 = XEXP (op0, 0);
1680	  rtx xop10 = XEXP (op1, 0);
1681
1682#ifdef HAVE_cc0
1683	  if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
1684#else
1685	    if (REG_P (xop00) && REG_P (xop10)
1686		&& GET_MODE (xop00) == GET_MODE (xop10)
1687		&& REGNO (xop00) == REGNO (xop10)
1688		&& GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
1689		&& GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
1690#endif
1691	      return xop00;
1692	}
1693      break;
1694
1695    case MINUS:
1696      /* We can't assume x-x is 0 even with non-IEEE floating point,
1697	 but since it is zero except in very strange circumstances, we
1698	 will treat it as zero with -funsafe-math-optimizations.  */
1699      if (rtx_equal_p (trueop0, trueop1)
1700	  && ! side_effects_p (op0)
1701	  && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
1702	return CONST0_RTX (mode);
1703
1704      /* Change subtraction from zero into negation.  (0 - x) is the
1705	 same as -x when x is NaN, infinite, or finite and nonzero.
1706	 But if the mode has signed zeros, and does not round towards
1707	 -infinity, then 0 - 0 is 0, not -0.  */
1708      if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
1709	return simplify_gen_unary (NEG, mode, op1, mode);
1710
1711      /* (-1 - a) is ~a.  */
1712      if (trueop0 == constm1_rtx)
1713	return simplify_gen_unary (NOT, mode, op1, mode);
1714
1715      /* Subtracting 0 has no effect unless the mode has signed zeros
1716	 and supports rounding towards -infinity.  In such a case,
1717	 0 - 0 is -0.  */
1718      if (!(HONOR_SIGNED_ZEROS (mode)
1719	    && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1720	  && trueop1 == CONST0_RTX (mode))
1721	return op0;
1722
1723      /* See if this is something like X * C - X or vice versa or
1724	 if the multiplication is written as a shift.  If so, we can
1725	 distribute and make a new multiply, shift, or maybe just
1726	 have X (if C is 2 in the example above).  But don't make
1727	 something more expensive than we had before.  */
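      /* For example, (minus (mult X (const_int 3)) X) becomes
	 (mult X (const_int 2)), and (minus (ashift X (const_int 2)) X)
	 becomes (mult X (const_int 3)), assuming the new form is no
	 more costly by rtx_cost.  */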
1728
1729      if (SCALAR_INT_MODE_P (mode))
1730	{
1731	  HOST_WIDE_INT coeff0h = 0, negcoeff1h = -1;
1732	  unsigned HOST_WIDE_INT coeff0l = 1, negcoeff1l = -1;
1733	  rtx lhs = op0, rhs = op1;
1734
1735	  if (GET_CODE (lhs) == NEG)
1736	    {
1737	      coeff0l = -1;
1738	      coeff0h = -1;
1739	      lhs = XEXP (lhs, 0);
1740	    }
1741	  else if (GET_CODE (lhs) == MULT
1742		   && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1743	    {
1744	      coeff0l = INTVAL (XEXP (lhs, 1));
1745	      coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
1746	      lhs = XEXP (lhs, 0);
1747	    }
1748	  else if (GET_CODE (lhs) == ASHIFT
1749		   && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1750		   && INTVAL (XEXP (lhs, 1)) >= 0
1751		   && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1752	    {
1753	      coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1754	      coeff0h = 0;
1755	      lhs = XEXP (lhs, 0);
1756	    }
1757
1758	  if (GET_CODE (rhs) == NEG)
1759	    {
1760	      negcoeff1l = 1;
1761	      negcoeff1h = 0;
1762	      rhs = XEXP (rhs, 0);
1763	    }
1764	  else if (GET_CODE (rhs) == MULT
1765		   && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1766	    {
1767	      negcoeff1l = -INTVAL (XEXP (rhs, 1));
1768	      negcoeff1h = INTVAL (XEXP (rhs, 1)) <= 0 ? 0 : -1;
1769	      rhs = XEXP (rhs, 0);
1770	    }
1771	  else if (GET_CODE (rhs) == ASHIFT
1772		   && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1773		   && INTVAL (XEXP (rhs, 1)) >= 0
1774		   && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1775	    {
1776	      negcoeff1l = -(((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1)));
1777	      negcoeff1h = -1;
1778	      rhs = XEXP (rhs, 0);
1779	    }
1780
1781	  if (rtx_equal_p (lhs, rhs))
1782	    {
1783	      rtx orig = gen_rtx_MINUS (mode, op0, op1);
1784	      rtx coeff;
1785	      unsigned HOST_WIDE_INT l;
1786	      HOST_WIDE_INT h;
1787
1788	      add_double (coeff0l, coeff0h, negcoeff1l, negcoeff1h, &l, &h);
1789	      coeff = immed_double_const (l, h, mode);
1790
1791	      tem = simplify_gen_binary (MULT, mode, lhs, coeff);
1792	      return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
1793		? tem : 0;
1794	    }
1795	}
1796
1797      /* (a - (-b)) -> (a + b).  True even for IEEE.  */
1798      if (GET_CODE (op1) == NEG)
1799	return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
1800
1801      /* (-x - c) may be simplified as (-c - x).  */
1802      if (GET_CODE (op0) == NEG
1803	  && (GET_CODE (op1) == CONST_INT
1804	      || GET_CODE (op1) == CONST_DOUBLE))
1805	{
1806	  tem = simplify_unary_operation (NEG, mode, op1, mode);
1807	  if (tem)
1808	    return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
1809	}
1810
1811      /* Don't let a relocatable value get a negative coeff.  */
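      /* For example, (minus X (const_int 4)) is rewritten as
	 (plus X (const_int -4)) whenever X has a machine mode.  */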
1812      if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
1813	return simplify_gen_binary (PLUS, mode,
1814				    op0,
1815				    neg_const_int (mode, op1));
1816
1817      /* (x - (x & y)) -> (x & ~y) */
1818      if (GET_CODE (op1) == AND)
1819	{
1820	  if (rtx_equal_p (op0, XEXP (op1, 0)))
1821	    {
1822	      tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
1823					GET_MODE (XEXP (op1, 1)));
1824	      return simplify_gen_binary (AND, mode, op0, tem);
1825	    }
1826	  if (rtx_equal_p (op0, XEXP (op1, 1)))
1827	    {
1828	      tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
1829					GET_MODE (XEXP (op1, 0)));
1830	      return simplify_gen_binary (AND, mode, op0, tem);
1831	    }
1832	}
1833
1834      /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
1835	 by reversing the comparison code if valid.  */
1836      if (STORE_FLAG_VALUE == 1
1837	  && trueop0 == const1_rtx
1838	  && COMPARISON_P (op1)
1839	  && (reversed = reversed_comparison (op1, mode)))
1840	return reversed;
1841
1842      /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A).  */
1843      if (GET_CODE (op1) == MULT
1844	  && GET_CODE (XEXP (op1, 0)) == NEG)
1845	{
1846	  rtx in1, in2;
1847
1848	  in1 = XEXP (XEXP (op1, 0), 0);
1849	  in2 = XEXP (op1, 1);
1850	  return simplify_gen_binary (PLUS, mode,
1851				      simplify_gen_binary (MULT, mode,
1852							   in1, in2),
1853				      op0);
1854	}
1855
1856      /* Canonicalize (minus (neg A) (mult B C)) to
1857	 (minus (mult (neg B) C) A).  */
1858      if (GET_CODE (op1) == MULT
1859	  && GET_CODE (op0) == NEG)
1860	{
1861	  rtx in1, in2;
1862
1863	  in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
1864	  in2 = XEXP (op1, 1);
1865	  return simplify_gen_binary (MINUS, mode,
1866				      simplify_gen_binary (MULT, mode,
1867							   in1, in2),
1868				      XEXP (op0, 0));
1869	}
1870
1871      /* If one of the operands is a PLUS or a MINUS, see if we can
1872	 simplify this by the associative law.  This will, for example,
1873         canonicalize (minus A (plus B C)) to (minus (minus A B) C).
1874	 Don't use the associative law for floating point.
1875	 The inaccuracy makes it nonassociative,
1876	 and subtle programs can break if operations are associated.  */
1877
1878      if (INTEGRAL_MODE_P (mode)
1879	  && (plus_minus_operand_p (op0)
1880	      || plus_minus_operand_p (op1))
1881	  && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
1882	return tem;
1883      break;
1884
1885    case MULT:
1886      if (trueop1 == constm1_rtx)
1887	return simplify_gen_unary (NEG, mode, op0, mode);
1888
1889      /* Maybe simplify x * 0 to 0.  The reduction is not valid if
1890	 x is NaN, since x * 0 is then also NaN.  Nor is it valid
1891	 when the mode has signed zeros, since multiplying a negative
1892	 number by 0 will give -0, not 0.  */
1893      if (!HONOR_NANS (mode)
1894	  && !HONOR_SIGNED_ZEROS (mode)
1895	  && trueop1 == CONST0_RTX (mode)
1896	  && ! side_effects_p (op0))
1897	return op1;
1898
1899      /* In IEEE floating point, x*1 is not equivalent to x for
1900	 signalling NaNs.  */
1901      if (!HONOR_SNANS (mode)
1902	  && trueop1 == CONST1_RTX (mode))
1903	return op0;
1904
1905      /* Convert multiply by a constant power of two into a shift.  */
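      /* For example, (mult X (const_int 8)) becomes
	 (ashift X (const_int 3)).  */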
1907      if (GET_CODE (trueop1) == CONST_INT
1908	  && (val = exact_log2 (INTVAL (trueop1))) >= 0
1909	  /* If the mode is larger than the host word size, and the
1910	     uppermost bit is set, then this isn't a power of two due
1911	     to implicit sign extension.  */
1912	  && (width <= HOST_BITS_PER_WIDE_INT
1913	      || val != HOST_BITS_PER_WIDE_INT - 1))
1914	return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
1915
1916      /* Likewise for multipliers wider than a word.  */
1917      if (GET_CODE (trueop1) == CONST_DOUBLE
1918	  && (GET_MODE (trueop1) == VOIDmode
1919	      || GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_INT)
1920	  && GET_MODE (op0) == mode
1921	  && CONST_DOUBLE_LOW (trueop1) == 0
1922	  && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0)
1923	return simplify_gen_binary (ASHIFT, mode, op0,
1924				    GEN_INT (val + HOST_BITS_PER_WIDE_INT));
1925
1926      /* x*2 is x+x and x*(-1) is -x */
1927      if (GET_CODE (trueop1) == CONST_DOUBLE
1928	  && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
1929	  && GET_MODE (op0) == mode)
1930	{
1931	  REAL_VALUE_TYPE d;
1932	  REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1933
1934	  if (REAL_VALUES_EQUAL (d, dconst2))
1935	    return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
1936
1937	  if (!HONOR_SNANS (mode)
1938	      && REAL_VALUES_EQUAL (d, dconstm1))
1939	    return simplify_gen_unary (NEG, mode, op0, mode);
1940	}
1941
1942      /* Optimize -x * -x as x * x.  */
1943      if (FLOAT_MODE_P (mode)
1944	  && GET_CODE (op0) == NEG
1945	  && GET_CODE (op1) == NEG
1946	  && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
1947	  && !side_effects_p (XEXP (op0, 0)))
1948	return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
1949
1950      /* Likewise, optimize abs(x) * abs(x) as x * x.  */
1951      if (SCALAR_FLOAT_MODE_P (mode)
1952	  && GET_CODE (op0) == ABS
1953	  && GET_CODE (op1) == ABS
1954	  && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
1955	  && !side_effects_p (XEXP (op0, 0)))
1956	return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
1957
1958      /* Reassociate multiplication, but for floating point MULTs
1959	 only when the user specifies unsafe math optimizations.  */
1960      if (! FLOAT_MODE_P (mode)
1961	  || flag_unsafe_math_optimizations)
1962	{
1963	  tem = simplify_associative_operation (code, mode, op0, op1);
1964	  if (tem)
1965	    return tem;
1966	}
1967      break;
1968
1969    case IOR:
1970      if (trueop1 == const0_rtx)
1971	return op0;
1972      if (GET_CODE (trueop1) == CONST_INT
1973	  && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1974	      == GET_MODE_MASK (mode)))
1975	return op1;
1976      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1977	return op0;
1978      /* A | (~A) -> -1 */
1979      if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1980	   || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1981	  && ! side_effects_p (op0)
1982	  && SCALAR_INT_MODE_P (mode))
1983	return constm1_rtx;
1984
1985      /* (ior A C) is C if all bits of A that might be nonzero are on in C.  */
1986      if (GET_CODE (op1) == CONST_INT
1987	  && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
1988	  && (nonzero_bits (op0, mode) & ~INTVAL (op1)) == 0)
1989	return op1;
1990
1991      /* Convert (A & B) | A to A.  */
1992      if (GET_CODE (op0) == AND
1993	  && (rtx_equal_p (XEXP (op0, 0), op1)
1994	      || rtx_equal_p (XEXP (op0, 1), op1))
1995	  && ! side_effects_p (XEXP (op0, 0))
1996	  && ! side_effects_p (XEXP (op0, 1)))
1997	return op1;
1998
1999      /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2000         mode size to (rotate A CX).  */
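      /* For example, in SImode
         (ior (ashift A (const_int 24)) (lshiftrt A (const_int 8)))
         becomes (rotate A (const_int 24)).  */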
2001
2002      if (GET_CODE (op1) == ASHIFT
2003          || GET_CODE (op1) == SUBREG)
2004        {
2005	  opleft = op1;
2006	  opright = op0;
2007	}
2008      else
2009        {
2010	  opright = op1;
2011	  opleft = op0;
2012	}
2013
2014      if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2015          && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2016          && GET_CODE (XEXP (opleft, 1)) == CONST_INT
2017          && GET_CODE (XEXP (opright, 1)) == CONST_INT
2018          && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2019              == GET_MODE_BITSIZE (mode)))
2020        return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
2021
2022      /* Same, but for ashift that has been "simplified" to a wider mode
2023        by simplify_shift_const.  */
2024
2025      if (GET_CODE (opleft) == SUBREG
2026          && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2027          && GET_CODE (opright) == LSHIFTRT
2028          && GET_CODE (XEXP (opright, 0)) == SUBREG
2029          && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2030          && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2031          && (GET_MODE_SIZE (GET_MODE (opleft))
2032              < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2033          && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2034                          SUBREG_REG (XEXP (opright, 0)))
2035          && GET_CODE (XEXP (SUBREG_REG (opleft), 1)) == CONST_INT
2036          && GET_CODE (XEXP (opright, 1)) == CONST_INT
2037          && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2038              == GET_MODE_BITSIZE (mode)))
2039        return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2040                               XEXP (SUBREG_REG (opleft), 1));
2041
2042      /* If we have (ior (and X C1) C2), simplify this by making
2043	 C1 as small as possible if C1 actually changes.  */
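      /* For example, (ior (and X (const_int 0xff)) (const_int 0xf0))
	 becomes (ior (and X (const_int 0x0f)) (const_int 0xf0)).  */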
2044      if (GET_CODE (op1) == CONST_INT
2045	  && (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2046	      || INTVAL (op1) > 0)
2047	  && GET_CODE (op0) == AND
2048	  && GET_CODE (XEXP (op0, 1)) == CONST_INT
2050	  && (INTVAL (XEXP (op0, 1)) & INTVAL (op1)) != 0)
2051	return simplify_gen_binary (IOR, mode,
2052				    simplify_gen_binary
2053					  (AND, mode, XEXP (op0, 0),
2054					   GEN_INT (INTVAL (XEXP (op0, 1))
2055						    & ~INTVAL (op1))),
2056				    op1);
2057
2058      /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2059         a (sign_extend (plus ...)).  Then check if OP1 is a CONST_INT and
2060	 the PLUS does not affect any of the bits in OP1: then we can do
2061	 the IOR as a PLUS and we can associate.  This is valid if OP1
2062         can be safely shifted left C bits.  */
2063      if (GET_CODE (trueop1) == CONST_INT && GET_CODE (op0) == ASHIFTRT
2064          && GET_CODE (XEXP (op0, 0)) == PLUS
2065          && GET_CODE (XEXP (XEXP (op0, 0), 1)) == CONST_INT
2066          && GET_CODE (XEXP (op0, 1)) == CONST_INT
2067          && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2068        {
2069          int count = INTVAL (XEXP (op0, 1));
2070          HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2071
2072          if (mask >> count == INTVAL (trueop1)
2073              && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2074	    return simplify_gen_binary (ASHIFTRT, mode,
2075					plus_constant (XEXP (op0, 0), mask),
2076					XEXP (op0, 1));
2077        }
2078
2079      tem = simplify_associative_operation (code, mode, op0, op1);
2080      if (tem)
2081	return tem;
2082      break;
2083
2084    case XOR:
2085      if (trueop1 == const0_rtx)
2086	return op0;
2087      if (GET_CODE (trueop1) == CONST_INT
2088	  && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
2089	      == GET_MODE_MASK (mode)))
2090	return simplify_gen_unary (NOT, mode, op0, mode);
2091      if (rtx_equal_p (trueop0, trueop1)
2092	  && ! side_effects_p (op0)
2093	  && GET_MODE_CLASS (mode) != MODE_CC)
2094	 return CONST0_RTX (mode);
2095
2096      /* Canonicalize XOR of the most significant bit to PLUS.  */
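      /* For example, in SImode (xor X (const_int 0x80000000)) becomes
	 (plus X (const_int 0x80000000)): adding the sign bit is the
	 same as flipping it, since any carry out is discarded.  */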
2097      if ((GET_CODE (op1) == CONST_INT
2098	   || GET_CODE (op1) == CONST_DOUBLE)
2099	  && mode_signbit_p (mode, op1))
2100	return simplify_gen_binary (PLUS, mode, op0, op1);
2101      /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit.  */
2102      if ((GET_CODE (op1) == CONST_INT
2103	   || GET_CODE (op1) == CONST_DOUBLE)
2104	  && GET_CODE (op0) == PLUS
2105	  && (GET_CODE (XEXP (op0, 1)) == CONST_INT
2106	      || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
2107	  && mode_signbit_p (mode, XEXP (op0, 1)))
2108	return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2109				    simplify_gen_binary (XOR, mode, op1,
2110							 XEXP (op0, 1)));
2111
2112      /* If we are XORing two things that have no bits in common,
2113	 convert them into an IOR.  This helps to detect rotation encoded
2114	 using those methods and possibly other simplifications.  */
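      /* For example, (xor (ashift A (const_int 8)) (const_int 255))
	 becomes (ior (ashift A (const_int 8)) (const_int 255)), since
	 the shift leaves the low eight bits zero.  */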
2115
2116      if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2117	  && (nonzero_bits (op0, mode)
2118	      & nonzero_bits (op1, mode)) == 0)
2119	return (simplify_gen_binary (IOR, mode, op0, op1));
2120
2121      /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2122	 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2123	 (NOT y).  */
2124      {
2125	int num_negated = 0;
2126
2127	if (GET_CODE (op0) == NOT)
2128	  num_negated++, op0 = XEXP (op0, 0);
2129	if (GET_CODE (op1) == NOT)
2130	  num_negated++, op1 = XEXP (op1, 0);
2131
2132	if (num_negated == 2)
2133	  return simplify_gen_binary (XOR, mode, op0, op1);
2134	else if (num_negated == 1)
2135	  return simplify_gen_unary (NOT, mode,
2136				     simplify_gen_binary (XOR, mode, op0, op1),
2137				     mode);
2138      }
2139
2140      /* Convert (xor (and A B) B) to (and (not A) B).  The latter may
2141	 correspond to a machine insn or result in further simplifications
2142	 if B is a constant.  */
2143
2144      if (GET_CODE (op0) == AND
2145	  && rtx_equal_p (XEXP (op0, 1), op1)
2146	  && ! side_effects_p (op1))
2147	return simplify_gen_binary (AND, mode,
2148				    simplify_gen_unary (NOT, mode,
2149							XEXP (op0, 0), mode),
2150				    op1);
2151
2152      else if (GET_CODE (op0) == AND
2153	       && rtx_equal_p (XEXP (op0, 0), op1)
2154	       && ! side_effects_p (op1))
2155	return simplify_gen_binary (AND, mode,
2156				    simplify_gen_unary (NOT, mode,
2157							XEXP (op0, 1), mode),
2158				    op1);
2159
2160      /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2161	 comparison if STORE_FLAG_VALUE is 1.  */
2162      if (STORE_FLAG_VALUE == 1
2163	  && trueop1 == const1_rtx
2164	  && COMPARISON_P (op0)
2165	  && (reversed = reversed_comparison (op0, mode)))
2166	return reversed;
2167
2168      /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2169	 is (lt foo (const_int 0)), so we can perform the above
2170	 simplification if STORE_FLAG_VALUE is 1.  */
2171
2172      if (STORE_FLAG_VALUE == 1
2173	  && trueop1 == const1_rtx
2174	  && GET_CODE (op0) == LSHIFTRT
2175	  && GET_CODE (XEXP (op0, 1)) == CONST_INT
2176	  && INTVAL (XEXP (op0, 1)) == GET_MODE_BITSIZE (mode) - 1)
2177	return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
2178
2179      /* (xor (comparison foo bar) (const_int sign-bit))
2180	 when STORE_FLAG_VALUE is the sign bit.  */
2181      if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2182	  && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
2183	      == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1))
2184	  && trueop1 == const_true_rtx
2185	  && COMPARISON_P (op0)
2186	  && (reversed = reversed_comparison (op0, mode)))
2187	return reversed;
2188
2190
2191      tem = simplify_associative_operation (code, mode, op0, op1);
2192      if (tem)
2193	return tem;
2194      break;
2195
2196    case AND:
2197      if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2198	return trueop1;
2199      /* If we are turning off bits already known off in OP0, we need
2200	 not do an AND.  */
2201      if (GET_CODE (trueop1) == CONST_INT
2202	  && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2203	  && (nonzero_bits (trueop0, mode) & ~INTVAL (trueop1)) == 0)
2204	return op0;
2205      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2206	  && GET_MODE_CLASS (mode) != MODE_CC)
2207	return op0;
2208      /* A & (~A) -> 0 */
2209      if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2210	   || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2211	  && ! side_effects_p (op0)
2212	  && GET_MODE_CLASS (mode) != MODE_CC)
2213	return CONST0_RTX (mode);
2214
2215      /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2216	 there are no nonzero bits of C outside of X's mode.  */
2217      if ((GET_CODE (op0) == SIGN_EXTEND
2218	   || GET_CODE (op0) == ZERO_EXTEND)
2219	  && GET_CODE (trueop1) == CONST_INT
2220	  && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2221	  && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2222	      & INTVAL (trueop1)) == 0)
2223	{
2224	  enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2225	  tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2226				     gen_int_mode (INTVAL (trueop1),
2227						   imode));
2228	  return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
2229	}
2230
2231      /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2232	 insn (and may simplify more).  */
2233      if (GET_CODE (op0) == XOR
2234	  && rtx_equal_p (XEXP (op0, 0), op1)
2235	  && ! side_effects_p (op1))
2236	return simplify_gen_binary (AND, mode,
2237				    simplify_gen_unary (NOT, mode,
2238							XEXP (op0, 1), mode),
2239				    op1);
2240
2241      if (GET_CODE (op0) == XOR
2242	  && rtx_equal_p (XEXP (op0, 1), op1)
2243	  && ! side_effects_p (op1))
2244	return simplify_gen_binary (AND, mode,
2245				    simplify_gen_unary (NOT, mode,
2246							XEXP (op0, 0), mode),
2247				    op1);
2248
2249      /* Similarly for (~(A ^ B)) & A.  */
2250      if (GET_CODE (op0) == NOT
2251	  && GET_CODE (XEXP (op0, 0)) == XOR
2252	  && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
2253	  && ! side_effects_p (op1))
2254	return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
2255
2256      if (GET_CODE (op0) == NOT
2257	  && GET_CODE (XEXP (op0, 0)) == XOR
2258	  && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
2259	  && ! side_effects_p (op1))
2260	return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
2261
2262      /* Convert (A | B) & A to A.  */
2263      if (GET_CODE (op0) == IOR
2264	  && (rtx_equal_p (XEXP (op0, 0), op1)
2265	      || rtx_equal_p (XEXP (op0, 1), op1))
2266	  && ! side_effects_p (XEXP (op0, 0))
2267	  && ! side_effects_p (XEXP (op0, 1)))
2268	return op1;
2269
2270      /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2271	 ((A & N) + B) & M -> (A + B) & M
2272	 Similarly if (N & M) == 0,
2273	 ((A | N) + B) & M -> (A + B) & M
2274	 and for - instead of + and/or ^ instead of |.  */
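      /* For example, with M == 15 and N == 255,
	 (and (plus (and A (const_int 255)) B) (const_int 15))
	 becomes (and (plus A B) (const_int 15)).  */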
2275      if (GET_CODE (trueop1) == CONST_INT
2276	  && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2277	  && ~INTVAL (trueop1)
2278	  && (INTVAL (trueop1) & (INTVAL (trueop1) + 1)) == 0
2279	  && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
2280	{
2281	  rtx pmop[2];
2282	  int which;
2283
2284	  pmop[0] = XEXP (op0, 0);
2285	  pmop[1] = XEXP (op0, 1);
2286
2287	  for (which = 0; which < 2; which++)
2288	    {
2289	      tem = pmop[which];
2290	      switch (GET_CODE (tem))
2291		{
2292		case AND:
2293		  if (GET_CODE (XEXP (tem, 1)) == CONST_INT
2294		      && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1))
2295		      == INTVAL (trueop1))
2296		    pmop[which] = XEXP (tem, 0);
2297		  break;
2298		case IOR:
2299		case XOR:
2300		  if (GET_CODE (XEXP (tem, 1)) == CONST_INT
2301		      && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == 0)
2302		    pmop[which] = XEXP (tem, 0);
2303		  break;
2304		default:
2305		  break;
2306		}
2307	    }
2308
2309	  if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
2310	    {
2311	      tem = simplify_gen_binary (GET_CODE (op0), mode,
2312					 pmop[0], pmop[1]);
2313	      return simplify_gen_binary (code, mode, tem, op1);
2314	    }
2315	}
2316      tem = simplify_associative_operation (code, mode, op0, op1);
2317      if (tem)
2318	return tem;
2319      break;
2320
2321    case UDIV:
2322      /* 0/x is 0 (or x&0 if x has side-effects).  */
2323      if (trueop0 == CONST0_RTX (mode))
2324	{
2325	  if (side_effects_p (op1))
2326	    return simplify_gen_binary (AND, mode, op1, trueop0);
2327	  return trueop0;
2328	}
2329      /* x/1 is x.  */
2330      if (trueop1 == CONST1_RTX (mode))
2331	return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2332      /* Convert divide by power of two into shift.  */
2333      if (GET_CODE (trueop1) == CONST_INT
2334	  && (val = exact_log2 (INTVAL (trueop1))) > 0)
2335	return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
2336      break;
2337
2338    case DIV:
2339      /* Handle floating point and integers separately.  */
2340      if (SCALAR_FLOAT_MODE_P (mode))
2341	{
2342	  /* Maybe change 0.0 / x to 0.0.  This transformation isn't
2343	     safe for modes with NaNs, since 0.0 / 0.0 will then be
2344	     NaN rather than 0.0.  Nor is it safe for modes with signed
2345	     zeros, since dividing 0 by a negative number gives -0.0.  */
2346	  if (trueop0 == CONST0_RTX (mode)
2347	      && !HONOR_NANS (mode)
2348	      && !HONOR_SIGNED_ZEROS (mode)
2349	      && ! side_effects_p (op1))
2350	    return op0;
2351	  /* x/1.0 is x.  */
2352	  if (trueop1 == CONST1_RTX (mode)
2353	      && !HONOR_SNANS (mode))
2354	    return op0;
2355
2356	  if (GET_CODE (trueop1) == CONST_DOUBLE
2357	      && trueop1 != CONST0_RTX (mode))
2358	    {
2359	      REAL_VALUE_TYPE d;
2360	      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2361
2362	      /* x/-1.0 is -x.  */
2363	      if (REAL_VALUES_EQUAL (d, dconstm1)
2364		  && !HONOR_SNANS (mode))
2365		return simplify_gen_unary (NEG, mode, op0, mode);
2366
2367	      /* Change FP division by a constant into multiplication.
2368		 Only do this with -funsafe-math-optimizations.  */
2369	      if (flag_unsafe_math_optimizations
2370		  && !REAL_VALUES_EQUAL (d, dconst0))
2371		{
2372		  REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
2373		  tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
2374		  return simplify_gen_binary (MULT, mode, op0, tem);
2375		}
2376	    }
2377	}
2378      else
2379	{
2380	  /* 0/x is 0 (or x&0 if x has side-effects).  */
2381	  if (trueop0 == CONST0_RTX (mode))
2382	    {
2383	      if (side_effects_p (op1))
2384		return simplify_gen_binary (AND, mode, op1, trueop0);
2385	      return trueop0;
2386	    }
2387	  /* x/1 is x.  */
2388	  if (trueop1 == CONST1_RTX (mode))
2389	    return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2390	  /* x/-1 is -x.  */
2391	  if (trueop1 == constm1_rtx)
2392	    {
2393	      rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
2394	      return simplify_gen_unary (NEG, mode, x, mode);
2395	    }
2396	}
2397      break;
2398
2399    case UMOD:
2400      /* 0%x is 0 (or x&0 if x has side-effects).  */
2401      if (trueop0 == CONST0_RTX (mode))
2402	{
2403	  if (side_effects_p (op1))
2404	    return simplify_gen_binary (AND, mode, op1, trueop0);
2405	  return trueop0;
2406	}
2407      /* x%1 is 0 (or x&0 if x has side-effects).  */
2408      if (trueop1 == CONST1_RTX (mode))
2409	{
2410	  if (side_effects_p (op0))
2411	    return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2412	  return CONST0_RTX (mode);
2413	}
2414      /* Implement modulus by power of two as AND.  */
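      /* For example, (umod X (const_int 8)) becomes
	 (and X (const_int 7)).  */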
2415      if (GET_CODE (trueop1) == CONST_INT
2416	  && exact_log2 (INTVAL (trueop1)) > 0)
2417	return simplify_gen_binary (AND, mode, op0,
2418				    GEN_INT (INTVAL (op1) - 1));
2419      break;
2420
2421    case MOD:
2422      /* 0%x is 0 (or x&0 if x has side-effects).  */
2423      if (trueop0 == CONST0_RTX (mode))
2424	{
2425	  if (side_effects_p (op1))
2426	    return simplify_gen_binary (AND, mode, op1, trueop0);
2427	  return trueop0;
2428	}
2429      /* x%1 and x%-1 are 0 (or x&0 if x has side-effects).  */
2430      if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
2431	{
2432	  if (side_effects_p (op0))
2433	    return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2434	  return CONST0_RTX (mode);
2435	}
2436      break;
2437
2438    case ROTATERT:
2439    case ROTATE:
2440    case ASHIFTRT:
2441      if (trueop1 == CONST0_RTX (mode))
2442	return op0;
2443      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2444	return op0;
2445      /* Rotating ~0 always results in ~0.  */
2446      if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
2447	  && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
2448	  && ! side_effects_p (op1))
2449	return op0;
2450      break;
2451
2452    case ASHIFT:
2453    case SS_ASHIFT:
2454      if (trueop1 == CONST0_RTX (mode))
2455	return op0;
2456      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2457	return op0;
2458      break;
2459
2460    case LSHIFTRT:
2461      if (trueop1 == CONST0_RTX (mode))
2462	return op0;
2463      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2464	return op0;
2465      /* Optimize (lshiftrt (clz X) C) as (eq X 0).  */
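      /* For example, if CLZ of a zero SImode value is defined to be 32,
	 (lshiftrt (clz X) (const_int 5)) is 1 exactly when X is zero,
	 which is (eq X (const_int 0)) when STORE_FLAG_VALUE is 1.  */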
2466      if (GET_CODE (op0) == CLZ
2467	  && GET_CODE (trueop1) == CONST_INT
2468	  && STORE_FLAG_VALUE == 1
2469	  && INTVAL (trueop1) < (HOST_WIDE_INT)width)
2470	{
2471	  enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2472	  unsigned HOST_WIDE_INT zero_val = 0;
2473
2474	  if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
2475	      && zero_val == GET_MODE_BITSIZE (imode)
2476	      && INTVAL (trueop1) == exact_log2 (zero_val))
2477	    return simplify_gen_relational (EQ, mode, imode,
2478					    XEXP (op0, 0), const0_rtx);
2479	}
2480      break;
2481
2482    case SMIN:
2483      if (width <= HOST_BITS_PER_WIDE_INT
2484	  && GET_CODE (trueop1) == CONST_INT
2485	  && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width - 1)
2486	  && ! side_effects_p (op0))
2487	return op1;
2488      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2489	return op0;
2490      tem = simplify_associative_operation (code, mode, op0, op1);
2491      if (tem)
2492	return tem;
2493      break;
2494
2495    case SMAX:
2496      if (width <= HOST_BITS_PER_WIDE_INT
2497	  && GET_CODE (trueop1) == CONST_INT
2498	  && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
2499	      == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
2500	  && ! side_effects_p (op0))
2501	return op1;
2502      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2503	return op0;
2504      tem = simplify_associative_operation (code, mode, op0, op1);
2505      if (tem)
2506	return tem;
2507      break;
2508
2509    case UMIN:
2510      if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2511	return op1;
2512      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2513	return op0;
2514      tem = simplify_associative_operation (code, mode, op0, op1);
2515      if (tem)
2516	return tem;
2517      break;
2518
2519    case UMAX:
2520      if (trueop1 == constm1_rtx && ! side_effects_p (op0))
2521	return op1;
2522      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2523	return op0;
2524      tem = simplify_associative_operation (code, mode, op0, op1);
2525      if (tem)
2526	return tem;
2527      break;
2528
2529    case SS_PLUS:
2530    case US_PLUS:
2531    case SS_MINUS:
2532    case US_MINUS:
2533      /* ??? There are simplifications that can be done.  */
2534      return 0;
2535
2536    case VEC_SELECT:
2537      if (!VECTOR_MODE_P (mode))
2538	{
2539	  gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2540	  gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
2541	  gcc_assert (GET_CODE (trueop1) == PARALLEL);
2542	  gcc_assert (XVECLEN (trueop1, 0) == 1);
2543	  gcc_assert (GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT);
2544
2545	  if (GET_CODE (trueop0) == CONST_VECTOR)
2546	    return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
2547						      (trueop1, 0, 0)));
2548	}
2549      else
2550	{
2551	  gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2552	  gcc_assert (GET_MODE_INNER (mode)
2553		      == GET_MODE_INNER (GET_MODE (trueop0)));
2554	  gcc_assert (GET_CODE (trueop1) == PARALLEL);
2555
2556	  if (GET_CODE (trueop0) == CONST_VECTOR)
2557	    {
2558	      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2559	      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2560	      rtvec v = rtvec_alloc (n_elts);
2561	      unsigned int i;
2562
2563	      gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
2564	      for (i = 0; i < n_elts; i++)
2565		{
2566		  rtx x = XVECEXP (trueop1, 0, i);
2567
2568		  gcc_assert (GET_CODE (x) == CONST_INT);
2569		  RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
2570						       INTVAL (x));
2571		}
2572
2573	      return gen_rtx_CONST_VECTOR (mode, v);
2574	    }
2575	}
2576
2577      if (XVECLEN (trueop1, 0) == 1
2578	  && GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT
2579	  && GET_CODE (trueop0) == VEC_CONCAT)
2580	{
2581	  rtx vec = trueop0;
2582	  int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
2583
2584	  /* Try to find the element in the VEC_CONCAT.  */
2585	  while (GET_MODE (vec) != mode
2586		 && GET_CODE (vec) == VEC_CONCAT)
2587	    {
2588	      HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
2589	      if (offset < vec_size)
2590		vec = XEXP (vec, 0);
2591	      else
2592		{
2593		  offset -= vec_size;
2594		  vec = XEXP (vec, 1);
2595		}
2596	      vec = avoid_constant_pool_reference (vec);
2597	    }
2598
2599	  if (GET_MODE (vec) == mode)
2600	    return vec;
2601	}
2602
2603      return 0;
2604    case VEC_CONCAT:
2605      {
2606	enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
2607				      ? GET_MODE (trueop0)
2608				      : GET_MODE_INNER (mode));
2609	enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
2610				      ? GET_MODE (trueop1)
2611				      : GET_MODE_INNER (mode));
2612
2613	gcc_assert (VECTOR_MODE_P (mode));
2614	gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
2615		    == GET_MODE_SIZE (mode));
2616
2617	if (VECTOR_MODE_P (op0_mode))
2618	  gcc_assert (GET_MODE_INNER (mode)
2619		      == GET_MODE_INNER (op0_mode));
2620	else
2621	  gcc_assert (GET_MODE_INNER (mode) == op0_mode);
2622
2623	if (VECTOR_MODE_P (op1_mode))
2624	  gcc_assert (GET_MODE_INNER (mode)
2625		      == GET_MODE_INNER (op1_mode));
2626	else
2627	  gcc_assert (GET_MODE_INNER (mode) == op1_mode);
2628
2629	if ((GET_CODE (trueop0) == CONST_VECTOR
2630	     || GET_CODE (trueop0) == CONST_INT
2631	     || GET_CODE (trueop0) == CONST_DOUBLE)
2632	    && (GET_CODE (trueop1) == CONST_VECTOR
2633		|| GET_CODE (trueop1) == CONST_INT
2634		|| GET_CODE (trueop1) == CONST_DOUBLE))
2635	  {
2636	    int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2637	    unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2638	    rtvec v = rtvec_alloc (n_elts);
2639	    unsigned int i;
2640	    unsigned in_n_elts = 1;
2641
2642	    if (VECTOR_MODE_P (op0_mode))
2643	      in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
2644	    for (i = 0; i < n_elts; i++)
2645	      {
2646		if (i < in_n_elts)
2647		  {
2648		    if (!VECTOR_MODE_P (op0_mode))
2649		      RTVEC_ELT (v, i) = trueop0;
2650		    else
2651		      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
2652		  }
2653		else
2654		  {
2655		    if (!VECTOR_MODE_P (op1_mode))
2656		      RTVEC_ELT (v, i) = trueop1;
2657		    else
2658		      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
2659							   i - in_n_elts);
2660		  }
2661	      }
2662
2663	    return gen_rtx_CONST_VECTOR (mode, v);
2664	  }
2665      }
2666      return 0;
2667
2668    default:
2669      gcc_unreachable ();
2670    }
2671
2672  return 0;
2673}
2674
2675rtx
2676simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
2677				 rtx op0, rtx op1)
2678{
2679  HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
2680  HOST_WIDE_INT val;
2681  unsigned int width = GET_MODE_BITSIZE (mode);
2682
2683  if (VECTOR_MODE_P (mode)
2684      && code != VEC_CONCAT
2685      && GET_CODE (op0) == CONST_VECTOR
2686      && GET_CODE (op1) == CONST_VECTOR)
2687    {
2688      unsigned n_elts = GET_MODE_NUNITS (mode);
2689      enum machine_mode op0mode = GET_MODE (op0);
2690      unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
2691      enum machine_mode op1mode = GET_MODE (op1);
2692      unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
2693      rtvec v = rtvec_alloc (n_elts);
2694      unsigned int i;
2695
2696      gcc_assert (op0_n_elts == n_elts);
2697      gcc_assert (op1_n_elts == n_elts);
2698      for (i = 0; i < n_elts; i++)
2699	{
2700	  rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
2701					     CONST_VECTOR_ELT (op0, i),
2702					     CONST_VECTOR_ELT (op1, i));
2703	  if (!x)
2704	    return 0;
2705	  RTVEC_ELT (v, i) = x;
2706	}
2707
2708      return gen_rtx_CONST_VECTOR (mode, v);
2709    }
2710
2711  if (VECTOR_MODE_P (mode)
2712      && code == VEC_CONCAT
2713      && CONSTANT_P (op0) && CONSTANT_P (op1))
2714    {
2715      unsigned n_elts = GET_MODE_NUNITS (mode);
2716      rtvec v = rtvec_alloc (n_elts);
2717
2718      gcc_assert (n_elts >= 2);
2719      if (n_elts == 2)
2720	{
2721	  gcc_assert (GET_CODE (op0) != CONST_VECTOR);
2722	  gcc_assert (GET_CODE (op1) != CONST_VECTOR);
2723
2724	  RTVEC_ELT (v, 0) = op0;
2725	  RTVEC_ELT (v, 1) = op1;
2726	}
2727      else
2728	{
2729	  unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
2730	  unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
2731	  unsigned i;
2732
2733	  gcc_assert (GET_CODE (op0) == CONST_VECTOR);
2734	  gcc_assert (GET_CODE (op1) == CONST_VECTOR);
2735	  gcc_assert (op0_n_elts + op1_n_elts == n_elts);
2736
2737	  for (i = 0; i < op0_n_elts; ++i)
2738	    RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
2739	  for (i = 0; i < op1_n_elts; ++i)
2740	    RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
2741	}
2742
2743      return gen_rtx_CONST_VECTOR (mode, v);
2744    }
2745
2746  if (SCALAR_FLOAT_MODE_P (mode)
2747      && GET_CODE (op0) == CONST_DOUBLE
2748      && GET_CODE (op1) == CONST_DOUBLE
2749      && mode == GET_MODE (op0) && mode == GET_MODE (op1))
2750    {
2751      if (code == AND
2752	  || code == IOR
2753	  || code == XOR)
2754	{
2755	  long tmp0[4];
2756	  long tmp1[4];
2757	  REAL_VALUE_TYPE r;
2758	  int i;
2759
2760	  real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
2761			  GET_MODE (op0));
2762	  real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
2763			  GET_MODE (op1));
2764	  for (i = 0; i < 4; i++)
2765	    {
2766	      switch (code)
2767	      {
2768	      case AND:
2769		tmp0[i] &= tmp1[i];
2770		break;
2771	      case IOR:
2772		tmp0[i] |= tmp1[i];
2773		break;
2774	      case XOR:
2775		tmp0[i] ^= tmp1[i];
2776		break;
2777	      default:
2778		gcc_unreachable ();
2779	      }
2780	    }
2781	   real_from_target (&r, tmp0, mode);
2782	   return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
2783	}
2784      else
2785	{
2786	  REAL_VALUE_TYPE f0, f1, value, result;
2787	  bool inexact;
2788
2789	  REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
2790	  REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
2791	  real_convert (&f0, mode, &f0);
2792	  real_convert (&f1, mode, &f1);
2793
2794	  if (HONOR_SNANS (mode)
2795	      && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
2796	    return 0;
2797
2798	  if (code == DIV
2799	      && REAL_VALUES_EQUAL (f1, dconst0)
2800	      && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
2801	    return 0;
2802
2803	  if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
2804	      && flag_trapping_math
2805	      && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
2806	    {
2807	      int s0 = REAL_VALUE_NEGATIVE (f0);
2808	      int s1 = REAL_VALUE_NEGATIVE (f1);
2809
2810	      switch (code)
2811		{
2812		case PLUS:
2813		  /* Inf + -Inf = NaN plus exception.  */
2814		  if (s0 != s1)
2815		    return 0;
2816		  break;
2817		case MINUS:
2818		  /* Inf - Inf = NaN plus exception.  */
2819		  if (s0 == s1)
2820		    return 0;
2821		  break;
2822		case DIV:
2823		  /* Inf / Inf = NaN plus exception.  */
2824		  return 0;
2825		default:
2826		  break;
2827		}
2828	    }
2829
2830	  if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
2831	      && flag_trapping_math
2832	      && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
2833		  || (REAL_VALUE_ISINF (f1)
2834		      && REAL_VALUES_EQUAL (f0, dconst0))))
2835	    /* Inf * 0 = NaN plus exception.  */
2836	    return 0;
2837
2838	  inexact = real_arithmetic (&value, rtx_to_tree_code (code),
2839				     &f0, &f1);
2840	  real_convert (&result, mode, &value);
2841
2842	  /* Don't constant fold this floating point operation if
2843	     the result has overflowed and flag_trapping_math.  */
2844
2845	  if (flag_trapping_math
2846	      && MODE_HAS_INFINITIES (mode)
2847	      && REAL_VALUE_ISINF (result)
2848	      && !REAL_VALUE_ISINF (f0)
2849	      && !REAL_VALUE_ISINF (f1))
2850	    /* Overflow plus exception.  */
2851	    return 0;
2852
2853	  /* Don't constant fold this floating point operation if the
2854	     result may dependent upon the run-time rounding mode and
2855	     flag_rounding_math is set, or if GCC's software emulation
2856	     is unable to accurately represent the result.  */
2857
2858	  if ((flag_rounding_math
2859	       || (REAL_MODE_FORMAT_COMPOSITE_P (mode)
2860		   && !flag_unsafe_math_optimizations))
2861	      && (inexact || !real_identical (&result, &value)))
2862	    return NULL_RTX;
2863
2864	  return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
2865	}
2866    }
2867
2868  /* We can fold some multi-word operations.  */
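  /* For example, on a host with a 32-bit HOST_WIDE_INT, a DImode
     constant is split into a (low, high) pair of host words and
     folded with add_double, mul_double, etc. below.  */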
2869  if (GET_MODE_CLASS (mode) == MODE_INT
2870      && width == HOST_BITS_PER_WIDE_INT * 2
2871      && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
2872      && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
2873    {
2874      unsigned HOST_WIDE_INT l1, l2, lv, lt;
2875      HOST_WIDE_INT h1, h2, hv, ht;
2876
2877      if (GET_CODE (op0) == CONST_DOUBLE)
2878	l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0);
2879      else
2880	l1 = INTVAL (op0), h1 = HWI_SIGN_EXTEND (l1);
2881
2882      if (GET_CODE (op1) == CONST_DOUBLE)
2883	l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1);
2884      else
2885	l2 = INTVAL (op1), h2 = HWI_SIGN_EXTEND (l2);
2886
2887      switch (code)
2888	{
2889	case MINUS:
2890	  /* A - B == A + (-B).  */
2891	  neg_double (l2, h2, &lv, &hv);
2892	  l2 = lv, h2 = hv;
2893
2894	  /* Fall through....  */
2895
2896	case PLUS:
2897	  add_double (l1, h1, l2, h2, &lv, &hv);
2898	  break;
2899
2900	case MULT:
2901	  mul_double (l1, h1, l2, h2, &lv, &hv);
2902	  break;
2903
2904	case DIV:
2905	  if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
2906				    &lv, &hv, &lt, &ht))
2907	    return 0;
2908	  break;
2909
2910	case MOD:
2911	  if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
2912				    &lt, &ht, &lv, &hv))
2913	    return 0;
2914	  break;
2915
2916	case UDIV:
2917	  if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
2918				    &lv, &hv, &lt, &ht))
2919	    return 0;
2920	  break;
2921
2922	case UMOD:
2923	  if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
2924				    &lt, &ht, &lv, &hv))
2925	    return 0;
2926	  break;
2927
2928	case AND:
2929	  lv = l1 & l2, hv = h1 & h2;
2930	  break;
2931
2932	case IOR:
2933	  lv = l1 | l2, hv = h1 | h2;
2934	  break;
2935
2936	case XOR:
2937	  lv = l1 ^ l2, hv = h1 ^ h2;
2938	  break;
2939
2940	case SMIN:
2941	  if (h1 < h2
2942	      || (h1 == h2
2943		  && ((unsigned HOST_WIDE_INT) l1
2944		      < (unsigned HOST_WIDE_INT) l2)))
2945	    lv = l1, hv = h1;
2946	  else
2947	    lv = l2, hv = h2;
2948	  break;
2949
2950	case SMAX:
2951	  if (h1 > h2
2952	      || (h1 == h2
2953		  && ((unsigned HOST_WIDE_INT) l1
2954		      > (unsigned HOST_WIDE_INT) l2)))
2955	    lv = l1, hv = h1;
2956	  else
2957	    lv = l2, hv = h2;
2958	  break;
2959
2960	case UMIN:
2961	  if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
2962	      || (h1 == h2
2963		  && ((unsigned HOST_WIDE_INT) l1
2964		      < (unsigned HOST_WIDE_INT) l2)))
2965	    lv = l1, hv = h1;
2966	  else
2967	    lv = l2, hv = h2;
2968	  break;
2969
2970	case UMAX:
2971	  if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
2972	      || (h1 == h2
2973		  && ((unsigned HOST_WIDE_INT) l1
2974		      > (unsigned HOST_WIDE_INT) l2)))
2975	    lv = l1, hv = h1;
2976	  else
2977	    lv = l2, hv = h2;
2978	  break;
2979
2980	case LSHIFTRT:   case ASHIFTRT:
2981	case ASHIFT:
2982	case ROTATE:     case ROTATERT:
2983	  if (SHIFT_COUNT_TRUNCATED)
2984	    l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
2985
2986	  if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
2987	    return 0;
2988
2989	  if (code == LSHIFTRT || code == ASHIFTRT)
2990	    rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
2991			   code == ASHIFTRT);
2992	  else if (code == ASHIFT)
2993	    lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
2994	  else if (code == ROTATE)
2995	    lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
2996	  else /* code == ROTATERT */
2997	    rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
2998	  break;
2999
3000	default:
3001	  return 0;
3002	}
3003
3004      return immed_double_const (lv, hv, mode);
3005    }
3006
3007  if (GET_CODE (op0) == CONST_INT && GET_CODE (op1) == CONST_INT
3008      && width <= HOST_BITS_PER_WIDE_INT && width != 0)
3009    {
3010      /* Get the integer argument values in two forms:
3011         zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S.  */
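      /* For example, a QImode constant with value 0xff gives
         ARG0 == 255 but ARG0S == -1.  */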
3012
3013      arg0 = INTVAL (op0);
3014      arg1 = INTVAL (op1);
3015
3016      if (width < HOST_BITS_PER_WIDE_INT)
3017        {
3018          arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
3019          arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;
3020
3021          arg0s = arg0;
3022          if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
3023	    arg0s |= ((HOST_WIDE_INT) (-1) << width);
3024
3025	  arg1s = arg1;
3026	  if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
3027	    arg1s |= ((HOST_WIDE_INT) (-1) << width);
3028	}
3029      else
3030	{
3031	  arg0s = arg0;
3032	  arg1s = arg1;
3033	}
3034
3035      /* Compute the value of the arithmetic.  */
3036
3037      switch (code)
3038	{
3039	case PLUS:
3040	  val = arg0s + arg1s;
3041	  break;
3042
3043	case MINUS:
3044	  val = arg0s - arg1s;
3045	  break;
3046
3047	case MULT:
3048	  val = arg0s * arg1s;
3049	  break;
3050
3051	case DIV:
3052	  if (arg1s == 0
3053	      || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3054		  && arg1s == -1))
3055	    return 0;
3056	  val = arg0s / arg1s;
3057	  break;
3058
3059	case MOD:
3060	  if (arg1s == 0
3061	      || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3062		  && arg1s == -1))
3063	    return 0;
3064	  val = arg0s % arg1s;
3065	  break;
3066
3067	case UDIV:
3068	  if (arg1 == 0
3069	      || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3070		  && arg1s == -1))
3071	    return 0;
3072	  val = (unsigned HOST_WIDE_INT) arg0 / arg1;
3073	  break;
3074
3075	case UMOD:
3076	  if (arg1 == 0
3077	      || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
3078		  && arg1s == -1))
3079	    return 0;
3080	  val = (unsigned HOST_WIDE_INT) arg0 % arg1;
3081	  break;
3082
3083	case AND:
3084	  val = arg0 & arg1;
3085	  break;
3086
3087	case IOR:
3088	  val = arg0 | arg1;
3089	  break;
3090
3091	case XOR:
3092	  val = arg0 ^ arg1;
3093	  break;
3094
3095	case LSHIFTRT:
3096	case ASHIFT:
3097	case ASHIFTRT:
3098	  /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
3099	     the value is in range.  We can't return any old value for
3100	     out-of-range arguments because either the middle-end (via
3101	     shift_truncation_mask) or the back-end might be relying on
3102	     target-specific knowledge.  Nor can we rely on
3103	     shift_truncation_mask, since the shift might not be part of an
3104	     ashlM3, lshrM3 or ashrM3 instruction.  */
3105	  if (SHIFT_COUNT_TRUNCATED)
3106	    arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
3107	  else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
3108	    return 0;
3109
3110	  val = (code == ASHIFT
3111		 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
3112		 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);
3113
3114	  /* Sign-extend the result for arithmetic right shifts.  */
3115	  if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
3116	    val |= ((HOST_WIDE_INT) -1) << (width - arg1);
3117	  break;
3118
3119	case ROTATERT:
3120	  if (arg1 < 0)
3121	    return 0;
3122
3123	  arg1 %= width;
3124	  val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
3125		 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
3126	  break;
3127
3128	case ROTATE:
3129	  if (arg1 < 0)
3130	    return 0;
3131
3132	  arg1 %= width;
3133	  val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
3134		 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
3135	  break;
3136
3137	case COMPARE:
3138	  /* Do nothing here.  */
3139	  return 0;
3140
3141	case SMIN:
3142	  val = arg0s <= arg1s ? arg0s : arg1s;
3143	  break;
3144
3145	case UMIN:
3146	  val = ((unsigned HOST_WIDE_INT) arg0
3147		 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3148	  break;
3149
3150	case SMAX:
3151	  val = arg0s > arg1s ? arg0s : arg1s;
3152	  break;
3153
3154	case UMAX:
3155	  val = ((unsigned HOST_WIDE_INT) arg0
3156		 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
3157	  break;
3158
3159	case SS_PLUS:
3160	case US_PLUS:
3161	case SS_MINUS:
3162	case US_MINUS:
3163	case SS_ASHIFT:
3164	  /* ??? There are simplifications that can be done.  */
3165	  return 0;
3166
3167	default:
3168	  gcc_unreachable ();
3169	}
3170
3171      return gen_int_mode (val, mode);
3172    }
3173
3174  return NULL_RTX;
3175}
3176
3177
3178
3179/* Simplify a PLUS or MINUS, at least one of whose operands may be another
3180   PLUS or MINUS.
3181
3182   Rather than test for specific cases, we do this by a brute-force method
3183   and do all possible simplifications until no more changes occur.  Then
3184   we rebuild the operation.  */
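/* For example, simplifying (plus (minus X C) C) flattens the operands
   into the list {X, -C, C}, combines C with -C to zero, and rebuilds
   the result as just X.  */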
3185
3186struct simplify_plus_minus_op_data
3187{
3188  rtx op;
3189  short neg;
3190};
3191
3192static int
3193simplify_plus_minus_op_data_cmp (const void *p1, const void *p2)
3194{
3195  const struct simplify_plus_minus_op_data *d1 = p1;
3196  const struct simplify_plus_minus_op_data *d2 = p2;
3197  int result;
3198
3199  result = (commutative_operand_precedence (d2->op)
3200	    - commutative_operand_precedence (d1->op));
3201  if (result)
3202    return result;
3203
3204  /* Group together equal REGs to do more simplification.  */
3205  if (REG_P (d1->op) && REG_P (d2->op))
3206    return REGNO (d1->op) - REGNO (d2->op);
3207  else
3208    return 0;
3209}
3210
3211static rtx
3212simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
3213		     rtx op1)
3214{
3215  struct simplify_plus_minus_op_data ops[8];
3216  rtx result, tem;
3217  int n_ops = 2, input_ops = 2;
3218  int changed, n_constants = 0, canonicalized = 0;
3219  int i, j;
3220
3221  memset (ops, 0, sizeof ops);
3222
3223  /* Set up the two operands and then expand them until nothing has been
3224     changed.  If we run out of room in our array, give up; this should
3225     almost never happen.  */
3226
3227  ops[0].op = op0;
3228  ops[0].neg = 0;
3229  ops[1].op = op1;
3230  ops[1].neg = (code == MINUS);
3231
3232  do
3233    {
3234      changed = 0;
3235
3236      for (i = 0; i < n_ops; i++)
3237	{
3238	  rtx this_op = ops[i].op;
3239	  int this_neg = ops[i].neg;
3240	  enum rtx_code this_code = GET_CODE (this_op);
3241
3242	  switch (this_code)
3243	    {
3244	    case PLUS:
3245	    case MINUS:
3246	      if (n_ops == 7)
3247		return NULL_RTX;
3248
3249	      ops[n_ops].op = XEXP (this_op, 1);
3250	      ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
3251	      n_ops++;
3252
3253	      ops[i].op = XEXP (this_op, 0);
3254	      input_ops++;
3255	      changed = 1;
3256	      canonicalized |= this_neg;
3257	      break;
3258
3259	    case NEG:
3260	      ops[i].op = XEXP (this_op, 0);
3261	      ops[i].neg = ! this_neg;
3262	      changed = 1;
3263	      canonicalized = 1;
3264	      break;
3265
3266	    case CONST:
3267	      if (n_ops < 7
3268		  && GET_CODE (XEXP (this_op, 0)) == PLUS
3269		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
3270		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
3271		{
3272		  ops[i].op = XEXP (XEXP (this_op, 0), 0);
3273		  ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
3274		  ops[n_ops].neg = this_neg;
3275		  n_ops++;
3276		  changed = 1;
3277	          canonicalized = 1;
3278		}
3279	      break;
3280
3281	    case NOT:
3282	      /* ~a -> (-a - 1) */
3283	      if (n_ops != 7)
3284		{
3285		  ops[n_ops].op = constm1_rtx;
3286		  ops[n_ops++].neg = this_neg;
3287		  ops[i].op = XEXP (this_op, 0);
3288		  ops[i].neg = !this_neg;
3289		  changed = 1;
3290	          canonicalized = 1;
3291		}
3292	      break;
3293
3294	    case CONST_INT:
3295	      n_constants++;
3296	      if (this_neg)
3297		{
3298		  ops[i].op = neg_const_int (mode, this_op);
3299		  ops[i].neg = 0;
3300		  changed = 1;
3301	          canonicalized = 1;
3302		}
3303	      break;
3304
3305	    default:
3306	      break;
3307	    }
3308	}
3309    }
3310  while (changed);
3311
3312  if (n_constants > 1)
3313    canonicalized = 1;
3314
3315  gcc_assert (n_ops >= 2);
3316
3317  /* If we only have two operands, we can avoid the loops.  */
3318  if (n_ops == 2)
3319    {
3320      enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
3321      rtx lhs, rhs;
3322
3323      /* Get the two operands.  Be careful with the order, especially for
3324	 the cases where code == MINUS.  */
3325      if (ops[0].neg && ops[1].neg)
3326	{
3327	  lhs = gen_rtx_NEG (mode, ops[0].op);
3328	  rhs = ops[1].op;
3329	}
3330      else if (ops[0].neg)
3331	{
3332	  lhs = ops[1].op;
3333	  rhs = ops[0].op;
3334	}
3335      else
3336	{
3337	  lhs = ops[0].op;
3338	  rhs = ops[1].op;
3339	}
3340
3341      return simplify_const_binary_operation (code, mode, lhs, rhs);
3342    }
3343
3344  /* Now simplify each pair of operands until nothing changes.  */
3345  do
3346    {
3347      /* Insertion sort is good enough for an eight-element array.  */
3348      for (i = 1; i < n_ops; i++)
3349        {
3350          struct simplify_plus_minus_op_data save;
3351          j = i - 1;
3352          if (simplify_plus_minus_op_data_cmp (&ops[j], &ops[i]) < 0)
3353	    continue;
3354
3355          canonicalized = 1;
3356          save = ops[i];
3357          do
3358	    ops[j + 1] = ops[j];
3359          while (j-- && simplify_plus_minus_op_data_cmp (&ops[j], &save) > 0);
3360          ops[j + 1] = save;
3361        }
3362
3363      /* This is only useful the first time through.  */
3364      if (!canonicalized)
3365        return NULL_RTX;
3366
3367      changed = 0;
3368      for (i = n_ops - 1; i > 0; i--)
3369	for (j = i - 1; j >= 0; j--)
3370	  {
3371	    rtx lhs = ops[j].op, rhs = ops[i].op;
3372	    int lneg = ops[j].neg, rneg = ops[i].neg;
3373
3374	    if (lhs != 0 && rhs != 0)
3375	      {
3376		enum rtx_code ncode = PLUS;
3377
3378		if (lneg != rneg)
3379		  {
3380		    ncode = MINUS;
3381		    if (lneg)
3382		      tem = lhs, lhs = rhs, rhs = tem;
3383		  }
3384		else if (swap_commutative_operands_p (lhs, rhs))
3385		  tem = lhs, lhs = rhs, rhs = tem;
3386
3387		if ((GET_CODE (lhs) == CONST || GET_CODE (lhs) == CONST_INT)
3388		    && (GET_CODE (rhs) == CONST || GET_CODE (rhs) == CONST_INT))
3389		  {
3390		    rtx tem_lhs, tem_rhs;
3391
3392		    tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
3393		    tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
3394		    tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);
3395
3396		    if (tem && !CONSTANT_P (tem))
3397		      tem = gen_rtx_CONST (GET_MODE (tem), tem);
3398		  }
3399		else
3400		  tem = simplify_binary_operation (ncode, mode, lhs, rhs);
3401
3402		/* Reject "simplifications" that just wrap the two
3403		   arguments in a CONST.  Failure to do so can result
3404		   in infinite recursion with simplify_binary_operation
3405		   when it calls us to simplify CONST operations.  */
3406		if (tem
3407		    && ! (GET_CODE (tem) == CONST
3408			  && GET_CODE (XEXP (tem, 0)) == ncode
3409			  && XEXP (XEXP (tem, 0), 0) == lhs
3410			  && XEXP (XEXP (tem, 0), 1) == rhs))
3411		  {
3412		    lneg &= rneg;
3413		    if (GET_CODE (tem) == NEG)
3414		      tem = XEXP (tem, 0), lneg = !lneg;
3415		    if (GET_CODE (tem) == CONST_INT && lneg)
3416		      tem = neg_const_int (mode, tem), lneg = 0;
3417
3418		    ops[i].op = tem;
3419		    ops[i].neg = lneg;
3420		    ops[j].op = NULL_RTX;
3421		    changed = 1;
3422		  }
3423	      }
3424	  }
3425
3426      /* Pack all the operands to the lower-numbered entries.  */
3427      for (i = 0, j = 0; j < n_ops; j++)
3428        if (ops[j].op)
3429          {
3430	    ops[i] = ops[j];
3431	    i++;
3432          }
3433      n_ops = i;
3434    }
3435  while (changed);
3436
3437  /* Create (minus -C X) instead of (neg (const (plus X C))).  */
3438  if (n_ops == 2
3439      && GET_CODE (ops[1].op) == CONST_INT
3440      && CONSTANT_P (ops[0].op)
3441      && ops[0].neg)
3442    return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
3443
3444  /* We suppressed creation of trivial CONST expressions in the
3445     combination loop to avoid recursion.  Create one manually now.
3446     The combination loop should have ensured that there is exactly
3447     one CONST_INT, and the sort will have ensured that it is last
3448     in the array and that any other constant will be next-to-last.  */
3449
3450  if (n_ops > 1
3451      && GET_CODE (ops[n_ops - 1].op) == CONST_INT
3452      && CONSTANT_P (ops[n_ops - 2].op))
3453    {
3454      rtx value = ops[n_ops - 1].op;
3455      if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
3456	value = neg_const_int (mode, value);
3457      ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
3458      n_ops--;
3459    }
3460
3461  /* Put a non-negated operand first, if possible.  */
3462
3463  for (i = 0; i < n_ops && ops[i].neg; i++)
3464    continue;
3465  if (i == n_ops)
3466    ops[0].op = gen_rtx_NEG (mode, ops[0].op);
3467  else if (i != 0)
3468    {
3469      tem = ops[0].op;
3470      ops[0] = ops[i];
3471      ops[i].op = tem;
3472      ops[i].neg = 1;
3473    }
3474
3475  /* Now make the result by performing the requested operations.  */
3476  result = ops[0].op;
3477  for (i = 1; i < n_ops; i++)
3478    result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
3479			     mode, result, ops[i].op);
3480
3481  return result;
3482}
3483
3484/* Check whether an operand is suitable for calling simplify_plus_minus.  */
3485static bool
3486plus_minus_operand_p (rtx x)
3487{
3488  return GET_CODE (x) == PLUS
3489         || GET_CODE (x) == MINUS
3490	 || (GET_CODE (x) == CONST
3491	     && GET_CODE (XEXP (x, 0)) == PLUS
3492	     && CONSTANT_P (XEXP (XEXP (x, 0), 0))
3493	     && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
3494}
3495
3496/* Like simplify_binary_operation except used for relational operators.
3497   MODE is the mode of the result.  If MODE is VOIDmode, the operands
3498   must not both be VOIDmode.
3499
3500   CMP_MODE specifies the mode in which the comparison is done, so it is
3501   the mode of the operands.  If CMP_MODE is VOIDmode, it is taken from
3502   the operands or, if both are VOIDmode, the operands are compared in
3503   "infinite precision".  */
3504rtx
3505simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
3506			       enum machine_mode cmp_mode, rtx op0, rtx op1)
3507{
3508  rtx tem, trueop0, trueop1;
3509
3510  if (cmp_mode == VOIDmode)
3511    cmp_mode = GET_MODE (op0);
3512  if (cmp_mode == VOIDmode)
3513    cmp_mode = GET_MODE (op1);
3514
3515  tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
3516  if (tem)
3517    {
3518      if (SCALAR_FLOAT_MODE_P (mode))
3519	{
3520          if (tem == const0_rtx)
3521            return CONST0_RTX (mode);
3522#ifdef FLOAT_STORE_FLAG_VALUE
3523	  {
3524	    REAL_VALUE_TYPE val;
3525	    val = FLOAT_STORE_FLAG_VALUE (mode);
3526	    return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
3527	  }
3528#else
3529	  return NULL_RTX;
3530#endif
3531	}
3532      if (VECTOR_MODE_P (mode))
3533	{
3534	  if (tem == const0_rtx)
3535	    return CONST0_RTX (mode);
3536#ifdef VECTOR_STORE_FLAG_VALUE
3537	  {
3538	    int i, units;
3539	    rtvec v;
3540
3541	    rtx val = VECTOR_STORE_FLAG_VALUE (mode);
3542	    if (val == NULL_RTX)
3543	      return NULL_RTX;
3544	    if (val == const1_rtx)
3545	      return CONST1_RTX (mode);
3546
3547	    units = GET_MODE_NUNITS (mode);
3548	    v = rtvec_alloc (units);
3549	    for (i = 0; i < units; i++)
3550	      RTVEC_ELT (v, i) = val;
3551	    return gen_rtx_raw_CONST_VECTOR (mode, v);
3552	  }
3553#else
3554	  return NULL_RTX;
3555#endif
3556	}
3557
3558      return tem;
3559    }
3560
3561  /* For the following tests, ensure const0_rtx is op1.  */
3562  if (swap_commutative_operands_p (op0, op1)
3563      || (op0 == const0_rtx && op1 != const0_rtx))
3564    tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);
3565
3566  /* If op0 is a compare, extract the comparison arguments from it.  */
3567  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
3568    return simplify_relational_operation (code, mode, VOIDmode,
3569				          XEXP (op0, 0), XEXP (op0, 1));
3570
3571  if (GET_MODE_CLASS (cmp_mode) == MODE_CC
3572      || CC0_P (op0))
3573    return NULL_RTX;
3574
3575  trueop0 = avoid_constant_pool_reference (op0);
3576  trueop1 = avoid_constant_pool_reference (op1);
3577  return simplify_relational_operation_1 (code, mode, cmp_mode,
3578		  			  trueop0, trueop1);
3579}
3580
3581/* This part of simplify_relational_operation is only used when CMP_MODE
3582   is not in class MODE_CC (i.e. it is a real comparison).
3583
3584   MODE is the mode of the result, while CMP_MODE specifies the mode
3585   in which the comparison is done, so it is the mode of the operands.  */
3586
3587static rtx
3588simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
3589				 enum machine_mode cmp_mode, rtx op0, rtx op1)
3590{
3591  enum rtx_code op0code = GET_CODE (op0);
3592
3593  if (GET_CODE (op1) == CONST_INT)
3594    {
3595      if (INTVAL (op1) == 0 && COMPARISON_P (op0))
3596	{
3597	  /* If op0 is a comparison, extract the comparison arguments
3598	     from it.  */
3599	  if (code == NE)
3600	    {
3601	      if (GET_MODE (op0) == mode)
3602		return simplify_rtx (op0);
3603	      else
3604		return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
3605					        XEXP (op0, 0), XEXP (op0, 1));
3606	    }
3607	  else if (code == EQ)
3608	    {
3609	      enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
3610	      if (new_code != UNKNOWN)
3611	        return simplify_gen_relational (new_code, mode, VOIDmode,
3612					        XEXP (op0, 0), XEXP (op0, 1));
3613	    }
3614	}
3615    }
3616
3617  /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1))  */
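  /* For instance, (eq (plus x (const_int 3)) (const_int 7)) becomes
     (eq x (const_int 4)).  */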
3618  if ((code == EQ || code == NE)
3619      && (op0code == PLUS || op0code == MINUS)
3620      && CONSTANT_P (op1)
3621      && CONSTANT_P (XEXP (op0, 1))
3622      && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
3623    {
3624      rtx x = XEXP (op0, 0);
3625      rtx c = XEXP (op0, 1);
3626
3627      c = simplify_gen_binary (op0code == PLUS ? MINUS : PLUS,
3628			       cmp_mode, op1, c);
3629      return simplify_gen_relational (code, mode, cmp_mode, x, c);
3630    }
3631
3632  /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
3633     the same as (zero_extract:SI FOO (const_int 1) BAR).  */
3634  if (code == NE
3635      && op1 == const0_rtx
3636      && GET_MODE_CLASS (mode) == MODE_INT
3637      && cmp_mode != VOIDmode
3638      /* ??? Work-around BImode bugs in the ia64 backend.  */
3639      && mode != BImode
3640      && cmp_mode != BImode
3641      && nonzero_bits (op0, cmp_mode) == 1
3642      && STORE_FLAG_VALUE == 1)
3643    return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
3644	   ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
3645	   : lowpart_subreg (mode, op0, cmp_mode);
3646
3647  /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y).  */
3648  if ((code == EQ || code == NE)
3649      && op1 == const0_rtx
3650      && op0code == XOR)
3651    return simplify_gen_relational (code, mode, cmp_mode,
3652				    XEXP (op0, 0), XEXP (op0, 1));
3653
3654  /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0).  */
3655  if ((code == EQ || code == NE)
3656      && op0code == XOR
3657      && rtx_equal_p (XEXP (op0, 0), op1)
3658      && !side_effects_p (XEXP (op0, 0)))
3659    return simplify_gen_relational (code, mode, cmp_mode,
3660				    XEXP (op0, 1), const0_rtx);
3661
3662  /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0).  */
3663  if ((code == EQ || code == NE)
3664      && op0code == XOR
3665      && rtx_equal_p (XEXP (op0, 1), op1)
3666      && !side_effects_p (XEXP (op0, 1)))
3667    return simplify_gen_relational (code, mode, cmp_mode,
3668				    XEXP (op0, 0), const0_rtx);
3669
3670  /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)).  */
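  /* E.g. (ne (xor x (const_int 5)) (const_int 3)) becomes
     (ne x (const_int 6)), since 5 ^ 3 == 6.  */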
3671  if ((code == EQ || code == NE)
3672      && op0code == XOR
3673      && (GET_CODE (op1) == CONST_INT
3674	  || GET_CODE (op1) == CONST_DOUBLE)
3675      && (GET_CODE (XEXP (op0, 1)) == CONST_INT
3676	  || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE))
3677    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
3678				    simplify_gen_binary (XOR, cmp_mode,
3679							 XEXP (op0, 1), op1));
3680
3681  return NULL_RTX;
3682}
3683
3684/* Check if the given comparison (done in the given MODE) is actually a
3685   tautology or a contradiction.
3686   If no simplification is possible, this function returns zero.
3687   Otherwise, it returns either const_true_rtx or const0_rtx.  */
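/* For example, (ltu (const_int 1) (const_int 2)) folds to const_true_rtx,
   whereas a comparison whose operands have a MODE_CC mode is never folded,
   since the actual comparison performed is not known.  */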
3688
3689rtx
3690simplify_const_relational_operation (enum rtx_code code,
3691				     enum machine_mode mode,
3692				     rtx op0, rtx op1)
3693{
3694  int equal, op0lt, op0ltu, op1lt, op1ltu;
3695  rtx tem;
3696  rtx trueop0;
3697  rtx trueop1;
3698
3699  gcc_assert (mode != VOIDmode
3700	      || (GET_MODE (op0) == VOIDmode
3701		  && GET_MODE (op1) == VOIDmode));
3702
3703  /* If op0 is a compare, extract the comparison arguments from it.  */
3704  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
3705    {
3706      op1 = XEXP (op0, 1);
3707      op0 = XEXP (op0, 0);
3708
3709      if (GET_MODE (op0) != VOIDmode)
3710	mode = GET_MODE (op0);
3711      else if (GET_MODE (op1) != VOIDmode)
3712	mode = GET_MODE (op1);
3713      else
3714	return 0;
3715    }
3716
3717  /* We can't simplify MODE_CC values since we don't know what the
3718     actual comparison is.  */
3719  if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
3720    return 0;
3721
3722  /* Make sure the constant is second.  */
3723  if (swap_commutative_operands_p (op0, op1))
3724    {
3725      tem = op0, op0 = op1, op1 = tem;
3726      code = swap_condition (code);
3727    }
3728
3729  trueop0 = avoid_constant_pool_reference (op0);
3730  trueop1 = avoid_constant_pool_reference (op1);
3731
3732  /* For integer comparisons of A and B, we may be able to simplify A - B
3733     and then simplify a comparison of the result with zero.  If A and B
3734     are both either a register or a CONST_INT, this can't help; testing
3735     for these cases prevents infinite recursion here and speeds things up.
3736
3737     We can only do this for EQ and NE comparisons, as otherwise we may
3738     lose or introduce overflow, which we cannot disregard as undefined
3739     because we do not know the signedness of the operation on either the
3740     left or the right hand side of the comparison.  */
3741
3742  if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
3743      && (code == EQ || code == NE)
3744      && ! ((REG_P (op0) || GET_CODE (trueop0) == CONST_INT)
3745	    && (REG_P (op1) || GET_CODE (trueop1) == CONST_INT))
3746      && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
3747      /* We cannot do this if tem is a nonzero address.  */
3748      && ! nonzero_address_p (tem))
3749    return simplify_const_relational_operation (signed_condition (code),
3750						mode, tem, const0_rtx);
3751
3752  if (! HONOR_NANS (mode) && code == ORDERED)
3753    return const_true_rtx;
3754
3755  if (! HONOR_NANS (mode) && code == UNORDERED)
3756    return const0_rtx;
3757
3758  /* For modes without NaNs, if the two operands are equal, we know the
3759     result, unless they have side effects.  */
3760  if (! HONOR_NANS (GET_MODE (trueop0))
3761      && rtx_equal_p (trueop0, trueop1)
3762      && ! side_effects_p (trueop0))
3763    equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
3764
3765  /* If the operands are floating-point constants, see if we can fold
3766     the result.  */
3767  else if (GET_CODE (trueop0) == CONST_DOUBLE
3768	   && GET_CODE (trueop1) == CONST_DOUBLE
3769	   && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
3770    {
3771      REAL_VALUE_TYPE d0, d1;
3772
3773      REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
3774      REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);
3775
3776      /* Comparisons are unordered iff at least one of the values is NaN.  */
3777      if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
3778	switch (code)
3779	  {
3780	  case UNEQ:
3781	  case UNLT:
3782	  case UNGT:
3783	  case UNLE:
3784	  case UNGE:
3785	  case NE:
3786	  case UNORDERED:
3787	    return const_true_rtx;
3788	  case EQ:
3789	  case LT:
3790	  case GT:
3791	  case LE:
3792	  case GE:
3793	  case LTGT:
3794	  case ORDERED:
3795	    return const0_rtx;
3796	  default:
3797	    return 0;
3798	  }
3799
3800      equal = REAL_VALUES_EQUAL (d0, d1);
3801      op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
3802      op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
3803    }
3804
3805  /* Otherwise, see if the operands are both integers.  */
3806  else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
3807	   && (GET_CODE (trueop0) == CONST_DOUBLE
3808	       || GET_CODE (trueop0) == CONST_INT)
3809	   && (GET_CODE (trueop1) == CONST_DOUBLE
3810	       || GET_CODE (trueop1) == CONST_INT))
3811    {
3812      int width = GET_MODE_BITSIZE (mode);
3813      HOST_WIDE_INT l0s, h0s, l1s, h1s;
3814      unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
3815
3816      /* Get the two words comprising each integer constant.  */
3817      if (GET_CODE (trueop0) == CONST_DOUBLE)
3818	{
3819	  l0u = l0s = CONST_DOUBLE_LOW (trueop0);
3820	  h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
3821	}
3822      else
3823	{
3824	  l0u = l0s = INTVAL (trueop0);
3825	  h0u = h0s = HWI_SIGN_EXTEND (l0s);
3826	}
3827
3828      if (GET_CODE (trueop1) == CONST_DOUBLE)
3829	{
3830	  l1u = l1s = CONST_DOUBLE_LOW (trueop1);
3831	  h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
3832	}
3833      else
3834	{
3835	  l1u = l1s = INTVAL (trueop1);
3836	  h1u = h1s = HWI_SIGN_EXTEND (l1s);
3837	}
3838
3839      /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
3840	 we have to sign or zero-extend the values.  */
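      /* E.g. in QImode (width 8) the low byte 0xff is 255 in the
	 unsigned copies but sign-extends to -1 in the signed copies.  */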
3841      if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
3842	{
3843	  l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
3844	  l1u &= ((HOST_WIDE_INT) 1 << width) - 1;
3845
3846	  if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
3847	    l0s |= ((HOST_WIDE_INT) (-1) << width);
3848
3849	  if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
3850	    l1s |= ((HOST_WIDE_INT) (-1) << width);
3851	}
3852      if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
3853	h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
3854
3855      equal = (h0u == h1u && l0u == l1u);
3856      op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
3857      op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
3858      op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
3859      op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
3860    }
3861
3862  /* Otherwise, there are some code-specific tests we can make.  */
3863  else
3864    {
3865      /* Optimize comparisons with upper and lower bounds.  */
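      /* E.g. (geu x (const_int 0)) is always true because zero is the
	 minimum unsigned value, and (gtu x M) is always false when M is
	 the maximum unsigned value for the mode.  */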
3866      if (SCALAR_INT_MODE_P (mode)
3867	  && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
3868	{
3869	  rtx mmin, mmax;
3870	  int sign;
3871
3872	  if (code == GEU
3873	      || code == LEU
3874	      || code == GTU
3875	      || code == LTU)
3876	    sign = 0;
3877	  else
3878	    sign = 1;
3879
3880	  get_mode_bounds (mode, sign, mode, &mmin, &mmax);
3881
3882	  tem = NULL_RTX;
3883	  switch (code)
3884	    {
3885	    case GEU:
3886	    case GE:
3887	      /* x >= min is always true.  */
3888	      if (rtx_equal_p (trueop1, mmin))
3889		tem = const_true_rtx;
3890	      break;
3892
3893	    case LEU:
3894	    case LE:
3895	      /* x <= max is always true.  */
3896	      if (rtx_equal_p (trueop1, mmax))
3897		tem = const_true_rtx;
3898	      break;
3899
3900	    case GTU:
3901	    case GT:
3902	      /* x > max is always false.  */
3903	      if (rtx_equal_p (trueop1, mmax))
3904		tem = const0_rtx;
3905	      break;
3906
3907	    case LTU:
3908	    case LT:
3909	      /* x < min is always false.  */
3910	      if (rtx_equal_p (trueop1, mmin))
3911		tem = const0_rtx;
3912	      break;
3913
3914	    default:
3915	      break;
3916	    }
3917	  if (tem == const0_rtx
3918	      || tem == const_true_rtx)
3919	    return tem;
3920	}
3921
3922      switch (code)
3923	{
3924	case EQ:
3925	  if (trueop1 == const0_rtx && nonzero_address_p (op0))
3926	    return const0_rtx;
3927	  break;
3928
3929	case NE:
3930	  if (trueop1 == const0_rtx && nonzero_address_p (op0))
3931	    return const_true_rtx;
3932	  break;
3933
3934	case LT:
3935	  /* Optimize abs(x) < 0.0.  */
3936	  if (trueop1 == CONST0_RTX (mode)
3937	      && !HONOR_SNANS (mode)
3938	      && (!INTEGRAL_MODE_P (mode)
3939		  || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
3940	    {
3941	      tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3942						       : trueop0;
3943	      if (GET_CODE (tem) == ABS)
3944		{
3945		  if (INTEGRAL_MODE_P (mode)
3946		      && (issue_strict_overflow_warning
3947			  (WARN_STRICT_OVERFLOW_CONDITIONAL)))
3948		    warning (OPT_Wstrict_overflow,
3949			     ("assuming signed overflow does not occur when "
3950			      "assuming abs (x) < 0 is false"));
3951		  return const0_rtx;
3952		}
3953	    }
3954	  break;
3955
3956	case GE:
3957	  /* Optimize abs(x) >= 0.0.  */
3958	  if (trueop1 == CONST0_RTX (mode)
3959	      && !HONOR_NANS (mode)
3960	      && (!INTEGRAL_MODE_P (mode)
3961		  || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
3962	    {
3963	      tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3964						       : trueop0;
3965	      if (GET_CODE (tem) == ABS)
3966		{
3967		  if (INTEGRAL_MODE_P (mode)
3968		      && (issue_strict_overflow_warning
3969			  (WARN_STRICT_OVERFLOW_CONDITIONAL)))
3970		    warning (OPT_Wstrict_overflow,
3971			     ("assuming signed overflow does not occur when "
3972			      "assuming abs (x) >= 0 is true"));
3973		  return const_true_rtx;
3974		}
3975	    }
3976	  break;
3977
3978	case UNGE:
3979	  /* Optimize ! (abs(x) < 0.0).  */
3980	  if (trueop1 == CONST0_RTX (mode))
3981	    {
3982	      tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
3983						       : trueop0;
3984	      if (GET_CODE (tem) == ABS)
3985		return const_true_rtx;
3986	    }
3987	  break;
3988
3989	default:
3990	  break;
3991	}
3992
3993      return 0;
3994    }
3995
3996  /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
3997     as appropriate.  */
3998  switch (code)
3999    {
4000    case EQ:
4001    case UNEQ:
4002      return equal ? const_true_rtx : const0_rtx;
4003    case NE:
4004    case LTGT:
4005      return ! equal ? const_true_rtx : const0_rtx;
4006    case LT:
4007    case UNLT:
4008      return op0lt ? const_true_rtx : const0_rtx;
4009    case GT:
4010    case UNGT:
4011      return op1lt ? const_true_rtx : const0_rtx;
4012    case LTU:
4013      return op0ltu ? const_true_rtx : const0_rtx;
4014    case GTU:
4015      return op1ltu ? const_true_rtx : const0_rtx;
4016    case LE:
4017    case UNLE:
4018      return equal || op0lt ? const_true_rtx : const0_rtx;
4019    case GE:
4020    case UNGE:
4021      return equal || op1lt ? const_true_rtx : const0_rtx;
4022    case LEU:
4023      return equal || op0ltu ? const_true_rtx : const0_rtx;
4024    case GEU:
4025      return equal || op1ltu ? const_true_rtx : const0_rtx;
4026    case ORDERED:
4027      return const_true_rtx;
4028    case UNORDERED:
4029      return const0_rtx;
4030    default:
4031      gcc_unreachable ();
4032    }
4033}
4034
4035/* Simplify CODE, an operation with result mode MODE and three operands,
4036   OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
4037   a constant.  Return 0 if no simplification is possible.  */
4038
4039rtx
4040simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
4041			    enum machine_mode op0_mode, rtx op0, rtx op1,
4042			    rtx op2)
4043{
4044  unsigned int width = GET_MODE_BITSIZE (mode);
4045
4046  /* VOIDmode means "infinite" precision.  */
4047  if (width == 0)
4048    width = HOST_BITS_PER_WIDE_INT;
4049
4050  switch (code)
4051    {
4052    case SIGN_EXTRACT:
4053    case ZERO_EXTRACT:
4054      if (GET_CODE (op0) == CONST_INT
4055	  && GET_CODE (op1) == CONST_INT
4056	  && GET_CODE (op2) == CONST_INT
4057	  && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
4058	  && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
4059	{
4060	  /* Extracting a bit-field from a constant */
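	  /* E.g. with !BITS_BIG_ENDIAN, (zero_extract (const_int 13)
	     (const_int 2) (const_int 1)) selects bits 1 and 2 of binary
	     1101 and yields (const_int 2); a sign_extract of the same
	     field yields (const_int -2).  */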
4061	  HOST_WIDE_INT val = INTVAL (op0);
4062
4063	  if (BITS_BIG_ENDIAN)
4064	    val >>= (GET_MODE_BITSIZE (op0_mode)
4065		     - INTVAL (op2) - INTVAL (op1));
4066	  else
4067	    val >>= INTVAL (op2);
4068
4069	  if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
4070	    {
4071	      /* First zero-extend.  */
4072	      val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
4073	      /* If desired, propagate sign bit.  */
4074	      if (code == SIGN_EXTRACT
4075		  && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
4076		val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
4077	    }
4078
4079	  /* Clear the bits that don't belong in our mode,
4080	     unless they and our sign bit are all one.
4081	     So we get either a reasonable negative value or a reasonable
4082	     unsigned value for this mode.  */
4083	  if (width < HOST_BITS_PER_WIDE_INT
4084	      && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
4085		  != ((HOST_WIDE_INT) (-1) << (width - 1))))
4086	    val &= ((HOST_WIDE_INT) 1 << width) - 1;
4087
4088	  return gen_int_mode (val, mode);
4089	}
4090      break;
4091
4092    case IF_THEN_ELSE:
4093      if (GET_CODE (op0) == CONST_INT)
4094	return op0 != const0_rtx ? op1 : op2;
4095
4096      /* Convert c ? a : a into "a".  */
4097      if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
4098	return op1;
4099
4100      /* Convert a != b ? a : b into "a".  */
4101      if (GET_CODE (op0) == NE
4102	  && ! side_effects_p (op0)
4103	  && ! HONOR_NANS (mode)
4104	  && ! HONOR_SIGNED_ZEROS (mode)
4105	  && ((rtx_equal_p (XEXP (op0, 0), op1)
4106	       && rtx_equal_p (XEXP (op0, 1), op2))
4107	      || (rtx_equal_p (XEXP (op0, 0), op2)
4108		  && rtx_equal_p (XEXP (op0, 1), op1))))
4109	return op1;
4110
4111      /* Convert a == b ? a : b into "b".  */
4112      if (GET_CODE (op0) == EQ
4113	  && ! side_effects_p (op0)
4114	  && ! HONOR_NANS (mode)
4115	  && ! HONOR_SIGNED_ZEROS (mode)
4116	  && ((rtx_equal_p (XEXP (op0, 0), op1)
4117	       && rtx_equal_p (XEXP (op0, 1), op2))
4118	      || (rtx_equal_p (XEXP (op0, 0), op2)
4119		  && rtx_equal_p (XEXP (op0, 1), op1))))
4120	return op2;
4121
4122      if (COMPARISON_P (op0) && ! side_effects_p (op0))
4123	{
4124	  enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
4125					? GET_MODE (XEXP (op0, 1))
4126					: GET_MODE (XEXP (op0, 0)));
4127	  rtx temp;
4128
4129	  /* Look for happy constants in op1 and op2.  */
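	  /* E.g. when STORE_FLAG_VALUE is 1, (if_then_else (lt a b)
	     (const_int 1) (const_int 0)) becomes (lt a b), and with the
	     constants swapped it becomes the reversed comparison
	     (ge a b), provided the reversal is known to be valid.  */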
4130	  if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
4131	    {
4132	      HOST_WIDE_INT t = INTVAL (op1);
4133	      HOST_WIDE_INT f = INTVAL (op2);
4134
4135	      if (t == STORE_FLAG_VALUE && f == 0)
4136	        code = GET_CODE (op0);
4137	      else if (t == 0 && f == STORE_FLAG_VALUE)
4138		{
4139		  enum rtx_code tmp;
4140		  tmp = reversed_comparison_code (op0, NULL_RTX);
4141		  if (tmp == UNKNOWN)
4142		    break;
4143		  code = tmp;
4144		}
4145	      else
4146		break;
4147
4148	      return simplify_gen_relational (code, mode, cmp_mode,
4149					      XEXP (op0, 0), XEXP (op0, 1));
4150	    }
4151
4152	  if (cmp_mode == VOIDmode)
4153	    cmp_mode = op0_mode;
4154	  temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
4155			  			cmp_mode, XEXP (op0, 0),
4156						XEXP (op0, 1));
4157
4158	  /* See if any simplifications were possible.  */
4159	  if (temp)
4160	    {
4161	      if (GET_CODE (temp) == CONST_INT)
4162		return temp == const0_rtx ? op2 : op1;
4163	      else if (temp)
4164	        return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
4165	    }
4166	}
4167      break;
4168
4169    case VEC_MERGE:
4170      gcc_assert (GET_MODE (op0) == mode);
4171      gcc_assert (GET_MODE (op1) == mode);
4172      gcc_assert (VECTOR_MODE_P (mode));
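      /* The bits of op2 select between the operands element-wise, e.g.
	 (vec_merge:V2SI A B (const_int 1)) takes element 0 from A and
	 element 1 from B.  */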
4173      op2 = avoid_constant_pool_reference (op2);
4174      if (GET_CODE (op2) == CONST_INT)
4175	{
4176          int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
4177	  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
4178	  int mask = (1 << n_elts) - 1;
4179
4180	  if (!(INTVAL (op2) & mask))
4181	    return op1;
4182	  if ((INTVAL (op2) & mask) == mask)
4183	    return op0;
4184
4185	  op0 = avoid_constant_pool_reference (op0);
4186	  op1 = avoid_constant_pool_reference (op1);
4187	  if (GET_CODE (op0) == CONST_VECTOR
4188	      && GET_CODE (op1) == CONST_VECTOR)
4189	    {
4190	      rtvec v = rtvec_alloc (n_elts);
4191	      unsigned int i;
4192
4193	      for (i = 0; i < n_elts; i++)
4194		RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
4195				    ? CONST_VECTOR_ELT (op0, i)
4196				    : CONST_VECTOR_ELT (op1, i));
4197	      return gen_rtx_CONST_VECTOR (mode, v);
4198	    }
4199	}
4200      break;
4201
4202    default:
4203      gcc_unreachable ();
4204    }
4205
4206  return 0;
4207}
4208
4209/* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_VECTOR,
4210   returning another CONST_INT or CONST_DOUBLE or CONST_VECTOR.
4211
4212   Works by unpacking OP into a collection of 8-bit values
4213   represented as a little-endian array of 'unsigned char', selecting by BYTE,
4214   and then repacking them again for OUTERMODE.  */
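/* E.g. on a little-endian target, a QImode subreg at byte 0 of the
   SImode constant 0x1234 evaluates to (const_int 0x34), its low byte.  */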
4215
4216static rtx
4217simplify_immed_subreg (enum machine_mode outermode, rtx op,
4218		       enum machine_mode innermode, unsigned int byte)
4219{
4220  /* We support up to 512-bit values (for V8DFmode).  */
4221  enum {
4222    max_bitsize = 512,
4223    value_bit = 8,
4224    value_mask = (1 << value_bit) - 1
4225  };
4226  unsigned char value[max_bitsize / value_bit];
4227  int value_start;
4228  int i;
4229  int elem;
4230
4231  int num_elem;
4232  rtx * elems;
4233  int elem_bitsize;
4234  rtx result_s;
4235  rtvec result_v = NULL;
4236  enum mode_class outer_class;
4237  enum machine_mode outer_submode;
4238
4239  /* Some ports misuse CCmode.  */
4240  if (GET_MODE_CLASS (outermode) == MODE_CC && GET_CODE (op) == CONST_INT)
4241    return op;
4242
4243  /* We have no way to represent a complex constant at the rtl level.  */
4244  if (COMPLEX_MODE_P (outermode))
4245    return NULL_RTX;
4246
4247  /* Unpack the value.  */
4248
4249  if (GET_CODE (op) == CONST_VECTOR)
4250    {
4251      num_elem = CONST_VECTOR_NUNITS (op);
4252      elems = &CONST_VECTOR_ELT (op, 0);
4253      elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
4254    }
4255  else
4256    {
4257      num_elem = 1;
4258      elems = &op;
4259      elem_bitsize = max_bitsize;
4260    }
4261  /* If this asserts, it is too complicated; reducing value_bit may help.  */
4262  gcc_assert (BITS_PER_UNIT % value_bit == 0);
4263  /* I don't know how to handle endianness of sub-units.  */
4264  gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
4265
4266  for (elem = 0; elem < num_elem; elem++)
4267    {
4268      unsigned char * vp;
4269      rtx el = elems[elem];
4270
4271      /* Vectors are kept in target memory order.  (This is probably
4272	 a mistake.)  */
4273      {
4274	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
4275	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
4276			  / BITS_PER_UNIT);
4277	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4278	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4279	unsigned bytele = (subword_byte % UNITS_PER_WORD
4280			 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4281	vp = value + (bytele * BITS_PER_UNIT) / value_bit;
4282      }
4283
4284      switch (GET_CODE (el))
4285	{
4286	case CONST_INT:
4287	  for (i = 0;
4288	       i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4289	       i += value_bit)
4290	    *vp++ = INTVAL (el) >> i;
4291	  /* CONST_INTs are always logically sign-extended.  */
4292	  for (; i < elem_bitsize; i += value_bit)
4293	    *vp++ = INTVAL (el) < 0 ? -1 : 0;
4294	  break;
4295
4296	case CONST_DOUBLE:
4297	  if (GET_MODE (el) == VOIDmode)
4298	    {
4299	      /* If this triggers, someone should have generated a
4300		 CONST_INT instead.  */
4301	      gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);
4302
4303	      for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
4304		*vp++ = CONST_DOUBLE_LOW (el) >> i;
4305	      while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
4306		{
4307		  *vp++
4308		    = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
4309		  i += value_bit;
4310		}
4311	      /* It shouldn't matter what's done here, so fill it with
4312		 zero.  */
4313	      for (; i < elem_bitsize; i += value_bit)
4314		*vp++ = 0;
4315	    }
4316	  else
4317	    {
4318	      long tmp[max_bitsize / 32];
4319	      int bitsize = GET_MODE_BITSIZE (GET_MODE (el));
4320
4321	      gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
4322	      gcc_assert (bitsize <= elem_bitsize);
4323	      gcc_assert (bitsize % value_bit == 0);
4324
4325	      real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
4326			      GET_MODE (el));
4327
4328	      /* real_to_target produces its result in words affected by
4329		 FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
4330		 and use WORDS_BIG_ENDIAN instead; see the documentation
4331	         of SUBREG in rtl.texi.  */
4332	      for (i = 0; i < bitsize; i += value_bit)
4333		{
4334		  int ibase;
4335		  if (WORDS_BIG_ENDIAN)
4336		    ibase = bitsize - 1 - i;
4337		  else
4338		    ibase = i;
4339		  *vp++ = tmp[ibase / 32] >> i % 32;
4340		}
4341
4342	      /* It shouldn't matter what's done here, so fill it with
4343		 zero.  */
4344	      for (; i < elem_bitsize; i += value_bit)
4345		*vp++ = 0;
4346	    }
4347	  break;
4348
4349	default:
4350	  gcc_unreachable ();
4351	}
4352    }
4353
4354  /* Now, pick the right byte to start with.  */
4355  /* Renumber BYTE so that the least-significant byte is byte 0.  A special
4356     case is paradoxical SUBREGs, which shouldn't be adjusted since they
4357     will already have offset 0.  */
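  /* E.g. for a 4-byte inner value on a big-endian target with 4-byte
     words, byte 0 (the most significant byte) renumbers to byte 3.  */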
4358  if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
4359    {
4360      unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
4361			- byte);
4362      unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4363      unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4364      byte = (subword_byte % UNITS_PER_WORD
4365	      + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4366    }
4367
4368  /* BYTE should still be inside OP.  (Note that BYTE is unsigned,
4369     so if it's become negative it will instead be very large.)  */
4370  gcc_assert (byte < GET_MODE_SIZE (innermode));
4371
4372  /* Convert from bytes to chunks of size value_bit.  */
4373  value_start = byte * (BITS_PER_UNIT / value_bit);
4374
4375  /* Re-pack the value.  */
4376
4377  if (VECTOR_MODE_P (outermode))
4378    {
4379      num_elem = GET_MODE_NUNITS (outermode);
4380      result_v = rtvec_alloc (num_elem);
4381      elems = &RTVEC_ELT (result_v, 0);
4382      outer_submode = GET_MODE_INNER (outermode);
4383    }
4384  else
4385    {
4386      num_elem = 1;
4387      elems = &result_s;
4388      outer_submode = outermode;
4389    }
4390
4391  outer_class = GET_MODE_CLASS (outer_submode);
4392  elem_bitsize = GET_MODE_BITSIZE (outer_submode);
4393
4394  gcc_assert (elem_bitsize % value_bit == 0);
4395  gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
4396
4397  for (elem = 0; elem < num_elem; elem++)
4398    {
4399      unsigned char *vp;
4400
4401      /* Vectors are stored in target memory order.  (This is probably
4402	 a mistake.)  */
4403      {
4404	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
4405	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
4406			  / BITS_PER_UNIT);
4407	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
4408	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
4409	unsigned bytele = (subword_byte % UNITS_PER_WORD
4410			 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
4411	vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
4412      }
4413
4414      switch (outer_class)
4415	{
4416	case MODE_INT:
4417	case MODE_PARTIAL_INT:
4418	  {
4419	    unsigned HOST_WIDE_INT hi = 0, lo = 0;
4420
4421	    for (i = 0;
4422		 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
4423		 i += value_bit)
4424	      lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
4425	    for (; i < elem_bitsize; i += value_bit)
4426	      hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
4427		     << (i - HOST_BITS_PER_WIDE_INT));
4428
4429	    /* immed_double_const doesn't call trunc_int_for_mode.  I don't
4430	       know why.  */
4431	    if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
4432	      elems[elem] = gen_int_mode (lo, outer_submode);
4433	    else if (elem_bitsize <= 2 * HOST_BITS_PER_WIDE_INT)
4434	      elems[elem] = immed_double_const (lo, hi, outer_submode);
4435	    else
4436	      return NULL_RTX;
4437	  }
4438	  break;
4439
4440	case MODE_FLOAT:
4441	case MODE_DECIMAL_FLOAT:
4442	  {
4443	    REAL_VALUE_TYPE r;
4444	    long tmp[max_bitsize / 32];
4445
4446	    /* real_from_target wants its input in words affected by
4447	       FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
4448	       and use WORDS_BIG_ENDIAN instead; see the documentation
4449	       of SUBREG in rtl.texi.  */
4450	    for (i = 0; i < max_bitsize / 32; i++)
4451	      tmp[i] = 0;
4452	    for (i = 0; i < elem_bitsize; i += value_bit)
4453	      {
4454		int ibase;
4455		if (WORDS_BIG_ENDIAN)
4456		  ibase = elem_bitsize - 1 - i;
4457		else
4458		  ibase = i;
4459		tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
4460	      }
4461
4462	    real_from_target (&r, tmp, outer_submode);
4463	    elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
4464	  }
4465	  break;
4466
4467	default:
4468	  gcc_unreachable ();
4469	}
4470    }
4471  if (VECTOR_MODE_P (outermode))
4472    return gen_rtx_CONST_VECTOR (outermode, result_v);
4473  else
4474    return result_s;
4475}
4476
4477/* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
4478   Return 0 if no simplifications are possible.  */
4479rtx
4480simplify_subreg (enum machine_mode outermode, rtx op,
4481		 enum machine_mode innermode, unsigned int byte)
4482{
4483  /* Little bit of sanity checking.  */
4484  gcc_assert (innermode != VOIDmode);
4485  gcc_assert (outermode != VOIDmode);
4486  gcc_assert (innermode != BLKmode);
4487  gcc_assert (outermode != BLKmode);
4488
4489  gcc_assert (GET_MODE (op) == innermode
4490	      || GET_MODE (op) == VOIDmode);
4491
4492  gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
4493  gcc_assert (byte < GET_MODE_SIZE (innermode));
4494
4495  if (outermode == innermode && !byte)
4496    return op;
4497
4498  if (GET_CODE (op) == CONST_INT
4499      || GET_CODE (op) == CONST_DOUBLE
4500      || GET_CODE (op) == CONST_VECTOR)
4501    return simplify_immed_subreg (outermode, op, innermode, byte);
4502
4503  /* Changing mode twice with SUBREG => just change it once,
4504     or not at all if changing back to op's starting mode.  */
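  /* E.g. (subreg:QI (subreg:HI (reg:SI x) 0) 0) folds to
     (subreg:QI (reg:SI x) 0), assuming little-endian byte numbering.  */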
4505  if (GET_CODE (op) == SUBREG)
4506    {
4507      enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
4508      int final_offset = byte + SUBREG_BYTE (op);
4509      rtx newx;
4510
4511      if (outermode == innermostmode
4512	  && byte == 0 && SUBREG_BYTE (op) == 0)
4513	return SUBREG_REG (op);
4514
4515      /* The SUBREG_BYTE represents the offset, as if the value were stored
4516	 in memory.  An irritating exception is a paradoxical subreg, where
4517	 we define SUBREG_BYTE to be 0.  On big endian machines, this
4518	 value should be negative.  For a moment, undo this exception.  */
4519      if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
4520	{
4521	  int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
4522	  if (WORDS_BIG_ENDIAN)
4523	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4524	  if (BYTES_BIG_ENDIAN)
4525	    final_offset += difference % UNITS_PER_WORD;
4526	}
4527      if (SUBREG_BYTE (op) == 0
4528	  && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
4529	{
4530	  int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
4531	  if (WORDS_BIG_ENDIAN)
4532	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4533	  if (BYTES_BIG_ENDIAN)
4534	    final_offset += difference % UNITS_PER_WORD;
4535	}
4536
4537      /* See whether resulting subreg will be paradoxical.  */
4538      if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
4539	{
4540	  /* In nonparadoxical subregs we can't handle negative offsets.  */
4541	  if (final_offset < 0)
4542	    return NULL_RTX;
4543	  /* Bail out in case resulting subreg would be incorrect.  */
4544	  if (final_offset % GET_MODE_SIZE (outermode)
4545	      || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
4546	    return NULL_RTX;
4547	}
4548      else
4549	{
4550	  int offset = 0;
4551	  int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
4552
4553	  /* In a paradoxical subreg, see if we are still looking at the lower
4554	     part.  If so, our SUBREG_BYTE will be 0.  */
4555	  if (WORDS_BIG_ENDIAN)
4556	    offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4557	  if (BYTES_BIG_ENDIAN)
4558	    offset += difference % UNITS_PER_WORD;
4559	  if (offset == final_offset)
4560	    final_offset = 0;
4561	  else
4562	    return NULL_RTX;
4563	}
4564
4565      /* Recurse for further possible simplifications.  */
4566      newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
4567			      final_offset);
4568      if (newx)
4569	return newx;
4570      if (validate_subreg (outermode, innermostmode,
4571			   SUBREG_REG (op), final_offset))
4572        return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
4573      return NULL_RTX;
4574    }
4575
4576  /* Merge implicit and explicit truncations.  */
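  /* E.g. (subreg:QI (truncate:HI (reg:SI x)) 0) becomes
     (truncate:QI (reg:SI x)) when byte 0 is the lowpart offset.  */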
4577
4578  if (GET_CODE (op) == TRUNCATE
4579      && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (innermode)
4580      && subreg_lowpart_offset (outermode, innermode) == byte)
4581    return simplify_gen_unary (TRUNCATE, outermode, XEXP (op, 0),
4582			       GET_MODE (XEXP (op, 0)));
4583
4584  /* SUBREG of a hard register => just change the register number
4585     and/or mode.  If the hard register is not valid in that mode,
4586     suppress this simplification.  If the hard register is the stack,
4587     frame, or argument pointer, leave this as a SUBREG.  */
4588
4589  if (REG_P (op)
4590      && REGNO (op) < FIRST_PSEUDO_REGISTER
4591#ifdef CANNOT_CHANGE_MODE_CLASS
4592      && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
4593	    && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
4594	    && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
4595#endif
4596      && ((reload_completed && !frame_pointer_needed)
4597	  || (REGNO (op) != FRAME_POINTER_REGNUM
4598#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
4599	      && REGNO (op) != HARD_FRAME_POINTER_REGNUM
4600#endif
4601	     ))
4602#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
4603      && REGNO (op) != ARG_POINTER_REGNUM
4604#endif
4605      && REGNO (op) != STACK_POINTER_REGNUM
4606      && subreg_offset_representable_p (REGNO (op), innermode,
4607					byte, outermode))
4608    {
4609      unsigned int regno = REGNO (op);
4610      unsigned int final_regno
4611	= regno + subreg_regno_offset (regno, innermode, byte, outermode);
4612
4613      /* ??? We do allow it if the current REG is not valid for
4614	 its mode.  This is a kludge to work around how float/complex
4615	 arguments are passed on 32-bit SPARC and should be fixed.  */
4616      if (HARD_REGNO_MODE_OK (final_regno, outermode)
4617	  || ! HARD_REGNO_MODE_OK (regno, innermode))
4618	{
4619	  rtx x;
4620	  int final_offset = byte;
4621
4622	  /* Adjust offset for paradoxical subregs.  */
4623	  if (byte == 0
4624	      && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
4625	    {
4626	      int difference = (GET_MODE_SIZE (innermode)
4627				- GET_MODE_SIZE (outermode));
4628	      if (WORDS_BIG_ENDIAN)
4629		final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4630	      if (BYTES_BIG_ENDIAN)
4631		final_offset += difference % UNITS_PER_WORD;
4632	    }
4633
4634	  x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
4635
4636	  /* Propagate the original regno.  We don't have any way to specify
4637	     the offset inside the original regno, so do so only for the
4638	     lowpart.  The information is used only by alias analysis, which
4639	     cannot grok a partial register anyway.  */
4640
4641	  if (subreg_lowpart_offset (outermode, innermode) == byte)
4642	    ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
4643	  return x;
4644	}
4645    }
4646
4647  /* If we have a SUBREG of a register that we are replacing and we are
4648     replacing it with a MEM, make a new MEM and try replacing the
4649     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
4650     or if we would be widening it.  */
4651
4652  if (MEM_P (op)
4653      && ! mode_dependent_address_p (XEXP (op, 0))
4654      /* Allow splitting of volatile memory references in case we don't
4655         have an instruction to move the whole thing.  */
4656      && (! MEM_VOLATILE_P (op)
4657	  || ! have_insn_for (SET, innermode))
4658      && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
4659    return adjust_address_nv (op, outermode, byte);
4660
4661  /* Handle complex values represented as CONCAT
4662     of real and imaginary part.  */
4663  if (GET_CODE (op) == CONCAT)
4664    {
4665      unsigned int inner_size, final_offset;
4666      rtx part, res;
4667
4668      inner_size = GET_MODE_UNIT_SIZE (innermode);
4669      part = byte < inner_size ? XEXP (op, 0) : XEXP (op, 1);
4670      final_offset = byte % inner_size;
4671      if (final_offset + GET_MODE_SIZE (outermode) > inner_size)
4672	return NULL_RTX;
4673
4674      res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
4675      if (res)
4676	return res;
4677      if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
4678	return gen_rtx_SUBREG (outermode, part, final_offset);
4679      return NULL_RTX;
4680    }
4681
4682  /* Optimize SUBREG truncations of zero and sign extended values.  */
4683  if ((GET_CODE (op) == ZERO_EXTEND
4684       || GET_CODE (op) == SIGN_EXTEND)
4685      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
4686    {
4687      unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
4688
4689      /* If we're requesting the lowpart of a zero or sign extension,
4690	 there are three possibilities.  If the outermode is the same
4691	 as the origmode, we can omit both the extension and the subreg.
4692	 If the outermode is not larger than the origmode, we can apply
4693	 the truncation without the extension.  Finally, if the outermode
4694	 is larger than the origmode, but both are integer modes, we
4695	 can just extend to the appropriate mode.  */
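      /* E.g. with little-endian byte numbering,
	 (subreg:HI (zero_extend:SI (reg:HI x)) 0) folds to (reg:HI x)
	 and (subreg:QI (zero_extend:SI (reg:HI x)) 0) folds to
	 (subreg:QI (reg:HI x) 0).  */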
4696      if (bitpos == 0)
4697	{
4698	  enum machine_mode origmode = GET_MODE (XEXP (op, 0));
4699	  if (outermode == origmode)
4700	    return XEXP (op, 0);
4701	  if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
4702	    return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
4703					subreg_lowpart_offset (outermode,
4704							       origmode));
4705	  if (SCALAR_INT_MODE_P (outermode))
4706	    return simplify_gen_unary (GET_CODE (op), outermode,
4707				       XEXP (op, 0), origmode);
4708	}
4709
4710      /* A SUBREG resulting from a zero extension may fold to zero if
4711	 it extracts higher bits than the ZERO_EXTEND's source provides.  */
4712      if (GET_CODE (op) == ZERO_EXTEND
4713	  && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
4714	return CONST0_RTX (outermode);
4715    }
4716
4717  /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0)
4718     to (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
4719     the outer subreg is effectively a truncation to the original mode.  */
4720  if ((GET_CODE (op) == LSHIFTRT
4721       || GET_CODE (op) == ASHIFTRT)
4722      && SCALAR_INT_MODE_P (outermode)
4723      /* Ensure that OUTERMODE is at least twice as wide as the INNERMODE
4724	 to avoid the possibility that an outer LSHIFTRT shifts by more
4725	 than the sign extension's sign_bit_copies and introduces zeros
4726	 into the high bits of the result.  */
4727      && (2 * GET_MODE_BITSIZE (outermode)) <= GET_MODE_BITSIZE (innermode)
4728      && GET_CODE (XEXP (op, 1)) == CONST_INT
4729      && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
4730      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
4731      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
4732      && subreg_lsb_1 (outermode, innermode, byte) == 0)
4733    return simplify_gen_binary (ASHIFTRT, outermode,
4734				XEXP (XEXP (op, 0), 0), XEXP (op, 1));
4735
4736  /* Likewise simplify (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0)
4737     to (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
4738     the outer subreg is effectively a truncation to the original mode.  */
4739  if ((GET_CODE (op) == LSHIFTRT
4740       || GET_CODE (op) == ASHIFTRT)
4741      && SCALAR_INT_MODE_P (outermode)
4742      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
4743      && GET_CODE (XEXP (op, 1)) == CONST_INT
4744      && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
4745      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
4746      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
4747      && subreg_lsb_1 (outermode, innermode, byte) == 0)
4748    return simplify_gen_binary (LSHIFTRT, outermode,
4749				XEXP (XEXP (op, 0), 0), XEXP (op, 1));
4750
4751  /* Likewise simplify (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0)
4752     to (ashift:QI (x:QI) C), where C is a suitable small constant and
4753     the outer subreg is effectively a truncation to the original mode.  */
4754  if (GET_CODE (op) == ASHIFT
4755      && SCALAR_INT_MODE_P (outermode)
4756      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
4757      && GET_CODE (XEXP (op, 1)) == CONST_INT
4758      && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
4759	  || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
4760      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
4761      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
4762      && subreg_lsb_1 (outermode, innermode, byte) == 0)
4763    return simplify_gen_binary (ASHIFT, outermode,
4764				XEXP (XEXP (op, 0), 0), XEXP (op, 1));
4765
4766  return NULL_RTX;
4767}
4768
4769/* Make a SUBREG operation or equivalent if it folds.  */
4770
4771rtx
4772simplify_gen_subreg (enum machine_mode outermode, rtx op,
4773		     enum machine_mode innermode, unsigned int byte)
4774{
4775  rtx newx;
4776
4777  newx = simplify_subreg (outermode, op, innermode, byte);
4778  if (newx)
4779    return newx;
4780
4781  if (GET_CODE (op) == SUBREG
4782      || GET_CODE (op) == CONCAT
4783      || GET_MODE (op) == VOIDmode)
4784    return NULL_RTX;
4785
4786  if (validate_subreg (outermode, innermode, op, byte))
4787    return gen_rtx_SUBREG (outermode, op, byte);
4788
4789  return NULL_RTX;
4790}
4791
4792/* Simplify X, an rtx expression.
4793
4794   Return the simplified expression or NULL if no simplifications
4795   were possible.
4796
4797   This is the preferred entry point into the simplification routines;
4798   however, we still allow passes to call the more specific routines.
4799
4800   Right now GCC has three (yes, three) major bodies of RTL simplification
4801   code that need to be unified.
4802
4803	1. fold_rtx in cse.c.  This code uses various CSE specific
4804	   information to aid in RTL simplification.
4805
4806	2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
4807	   it uses combine specific information to aid in RTL
4808	   simplification.
4809
4810	3. The routines in this file.
4811
4812
4813   Long term we want to only have one body of simplification code; to
4814   get to that state I recommend the following steps:
4815
4816	1. Pore over fold_rtx & simplify_rtx and move any simplifications
4817	   which do not depend on pass-specific state into these routines.
4818
4819	2. As code is moved by #1, change fold_rtx & simplify_rtx to
4820	   use this routine whenever possible.
4821
4822	3. Allow for pass dependent state to be provided to these
4823	   routines and add simplifications based on the pass dependent
4824	   state.  Remove code from cse.c & combine.c that becomes
4825	   redundant/dead.
4826
4827    It will take time, but ultimately the compiler will be easier to
4828    maintain and improve.  It's totally silly that when we add a
4829    simplification it needs to be added to 4 places (3 for RTL
4830    simplification and 1 for tree simplification).  */
4831
4832rtx
4833simplify_rtx (rtx x)
4834{
4835  enum rtx_code code = GET_CODE (x);
4836  enum machine_mode mode = GET_MODE (x);
4837
4838  switch (GET_RTX_CLASS (code))
4839    {
4840    case RTX_UNARY:
4841      return simplify_unary_operation (code, mode,
4842				       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
4843    case RTX_COMM_ARITH:
4844      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
4845	return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
4846
4847      /* Fall through....  */
4848
4849    case RTX_BIN_ARITH:
4850      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
4851
4852    case RTX_TERNARY:
4853    case RTX_BITFIELD_OPS:
4854      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
4855					 XEXP (x, 0), XEXP (x, 1),
4856					 XEXP (x, 2));
4857
4858    case RTX_COMPARE:
4859    case RTX_COMM_COMPARE:
4860      return simplify_relational_operation (code, mode,
4861                                            ((GET_MODE (XEXP (x, 0))
4862                                             != VOIDmode)
4863                                            ? GET_MODE (XEXP (x, 0))
4864                                            : GET_MODE (XEXP (x, 1))),
4865                                            XEXP (x, 0),
4866                                            XEXP (x, 1));
4867
4868    case RTX_EXTRA:
4869      if (code == SUBREG)
4870	return simplify_gen_subreg (mode, SUBREG_REG (x),
4871				    GET_MODE (SUBREG_REG (x)),
4872				    SUBREG_BYTE (x));
4873      break;
4874
4875    case RTX_OBJ:
4876      if (code == LO_SUM)
4877	{
4878	  /* Convert (lo_sum (high FOO) FOO) to FOO.  */
4879	  if (GET_CODE (XEXP (x, 0)) == HIGH
4880	      && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
4881	    return XEXP (x, 1);
4882	}
4883      break;
4884
4885    default:
4886      break;
4887    }
4888  return NULL;
4889}
4890
4891