/* Diff-viewer context (not part of the original source):
   simplify-rtx.c, revision 169690 vs. revision 220150, "full compact" view.  */
1/* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
4 Free Software Foundation, Inc.
5
6This file is part of GCC.
7
8GCC is free software; you can redistribute it and/or modify it under
9the terms of the GNU General Public License as published by the Free
10Software Foundation; either version 2, or (at your option) any later
11version.
12
13GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14WARRANTY; without even the implied warranty of MERCHANTABILITY or
15FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16for more details.
17
18You should have received a copy of the GNU General Public License
19along with GCC; see the file COPYING. If not, write to the Free
20Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
2102110-1301, USA. */
22
23
24#include "config.h"
25#include "system.h"
26#include "coretypes.h"
27#include "tm.h"
28#include "rtl.h"
29#include "tree.h"
30#include "tm_p.h"
31#include "regs.h"
32#include "hard-reg-set.h"
33#include "flags.h"
34#include "real.h"
35#include "insn-config.h"
36#include "recog.h"
37#include "function.h"
38#include "expr.h"
39#include "toplev.h"
40#include "output.h"
41#include "ggc.h"
42#include "target.h"
43
44/* Simplification and canonicalization of RTL. */
45
46/* Much code operates on (low, high) pairs; the low value is an
47 unsigned wide int, the high value a signed wide int. We
48 occasionally need to sign extend from low to high as if low were a
49 signed wide int. */
50#define HWI_SIGN_EXTEND(low) \
51 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
52
53static rtx neg_const_int (enum machine_mode, rtx);
54static bool plus_minus_operand_p (rtx);
55static int simplify_plus_minus_op_data_cmp (const void *, const void *);
56static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
57static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
58 unsigned int);
59static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
60 rtx, rtx);
61static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
62 enum machine_mode, rtx, rtx);
63static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
64static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
65 rtx, rtx, rtx, rtx);
66
67/* Negate a CONST_INT rtx, truncating (because a conversion from a
68 maximally negative number can overflow). */
69static rtx
70neg_const_int (enum machine_mode mode, rtx i)
71{
72 return gen_int_mode (- INTVAL (i), mode);
73}
74
75/* Test whether expression, X, is an immediate constant that represents
76 the most significant bit of machine mode MODE. */
77
78bool
79mode_signbit_p (enum machine_mode mode, rtx x)
80{
81 unsigned HOST_WIDE_INT val;
82 unsigned int width;
83
84 if (GET_MODE_CLASS (mode) != MODE_INT)
85 return false;
86
87 width = GET_MODE_BITSIZE (mode);
88 if (width == 0)
89 return false;
90
91 if (width <= HOST_BITS_PER_WIDE_INT
92 && GET_CODE (x) == CONST_INT)
93 val = INTVAL (x);
94 else if (width <= 2 * HOST_BITS_PER_WIDE_INT
95 && GET_CODE (x) == CONST_DOUBLE
96 && CONST_DOUBLE_LOW (x) == 0)
97 {
98 val = CONST_DOUBLE_HIGH (x);
99 width -= HOST_BITS_PER_WIDE_INT;
100 }
101 else
102 return false;
103
104 if (width < HOST_BITS_PER_WIDE_INT)
105 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
106 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
107}
108
109/* Make a binary operation by properly ordering the operands and
110 seeing if the expression folds. */
111
112rtx
113simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
114 rtx op1)
115{
116 rtx tem;
117
118 /* If this simplifies, do it. */
119 tem = simplify_binary_operation (code, mode, op0, op1);
120 if (tem)
121 return tem;
122
123 /* Put complex operands first and constants second if commutative. */
124 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
125 && swap_commutative_operands_p (op0, op1))
126 tem = op0, op0 = op1, op1 = tem;
127
128 return gen_rtx_fmt_ee (code, mode, op0, op1);
129}
130
131/* If X is a MEM referencing the constant pool, return the real value.
132 Otherwise return X. */
133rtx
134avoid_constant_pool_reference (rtx x)
135{
136 rtx c, tmp, addr;
137 enum machine_mode cmode;
138 HOST_WIDE_INT offset = 0;
139
140 switch (GET_CODE (x))
141 {
142 case MEM:
143 break;
144
145 case FLOAT_EXTEND:
146 /* Handle float extensions of constant pool references. */
147 tmp = XEXP (x, 0);
148 c = avoid_constant_pool_reference (tmp);
149 if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
150 {
151 REAL_VALUE_TYPE d;
152
153 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
154 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
155 }
156 return x;
157
158 default:
159 return x;
160 }
161
162 addr = XEXP (x, 0);
163
164 /* Call target hook to avoid the effects of -fpic etc.... */
165 addr = targetm.delegitimize_address (addr);
166
167 /* Split the address into a base and integer offset. */
168 if (GET_CODE (addr) == CONST
169 && GET_CODE (XEXP (addr, 0)) == PLUS
170 && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
171 {
172 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
173 addr = XEXP (XEXP (addr, 0), 0);
174 }
175
176 if (GET_CODE (addr) == LO_SUM)
177 addr = XEXP (addr, 1);
178
179 /* If this is a constant pool reference, we can turn it into its
180 constant and hope that simplifications happen. */
181 if (GET_CODE (addr) == SYMBOL_REF
182 && CONSTANT_POOL_ADDRESS_P (addr))
183 {
184 c = get_pool_constant (addr);
185 cmode = get_pool_mode (addr);
186
187 /* If we're accessing the constant in a different mode than it was
188 originally stored, attempt to fix that up via subreg simplifications.
189 If that fails we have no choice but to return the original memory. */
190 if (offset != 0 || cmode != GET_MODE (x))
191 {
192 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
193 if (tem && CONSTANT_P (tem))
194 return tem;
195 }
196 else
197 return c;
198 }
199
200 return x;
201}
202
203/* Return true if X is a MEM referencing the constant pool. */
204
205bool
206constant_pool_reference_p (rtx x)
207{
208 return avoid_constant_pool_reference (x) != x;
209}
210
211/* Make a unary operation by first seeing if it folds and otherwise making
212 the specified operation. */
213
214rtx
215simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
216 enum machine_mode op_mode)
217{
218 rtx tem;
219
220 /* If this simplifies, use it. */
221 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
222 return tem;
223
224 return gen_rtx_fmt_e (code, mode, op);
225}
226
227/* Likewise for ternary operations. */
228
229rtx
230simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
231 enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
232{
233 rtx tem;
234
235 /* If this simplifies, use it. */
236 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
237 op0, op1, op2)))
238 return tem;
239
240 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
241}
242
243/* Likewise, for relational operations.
244 CMP_MODE specifies mode comparison is done in. */
245
246rtx
247simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
248 enum machine_mode cmp_mode, rtx op0, rtx op1)
249{
250 rtx tem;
251
252 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
253 op0, op1)))
254 return tem;
255
256 return gen_rtx_fmt_ee (code, mode, op0, op1);
257}
258
259/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
260 resulting RTX. Return a new RTX which is as simplified as possible. */
261
262rtx
263simplify_replace_rtx (rtx x, rtx old_rtx, rtx new_rtx)
264{
265 enum rtx_code code = GET_CODE (x);
266 enum machine_mode mode = GET_MODE (x);
267 enum machine_mode op_mode;
268 rtx op0, op1, op2;
269
270 /* If X is OLD_RTX, return NEW_RTX. Otherwise, if this is an expression, try
271 to build a new expression substituting recursively. If we can't do
272 anything, return our input. */
273
274 if (x == old_rtx)
275 return new_rtx;
276
277 switch (GET_RTX_CLASS (code))
278 {
279 case RTX_UNARY:
280 op0 = XEXP (x, 0);
281 op_mode = GET_MODE (op0);
282 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
283 if (op0 == XEXP (x, 0))
284 return x;
285 return simplify_gen_unary (code, mode, op0, op_mode);
286
287 case RTX_BIN_ARITH:
288 case RTX_COMM_ARITH:
289 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
290 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
291 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
292 return x;
293 return simplify_gen_binary (code, mode, op0, op1);
294
295 case RTX_COMPARE:
296 case RTX_COMM_COMPARE:
297 op0 = XEXP (x, 0);
298 op1 = XEXP (x, 1);
299 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
300 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
301 op1 = simplify_replace_rtx (op1, old_rtx, new_rtx);
302 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
303 return x;
304 return simplify_gen_relational (code, mode, op_mode, op0, op1);
305
306 case RTX_TERNARY:
307 case RTX_BITFIELD_OPS:
308 op0 = XEXP (x, 0);
309 op_mode = GET_MODE (op0);
310 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
311 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
312 op2 = simplify_replace_rtx (XEXP (x, 2), old_rtx, new_rtx);
313 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
314 return x;
315 if (op_mode == VOIDmode)
316 op_mode = GET_MODE (op0);
317 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
318
319 case RTX_EXTRA:
320 /* The only case we try to handle is a SUBREG. */
321 if (code == SUBREG)
322 {
323 op0 = simplify_replace_rtx (SUBREG_REG (x), old_rtx, new_rtx);
324 if (op0 == SUBREG_REG (x))
325 return x;
326 op0 = simplify_gen_subreg (GET_MODE (x), op0,
327 GET_MODE (SUBREG_REG (x)),
328 SUBREG_BYTE (x));
329 return op0 ? op0 : x;
330 }
331 break;
332
333 case RTX_OBJ:
334 if (code == MEM)
335 {
336 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
337 if (op0 == XEXP (x, 0))
338 return x;
339 return replace_equiv_address_nv (x, op0);
340 }
341 else if (code == LO_SUM)
342 {
343 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
344 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
345
346 /* (lo_sum (high x) x) -> x */
347 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
348 return op1;
349
350 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
351 return x;
352 return gen_rtx_LO_SUM (mode, op0, op1);
353 }
354 else if (code == REG)
355 {
356 if (rtx_equal_p (x, old_rtx))
357 return new_rtx;
358 }
359 break;
360
361 default:
362 break;
363 }
364 return x;
365}
366
367/* Try to simplify a unary operation CODE whose output mode is to be
368 MODE with input operand OP whose mode was originally OP_MODE.
369 Return zero if no simplification can be made. */
370rtx
371simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
372 rtx op, enum machine_mode op_mode)
373{
374 rtx trueop, tem;
375
376 if (GET_CODE (op) == CONST)
377 op = XEXP (op, 0);
378
379 trueop = avoid_constant_pool_reference (op);
380
381 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
382 if (tem)
383 return tem;
384
385 return simplify_unary_operation_1 (code, mode, op);
386}
387
388/* Perform some simplifications we can do even if the operands
389 aren't constant. */
390static rtx
391simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
392{
393 enum rtx_code reversed;
394 rtx temp;
395
396 switch (code)
397 {
398 case NOT:
399 /* (not (not X)) == X. */
400 if (GET_CODE (op) == NOT)
401 return XEXP (op, 0);
402
403 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
404 comparison is all ones. */
405 if (COMPARISON_P (op)
406 && (mode == BImode || STORE_FLAG_VALUE == -1)
407 && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
408 return simplify_gen_relational (reversed, mode, VOIDmode,
409 XEXP (op, 0), XEXP (op, 1));
410
411 /* (not (plus X -1)) can become (neg X). */
412 if (GET_CODE (op) == PLUS
413 && XEXP (op, 1) == constm1_rtx)
414 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
415
416 /* Similarly, (not (neg X)) is (plus X -1). */
417 if (GET_CODE (op) == NEG)
418 return plus_constant (XEXP (op, 0), -1);
419
420 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
421 if (GET_CODE (op) == XOR
422 && GET_CODE (XEXP (op, 1)) == CONST_INT
423 && (temp = simplify_unary_operation (NOT, mode,
424 XEXP (op, 1), mode)) != 0)
425 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
426
427 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
428 if (GET_CODE (op) == PLUS
429 && GET_CODE (XEXP (op, 1)) == CONST_INT
430 && mode_signbit_p (mode, XEXP (op, 1))
431 && (temp = simplify_unary_operation (NOT, mode,
432 XEXP (op, 1), mode)) != 0)
433 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
434
435
436 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
437 operands other than 1, but that is not valid. We could do a
438 similar simplification for (not (lshiftrt C X)) where C is
439 just the sign bit, but this doesn't seem common enough to
440 bother with. */
441 if (GET_CODE (op) == ASHIFT
442 && XEXP (op, 0) == const1_rtx)
443 {
444 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
445 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
446 }
447
448 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
449 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
450 so we can perform the above simplification. */
451
452 if (STORE_FLAG_VALUE == -1
453 && GET_CODE (op) == ASHIFTRT
454 && GET_CODE (XEXP (op, 1)) == CONST_INT
455 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
456 return simplify_gen_relational (GE, mode, VOIDmode,
457 XEXP (op, 0), const0_rtx);
458
459
460 if (GET_CODE (op) == SUBREG
461 && subreg_lowpart_p (op)
462 && (GET_MODE_SIZE (GET_MODE (op))
463 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
464 && GET_CODE (SUBREG_REG (op)) == ASHIFT
465 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
466 {
467 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
468 rtx x;
469
470 x = gen_rtx_ROTATE (inner_mode,
471 simplify_gen_unary (NOT, inner_mode, const1_rtx,
472 inner_mode),
473 XEXP (SUBREG_REG (op), 1));
474 return rtl_hooks.gen_lowpart_no_emit (mode, x);
475 }
476
477 /* Apply De Morgan's laws to reduce number of patterns for machines
478 with negating logical insns (and-not, nand, etc.). If result has
479 only one NOT, put it first, since that is how the patterns are
480 coded. */
481
482 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
483 {
484 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
485 enum machine_mode op_mode;
486
487 op_mode = GET_MODE (in1);
488 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
489
490 op_mode = GET_MODE (in2);
491 if (op_mode == VOIDmode)
492 op_mode = mode;
493 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
494
495 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
496 {
497 rtx tem = in2;
498 in2 = in1; in1 = tem;
499 }
500
501 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
502 mode, in1, in2);
503 }
504 break;
505
506 case NEG:
507 /* (neg (neg X)) == X. */
508 if (GET_CODE (op) == NEG)
509 return XEXP (op, 0);
510
511 /* (neg (plus X 1)) can become (not X). */
512 if (GET_CODE (op) == PLUS
513 && XEXP (op, 1) == const1_rtx)
514 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
515
516 /* Similarly, (neg (not X)) is (plus X 1). */
517 if (GET_CODE (op) == NOT)
518 return plus_constant (XEXP (op, 0), 1);
519
520 /* (neg (minus X Y)) can become (minus Y X). This transformation
521 isn't safe for modes with signed zeros, since if X and Y are
522 both +0, (minus Y X) is the same as (minus X Y). If the
523 rounding mode is towards +infinity (or -infinity) then the two
524 expressions will be rounded differently. */
525 if (GET_CODE (op) == MINUS
526 && !HONOR_SIGNED_ZEROS (mode)
527 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
528 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
529
530 if (GET_CODE (op) == PLUS
531 && !HONOR_SIGNED_ZEROS (mode)
532 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
533 {
534 /* (neg (plus A C)) is simplified to (minus -C A). */
535 if (GET_CODE (XEXP (op, 1)) == CONST_INT
536 || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
537 {
538 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
539 if (temp)
540 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
541 }
542
543 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
544 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
545 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
546 }
547
548 /* (neg (mult A B)) becomes (mult (neg A) B).
549 This works even for floating-point values. */
550 if (GET_CODE (op) == MULT
551 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
552 {
553 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
554 return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
555 }
556
557 /* NEG commutes with ASHIFT since it is multiplication. Only do
558 this if we can then eliminate the NEG (e.g., if the operand
559 is a constant). */
560 if (GET_CODE (op) == ASHIFT)
561 {
562 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
563 if (temp)
564 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
565 }
566
567 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
568 C is equal to the width of MODE minus 1. */
569 if (GET_CODE (op) == ASHIFTRT
570 && GET_CODE (XEXP (op, 1)) == CONST_INT
571 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
572 return simplify_gen_binary (LSHIFTRT, mode,
573 XEXP (op, 0), XEXP (op, 1));
574
575 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
576 C is equal to the width of MODE minus 1. */
577 if (GET_CODE (op) == LSHIFTRT
578 && GET_CODE (XEXP (op, 1)) == CONST_INT
579 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
580 return simplify_gen_binary (ASHIFTRT, mode,
581 XEXP (op, 0), XEXP (op, 1));
582
583 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
584 if (GET_CODE (op) == XOR
585 && XEXP (op, 1) == const1_rtx
586 && nonzero_bits (XEXP (op, 0), mode) == 1)
587 return plus_constant (XEXP (op, 0), -1);
588
589 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
590 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
591 if (GET_CODE (op) == LT
1/* RTL simplification functions for GNU compiler.
2 Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
4 Free Software Foundation, Inc.
5
6This file is part of GCC.
7
8GCC is free software; you can redistribute it and/or modify it under
9the terms of the GNU General Public License as published by the Free
10Software Foundation; either version 2, or (at your option) any later
11version.
12
13GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14WARRANTY; without even the implied warranty of MERCHANTABILITY or
15FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16for more details.
17
18You should have received a copy of the GNU General Public License
19along with GCC; see the file COPYING. If not, write to the Free
20Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
2102110-1301, USA. */
22
23
24#include "config.h"
25#include "system.h"
26#include "coretypes.h"
27#include "tm.h"
28#include "rtl.h"
29#include "tree.h"
30#include "tm_p.h"
31#include "regs.h"
32#include "hard-reg-set.h"
33#include "flags.h"
34#include "real.h"
35#include "insn-config.h"
36#include "recog.h"
37#include "function.h"
38#include "expr.h"
39#include "toplev.h"
40#include "output.h"
41#include "ggc.h"
42#include "target.h"
43
44/* Simplification and canonicalization of RTL. */
45
46/* Much code operates on (low, high) pairs; the low value is an
47 unsigned wide int, the high value a signed wide int. We
48 occasionally need to sign extend from low to high as if low were a
49 signed wide int. */
50#define HWI_SIGN_EXTEND(low) \
51 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
52
53static rtx neg_const_int (enum machine_mode, rtx);
54static bool plus_minus_operand_p (rtx);
55static int simplify_plus_minus_op_data_cmp (const void *, const void *);
56static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
57static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
58 unsigned int);
59static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
60 rtx, rtx);
61static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
62 enum machine_mode, rtx, rtx);
63static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
64static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
65 rtx, rtx, rtx, rtx);
66
67/* Negate a CONST_INT rtx, truncating (because a conversion from a
68 maximally negative number can overflow). */
69static rtx
70neg_const_int (enum machine_mode mode, rtx i)
71{
72 return gen_int_mode (- INTVAL (i), mode);
73}
74
75/* Test whether expression, X, is an immediate constant that represents
76 the most significant bit of machine mode MODE. */
77
78bool
79mode_signbit_p (enum machine_mode mode, rtx x)
80{
81 unsigned HOST_WIDE_INT val;
82 unsigned int width;
83
84 if (GET_MODE_CLASS (mode) != MODE_INT)
85 return false;
86
87 width = GET_MODE_BITSIZE (mode);
88 if (width == 0)
89 return false;
90
91 if (width <= HOST_BITS_PER_WIDE_INT
92 && GET_CODE (x) == CONST_INT)
93 val = INTVAL (x);
94 else if (width <= 2 * HOST_BITS_PER_WIDE_INT
95 && GET_CODE (x) == CONST_DOUBLE
96 && CONST_DOUBLE_LOW (x) == 0)
97 {
98 val = CONST_DOUBLE_HIGH (x);
99 width -= HOST_BITS_PER_WIDE_INT;
100 }
101 else
102 return false;
103
104 if (width < HOST_BITS_PER_WIDE_INT)
105 val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
106 return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
107}
108
109/* Make a binary operation by properly ordering the operands and
110 seeing if the expression folds. */
111
112rtx
113simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
114 rtx op1)
115{
116 rtx tem;
117
118 /* If this simplifies, do it. */
119 tem = simplify_binary_operation (code, mode, op0, op1);
120 if (tem)
121 return tem;
122
123 /* Put complex operands first and constants second if commutative. */
124 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
125 && swap_commutative_operands_p (op0, op1))
126 tem = op0, op0 = op1, op1 = tem;
127
128 return gen_rtx_fmt_ee (code, mode, op0, op1);
129}
130
131/* If X is a MEM referencing the constant pool, return the real value.
132 Otherwise return X. */
133rtx
134avoid_constant_pool_reference (rtx x)
135{
136 rtx c, tmp, addr;
137 enum machine_mode cmode;
138 HOST_WIDE_INT offset = 0;
139
140 switch (GET_CODE (x))
141 {
142 case MEM:
143 break;
144
145 case FLOAT_EXTEND:
146 /* Handle float extensions of constant pool references. */
147 tmp = XEXP (x, 0);
148 c = avoid_constant_pool_reference (tmp);
149 if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
150 {
151 REAL_VALUE_TYPE d;
152
153 REAL_VALUE_FROM_CONST_DOUBLE (d, c);
154 return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
155 }
156 return x;
157
158 default:
159 return x;
160 }
161
162 addr = XEXP (x, 0);
163
164 /* Call target hook to avoid the effects of -fpic etc.... */
165 addr = targetm.delegitimize_address (addr);
166
167 /* Split the address into a base and integer offset. */
168 if (GET_CODE (addr) == CONST
169 && GET_CODE (XEXP (addr, 0)) == PLUS
170 && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
171 {
172 offset = INTVAL (XEXP (XEXP (addr, 0), 1));
173 addr = XEXP (XEXP (addr, 0), 0);
174 }
175
176 if (GET_CODE (addr) == LO_SUM)
177 addr = XEXP (addr, 1);
178
179 /* If this is a constant pool reference, we can turn it into its
180 constant and hope that simplifications happen. */
181 if (GET_CODE (addr) == SYMBOL_REF
182 && CONSTANT_POOL_ADDRESS_P (addr))
183 {
184 c = get_pool_constant (addr);
185 cmode = get_pool_mode (addr);
186
187 /* If we're accessing the constant in a different mode than it was
188 originally stored, attempt to fix that up via subreg simplifications.
189 If that fails we have no choice but to return the original memory. */
190 if (offset != 0 || cmode != GET_MODE (x))
191 {
192 rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
193 if (tem && CONSTANT_P (tem))
194 return tem;
195 }
196 else
197 return c;
198 }
199
200 return x;
201}
202
203/* Return true if X is a MEM referencing the constant pool. */
204
205bool
206constant_pool_reference_p (rtx x)
207{
208 return avoid_constant_pool_reference (x) != x;
209}
210
211/* Make a unary operation by first seeing if it folds and otherwise making
212 the specified operation. */
213
214rtx
215simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
216 enum machine_mode op_mode)
217{
218 rtx tem;
219
220 /* If this simplifies, use it. */
221 if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
222 return tem;
223
224 return gen_rtx_fmt_e (code, mode, op);
225}
226
227/* Likewise for ternary operations. */
228
229rtx
230simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
231 enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
232{
233 rtx tem;
234
235 /* If this simplifies, use it. */
236 if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
237 op0, op1, op2)))
238 return tem;
239
240 return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
241}
242
243/* Likewise, for relational operations.
244 CMP_MODE specifies mode comparison is done in. */
245
246rtx
247simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
248 enum machine_mode cmp_mode, rtx op0, rtx op1)
249{
250 rtx tem;
251
252 if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
253 op0, op1)))
254 return tem;
255
256 return gen_rtx_fmt_ee (code, mode, op0, op1);
257}
258
259/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
260 resulting RTX. Return a new RTX which is as simplified as possible. */
261
262rtx
263simplify_replace_rtx (rtx x, rtx old_rtx, rtx new_rtx)
264{
265 enum rtx_code code = GET_CODE (x);
266 enum machine_mode mode = GET_MODE (x);
267 enum machine_mode op_mode;
268 rtx op0, op1, op2;
269
270 /* If X is OLD_RTX, return NEW_RTX. Otherwise, if this is an expression, try
271 to build a new expression substituting recursively. If we can't do
272 anything, return our input. */
273
274 if (x == old_rtx)
275 return new_rtx;
276
277 switch (GET_RTX_CLASS (code))
278 {
279 case RTX_UNARY:
280 op0 = XEXP (x, 0);
281 op_mode = GET_MODE (op0);
282 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
283 if (op0 == XEXP (x, 0))
284 return x;
285 return simplify_gen_unary (code, mode, op0, op_mode);
286
287 case RTX_BIN_ARITH:
288 case RTX_COMM_ARITH:
289 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
290 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
291 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
292 return x;
293 return simplify_gen_binary (code, mode, op0, op1);
294
295 case RTX_COMPARE:
296 case RTX_COMM_COMPARE:
297 op0 = XEXP (x, 0);
298 op1 = XEXP (x, 1);
299 op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
300 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
301 op1 = simplify_replace_rtx (op1, old_rtx, new_rtx);
302 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
303 return x;
304 return simplify_gen_relational (code, mode, op_mode, op0, op1);
305
306 case RTX_TERNARY:
307 case RTX_BITFIELD_OPS:
308 op0 = XEXP (x, 0);
309 op_mode = GET_MODE (op0);
310 op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
311 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
312 op2 = simplify_replace_rtx (XEXP (x, 2), old_rtx, new_rtx);
313 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
314 return x;
315 if (op_mode == VOIDmode)
316 op_mode = GET_MODE (op0);
317 return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
318
319 case RTX_EXTRA:
320 /* The only case we try to handle is a SUBREG. */
321 if (code == SUBREG)
322 {
323 op0 = simplify_replace_rtx (SUBREG_REG (x), old_rtx, new_rtx);
324 if (op0 == SUBREG_REG (x))
325 return x;
326 op0 = simplify_gen_subreg (GET_MODE (x), op0,
327 GET_MODE (SUBREG_REG (x)),
328 SUBREG_BYTE (x));
329 return op0 ? op0 : x;
330 }
331 break;
332
333 case RTX_OBJ:
334 if (code == MEM)
335 {
336 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
337 if (op0 == XEXP (x, 0))
338 return x;
339 return replace_equiv_address_nv (x, op0);
340 }
341 else if (code == LO_SUM)
342 {
343 op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
344 op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
345
346 /* (lo_sum (high x) x) -> x */
347 if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
348 return op1;
349
350 if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
351 return x;
352 return gen_rtx_LO_SUM (mode, op0, op1);
353 }
354 else if (code == REG)
355 {
356 if (rtx_equal_p (x, old_rtx))
357 return new_rtx;
358 }
359 break;
360
361 default:
362 break;
363 }
364 return x;
365}
366
367/* Try to simplify a unary operation CODE whose output mode is to be
368 MODE with input operand OP whose mode was originally OP_MODE.
369 Return zero if no simplification can be made. */
370rtx
371simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
372 rtx op, enum machine_mode op_mode)
373{
374 rtx trueop, tem;
375
376 if (GET_CODE (op) == CONST)
377 op = XEXP (op, 0);
378
379 trueop = avoid_constant_pool_reference (op);
380
381 tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
382 if (tem)
383 return tem;
384
385 return simplify_unary_operation_1 (code, mode, op);
386}
387
388/* Perform some simplifications we can do even if the operands
389 aren't constant. */
390static rtx
391simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
392{
393 enum rtx_code reversed;
394 rtx temp;
395
396 switch (code)
397 {
398 case NOT:
399 /* (not (not X)) == X. */
400 if (GET_CODE (op) == NOT)
401 return XEXP (op, 0);
402
403 /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
404 comparison is all ones. */
405 if (COMPARISON_P (op)
406 && (mode == BImode || STORE_FLAG_VALUE == -1)
407 && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
408 return simplify_gen_relational (reversed, mode, VOIDmode,
409 XEXP (op, 0), XEXP (op, 1));
410
411 /* (not (plus X -1)) can become (neg X). */
412 if (GET_CODE (op) == PLUS
413 && XEXP (op, 1) == constm1_rtx)
414 return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
415
416 /* Similarly, (not (neg X)) is (plus X -1). */
417 if (GET_CODE (op) == NEG)
418 return plus_constant (XEXP (op, 0), -1);
419
420 /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
421 if (GET_CODE (op) == XOR
422 && GET_CODE (XEXP (op, 1)) == CONST_INT
423 && (temp = simplify_unary_operation (NOT, mode,
424 XEXP (op, 1), mode)) != 0)
425 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
426
427 /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
428 if (GET_CODE (op) == PLUS
429 && GET_CODE (XEXP (op, 1)) == CONST_INT
430 && mode_signbit_p (mode, XEXP (op, 1))
431 && (temp = simplify_unary_operation (NOT, mode,
432 XEXP (op, 1), mode)) != 0)
433 return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
434
435
436 /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
437 operands other than 1, but that is not valid. We could do a
438 similar simplification for (not (lshiftrt C X)) where C is
439 just the sign bit, but this doesn't seem common enough to
440 bother with. */
441 if (GET_CODE (op) == ASHIFT
442 && XEXP (op, 0) == const1_rtx)
443 {
444 temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
445 return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
446 }
447
448 /* (not (ashiftrt foo C)) where C is the number of bits in FOO
449 minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
450 so we can perform the above simplification. */
451
452 if (STORE_FLAG_VALUE == -1
453 && GET_CODE (op) == ASHIFTRT
454 && GET_CODE (XEXP (op, 1)) == CONST_INT
455 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
456 return simplify_gen_relational (GE, mode, VOIDmode,
457 XEXP (op, 0), const0_rtx);
458
459
460 if (GET_CODE (op) == SUBREG
461 && subreg_lowpart_p (op)
462 && (GET_MODE_SIZE (GET_MODE (op))
463 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
464 && GET_CODE (SUBREG_REG (op)) == ASHIFT
465 && XEXP (SUBREG_REG (op), 0) == const1_rtx)
466 {
467 enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
468 rtx x;
469
470 x = gen_rtx_ROTATE (inner_mode,
471 simplify_gen_unary (NOT, inner_mode, const1_rtx,
472 inner_mode),
473 XEXP (SUBREG_REG (op), 1));
474 return rtl_hooks.gen_lowpart_no_emit (mode, x);
475 }
476
477 /* Apply De Morgan's laws to reduce number of patterns for machines
478 with negating logical insns (and-not, nand, etc.). If result has
479 only one NOT, put it first, since that is how the patterns are
480 coded. */
481
482 if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
483 {
484 rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
485 enum machine_mode op_mode;
486
487 op_mode = GET_MODE (in1);
488 in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
489
490 op_mode = GET_MODE (in2);
491 if (op_mode == VOIDmode)
492 op_mode = mode;
493 in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
494
495 if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
496 {
497 rtx tem = in2;
498 in2 = in1; in1 = tem;
499 }
500
501 return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
502 mode, in1, in2);
503 }
504 break;
505
506 case NEG:
507 /* (neg (neg X)) == X. */
508 if (GET_CODE (op) == NEG)
509 return XEXP (op, 0);
510
511 /* (neg (plus X 1)) can become (not X). */
512 if (GET_CODE (op) == PLUS
513 && XEXP (op, 1) == const1_rtx)
514 return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
515
516 /* Similarly, (neg (not X)) is (plus X 1). */
517 if (GET_CODE (op) == NOT)
518 return plus_constant (XEXP (op, 0), 1);
519
520 /* (neg (minus X Y)) can become (minus Y X). This transformation
521 isn't safe for modes with signed zeros, since if X and Y are
522 both +0, (minus Y X) is the same as (minus X Y). If the
523 rounding mode is towards +infinity (or -infinity) then the two
524 expressions will be rounded differently. */
525 if (GET_CODE (op) == MINUS
526 && !HONOR_SIGNED_ZEROS (mode)
527 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
528 return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
529
530 if (GET_CODE (op) == PLUS
531 && !HONOR_SIGNED_ZEROS (mode)
532 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
533 {
534 /* (neg (plus A C)) is simplified to (minus -C A). */
535 if (GET_CODE (XEXP (op, 1)) == CONST_INT
536 || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
537 {
538 temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
539 if (temp)
540 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
541 }
542
543 /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
544 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
545 return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
546 }
547
548 /* (neg (mult A B)) becomes (mult (neg A) B).
549 This works even for floating-point values. */
550 if (GET_CODE (op) == MULT
551 && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
552 {
553 temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
554 return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
555 }
556
557 /* NEG commutes with ASHIFT since it is multiplication. Only do
558 this if we can then eliminate the NEG (e.g., if the operand
559 is a constant). */
560 if (GET_CODE (op) == ASHIFT)
561 {
562 temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
563 if (temp)
564 return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
565 }
566
567 /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
568 C is equal to the width of MODE minus 1. */
569 if (GET_CODE (op) == ASHIFTRT
570 && GET_CODE (XEXP (op, 1)) == CONST_INT
571 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
572 return simplify_gen_binary (LSHIFTRT, mode,
573 XEXP (op, 0), XEXP (op, 1));
574
575 /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
576 C is equal to the width of MODE minus 1. */
577 if (GET_CODE (op) == LSHIFTRT
578 && GET_CODE (XEXP (op, 1)) == CONST_INT
579 && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
580 return simplify_gen_binary (ASHIFTRT, mode,
581 XEXP (op, 0), XEXP (op, 1));
582
583 /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
584 if (GET_CODE (op) == XOR
585 && XEXP (op, 1) == const1_rtx
586 && nonzero_bits (XEXP (op, 0), mode) == 1)
587 return plus_constant (XEXP (op, 0), -1);
588
589 /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
590 /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
591 if (GET_CODE (op) == LT
592 && XEXP (op, 1) == const0_rtx)
592 && XEXP (op, 1) == const0_rtx
593 && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
593 {
594 enum machine_mode inner = GET_MODE (XEXP (op, 0));
595 int isize = GET_MODE_BITSIZE (inner);
596 if (STORE_FLAG_VALUE == 1)
597 {
598 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
599 GEN_INT (isize - 1));
600 if (mode == inner)
601 return temp;
602 if (GET_MODE_BITSIZE (mode) > isize)
603 return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
604 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
605 }
606 else if (STORE_FLAG_VALUE == -1)
607 {
608 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
609 GEN_INT (isize - 1));
610 if (mode == inner)
611 return temp;
612 if (GET_MODE_BITSIZE (mode) > isize)
613 return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
614 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
615 }
616 }
617 break;
618
619 case TRUNCATE:
620 /* We can't handle truncation to a partial integer mode here
621 because we don't know the real bitsize of the partial
622 integer mode. */
623 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
624 break;
625
626 /* (truncate:SI ({sign,zero}_extend:DI foo:SI)) == foo:SI. */
627 if ((GET_CODE (op) == SIGN_EXTEND
628 || GET_CODE (op) == ZERO_EXTEND)
629 && GET_MODE (XEXP (op, 0)) == mode)
630 return XEXP (op, 0);
631
632 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
633 (OP:SI foo:SI) if OP is NEG or ABS. */
634 if ((GET_CODE (op) == ABS
635 || GET_CODE (op) == NEG)
636 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
637 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
638 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
639 return simplify_gen_unary (GET_CODE (op), mode,
640 XEXP (XEXP (op, 0), 0), mode);
641
642 /* (truncate:A (subreg:B (truncate:C X) 0)) is
643 (truncate:A X). */
644 if (GET_CODE (op) == SUBREG
645 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
646 && subreg_lowpart_p (op))
647 return simplify_gen_unary (TRUNCATE, mode, XEXP (SUBREG_REG (op), 0),
648 GET_MODE (XEXP (SUBREG_REG (op), 0)));
649
650 /* If we know that the value is already truncated, we can
651 replace the TRUNCATE with a SUBREG. Note that this is also
652 valid if TRULY_NOOP_TRUNCATION is false for the corresponding
653 modes we just have to apply a different definition for
654 truncation. But don't do this for an (LSHIFTRT (MULT ...))
655 since this will cause problems with the umulXi3_highpart
656 patterns. */
657 if ((TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
658 GET_MODE_BITSIZE (GET_MODE (op)))
659 ? (num_sign_bit_copies (op, GET_MODE (op))
660 > (unsigned int) (GET_MODE_BITSIZE (GET_MODE (op))
661 - GET_MODE_BITSIZE (mode)))
662 : truncated_to_mode (mode, op))
663 && ! (GET_CODE (op) == LSHIFTRT
664 && GET_CODE (XEXP (op, 0)) == MULT))
665 return rtl_hooks.gen_lowpart_no_emit (mode, op);
666
667 /* A truncate of a comparison can be replaced with a subreg if
668 STORE_FLAG_VALUE permits. This is like the previous test,
669 but it works even if the comparison is done in a mode larger
670 than HOST_BITS_PER_WIDE_INT. */
671 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
672 && COMPARISON_P (op)
673 && ((HOST_WIDE_INT) STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
674 return rtl_hooks.gen_lowpart_no_emit (mode, op);
675 break;
676
677 case FLOAT_TRUNCATE:
678 if (DECIMAL_FLOAT_MODE_P (mode))
679 break;
680
681 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
682 if (GET_CODE (op) == FLOAT_EXTEND
683 && GET_MODE (XEXP (op, 0)) == mode)
684 return XEXP (op, 0);
685
686 /* (float_truncate:SF (float_truncate:DF foo:XF))
687 = (float_truncate:SF foo:XF).
688 This may eliminate double rounding, so it is unsafe.
689
690 (float_truncate:SF (float_extend:XF foo:DF))
691 = (float_truncate:SF foo:DF).
692
693 (float_truncate:DF (float_extend:XF foo:SF))
694 = (float_extend:SF foo:DF). */
695 if ((GET_CODE (op) == FLOAT_TRUNCATE
696 && flag_unsafe_math_optimizations)
697 || GET_CODE (op) == FLOAT_EXTEND)
698 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
699 0)))
700 > GET_MODE_SIZE (mode)
701 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
702 mode,
703 XEXP (op, 0), mode);
704
705 /* (float_truncate (float x)) is (float x) */
706 if (GET_CODE (op) == FLOAT
707 && (flag_unsafe_math_optimizations
708 || ((unsigned)significand_size (GET_MODE (op))
709 >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
710 - num_sign_bit_copies (XEXP (op, 0),
711 GET_MODE (XEXP (op, 0)))))))
712 return simplify_gen_unary (FLOAT, mode,
713 XEXP (op, 0),
714 GET_MODE (XEXP (op, 0)));
715
716 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
717 (OP:SF foo:SF) if OP is NEG or ABS. */
718 if ((GET_CODE (op) == ABS
719 || GET_CODE (op) == NEG)
720 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
721 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
722 return simplify_gen_unary (GET_CODE (op), mode,
723 XEXP (XEXP (op, 0), 0), mode);
724
725 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
726 is (float_truncate:SF x). */
727 if (GET_CODE (op) == SUBREG
728 && subreg_lowpart_p (op)
729 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
730 return SUBREG_REG (op);
731 break;
732
733 case FLOAT_EXTEND:
734 if (DECIMAL_FLOAT_MODE_P (mode))
735 break;
736
737 /* (float_extend (float_extend x)) is (float_extend x)
738
739 (float_extend (float x)) is (float x) assuming that double
740 rounding can't happen.
741 */
742 if (GET_CODE (op) == FLOAT_EXTEND
743 || (GET_CODE (op) == FLOAT
744 && ((unsigned)significand_size (GET_MODE (op))
745 >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
746 - num_sign_bit_copies (XEXP (op, 0),
747 GET_MODE (XEXP (op, 0)))))))
748 return simplify_gen_unary (GET_CODE (op), mode,
749 XEXP (op, 0),
750 GET_MODE (XEXP (op, 0)));
751
752 break;
753
754 case ABS:
755 /* (abs (neg <foo>)) -> (abs <foo>) */
756 if (GET_CODE (op) == NEG)
757 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
758 GET_MODE (XEXP (op, 0)));
759
760 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
761 do nothing. */
762 if (GET_MODE (op) == VOIDmode)
763 break;
764
765 /* If operand is something known to be positive, ignore the ABS. */
766 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
767 || ((GET_MODE_BITSIZE (GET_MODE (op))
768 <= HOST_BITS_PER_WIDE_INT)
769 && ((nonzero_bits (op, GET_MODE (op))
770 & ((HOST_WIDE_INT) 1
771 << (GET_MODE_BITSIZE (GET_MODE (op)) - 1)))
772 == 0)))
773 return op;
774
775 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
776 if (num_sign_bit_copies (op, mode) == GET_MODE_BITSIZE (mode))
777 return gen_rtx_NEG (mode, op);
778
779 break;
780
781 case FFS:
782 /* (ffs (*_extend <X>)) = (ffs <X>) */
783 if (GET_CODE (op) == SIGN_EXTEND
784 || GET_CODE (op) == ZERO_EXTEND)
785 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
786 GET_MODE (XEXP (op, 0)));
787 break;
788
789 case POPCOUNT:
790 case PARITY:
791 /* (pop* (zero_extend <X>)) = (pop* <X>) */
792 if (GET_CODE (op) == ZERO_EXTEND)
793 return simplify_gen_unary (code, mode, XEXP (op, 0),
794 GET_MODE (XEXP (op, 0)));
795 break;
796
797 case FLOAT:
798 /* (float (sign_extend <X>)) = (float <X>). */
799 if (GET_CODE (op) == SIGN_EXTEND)
800 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
801 GET_MODE (XEXP (op, 0)));
802 break;
803
804 case SIGN_EXTEND:
805 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
806 becomes just the MINUS if its mode is MODE. This allows
807 folding switch statements on machines using casesi (such as
808 the VAX). */
809 if (GET_CODE (op) == TRUNCATE
810 && GET_MODE (XEXP (op, 0)) == mode
811 && GET_CODE (XEXP (op, 0)) == MINUS
812 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
813 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
814 return XEXP (op, 0);
815
816 /* Check for a sign extension of a subreg of a promoted
817 variable, where the promotion is sign-extended, and the
818 target mode is the same as the variable's promotion. */
819 if (GET_CODE (op) == SUBREG
820 && SUBREG_PROMOTED_VAR_P (op)
821 && ! SUBREG_PROMOTED_UNSIGNED_P (op)
822 && GET_MODE (XEXP (op, 0)) == mode)
823 return XEXP (op, 0);
824
825#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
826 if (! POINTERS_EXTEND_UNSIGNED
827 && mode == Pmode && GET_MODE (op) == ptr_mode
828 && (CONSTANT_P (op)
829 || (GET_CODE (op) == SUBREG
830 && REG_P (SUBREG_REG (op))
831 && REG_POINTER (SUBREG_REG (op))
832 && GET_MODE (SUBREG_REG (op)) == Pmode)))
833 return convert_memory_address (Pmode, op);
834#endif
835 break;
836
837 case ZERO_EXTEND:
838 /* Check for a zero extension of a subreg of a promoted
839 variable, where the promotion is zero-extended, and the
840 target mode is the same as the variable's promotion. */
841 if (GET_CODE (op) == SUBREG
842 && SUBREG_PROMOTED_VAR_P (op)
843 && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
844 && GET_MODE (XEXP (op, 0)) == mode)
845 return XEXP (op, 0);
846
847#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
848 if (POINTERS_EXTEND_UNSIGNED > 0
849 && mode == Pmode && GET_MODE (op) == ptr_mode
850 && (CONSTANT_P (op)
851 || (GET_CODE (op) == SUBREG
852 && REG_P (SUBREG_REG (op))
853 && REG_POINTER (SUBREG_REG (op))
854 && GET_MODE (SUBREG_REG (op)) == Pmode)))
855 return convert_memory_address (Pmode, op);
856#endif
857 break;
858
859 default:
860 break;
861 }
862
863 return 0;
864}
865
/* Try to compute the value of a unary operation CODE whose output mode is to
   be MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if the value cannot be computed.  */
rtx
simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
				rtx op, enum machine_mode op_mode)
{
  unsigned int width = GET_MODE_BITSIZE (mode);

  if (code == VEC_DUPLICATE)
    {
      gcc_assert (VECTOR_MODE_P (mode));
      if (GET_MODE (op) != VOIDmode)
	{
	  if (!VECTOR_MODE_P (GET_MODE (op)))
	    gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
	  else
	    gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
						 (GET_MODE (op)));
	}
      if (GET_CODE (op) == CONST_INT || GET_CODE (op) == CONST_DOUBLE
	  || GET_CODE (op) == CONST_VECTOR)
	{
          int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
          unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	  rtvec v = rtvec_alloc (n_elts);
	  unsigned int i;

	  /* A scalar constant is replicated into every element; a
	     CONST_VECTOR operand is tiled (its element count must
	     divide the result's element count).  */
	  if (GET_CODE (op) != CONST_VECTOR)
	    for (i = 0; i < n_elts; i++)
	      RTVEC_ELT (v, i) = op;
	  else
	    {
	      enum machine_mode inmode = GET_MODE (op);
              int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
              unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);

	      gcc_assert (in_n_elts < n_elts);
	      gcc_assert ((n_elts % in_n_elts) == 0);
	      for (i = 0; i < n_elts; i++)
		RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
	    }
	  return gen_rtx_CONST_VECTOR (mode, v);
	}
    }

  /* Fold a vector operation element-wise; if any element fails to
     fold, the whole operation fails.  */
  if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode opmode = GET_MODE (op);
      int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
      unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
	{
	  rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
					    CONST_VECTOR_ELT (op, i),
					    GET_MODE_INNER (opmode));
	  if (!x)
	    return 0;
	  RTVEC_ELT (v, i) = x;
	}
      return gen_rtx_CONST_VECTOR (mode, v);
    }

  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && GET_MODE (op) == VOIDmode
      && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (op) == CONST_INT)
	lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
	lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);

      REAL_VALUE_FROM_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
	   && (GET_CODE (op) == CONST_DOUBLE
	       || GET_CODE (op) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (op) == CONST_INT)
	lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
	lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);

      if (op_mode == VOIDmode)
	{
	  /* We don't know how to interpret negative-looking numbers in
	     this case, so don't try to fold those.  */
	  if (hv < 0)
	    return 0;
	}
      else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
	;
      else
	hv = 0, lv &= GET_MODE_MASK (op_mode);

      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }

  /* Single-word integer folding: the operand fits in one
     HOST_WIDE_INT.  */
  if (GET_CODE (op) == CONST_INT
      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT arg0 = INTVAL (op);
      HOST_WIDE_INT val;

      switch (code)
	{
	case NOT:
	  val = ~ arg0;
	  break;

	case NEG:
	  val = - arg0;
	  break;

	case ABS:
	  val = (arg0 >= 0 ? arg0 : - arg0);
	  break;

	case FFS:
	  /* Don't use ffs here.  Instead, get low order bit and then its
	     number.  If arg0 is zero, this will return 0, as desired.  */
	  arg0 &= GET_MODE_MASK (mode);
	  val = exact_log2 (arg0 & (- arg0)) + 1;
	  break;

	case CLZ:
	  arg0 &= GET_MODE_MASK (mode);
	  if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
	    ;
	  else
	    val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
	  break;

	case CTZ:
	  arg0 &= GET_MODE_MASK (mode);
	  if (arg0 == 0)
	    {
	      /* Even if the value at zero is undefined, we have to come
		 up with some replacement.  Seems good enough.  */
	      if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
		val = GET_MODE_BITSIZE (mode);
	    }
	  else
	    val = exact_log2 (arg0 & -arg0);
	  break;

	case POPCOUNT:
	  /* Clear the lowest set bit repeatedly, counting iterations.  */
	  arg0 &= GET_MODE_MASK (mode);
	  val = 0;
	  while (arg0)
	    val++, arg0 &= arg0 - 1;
	  break;

	case PARITY:
	  /* Popcount reduced modulo 2.  */
	  arg0 &= GET_MODE_MASK (mode);
	  val = 0;
	  while (arg0)
	    val++, arg0 &= arg0 - 1;
	  val &= 1;
	  break;

	case TRUNCATE:
	  val = arg0;
	  break;

	case ZERO_EXTEND:
	  /* When zero-extending a CONST_INT, we need to know its
	     original mode.  */
	  gcc_assert (op_mode != VOIDmode);
	  if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
	    {
	      /* If we were really extending the mode,
		 we would have to distinguish between zero-extension
		 and sign-extension.  */
	      gcc_assert (width == GET_MODE_BITSIZE (op_mode));
	      val = arg0;
	    }
	  else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
	    val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
	  else
	    return 0;
	  break;

	case SIGN_EXTEND:
	  if (op_mode == VOIDmode)
	    op_mode = mode;
	  if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
	    {
	      /* If we were really extending the mode,
		 we would have to distinguish between zero-extension
		 and sign-extension.  */
	      gcc_assert (width == GET_MODE_BITSIZE (op_mode));
	      val = arg0;
	    }
	  else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
	    {
	      /* Mask to the operand's width, then subtract 2^width if
		 the sign bit of the narrow value is set.  */
	      val
		= arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
	      if (val
		  & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
		val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
	    }
	  else
	    return 0;
	  break;

	case SQRT:
	case FLOAT_EXTEND:
	case FLOAT_TRUNCATE:
	case SS_TRUNCATE:
	case US_TRUNCATE:
	case SS_NEG:
	  return 0;

	default:
	  gcc_unreachable ();
	}

      return gen_int_mode (val, mode);
    }

  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
     for a DImode operation on a CONST_INT.  The value is represented
     as a (low, high) pair of HOST_WIDE_INTs.  */
  else if (GET_MODE (op) == VOIDmode
	   && width <= HOST_BITS_PER_WIDE_INT * 2
	   && (GET_CODE (op) == CONST_DOUBLE
	       || GET_CODE (op) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, lv;
      HOST_WIDE_INT h1, hv;

      if (GET_CODE (op) == CONST_DOUBLE)
	l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
      else
	l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);

      switch (code)
	{
	case NOT:
	  lv = ~ l1;
	  hv = ~ h1;
	  break;

	case NEG:
	  neg_double (l1, h1, &lv, &hv);
	  break;

	case ABS:
	  if (h1 < 0)
	    neg_double (l1, h1, &lv, &hv);
	  else
	    lv = l1, hv = h1;
	  break;

	case FFS:
	  hv = 0;
	  if (l1 == 0)
	    {
	      if (h1 == 0)
		lv = 0;
	      else
		lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
	    }
	  else
	    lv = exact_log2 (l1 & -l1) + 1;
	  break;

	case CLZ:
	  hv = 0;
	  if (h1 != 0)
	    lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
	      - HOST_BITS_PER_WIDE_INT;
	  else if (l1 != 0)
	    lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
	  else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
	    lv = GET_MODE_BITSIZE (mode);
	  break;

	case CTZ:
	  hv = 0;
	  if (l1 != 0)
	    lv = exact_log2 (l1 & -l1);
	  else if (h1 != 0)
	    lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
	  else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
	    lv = GET_MODE_BITSIZE (mode);
	  break;

	case POPCOUNT:
	  hv = 0;
	  lv = 0;
	  while (l1)
	    lv++, l1 &= l1 - 1;
	  while (h1)
	    lv++, h1 &= h1 - 1;
	  break;

	case PARITY:
	  hv = 0;
	  lv = 0;
	  while (l1)
	    lv++, l1 &= l1 - 1;
	  while (h1)
	    lv++, h1 &= h1 - 1;
	  lv &= 1;
	  break;

	case TRUNCATE:
	  /* This is just a change-of-mode, so do nothing.  */
	  lv = l1, hv = h1;
	  break;

	case ZERO_EXTEND:
	  gcc_assert (op_mode != VOIDmode);

	  if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
	    return 0;

	  hv = 0;
	  lv = l1 & GET_MODE_MASK (op_mode);
	  break;

	case SIGN_EXTEND:
	  if (op_mode == VOIDmode
	      || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
	    return 0;
	  else
	    {
	      lv = l1 & GET_MODE_MASK (op_mode);
	      if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
		  && (lv & ((HOST_WIDE_INT) 1
			    << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
		lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);

	      hv = HWI_SIGN_EXTEND (lv);
	    }
	  break;

	case SQRT:
	  return 0;

	default:
	  return 0;
	}

      return immed_double_const (lv, hv, mode);
    }

  /* Floating-point folding on a CONST_DOUBLE with a float result.  */
  else if (GET_CODE (op) == CONST_DOUBLE
	   && SCALAR_FLOAT_MODE_P (mode))
    {
      REAL_VALUE_TYPE d, t;
      REAL_VALUE_FROM_CONST_DOUBLE (d, op);

      switch (code)
	{
	case SQRT:
	  /* Folding sqrt of a signaling NaN would lose the exception.  */
	  if (HONOR_SNANS (mode) && real_isnan (&d))
	    return 0;
	  real_sqrt (&t, mode, &d);
	  d = t;
	  break;
	case ABS:
	  d = REAL_VALUE_ABS (d);
	  break;
	case NEG:
	  d = REAL_VALUE_NEGATE (d);
	  break;
	case FLOAT_TRUNCATE:
	  d = real_value_truncate (mode, d);
	  break;
	case FLOAT_EXTEND:
	  /* All this does is change the mode.  */
	  break;
	case FIX:
	  real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
	  break;
	case NOT:
	  {
	    /* Bitwise complement of the target representation,
	       re-interpreted as a float.  */
	    long tmp[4];
	    int i;

	    real_to_target (tmp, &d, GET_MODE (op));
	    for (i = 0; i < 4; i++)
	      tmp[i] = ~tmp[i];
	    real_from_target (&d, tmp, mode);
	    break;
	  }
	default:
	  gcc_unreachable ();
	}
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }

  /* Float-to-integer conversions (FIX / UNSIGNED_FIX), saturating
     out-of-range inputs to the extreme representable value.  */
  else if (GET_CODE (op) == CONST_DOUBLE
	   && SCALAR_FLOAT_MODE_P (GET_MODE (op))
	   && GET_MODE_CLASS (mode) == MODE_INT
	   && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
    {
      /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
	 operators are intentionally left unspecified (to ease implementation
	 by target backends), for consistency, this routine implements the
	 same semantics for constant folding as used by the middle-end.  */

      /* This was formerly used only for non-IEEE float.
	 eggert@twinsun.com says it is safe for IEEE also.  */
      HOST_WIDE_INT xh, xl, th, tl;
      REAL_VALUE_TYPE x, t;
      REAL_VALUE_FROM_CONST_DOUBLE (x, op);
      switch (code)
	{
	case FIX:
	  if (REAL_VALUE_ISNAN (x))
	    return const0_rtx;

	  /* Test against the signed upper bound.  */
	  if (width > HOST_BITS_PER_WIDE_INT)
	    {
	      th = ((unsigned HOST_WIDE_INT) 1
		    << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
	      tl = -1;
	    }
	  else
	    {
	      th = 0;
	      tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
	    }
	  real_from_integer (&t, VOIDmode, tl, th, 0);
	  if (REAL_VALUES_LESS (t, x))
	    {
	      xh = th;
	      xl = tl;
	      break;
	    }

	  /* Test against the signed lower bound.  */
	  if (width > HOST_BITS_PER_WIDE_INT)
	    {
	      th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
	      tl = 0;
	    }
	  else
	    {
	      th = -1;
	      tl = (HOST_WIDE_INT) -1 << (width - 1);
	    }
	  real_from_integer (&t, VOIDmode, tl, th, 0);
	  if (REAL_VALUES_LESS (x, t))
	    {
	      xh = th;
	      xl = tl;
	      break;
	    }
	  REAL_VALUE_TO_INT (&xl, &xh, x);
	  break;

	case UNSIGNED_FIX:
	  if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
	    return const0_rtx;

	  /* Test against the unsigned upper bound.  */
	  if (width == 2*HOST_BITS_PER_WIDE_INT)
	    {
	      th = -1;
	      tl = -1;
	    }
	  else if (width >= HOST_BITS_PER_WIDE_INT)
	    {
	      th = ((unsigned HOST_WIDE_INT) 1
		    << (width - HOST_BITS_PER_WIDE_INT)) - 1;
	      tl = -1;
	    }
	  else
	    {
	      th = 0;
	      tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
	    }
	  real_from_integer (&t, VOIDmode, tl, th, 1);
	  if (REAL_VALUES_LESS (t, x))
	    {
	      xh = th;
	      xl = tl;
	      break;
	    }

	  REAL_VALUE_TO_INT (&xl, &xh, x);
	  break;

	default:
	  gcc_unreachable ();
	}
      return immed_double_const (xl, xh, mode);
    }

  return NULL_RTX;
}
1381
1382/* Subroutine of simplify_binary_operation to simplify a commutative,
1383 associative binary operation CODE with result mode MODE, operating
1384 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1385 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1386 canonicalization is possible. */
1387
1388static rtx
1389simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
1390 rtx op0, rtx op1)
1391{
1392 rtx tem;
1393
1394 /* Linearize the operator to the left. */
1395 if (GET_CODE (op1) == code)
1396 {
1397 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
1398 if (GET_CODE (op0) == code)
1399 {
1400 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1401 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1402 }
1403
1404 /* "a op (b op c)" becomes "(b op c) op a". */
1405 if (! swap_commutative_operands_p (op1, op0))
1406 return simplify_gen_binary (code, mode, op1, op0);
1407
1408 tem = op0;
1409 op0 = op1;
1410 op1 = tem;
1411 }
1412
1413 if (GET_CODE (op0) == code)
1414 {
1415 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
1416 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
1417 {
1418 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1419 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1420 }
1421
1422 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
1423 tem = swap_commutative_operands_p (XEXP (op0, 1), op1)
1424 ? simplify_binary_operation (code, mode, op1, XEXP (op0, 1))
1425 : simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1426 if (tem != 0)
1427 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1428
1429 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
1430 tem = swap_commutative_operands_p (XEXP (op0, 0), op1)
1431 ? simplify_binary_operation (code, mode, op1, XEXP (op0, 0))
1432 : simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
1433 if (tem != 0)
1434 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1435 }
1436
1437 return 0;
1438}
1439
1440
1441/* Simplify a binary operation CODE with result mode MODE, operating on OP0
1442 and OP1. Return 0 if no simplification is possible.
1443
1444 Don't use this for relational operations such as EQ or LT.
1445 Use simplify_relational_operation instead. */
1446rtx
1447simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
1448 rtx op0, rtx op1)
1449{
1450 rtx trueop0, trueop1;
1451 rtx tem;
1452
1453 /* Relational operations don't work here. We must know the mode
1454 of the operands in order to do the comparison correctly.
1455 Assuming a full word can give incorrect results.
1456 Consider comparing 128 with -128 in QImode. */
1457 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
1458 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
1459
1460 /* Make sure the constant is second. */
1461 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
1462 && swap_commutative_operands_p (op0, op1))
1463 {
1464 tem = op0, op0 = op1, op1 = tem;
1465 }
1466
1467 trueop0 = avoid_constant_pool_reference (op0);
1468 trueop1 = avoid_constant_pool_reference (op1);
1469
1470 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
1471 if (tem)
1472 return tem;
1473 return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
1474}
1475
1476/* Subroutine of simplify_binary_operation. Simplify a binary operation
1477 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
1478 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
1479 actual constants. */
1480
1481static rtx
1482simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
1483 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
1484{
1485 rtx tem, reversed, opleft, opright;
1486 HOST_WIDE_INT val;
1487 unsigned int width = GET_MODE_BITSIZE (mode);
1488
1489 /* Even if we can't compute a constant result,
1490 there are some cases worth simplifying. */
1491
1492 switch (code)
1493 {
1494 case PLUS:
1495 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1496 when x is NaN, infinite, or finite and nonzero. They aren't
1497 when x is -0 and the rounding mode is not towards -infinity,
1498 since (-0) + 0 is then 0. */
1499 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
1500 return op0;
1501
1502 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1503 transformations are safe even for IEEE. */
1504 if (GET_CODE (op0) == NEG)
1505 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
1506 else if (GET_CODE (op1) == NEG)
1507 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
1508
1509 /* (~a) + 1 -> -a */
1510 if (INTEGRAL_MODE_P (mode)
1511 && GET_CODE (op0) == NOT
1512 && trueop1 == const1_rtx)
1513 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
1514
1515 /* Handle both-operands-constant cases. We can only add
1516 CONST_INTs to constants since the sum of relocatable symbols
1517 can't be handled by most assemblers. Don't add CONST_INT
1518 to CONST_INT since overflow won't be computed properly if wider
1519 than HOST_BITS_PER_WIDE_INT. */
1520
1521 if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
1522 && GET_CODE (op1) == CONST_INT)
1523 return plus_constant (op0, INTVAL (op1));
1524 else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
1525 && GET_CODE (op0) == CONST_INT)
1526 return plus_constant (op1, INTVAL (op0));
1527
1528 /* See if this is something like X * C - X or vice versa or
1529 if the multiplication is written as a shift. If so, we can
1530 distribute and make a new multiply, shift, or maybe just
1531 have X (if C is 2 in the example above). But don't make
1532 something more expensive than we had before. */
1533
1534 if (SCALAR_INT_MODE_P (mode))
1535 {
1536 HOST_WIDE_INT coeff0h = 0, coeff1h = 0;
1537 unsigned HOST_WIDE_INT coeff0l = 1, coeff1l = 1;
1538 rtx lhs = op0, rhs = op1;
1539
1540 if (GET_CODE (lhs) == NEG)
1541 {
1542 coeff0l = -1;
1543 coeff0h = -1;
1544 lhs = XEXP (lhs, 0);
1545 }
1546 else if (GET_CODE (lhs) == MULT
1547 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1548 {
1549 coeff0l = INTVAL (XEXP (lhs, 1));
1550 coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
1551 lhs = XEXP (lhs, 0);
1552 }
1553 else if (GET_CODE (lhs) == ASHIFT
1554 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1555 && INTVAL (XEXP (lhs, 1)) >= 0
1556 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1557 {
1558 coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1559 coeff0h = 0;
1560 lhs = XEXP (lhs, 0);
1561 }
1562
1563 if (GET_CODE (rhs) == NEG)
1564 {
1565 coeff1l = -1;
1566 coeff1h = -1;
1567 rhs = XEXP (rhs, 0);
1568 }
1569 else if (GET_CODE (rhs) == MULT
1570 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1571 {
1572 coeff1l = INTVAL (XEXP (rhs, 1));
1573 coeff1h = INTVAL (XEXP (rhs, 1)) < 0 ? -1 : 0;
1574 rhs = XEXP (rhs, 0);
1575 }
1576 else if (GET_CODE (rhs) == ASHIFT
1577 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1578 && INTVAL (XEXP (rhs, 1)) >= 0
1579 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1580 {
1581 coeff1l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1582 coeff1h = 0;
1583 rhs = XEXP (rhs, 0);
1584 }
1585
1586 if (rtx_equal_p (lhs, rhs))
1587 {
1588 rtx orig = gen_rtx_PLUS (mode, op0, op1);
1589 rtx coeff;
1590 unsigned HOST_WIDE_INT l;
1591 HOST_WIDE_INT h;
1592
1593 add_double (coeff0l, coeff0h, coeff1l, coeff1h, &l, &h);
1594 coeff = immed_double_const (l, h, mode);
1595
1596 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
1597 return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
1598 ? tem : 0;
1599 }
1600 }
1601
1602 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
1603 if ((GET_CODE (op1) == CONST_INT
1604 || GET_CODE (op1) == CONST_DOUBLE)
1605 && GET_CODE (op0) == XOR
1606 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
1607 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
1608 && mode_signbit_p (mode, op1))
1609 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
1610 simplify_gen_binary (XOR, mode, op1,
1611 XEXP (op0, 1)));
1612
1613 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
1614 if (GET_CODE (op0) == MULT
1615 && GET_CODE (XEXP (op0, 0)) == NEG)
1616 {
1617 rtx in1, in2;
1618
1619 in1 = XEXP (XEXP (op0, 0), 0);
1620 in2 = XEXP (op0, 1);
1621 return simplify_gen_binary (MINUS, mode, op1,
1622 simplify_gen_binary (MULT, mode,
1623 in1, in2));
1624 }
1625
1626 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
1627 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
1628 is 1. */
1629 if (COMPARISON_P (op0)
1630 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
1631 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
1632 && (reversed = reversed_comparison (op0, mode)))
1633 return
1634 simplify_gen_unary (NEG, mode, reversed, mode);
1635
1636 /* If one of the operands is a PLUS or a MINUS, see if we can
1637 simplify this by the associative law.
1638 Don't use the associative law for floating point.
1639 The inaccuracy makes it nonassociative,
1640 and subtle programs can break if operations are associated. */
1641
1642 if (INTEGRAL_MODE_P (mode)
1643 && (plus_minus_operand_p (op0)
1644 || plus_minus_operand_p (op1))
1645 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
1646 return tem;
1647
1648 /* Reassociate floating point addition only when the user
1649 specifies unsafe math optimizations. */
1650 if (FLOAT_MODE_P (mode)
1651 && flag_unsafe_math_optimizations)
1652 {
1653 tem = simplify_associative_operation (code, mode, op0, op1);
1654 if (tem)
1655 return tem;
1656 }
1657 break;
1658
1659 case COMPARE:
1660#ifdef HAVE_cc0
1661 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
1662 using cc0, in which case we want to leave it as a COMPARE
1663 so we can distinguish it from a register-register-copy.
1664
1665 In IEEE floating point, x-0 is not the same as x. */
1666
1667 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1668 || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
1669 && trueop1 == CONST0_RTX (mode))
1670 return op0;
1671#endif
1672
1673 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1674 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
1675 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
1676 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
1677 {
1678 rtx xop00 = XEXP (op0, 0);
1679 rtx xop10 = XEXP (op1, 0);
1680
1681#ifdef HAVE_cc0
1682 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
1683#else
1684 if (REG_P (xop00) && REG_P (xop10)
1685 && GET_MODE (xop00) == GET_MODE (xop10)
1686 && REGNO (xop00) == REGNO (xop10)
1687 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
1688 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
1689#endif
1690 return xop00;
1691 }
1692 break;
1693
1694 case MINUS:
1695 /* We can't assume x-x is 0 even with non-IEEE floating point,
1696 but since it is zero except in very strange circumstances, we
1697 will treat it as zero with -funsafe-math-optimizations. */
1698 if (rtx_equal_p (trueop0, trueop1)
1699 && ! side_effects_p (op0)
1700 && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
1701 return CONST0_RTX (mode);
1702
1703 /* Change subtraction from zero into negation. (0 - x) is the
1704 same as -x when x is NaN, infinite, or finite and nonzero.
1705 But if the mode has signed zeros, and does not round towards
1706 -infinity, then 0 - 0 is 0, not -0. */
1707 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
1708 return simplify_gen_unary (NEG, mode, op1, mode);
1709
1710 /* (-1 - a) is ~a. */
1711 if (trueop0 == constm1_rtx)
1712 return simplify_gen_unary (NOT, mode, op1, mode);
1713
1714 /* Subtracting 0 has no effect unless the mode has signed zeros
1715 and supports rounding towards -infinity. In such a case,
1716 0 - 0 is -0. */
1717 if (!(HONOR_SIGNED_ZEROS (mode)
1718 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1719 && trueop1 == CONST0_RTX (mode))
1720 return op0;
1721
1722 /* See if this is something like X * C - X or vice versa or
1723 if the multiplication is written as a shift. If so, we can
1724 distribute and make a new multiply, shift, or maybe just
1725 have X (if C is 2 in the example above). But don't make
1726 something more expensive than we had before. */
1727
1728 if (SCALAR_INT_MODE_P (mode))
1729 {
1730 HOST_WIDE_INT coeff0h = 0, negcoeff1h = -1;
1731 unsigned HOST_WIDE_INT coeff0l = 1, negcoeff1l = -1;
1732 rtx lhs = op0, rhs = op1;
1733
1734 if (GET_CODE (lhs) == NEG)
1735 {
1736 coeff0l = -1;
1737 coeff0h = -1;
1738 lhs = XEXP (lhs, 0);
1739 }
1740 else if (GET_CODE (lhs) == MULT
1741 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1742 {
1743 coeff0l = INTVAL (XEXP (lhs, 1));
1744 coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
1745 lhs = XEXP (lhs, 0);
1746 }
1747 else if (GET_CODE (lhs) == ASHIFT
1748 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1749 && INTVAL (XEXP (lhs, 1)) >= 0
1750 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1751 {
1752 coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1753 coeff0h = 0;
1754 lhs = XEXP (lhs, 0);
1755 }
1756
1757 if (GET_CODE (rhs) == NEG)
1758 {
1759 negcoeff1l = 1;
1760 negcoeff1h = 0;
1761 rhs = XEXP (rhs, 0);
1762 }
1763 else if (GET_CODE (rhs) == MULT
1764 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1765 {
1766 negcoeff1l = -INTVAL (XEXP (rhs, 1));
1767 negcoeff1h = INTVAL (XEXP (rhs, 1)) <= 0 ? 0 : -1;
1768 rhs = XEXP (rhs, 0);
1769 }
1770 else if (GET_CODE (rhs) == ASHIFT
1771 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1772 && INTVAL (XEXP (rhs, 1)) >= 0
1773 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1774 {
1775 negcoeff1l = -(((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1)));
1776 negcoeff1h = -1;
1777 rhs = XEXP (rhs, 0);
1778 }
1779
1780 if (rtx_equal_p (lhs, rhs))
1781 {
1782 rtx orig = gen_rtx_MINUS (mode, op0, op1);
1783 rtx coeff;
1784 unsigned HOST_WIDE_INT l;
1785 HOST_WIDE_INT h;
1786
1787 add_double (coeff0l, coeff0h, negcoeff1l, negcoeff1h, &l, &h);
1788 coeff = immed_double_const (l, h, mode);
1789
1790 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
1791 return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
1792 ? tem : 0;
1793 }
1794 }
1795
1796 /* (a - (-b)) -> (a + b). True even for IEEE. */
1797 if (GET_CODE (op1) == NEG)
1798 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
1799
1800 /* (-x - c) may be simplified as (-c - x). */
1801 if (GET_CODE (op0) == NEG
1802 && (GET_CODE (op1) == CONST_INT
1803 || GET_CODE (op1) == CONST_DOUBLE))
1804 {
1805 tem = simplify_unary_operation (NEG, mode, op1, mode);
1806 if (tem)
1807 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
1808 }
1809
1810 /* Don't let a relocatable value get a negative coeff. */
1811 if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
1812 return simplify_gen_binary (PLUS, mode,
1813 op0,
1814 neg_const_int (mode, op1));
1815
1816 /* (x - (x & y)) -> (x & ~y) */
1817 if (GET_CODE (op1) == AND)
1818 {
1819 if (rtx_equal_p (op0, XEXP (op1, 0)))
1820 {
1821 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
1822 GET_MODE (XEXP (op1, 1)));
1823 return simplify_gen_binary (AND, mode, op0, tem);
1824 }
1825 if (rtx_equal_p (op0, XEXP (op1, 1)))
1826 {
1827 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
1828 GET_MODE (XEXP (op1, 0)));
1829 return simplify_gen_binary (AND, mode, op0, tem);
1830 }
1831 }
1832
1833 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
1834 by reversing the comparison code if valid. */
1835 if (STORE_FLAG_VALUE == 1
1836 && trueop0 == const1_rtx
1837 && COMPARISON_P (op1)
1838 && (reversed = reversed_comparison (op1, mode)))
1839 return reversed;
1840
1841 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
1842 if (GET_CODE (op1) == MULT
1843 && GET_CODE (XEXP (op1, 0)) == NEG)
1844 {
1845 rtx in1, in2;
1846
1847 in1 = XEXP (XEXP (op1, 0), 0);
1848 in2 = XEXP (op1, 1);
1849 return simplify_gen_binary (PLUS, mode,
1850 simplify_gen_binary (MULT, mode,
1851 in1, in2),
1852 op0);
1853 }
1854
1855 /* Canonicalize (minus (neg A) (mult B C)) to
1856 (minus (mult (neg B) C) A). */
1857 if (GET_CODE (op1) == MULT
1858 && GET_CODE (op0) == NEG)
1859 {
1860 rtx in1, in2;
1861
1862 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
1863 in2 = XEXP (op1, 1);
1864 return simplify_gen_binary (MINUS, mode,
1865 simplify_gen_binary (MULT, mode,
1866 in1, in2),
1867 XEXP (op0, 0));
1868 }
1869
1870 /* If one of the operands is a PLUS or a MINUS, see if we can
1871 simplify this by the associative law. This will, for example,
1872 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
1873 Don't use the associative law for floating point.
1874 The inaccuracy makes it nonassociative,
1875 and subtle programs can break if operations are associated. */
1876
1877 if (INTEGRAL_MODE_P (mode)
1878 && (plus_minus_operand_p (op0)
1879 || plus_minus_operand_p (op1))
1880 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
1881 return tem;
1882 break;
1883
1884 case MULT:
1885 if (trueop1 == constm1_rtx)
1886 return simplify_gen_unary (NEG, mode, op0, mode);
1887
1888 /* Maybe simplify x * 0 to 0. The reduction is not valid if
1889 x is NaN, since x * 0 is then also NaN. Nor is it valid
1890 when the mode has signed zeros, since multiplying a negative
1891 number by 0 will give -0, not 0. */
1892 if (!HONOR_NANS (mode)
1893 && !HONOR_SIGNED_ZEROS (mode)
1894 && trueop1 == CONST0_RTX (mode)
1895 && ! side_effects_p (op0))
1896 return op1;
1897
1898 /* In IEEE floating point, x*1 is not equivalent to x for
1899 signalling NaNs. */
1900 if (!HONOR_SNANS (mode)
1901 && trueop1 == CONST1_RTX (mode))
1902 return op0;
1903
1904 /* Convert multiply by constant power of two into shift unless
1905 we are still generating RTL. This test is a kludge. */
1906 if (GET_CODE (trueop1) == CONST_INT
1907 && (val = exact_log2 (INTVAL (trueop1))) >= 0
1908 /* If the mode is larger than the host word size, and the
1909 uppermost bit is set, then this isn't a power of two due
1910 to implicit sign extension. */
1911 && (width <= HOST_BITS_PER_WIDE_INT
1912 || val != HOST_BITS_PER_WIDE_INT - 1))
1913 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
1914
1915 /* Likewise for multipliers wider than a word. */
1916 if (GET_CODE (trueop1) == CONST_DOUBLE
1917 && (GET_MODE (trueop1) == VOIDmode
1918 || GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_INT)
1919 && GET_MODE (op0) == mode
1920 && CONST_DOUBLE_LOW (trueop1) == 0
1921 && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0)
1922 return simplify_gen_binary (ASHIFT, mode, op0,
1923 GEN_INT (val + HOST_BITS_PER_WIDE_INT));
1924
1925 /* x*2 is x+x and x*(-1) is -x */
1926 if (GET_CODE (trueop1) == CONST_DOUBLE
1927 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
1928 && GET_MODE (op0) == mode)
1929 {
1930 REAL_VALUE_TYPE d;
1931 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1932
1933 if (REAL_VALUES_EQUAL (d, dconst2))
1934 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
1935
1936 if (!HONOR_SNANS (mode)
1937 && REAL_VALUES_EQUAL (d, dconstm1))
1938 return simplify_gen_unary (NEG, mode, op0, mode);
1939 }
1940
1941 /* Optimize -x * -x as x * x. */
1942 if (FLOAT_MODE_P (mode)
1943 && GET_CODE (op0) == NEG
1944 && GET_CODE (op1) == NEG
1945 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
1946 && !side_effects_p (XEXP (op0, 0)))
1947 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
1948
1949 /* Likewise, optimize abs(x) * abs(x) as x * x. */
1950 if (SCALAR_FLOAT_MODE_P (mode)
1951 && GET_CODE (op0) == ABS
1952 && GET_CODE (op1) == ABS
1953 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
1954 && !side_effects_p (XEXP (op0, 0)))
1955 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
1956
1957 /* Reassociate multiplication, but for floating point MULTs
1958 only when the user specifies unsafe math optimizations. */
1959 if (! FLOAT_MODE_P (mode)
1960 || flag_unsafe_math_optimizations)
1961 {
1962 tem = simplify_associative_operation (code, mode, op0, op1);
1963 if (tem)
1964 return tem;
1965 }
1966 break;
1967
1968 case IOR:
1969 if (trueop1 == const0_rtx)
1970 return op0;
1971 if (GET_CODE (trueop1) == CONST_INT
1972 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1973 == GET_MODE_MASK (mode)))
1974 return op1;
1975 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1976 return op0;
1977 /* A | (~A) -> -1 */
1978 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1979 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1980 && ! side_effects_p (op0)
1981 && SCALAR_INT_MODE_P (mode))
1982 return constm1_rtx;
1983
1984 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
1985 if (GET_CODE (op1) == CONST_INT
1986 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
1987 && (nonzero_bits (op0, mode) & ~INTVAL (op1)) == 0)
1988 return op1;
1989
1990 /* Convert (A & B) | A to A. */
1991 if (GET_CODE (op0) == AND
1992 && (rtx_equal_p (XEXP (op0, 0), op1)
1993 || rtx_equal_p (XEXP (op0, 1), op1))
1994 && ! side_effects_p (XEXP (op0, 0))
1995 && ! side_effects_p (XEXP (op0, 1)))
1996 return op1;
1997
1998 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
1999 mode size to (rotate A CX). */
2000
2001 if (GET_CODE (op1) == ASHIFT
2002 || GET_CODE (op1) == SUBREG)
2003 {
2004 opleft = op1;
2005 opright = op0;
2006 }
2007 else
2008 {
2009 opright = op1;
2010 opleft = op0;
2011 }
2012
2013 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2014 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2015 && GET_CODE (XEXP (opleft, 1)) == CONST_INT
2016 && GET_CODE (XEXP (opright, 1)) == CONST_INT
2017 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2018 == GET_MODE_BITSIZE (mode)))
2019 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
2020
2021 /* Same, but for ashift that has been "simplified" to a wider mode
2022 by simplify_shift_const. */
2023
2024 if (GET_CODE (opleft) == SUBREG
2025 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2026 && GET_CODE (opright) == LSHIFTRT
2027 && GET_CODE (XEXP (opright, 0)) == SUBREG
2028 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2029 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2030 && (GET_MODE_SIZE (GET_MODE (opleft))
2031 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2032 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2033 SUBREG_REG (XEXP (opright, 0)))
2034 && GET_CODE (XEXP (SUBREG_REG (opleft), 1)) == CONST_INT
2035 && GET_CODE (XEXP (opright, 1)) == CONST_INT
2036 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2037 == GET_MODE_BITSIZE (mode)))
2038 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2039 XEXP (SUBREG_REG (opleft), 1));
2040
2041 /* If we have (ior (and (X C1) C2)), simplify this by making
2042 C1 as small as possible if C1 actually changes. */
2043 if (GET_CODE (op1) == CONST_INT
2044 && (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2045 || INTVAL (op1) > 0)
2046 && GET_CODE (op0) == AND
2047 && GET_CODE (XEXP (op0, 1)) == CONST_INT
2048 && GET_CODE (op1) == CONST_INT
2049 && (INTVAL (XEXP (op0, 1)) & INTVAL (op1)) != 0)
2050 return simplify_gen_binary (IOR, mode,
2051 simplify_gen_binary
2052 (AND, mode, XEXP (op0, 0),
2053 GEN_INT (INTVAL (XEXP (op0, 1))
2054 & ~INTVAL (op1))),
2055 op1);
2056
2057 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2058 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2059 the PLUS does not affect any of the bits in OP1: then we can do
2060 the IOR as a PLUS and we can associate. This is valid if OP1
2061 can be safely shifted left C bits. */
2062 if (GET_CODE (trueop1) == CONST_INT && GET_CODE (op0) == ASHIFTRT
2063 && GET_CODE (XEXP (op0, 0)) == PLUS
2064 && GET_CODE (XEXP (XEXP (op0, 0), 1)) == CONST_INT
2065 && GET_CODE (XEXP (op0, 1)) == CONST_INT
2066 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2067 {
2068 int count = INTVAL (XEXP (op0, 1));
2069 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2070
2071 if (mask >> count == INTVAL (trueop1)
2072 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2073 return simplify_gen_binary (ASHIFTRT, mode,
2074 plus_constant (XEXP (op0, 0), mask),
2075 XEXP (op0, 1));
2076 }
2077
2078 tem = simplify_associative_operation (code, mode, op0, op1);
2079 if (tem)
2080 return tem;
2081 break;
2082
2083 case XOR:
2084 if (trueop1 == const0_rtx)
2085 return op0;
2086 if (GET_CODE (trueop1) == CONST_INT
2087 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
2088 == GET_MODE_MASK (mode)))
2089 return simplify_gen_unary (NOT, mode, op0, mode);
2090 if (rtx_equal_p (trueop0, trueop1)
2091 && ! side_effects_p (op0)
2092 && GET_MODE_CLASS (mode) != MODE_CC)
2093 return CONST0_RTX (mode);
2094
2095 /* Canonicalize XOR of the most significant bit to PLUS. */
2096 if ((GET_CODE (op1) == CONST_INT
2097 || GET_CODE (op1) == CONST_DOUBLE)
2098 && mode_signbit_p (mode, op1))
2099 return simplify_gen_binary (PLUS, mode, op0, op1);
2100 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2101 if ((GET_CODE (op1) == CONST_INT
2102 || GET_CODE (op1) == CONST_DOUBLE)
2103 && GET_CODE (op0) == PLUS
2104 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
2105 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
2106 && mode_signbit_p (mode, XEXP (op0, 1)))
2107 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2108 simplify_gen_binary (XOR, mode, op1,
2109 XEXP (op0, 1)));
2110
2111 /* If we are XORing two things that have no bits in common,
2112 convert them into an IOR. This helps to detect rotation encoded
2113 using those methods and possibly other simplifications. */
2114
2115 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2116 && (nonzero_bits (op0, mode)
2117 & nonzero_bits (op1, mode)) == 0)
2118 return (simplify_gen_binary (IOR, mode, op0, op1));
2119
2120 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2121 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2122 (NOT y). */
2123 {
2124 int num_negated = 0;
2125
2126 if (GET_CODE (op0) == NOT)
2127 num_negated++, op0 = XEXP (op0, 0);
2128 if (GET_CODE (op1) == NOT)
2129 num_negated++, op1 = XEXP (op1, 0);
2130
2131 if (num_negated == 2)
2132 return simplify_gen_binary (XOR, mode, op0, op1);
2133 else if (num_negated == 1)
2134 return simplify_gen_unary (NOT, mode,
2135 simplify_gen_binary (XOR, mode, op0, op1),
2136 mode);
2137 }
2138
2139 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2140 correspond to a machine insn or result in further simplifications
2141 if B is a constant. */
2142
2143 if (GET_CODE (op0) == AND
2144 && rtx_equal_p (XEXP (op0, 1), op1)
2145 && ! side_effects_p (op1))
2146 return simplify_gen_binary (AND, mode,
2147 simplify_gen_unary (NOT, mode,
2148 XEXP (op0, 0), mode),
2149 op1);
2150
2151 else if (GET_CODE (op0) == AND
2152 && rtx_equal_p (XEXP (op0, 0), op1)
2153 && ! side_effects_p (op1))
2154 return simplify_gen_binary (AND, mode,
2155 simplify_gen_unary (NOT, mode,
2156 XEXP (op0, 1), mode),
2157 op1);
2158
2159 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2160 comparison if STORE_FLAG_VALUE is 1. */
2161 if (STORE_FLAG_VALUE == 1
2162 && trueop1 == const1_rtx
2163 && COMPARISON_P (op0)
2164 && (reversed = reversed_comparison (op0, mode)))
2165 return reversed;
2166
2167 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2168 is (lt foo (const_int 0)), so we can perform the above
2169 simplification if STORE_FLAG_VALUE is 1. */
2170
2171 if (STORE_FLAG_VALUE == 1
2172 && trueop1 == const1_rtx
2173 && GET_CODE (op0) == LSHIFTRT
2174 && GET_CODE (XEXP (op0, 1)) == CONST_INT
2175 && INTVAL (XEXP (op0, 1)) == GET_MODE_BITSIZE (mode) - 1)
2176 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
2177
2178 /* (xor (comparison foo bar) (const_int sign-bit))
2179 when STORE_FLAG_VALUE is the sign bit. */
2180 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2181 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
2182 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1))
2183 && trueop1 == const_true_rtx
2184 && COMPARISON_P (op0)
2185 && (reversed = reversed_comparison (op0, mode)))
2186 return reversed;
2187
2188 break;
2189
2190 tem = simplify_associative_operation (code, mode, op0, op1);
2191 if (tem)
2192 return tem;
2193 break;
2194
2195 case AND:
2196 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2197 return trueop1;
2198 /* If we are turning off bits already known off in OP0, we need
2199 not do an AND. */
2200 if (GET_CODE (trueop1) == CONST_INT
2201 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2202 && (nonzero_bits (trueop0, mode) & ~INTVAL (trueop1)) == 0)
2203 return op0;
2204 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2205 && GET_MODE_CLASS (mode) != MODE_CC)
2206 return op0;
2207 /* A & (~A) -> 0 */
2208 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2209 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2210 && ! side_effects_p (op0)
2211 && GET_MODE_CLASS (mode) != MODE_CC)
2212 return CONST0_RTX (mode);
2213
2214 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2215 there are no nonzero bits of C outside of X's mode. */
2216 if ((GET_CODE (op0) == SIGN_EXTEND
2217 || GET_CODE (op0) == ZERO_EXTEND)
2218 && GET_CODE (trueop1) == CONST_INT
2219 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2220 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2221 & INTVAL (trueop1)) == 0)
2222 {
2223 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2224 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2225 gen_int_mode (INTVAL (trueop1),
2226 imode));
2227 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
2228 }
2229
2230 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2231 insn (and may simplify more). */
2232 if (GET_CODE (op0) == XOR
2233 && rtx_equal_p (XEXP (op0, 0), op1)
2234 && ! side_effects_p (op1))
2235 return simplify_gen_binary (AND, mode,
2236 simplify_gen_unary (NOT, mode,
2237 XEXP (op0, 1), mode),
2238 op1);
2239
2240 if (GET_CODE (op0) == XOR
2241 && rtx_equal_p (XEXP (op0, 1), op1)
2242 && ! side_effects_p (op1))
2243 return simplify_gen_binary (AND, mode,
2244 simplify_gen_unary (NOT, mode,
2245 XEXP (op0, 0), mode),
2246 op1);
2247
2248 /* Similarly for (~(A ^ B)) & A. */
2249 if (GET_CODE (op0) == NOT
2250 && GET_CODE (XEXP (op0, 0)) == XOR
2251 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
2252 && ! side_effects_p (op1))
2253 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
2254
2255 if (GET_CODE (op0) == NOT
2256 && GET_CODE (XEXP (op0, 0)) == XOR
2257 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
2258 && ! side_effects_p (op1))
2259 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
2260
2261 /* Convert (A | B) & A to A. */
2262 if (GET_CODE (op0) == IOR
2263 && (rtx_equal_p (XEXP (op0, 0), op1)
2264 || rtx_equal_p (XEXP (op0, 1), op1))
2265 && ! side_effects_p (XEXP (op0, 0))
2266 && ! side_effects_p (XEXP (op0, 1)))
2267 return op1;
2268
2269 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2270 ((A & N) + B) & M -> (A + B) & M
2271 Similarly if (N & M) == 0,
2272 ((A | N) + B) & M -> (A + B) & M
2273 and for - instead of + and/or ^ instead of |. */
2274 if (GET_CODE (trueop1) == CONST_INT
2275 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2276 && ~INTVAL (trueop1)
2277 && (INTVAL (trueop1) & (INTVAL (trueop1) + 1)) == 0
2278 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
2279 {
2280 rtx pmop[2];
2281 int which;
2282
2283 pmop[0] = XEXP (op0, 0);
2284 pmop[1] = XEXP (op0, 1);
2285
2286 for (which = 0; which < 2; which++)
2287 {
2288 tem = pmop[which];
2289 switch (GET_CODE (tem))
2290 {
2291 case AND:
2292 if (GET_CODE (XEXP (tem, 1)) == CONST_INT
2293 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1))
2294 == INTVAL (trueop1))
2295 pmop[which] = XEXP (tem, 0);
2296 break;
2297 case IOR:
2298 case XOR:
2299 if (GET_CODE (XEXP (tem, 1)) == CONST_INT
2300 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == 0)
2301 pmop[which] = XEXP (tem, 0);
2302 break;
2303 default:
2304 break;
2305 }
2306 }
2307
2308 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
2309 {
2310 tem = simplify_gen_binary (GET_CODE (op0), mode,
2311 pmop[0], pmop[1]);
2312 return simplify_gen_binary (code, mode, tem, op1);
2313 }
2314 }
2315 tem = simplify_associative_operation (code, mode, op0, op1);
2316 if (tem)
2317 return tem;
2318 break;
2319
2320 case UDIV:
2321 /* 0/x is 0 (or x&0 if x has side-effects). */
2322 if (trueop0 == CONST0_RTX (mode))
2323 {
2324 if (side_effects_p (op1))
2325 return simplify_gen_binary (AND, mode, op1, trueop0);
2326 return trueop0;
2327 }
2328 /* x/1 is x. */
2329 if (trueop1 == CONST1_RTX (mode))
2330 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2331 /* Convert divide by power of two into shift. */
2332 if (GET_CODE (trueop1) == CONST_INT
2333 && (val = exact_log2 (INTVAL (trueop1))) > 0)
2334 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
2335 break;
2336
2337 case DIV:
2338 /* Handle floating point and integers separately. */
2339 if (SCALAR_FLOAT_MODE_P (mode))
2340 {
2341 /* Maybe change 0.0 / x to 0.0. This transformation isn't
2342 safe for modes with NaNs, since 0.0 / 0.0 will then be
2343 NaN rather than 0.0. Nor is it safe for modes with signed
2344 zeros, since dividing 0 by a negative number gives -0.0 */
2345 if (trueop0 == CONST0_RTX (mode)
2346 && !HONOR_NANS (mode)
2347 && !HONOR_SIGNED_ZEROS (mode)
2348 && ! side_effects_p (op1))
2349 return op0;
2350 /* x/1.0 is x. */
2351 if (trueop1 == CONST1_RTX (mode)
2352 && !HONOR_SNANS (mode))
2353 return op0;
2354
2355 if (GET_CODE (trueop1) == CONST_DOUBLE
2356 && trueop1 != CONST0_RTX (mode))
2357 {
2358 REAL_VALUE_TYPE d;
2359 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2360
2361 /* x/-1.0 is -x. */
2362 if (REAL_VALUES_EQUAL (d, dconstm1)
2363 && !HONOR_SNANS (mode))
2364 return simplify_gen_unary (NEG, mode, op0, mode);
2365
2366 /* Change FP division by a constant into multiplication.
2367 Only do this with -funsafe-math-optimizations. */
2368 if (flag_unsafe_math_optimizations
2369 && !REAL_VALUES_EQUAL (d, dconst0))
2370 {
2371 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
2372 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
2373 return simplify_gen_binary (MULT, mode, op0, tem);
2374 }
2375 }
2376 }
2377 else
2378 {
2379 /* 0/x is 0 (or x&0 if x has side-effects). */
2380 if (trueop0 == CONST0_RTX (mode))
2381 {
2382 if (side_effects_p (op1))
2383 return simplify_gen_binary (AND, mode, op1, trueop0);
2384 return trueop0;
2385 }
2386 /* x/1 is x. */
2387 if (trueop1 == CONST1_RTX (mode))
2388 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2389 /* x/-1 is -x. */
2390 if (trueop1 == constm1_rtx)
2391 {
2392 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
2393 return simplify_gen_unary (NEG, mode, x, mode);
2394 }
2395 }
2396 break;
2397
2398 case UMOD:
2399 /* 0%x is 0 (or x&0 if x has side-effects). */
2400 if (trueop0 == CONST0_RTX (mode))
2401 {
2402 if (side_effects_p (op1))
2403 return simplify_gen_binary (AND, mode, op1, trueop0);
2404 return trueop0;
2405 }
2406 /* x%1 is 0 (of x&0 if x has side-effects). */
2407 if (trueop1 == CONST1_RTX (mode))
2408 {
2409 if (side_effects_p (op0))
2410 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2411 return CONST0_RTX (mode);
2412 }
2413 /* Implement modulus by power of two as AND. */
2414 if (GET_CODE (trueop1) == CONST_INT
2415 && exact_log2 (INTVAL (trueop1)) > 0)
2416 return simplify_gen_binary (AND, mode, op0,
2417 GEN_INT (INTVAL (op1) - 1));
2418 break;
2419
2420 case MOD:
2421 /* 0%x is 0 (or x&0 if x has side-effects). */
2422 if (trueop0 == CONST0_RTX (mode))
2423 {
2424 if (side_effects_p (op1))
2425 return simplify_gen_binary (AND, mode, op1, trueop0);
2426 return trueop0;
2427 }
2428 /* x%1 and x%-1 is 0 (or x&0 if x has side-effects). */
2429 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
2430 {
2431 if (side_effects_p (op0))
2432 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2433 return CONST0_RTX (mode);
2434 }
2435 break;
2436
2437 case ROTATERT:
2438 case ROTATE:
2439 case ASHIFTRT:
2440 if (trueop1 == CONST0_RTX (mode))
2441 return op0;
2442 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2443 return op0;
2444 /* Rotating ~0 always results in ~0. */
2445 if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
2446 && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
2447 && ! side_effects_p (op1))
2448 return op0;
2449 break;
2450
2451 case ASHIFT:
2452 case SS_ASHIFT:
2453 if (trueop1 == CONST0_RTX (mode))
2454 return op0;
2455 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2456 return op0;
2457 break;
2458
2459 case LSHIFTRT:
2460 if (trueop1 == CONST0_RTX (mode))
2461 return op0;
2462 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2463 return op0;
2464 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
2465 if (GET_CODE (op0) == CLZ
2466 && GET_CODE (trueop1) == CONST_INT
2467 && STORE_FLAG_VALUE == 1
2468 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
2469 {
2470 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2471 unsigned HOST_WIDE_INT zero_val = 0;
2472
2473 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
2474 && zero_val == GET_MODE_BITSIZE (imode)
2475 && INTVAL (trueop1) == exact_log2 (zero_val))
2476 return simplify_gen_relational (EQ, mode, imode,
2477 XEXP (op0, 0), const0_rtx);
2478 }
2479 break;
2480
2481 case SMIN:
2482 if (width <= HOST_BITS_PER_WIDE_INT
2483 && GET_CODE (trueop1) == CONST_INT
2484 && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width -1)
2485 && ! side_effects_p (op0))
2486 return op1;
2487 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2488 return op0;
2489 tem = simplify_associative_operation (code, mode, op0, op1);
2490 if (tem)
2491 return tem;
2492 break;
2493
2494 case SMAX:
2495 if (width <= HOST_BITS_PER_WIDE_INT
2496 && GET_CODE (trueop1) == CONST_INT
2497 && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
2498 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
2499 && ! side_effects_p (op0))
2500 return op1;
2501 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2502 return op0;
2503 tem = simplify_associative_operation (code, mode, op0, op1);
2504 if (tem)
2505 return tem;
2506 break;
2507
2508 case UMIN:
2509 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2510 return op1;
2511 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2512 return op0;
2513 tem = simplify_associative_operation (code, mode, op0, op1);
2514 if (tem)
2515 return tem;
2516 break;
2517
2518 case UMAX:
2519 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
2520 return op1;
2521 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2522 return op0;
2523 tem = simplify_associative_operation (code, mode, op0, op1);
2524 if (tem)
2525 return tem;
2526 break;
2527
2528 case SS_PLUS:
2529 case US_PLUS:
2530 case SS_MINUS:
2531 case US_MINUS:
2532 /* ??? There are simplifications that can be done. */
2533 return 0;
2534
2535 case VEC_SELECT:
2536 if (!VECTOR_MODE_P (mode))
2537 {
2538 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2539 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
2540 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2541 gcc_assert (XVECLEN (trueop1, 0) == 1);
2542 gcc_assert (GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT);
2543
2544 if (GET_CODE (trueop0) == CONST_VECTOR)
2545 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
2546 (trueop1, 0, 0)));
2547 }
2548 else
2549 {
2550 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2551 gcc_assert (GET_MODE_INNER (mode)
2552 == GET_MODE_INNER (GET_MODE (trueop0)));
2553 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2554
2555 if (GET_CODE (trueop0) == CONST_VECTOR)
2556 {
2557 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2558 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2559 rtvec v = rtvec_alloc (n_elts);
2560 unsigned int i;
2561
2562 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
2563 for (i = 0; i < n_elts; i++)
2564 {
2565 rtx x = XVECEXP (trueop1, 0, i);
2566
2567 gcc_assert (GET_CODE (x) == CONST_INT);
2568 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
2569 INTVAL (x));
2570 }
2571
2572 return gen_rtx_CONST_VECTOR (mode, v);
2573 }
2574 }
2575
2576 if (XVECLEN (trueop1, 0) == 1
2577 && GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT
2578 && GET_CODE (trueop0) == VEC_CONCAT)
2579 {
2580 rtx vec = trueop0;
2581 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
2582
2583 /* Try to find the element in the VEC_CONCAT. */
2584 while (GET_MODE (vec) != mode
2585 && GET_CODE (vec) == VEC_CONCAT)
2586 {
2587 HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
2588 if (offset < vec_size)
2589 vec = XEXP (vec, 0);
2590 else
2591 {
2592 offset -= vec_size;
2593 vec = XEXP (vec, 1);
2594 }
2595 vec = avoid_constant_pool_reference (vec);
2596 }
2597
2598 if (GET_MODE (vec) == mode)
2599 return vec;
2600 }
2601
2602 return 0;
2603 case VEC_CONCAT:
2604 {
2605 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
2606 ? GET_MODE (trueop0)
2607 : GET_MODE_INNER (mode));
2608 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
2609 ? GET_MODE (trueop1)
2610 : GET_MODE_INNER (mode));
2611
2612 gcc_assert (VECTOR_MODE_P (mode));
2613 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
2614 == GET_MODE_SIZE (mode));
2615
2616 if (VECTOR_MODE_P (op0_mode))
2617 gcc_assert (GET_MODE_INNER (mode)
2618 == GET_MODE_INNER (op0_mode));
2619 else
2620 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
2621
2622 if (VECTOR_MODE_P (op1_mode))
2623 gcc_assert (GET_MODE_INNER (mode)
2624 == GET_MODE_INNER (op1_mode));
2625 else
2626 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
2627
2628 if ((GET_CODE (trueop0) == CONST_VECTOR
2629 || GET_CODE (trueop0) == CONST_INT
2630 || GET_CODE (trueop0) == CONST_DOUBLE)
2631 && (GET_CODE (trueop1) == CONST_VECTOR
2632 || GET_CODE (trueop1) == CONST_INT
2633 || GET_CODE (trueop1) == CONST_DOUBLE))
2634 {
2635 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2636 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2637 rtvec v = rtvec_alloc (n_elts);
2638 unsigned int i;
2639 unsigned in_n_elts = 1;
2640
2641 if (VECTOR_MODE_P (op0_mode))
2642 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
2643 for (i = 0; i < n_elts; i++)
2644 {
2645 if (i < in_n_elts)
2646 {
2647 if (!VECTOR_MODE_P (op0_mode))
2648 RTVEC_ELT (v, i) = trueop0;
2649 else
2650 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
2651 }
2652 else
2653 {
2654 if (!VECTOR_MODE_P (op1_mode))
2655 RTVEC_ELT (v, i) = trueop1;
2656 else
2657 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
2658 i - in_n_elts);
2659 }
2660 }
2661
2662 return gen_rtx_CONST_VECTOR (mode, v);
2663 }
2664 }
2665 return 0;
2666
2667 default:
2668 gcc_unreachable ();
2669 }
2670
2671 return 0;
2672}
2673
/* Fold the binary operation CODE applied in MODE to the constant
   operands OP0 and OP1.  Return the folded constant rtx, or 0/NULL_RTX
   when the expression cannot (or must not) be folded at compile time,
   e.g. division by zero, floating-point cases that must trap or that
   depend on the run-time rounding mode, and out-of-range shifts.  */

rtx
simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
				 rtx op0, rtx op1)
{
  HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_BITSIZE (mode);

  /* Fold vector operations element-wise.  VEC_CONCAT is excluded
     because its operands are not elements of the result mode; it is
     handled by the next block.  */
  if (VECTOR_MODE_P (mode)
      && code != VEC_CONCAT
      && GET_CODE (op0) == CONST_VECTOR
      && GET_CODE (op1) == CONST_VECTOR)
    {
      unsigned n_elts = GET_MODE_NUNITS (mode);
      enum machine_mode op0mode = GET_MODE (op0);
      unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
      enum machine_mode op1mode = GET_MODE (op1);
      unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op0_n_elts == n_elts);
      gcc_assert (op1_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
	{
	  rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
					     CONST_VECTOR_ELT (op0, i),
					     CONST_VECTOR_ELT (op1, i));
	  /* If any element fails to fold, the whole fold fails.  */
	  if (!x)
	    return 0;
	  RTVEC_ELT (v, i) = x;
	}

      return gen_rtx_CONST_VECTOR (mode, v);
    }

  /* Fold VEC_CONCAT of constants by copying the pieces into place.  */
  if (VECTOR_MODE_P (mode)
      && code == VEC_CONCAT
      && CONSTANT_P (op0) && CONSTANT_P (op1))
    {
      unsigned n_elts = GET_MODE_NUNITS (mode);
      rtvec v = rtvec_alloc (n_elts);

      gcc_assert (n_elts >= 2);
      if (n_elts == 2)
	{
	  /* Two elements: each operand is a scalar element.  */
	  gcc_assert (GET_CODE (op0) != CONST_VECTOR);
	  gcc_assert (GET_CODE (op1) != CONST_VECTOR);

	  RTVEC_ELT (v, 0) = op0;
	  RTVEC_ELT (v, 1) = op1;
	}
      else
	{
	  /* More than two elements: both operands must themselves be
	     constant vectors whose lengths add up to N_ELTS.  */
	  unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
	  unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
	  unsigned i;

	  gcc_assert (GET_CODE (op0) == CONST_VECTOR);
	  gcc_assert (GET_CODE (op1) == CONST_VECTOR);
	  gcc_assert (op0_n_elts + op1_n_elts == n_elts);

	  for (i = 0; i < op0_n_elts; ++i)
	    RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
	  for (i = 0; i < op1_n_elts; ++i)
	    RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
	}

      return gen_rtx_CONST_VECTOR (mode, v);
    }

  /* Scalar floating-point folding.  */
  if (SCALAR_FLOAT_MODE_P (mode)
      && GET_CODE (op0) == CONST_DOUBLE
      && GET_CODE (op1) == CONST_DOUBLE
      && mode == GET_MODE (op0) && mode == GET_MODE (op1))
    {
      if (code == AND
	  || code == IOR
	  || code == XOR)
	{
	  /* Bitwise logical operations on FP constants are performed on
	     the target representation of the value.  */
	  long tmp0[4];
	  long tmp1[4];
	  REAL_VALUE_TYPE r;
	  int i;

	  real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
			  GET_MODE (op0));
	  real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
			  GET_MODE (op1));
	  for (i = 0; i < 4; i++)
	    {
	      switch (code)
		{
		case AND:
		  tmp0[i] &= tmp1[i];
		  break;
		case IOR:
		  tmp0[i] |= tmp1[i];
		  break;
		case XOR:
		  tmp0[i] ^= tmp1[i];
		  break;
		default:
		  gcc_unreachable ();
		}
	    }
	   real_from_target (&r, tmp0, mode);
	   return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
	}
      else
	{
	  REAL_VALUE_TYPE f0, f1, value, result;
	  bool inexact;

	  REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
	  REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
	  real_convert (&f0, mode, &f0);
	  real_convert (&f1, mode, &f1);

	  /* Folding a signaling NaN would lose the required trap.  */
	  if (HONOR_SNANS (mode)
	      && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
	    return 0;

	  /* Don't fold x/0: it may trap, and without infinities there
	     is no value to fold it to.  */
	  if (code == DIV
	      && REAL_VALUES_EQUAL (f1, dconst0)
	      && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
	    return 0;

	  /* Inf-with-Inf combinations that would raise an invalid
	     operation exception must be left for run time.  */
	  if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
	      && flag_trapping_math
	      && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
	    {
	      int s0 = REAL_VALUE_NEGATIVE (f0);
	      int s1 = REAL_VALUE_NEGATIVE (f1);

	      switch (code)
		{
		case PLUS:
		  /* Inf + -Inf = NaN plus exception.  */
		  if (s0 != s1)
		    return 0;
		  break;
		case MINUS:
		  /* Inf - Inf = NaN plus exception.  */
		  if (s0 == s1)
		    return 0;
		  break;
		case DIV:
		  /* Inf / Inf = NaN plus exception.  */
		  return 0;
		default:
		  break;
		}
	    }

	  if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
	      && flag_trapping_math
	      && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
		  || (REAL_VALUE_ISINF (f1)
		      && REAL_VALUES_EQUAL (f0, dconst0))))
	    /* Inf * 0 = NaN plus exception.  */
	    return 0;

	  inexact = real_arithmetic (&value, rtx_to_tree_code (code),
				     &f0, &f1);
	  real_convert (&result, mode, &value);

	  /* Don't constant fold this floating point operation if
	     the result has overflowed and flag_trapping_math.  */

	  if (flag_trapping_math
	      && MODE_HAS_INFINITIES (mode)
	      && REAL_VALUE_ISINF (result)
	      && !REAL_VALUE_ISINF (f0)
	      && !REAL_VALUE_ISINF (f1))
	    /* Overflow plus exception.  */
	    return 0;

	  /* Don't constant fold this floating point operation if the
	     result may depend upon the run-time rounding mode and
	     flag_rounding_math is set, or if GCC's software emulation
	     is unable to accurately represent the result.  */

	  if ((flag_rounding_math
	       || (REAL_MODE_FORMAT_COMPOSITE_P (mode)
		   && !flag_unsafe_math_optimizations))
	      && (inexact || !real_identical (&result, &value)))
	    return NULL_RTX;

	  return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
	}
    }

  /* We can fold some multi-word operations.  The value is kept as a
     (low, high) pair; low is unsigned, high carries the sign.  */
  if (GET_MODE_CLASS (mode) == MODE_INT
      && width == HOST_BITS_PER_WIDE_INT * 2
      && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
      && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, l2, lv, lt;
      HOST_WIDE_INT h1, h2, hv, ht;

      if (GET_CODE (op0) == CONST_DOUBLE)
	l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0);
      else
	l1 = INTVAL (op0), h1 = HWI_SIGN_EXTEND (l1);

      if (GET_CODE (op1) == CONST_DOUBLE)
	l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1);
      else
	l2 = INTVAL (op1), h2 = HWI_SIGN_EXTEND (l2);

      switch (code)
	{
	case MINUS:
	  /* A - B == A + (-B).  */
	  neg_double (l2, h2, &lv, &hv);
	  l2 = lv, h2 = hv;

	  /* Fall through....  */

	case PLUS:
	  add_double (l1, h1, l2, h2, &lv, &hv);
	  break;

	case MULT:
	  mul_double (l1, h1, l2, h2, &lv, &hv);
	  break;

	case DIV:
	  /* div_and_round_double returns nonzero on overflow; the
	     remainder goes into (lt, ht) and is discarded.  */
	  if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
				    &lv, &hv, &lt, &ht))
	    return 0;
	  break;

	case MOD:
	  if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
				    &lt, &ht, &lv, &hv))
	    return 0;
	  break;

	case UDIV:
	  if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
				    &lv, &hv, &lt, &ht))
	    return 0;
	  break;

	case UMOD:
	  if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
				    &lt, &ht, &lv, &hv))
	    return 0;
	  break;

	case AND:
	  lv = l1 & l2, hv = h1 & h2;
	  break;

	case IOR:
	  lv = l1 | l2, hv = h1 | h2;
	  break;

	case XOR:
	  lv = l1 ^ l2, hv = h1 ^ h2;
	  break;

	case SMIN:
	  /* Signed compare on the high words, unsigned on the low.  */
	  if (h1 < h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      < (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case SMAX:
	  if (h1 > h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      > (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case UMIN:
	  if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      < (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case UMAX:
	  if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      > (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case LSHIFTRT:   case ASHIFTRT:
	case ASHIFT:
	case ROTATE:     case ROTATERT:
	  /* Optionally truncate the count the way the hardware would,
	     then refuse to fold counts that are still out of range.  */
	  if (SHIFT_COUNT_TRUNCATED)
	    l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;

	  if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
	    return 0;

	  if (code == LSHIFTRT || code == ASHIFTRT)
	    rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
			   code == ASHIFTRT);
	  else if (code == ASHIFT)
	    lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
	  else if (code == ROTATE)
	    lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
	  else /* code == ROTATERT */
	    rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
	  break;

	default:
	  return 0;
	}

      return immed_double_const (lv, hv, mode);
    }

  /* Single-word integer folding.  */
  if (GET_CODE (op0) == CONST_INT && GET_CODE (op1) == CONST_INT
      && width <= HOST_BITS_PER_WIDE_INT && width != 0)
    {
      /* Get the integer argument values in two forms:
	 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S.  */

      arg0 = INTVAL (op0);
      arg1 = INTVAL (op1);

      if (width < HOST_BITS_PER_WIDE_INT)
	{
	  arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
	  arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;

	  arg0s = arg0;
	  if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
	    arg0s |= ((HOST_WIDE_INT) (-1) << width);

	  arg1s = arg1;
	  if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
	    arg1s |= ((HOST_WIDE_INT) (-1) << width);
	}
      else
	{
	  arg0s = arg0;
	  arg1s = arg1;
	}

      /* Compute the value of the arithmetic.  */

      switch (code)
	{
	case PLUS:
	  val = arg0s + arg1s;
	  break;

	case MINUS:
	  val = arg0s - arg1s;
	  break;

	case MULT:
	  val = arg0s * arg1s;
	  break;

	case DIV:
	  /* Refuse division by zero and the overflowing
	     most-negative-value / -1 case.  */
	  if (arg1s == 0
	      || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
		  && arg1s == -1))
	    return 0;
	  val = arg0s / arg1s;
	  break;

	case MOD:
	  if (arg1s == 0
	      || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
		  && arg1s == -1))
	    return 0;
	  val = arg0s % arg1s;
	  break;

	case UDIV:
	  if (arg1 == 0
	      || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
		  && arg1s == -1))
	    return 0;
	  val = (unsigned HOST_WIDE_INT) arg0 / arg1;
	  break;

	case UMOD:
	  if (arg1 == 0
	      || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
		  && arg1s == -1))
	    return 0;
	  val = (unsigned HOST_WIDE_INT) arg0 % arg1;
	  break;

	case AND:
	  val = arg0 & arg1;
	  break;

	case IOR:
	  val = arg0 | arg1;
	  break;

	case XOR:
	  val = arg0 ^ arg1;
	  break;

	case LSHIFTRT:
	case ASHIFT:
	case ASHIFTRT:
	  /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
	     the value is in range.  We can't return any old value for
	     out-of-range arguments because either the middle-end (via
	     shift_truncation_mask) or the back-end might be relying on
	     target-specific knowledge.  Nor can we rely on
	     shift_truncation_mask, since the shift might not be part of an
	     ashlM3, lshrM3 or ashrM3 instruction.  */
	  if (SHIFT_COUNT_TRUNCATED)
	    arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
	  else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
	    return 0;

	  val = (code == ASHIFT
		 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
		 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);

	  /* Sign-extend the result for arithmetic right shifts.  */
	  if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
	    val |= ((HOST_WIDE_INT) -1) << (width - arg1);
	  break;

	case ROTATERT:
	  if (arg1 < 0)
	    return 0;

	  arg1 %= width;
	  val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
		 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
	  break;

	case ROTATE:
	  if (arg1 < 0)
	    return 0;

	  arg1 %= width;
	  val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
		 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
	  break;

	case COMPARE:
	  /* Do nothing here.  */
	  return 0;

	case SMIN:
	  val = arg0s <= arg1s ? arg0s : arg1s;
	  break;

	case UMIN:
	  val = ((unsigned HOST_WIDE_INT) arg0
		 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
	  break;

	case SMAX:
	  val = arg0s > arg1s ? arg0s : arg1s;
	  break;

	case UMAX:
	  val = ((unsigned HOST_WIDE_INT) arg0
		 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
	  break;

	case SS_PLUS:
	case US_PLUS:
	case SS_MINUS:
	case US_MINUS:
	case SS_ASHIFT:
	  /* ??? There are simplifications that can be done.  */
	  return 0;

	default:
	  gcc_unreachable ();
	}

      return gen_int_mode (val, mode);
    }

  return NULL_RTX;
}
3175
3176
3177
3178/* Simplify a PLUS or MINUS, at least one of whose operands may be another
3179 PLUS or MINUS.
3180
   Rather than test for specific cases, we do this by a brute-force method
3182 and do all possible simplifications until no more changes occur. Then
3183 we rebuild the operation. */
3184
/* One operand of the flattened PLUS/MINUS expression being built by
   simplify_plus_minus.  */
struct simplify_plus_minus_op_data
{
  rtx op;	/* The operand itself.  */
  short neg;	/* Nonzero if this operand is subtracted rather than added.  */
};
3190
3191static int
3192simplify_plus_minus_op_data_cmp (const void *p1, const void *p2)
3193{
3194 const struct simplify_plus_minus_op_data *d1 = p1;
3195 const struct simplify_plus_minus_op_data *d2 = p2;
3196 int result;
3197
3198 result = (commutative_operand_precedence (d2->op)
3199 - commutative_operand_precedence (d1->op));
3200 if (result)
3201 return result;
3202
3203 /* Group together equal REGs to do more simplification. */
3204 if (REG_P (d1->op) && REG_P (d2->op))
3205 return REGNO (d1->op) - REGNO (d2->op);
3206 else
3207 return 0;
3208}
3209
3210static rtx
3211simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
3212 rtx op1)
3213{
3214 struct simplify_plus_minus_op_data ops[8];
3215 rtx result, tem;
3216 int n_ops = 2, input_ops = 2;
3217 int changed, n_constants = 0, canonicalized = 0;
3218 int i, j;
3219
3220 memset (ops, 0, sizeof ops);
3221
3222 /* Set up the two operands and then expand them until nothing has been
3223 changed. If we run out of room in our array, give up; this should
3224 almost never happen. */
3225
3226 ops[0].op = op0;
3227 ops[0].neg = 0;
3228 ops[1].op = op1;
3229 ops[1].neg = (code == MINUS);
3230
3231 do
3232 {
3233 changed = 0;
3234
3235 for (i = 0; i < n_ops; i++)
3236 {
3237 rtx this_op = ops[i].op;
3238 int this_neg = ops[i].neg;
3239 enum rtx_code this_code = GET_CODE (this_op);
3240
3241 switch (this_code)
3242 {
3243 case PLUS:
3244 case MINUS:
3245 if (n_ops == 7)
3246 return NULL_RTX;
3247
3248 ops[n_ops].op = XEXP (this_op, 1);
3249 ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
3250 n_ops++;
3251
3252 ops[i].op = XEXP (this_op, 0);
3253 input_ops++;
3254 changed = 1;
3255 canonicalized |= this_neg;
3256 break;
3257
3258 case NEG:
3259 ops[i].op = XEXP (this_op, 0);
3260 ops[i].neg = ! this_neg;
3261 changed = 1;
3262 canonicalized = 1;
3263 break;
3264
3265 case CONST:
3266 if (n_ops < 7
3267 && GET_CODE (XEXP (this_op, 0)) == PLUS
3268 && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
3269 && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
3270 {
3271 ops[i].op = XEXP (XEXP (this_op, 0), 0);
3272 ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
3273 ops[n_ops].neg = this_neg;
3274 n_ops++;
3275 changed = 1;
3276 canonicalized = 1;
3277 }
3278 break;
3279
3280 case NOT:
3281 /* ~a -> (-a - 1) */
3282 if (n_ops != 7)
3283 {
3284 ops[n_ops].op = constm1_rtx;
3285 ops[n_ops++].neg = this_neg;
3286 ops[i].op = XEXP (this_op, 0);
3287 ops[i].neg = !this_neg;
3288 changed = 1;
3289 canonicalized = 1;
3290 }
3291 break;
3292
3293 case CONST_INT:
3294 n_constants++;
3295 if (this_neg)
3296 {
3297 ops[i].op = neg_const_int (mode, this_op);
3298 ops[i].neg = 0;
3299 changed = 1;
3300 canonicalized = 1;
3301 }
3302 break;
3303
3304 default:
3305 break;
3306 }
3307 }
3308 }
3309 while (changed);
3310
3311 if (n_constants > 1)
3312 canonicalized = 1;
3313
3314 gcc_assert (n_ops >= 2);
3315
3316 /* If we only have two operands, we can avoid the loops. */
3317 if (n_ops == 2)
3318 {
3319 enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
3320 rtx lhs, rhs;
3321
3322 /* Get the two operands. Be careful with the order, especially for
3323 the cases where code == MINUS. */
3324 if (ops[0].neg && ops[1].neg)
3325 {
3326 lhs = gen_rtx_NEG (mode, ops[0].op);
3327 rhs = ops[1].op;
3328 }
3329 else if (ops[0].neg)
3330 {
3331 lhs = ops[1].op;
3332 rhs = ops[0].op;
3333 }
3334 else
3335 {
3336 lhs = ops[0].op;
3337 rhs = ops[1].op;
3338 }
3339
3340 return simplify_const_binary_operation (code, mode, lhs, rhs);
3341 }
3342
3343 /* Now simplify each pair of operands until nothing changes. */
3344 do
3345 {
3346 /* Insertion sort is good enough for an eight-element array. */
3347 for (i = 1; i < n_ops; i++)
3348 {
3349 struct simplify_plus_minus_op_data save;
3350 j = i - 1;
3351 if (simplify_plus_minus_op_data_cmp (&ops[j], &ops[i]) < 0)
3352 continue;
3353
3354 canonicalized = 1;
3355 save = ops[i];
3356 do
3357 ops[j + 1] = ops[j];
3358 while (j-- && simplify_plus_minus_op_data_cmp (&ops[j], &save) > 0);
3359 ops[j + 1] = save;
3360 }
3361
3362 /* This is only useful the first time through. */
3363 if (!canonicalized)
3364 return NULL_RTX;
3365
3366 changed = 0;
3367 for (i = n_ops - 1; i > 0; i--)
3368 for (j = i - 1; j >= 0; j--)
3369 {
3370 rtx lhs = ops[j].op, rhs = ops[i].op;
3371 int lneg = ops[j].neg, rneg = ops[i].neg;
3372
3373 if (lhs != 0 && rhs != 0)
3374 {
3375 enum rtx_code ncode = PLUS;
3376
3377 if (lneg != rneg)
3378 {
3379 ncode = MINUS;
3380 if (lneg)
3381 tem = lhs, lhs = rhs, rhs = tem;
3382 }
3383 else if (swap_commutative_operands_p (lhs, rhs))
3384 tem = lhs, lhs = rhs, rhs = tem;
3385
3386 if ((GET_CODE (lhs) == CONST || GET_CODE (lhs) == CONST_INT)
3387 && (GET_CODE (rhs) == CONST || GET_CODE (rhs) == CONST_INT))
3388 {
3389 rtx tem_lhs, tem_rhs;
3390
3391 tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
3392 tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
3393 tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);
3394
3395 if (tem && !CONSTANT_P (tem))
3396 tem = gen_rtx_CONST (GET_MODE (tem), tem);
3397 }
3398 else
3399 tem = simplify_binary_operation (ncode, mode, lhs, rhs);
3400
3401 /* Reject "simplifications" that just wrap the two
3402 arguments in a CONST. Failure to do so can result
3403 in infinite recursion with simplify_binary_operation
3404 when it calls us to simplify CONST operations. */
3405 if (tem
3406 && ! (GET_CODE (tem) == CONST
3407 && GET_CODE (XEXP (tem, 0)) == ncode
3408 && XEXP (XEXP (tem, 0), 0) == lhs
3409 && XEXP (XEXP (tem, 0), 1) == rhs))
3410 {
3411 lneg &= rneg;
3412 if (GET_CODE (tem) == NEG)
3413 tem = XEXP (tem, 0), lneg = !lneg;
3414 if (GET_CODE (tem) == CONST_INT && lneg)
3415 tem = neg_const_int (mode, tem), lneg = 0;
3416
3417 ops[i].op = tem;
3418 ops[i].neg = lneg;
3419 ops[j].op = NULL_RTX;
3420 changed = 1;
3421 }
3422 }
3423 }
3424
3425 /* Pack all the operands to the lower-numbered entries. */
3426 for (i = 0, j = 0; j < n_ops; j++)
3427 if (ops[j].op)
3428 {
3429 ops[i] = ops[j];
3430 i++;
3431 }
3432 n_ops = i;
3433 }
3434 while (changed);
3435
3436 /* Create (minus -C X) instead of (neg (const (plus X C))). */
3437 if (n_ops == 2
3438 && GET_CODE (ops[1].op) == CONST_INT
3439 && CONSTANT_P (ops[0].op)
3440 && ops[0].neg)
3441 return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
3442
3443 /* We suppressed creation of trivial CONST expressions in the
3444 combination loop to avoid recursion. Create one manually now.
3445 The combination loop should have ensured that there is exactly
3446 one CONST_INT, and the sort will have ensured that it is last
3447 in the array and that any other constant will be next-to-last. */
3448
3449 if (n_ops > 1
3450 && GET_CODE (ops[n_ops - 1].op) == CONST_INT
3451 && CONSTANT_P (ops[n_ops - 2].op))
3452 {
3453 rtx value = ops[n_ops - 1].op;
3454 if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
3455 value = neg_const_int (mode, value);
3456 ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
3457 n_ops--;
3458 }
3459
3460 /* Put a non-negated operand first, if possible. */
3461
3462 for (i = 0; i < n_ops && ops[i].neg; i++)
3463 continue;
3464 if (i == n_ops)
3465 ops[0].op = gen_rtx_NEG (mode, ops[0].op);
3466 else if (i != 0)
3467 {
3468 tem = ops[0].op;
3469 ops[0] = ops[i];
3470 ops[i].op = tem;
3471 ops[i].neg = 1;
3472 }
3473
3474 /* Now make the result by performing the requested operations. */
3475 result = ops[0].op;
3476 for (i = 1; i < n_ops; i++)
3477 result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
3478 mode, result, ops[i].op);
3479
3480 return result;
3481}
3482
3483/* Check whether an operand is suitable for calling simplify_plus_minus. */
3484static bool
3485plus_minus_operand_p (rtx x)
3486{
3487 return GET_CODE (x) == PLUS
3488 || GET_CODE (x) == MINUS
3489 || (GET_CODE (x) == CONST
3490 && GET_CODE (XEXP (x, 0)) == PLUS
3491 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
3492 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
3493}
3494
/* Like simplify_binary_operation except used for relational operators.
   MODE is the mode of the result.  If MODE is VOIDmode, both operands must
   not also be VOIDmode.

   CMP_MODE specifies in which mode the comparison is done in, so it is
   the mode of the operands.  If CMP_MODE is VOIDmode, it is taken from
   the operands or, if both are VOIDmode, the operands are compared in
   "infinite precision".

   Returns the simplified rtx, or NULL_RTX if no simplification is
   possible (including for MODE_CC comparisons, which cannot be
   simplified further here).  */
rtx
simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
			       enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem, trueop0, trueop1;

  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op0);
  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op1);

  /* First try to fold the comparison to a compile-time constant; if
     that succeeds, convert the 0/STORE_FLAG_VALUE result to MODE.  */
  tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
  if (tem)
    {
      if (SCALAR_FLOAT_MODE_P (mode))
	{
	  if (tem == const0_rtx)
	    return CONST0_RTX (mode);
	  /* A "true" result needs the target's FP flag value, if any;
	     otherwise we cannot express it and must give up.  */
#ifdef FLOAT_STORE_FLAG_VALUE
	  {
	    REAL_VALUE_TYPE val;
	    val = FLOAT_STORE_FLAG_VALUE (mode);
	    return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
	  }
#else
	  return NULL_RTX;
#endif
	}
      if (VECTOR_MODE_P (mode))
	{
	  if (tem == const0_rtx)
	    return CONST0_RTX (mode);
	  /* Likewise, a vector "true" needs the target's per-element
	     flag value, broadcast to every element.  */
#ifdef VECTOR_STORE_FLAG_VALUE
	  {
	    int i, units;
	    rtvec v;

	    rtx val = VECTOR_STORE_FLAG_VALUE (mode);
	    if (val == NULL_RTX)
	      return NULL_RTX;
	    if (val == const1_rtx)
	      return CONST1_RTX (mode);

	    units = GET_MODE_NUNITS (mode);
	    v = rtvec_alloc (units);
	    for (i = 0; i < units; i++)
	      RTVEC_ELT (v, i) = val;
	    return gen_rtx_raw_CONST_VECTOR (mode, v);
	  }
#else
	  return NULL_RTX;
#endif
	}

      return tem;
    }

  /* For the following tests, ensure const0_rtx is op1.  */
  if (swap_commutative_operands_p (op0, op1)
      || (op0 == const0_rtx && op1 != const0_rtx))
    tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    return simplify_relational_operation (code, mode, VOIDmode,
					  XEXP (op0, 0), XEXP (op0, 1));

  /* Condition-code modes and cc0 cannot be analyzed further.  */
  if (GET_MODE_CLASS (cmp_mode) == MODE_CC
      || CC0_P (op0))
    return NULL_RTX;

  /* Look through constant-pool references before the real work.  */
  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);
  return simplify_relational_operation_1 (code, mode, cmp_mode,
					  trueop0, trueop1);
}
3579
/* This part of simplify_relational_operation is only used when CMP_MODE
   is not in class MODE_CC (i.e. it is a real comparison).

   MODE is the mode of the result, while CMP_MODE specifies in which
   mode the comparison is done in, so it is the mode of the operands.

   CODE is the comparison applied to OP0 and OP1.  Returns a simplified
   rtx, or NULL_RTX if none of the patterns below match.  */

static rtx
simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
				 enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  enum rtx_code op0code = GET_CODE (op0);

  if (GET_CODE (op1) == CONST_INT)
    {
      if (INTVAL (op1) == 0 && COMPARISON_P (op0))
	{
	  /* If op0 is a comparison, extract the comparison arguments
	     from it.  */
	  if (code == NE)
	    {
	      /* (ne (cmp a b) 0) is just (cmp a b) when the result mode
		 matches; otherwise regenerate the comparison in MODE.  */
	      if (GET_MODE (op0) == mode)
		return simplify_rtx (op0);
	      else
		return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
					        XEXP (op0, 0), XEXP (op0, 1));
	    }
	  else if (code == EQ)
	    {
	      /* (eq (cmp a b) 0) is the reversed comparison, but only
		 when the reversal is known to be valid (e.g. not blocked
		 by possible NaNs).  */
	      enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
	      if (new_code != UNKNOWN)
	        return simplify_gen_relational (new_code, mode, VOIDmode,
					        XEXP (op0, 0), XEXP (op0, 1));
	    }
	}
    }

  /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1))  */
  if ((code == EQ || code == NE)
      && (op0code == PLUS || op0code == MINUS)
      && CONSTANT_P (op1)
      && CONSTANT_P (XEXP (op0, 1))
      && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
    {
      rtx x = XEXP (op0, 0);
      rtx c = XEXP (op0, 1);

      /* For MINUS the constant is moved to the other side by adding it
	 instead.  This transformation can round for floating point,
	 hence the flag_unsafe_math_optimizations guard above.  */
      c = simplify_gen_binary (op0code == PLUS ? MINUS : PLUS,
			       cmp_mode, op1, c);
      return simplify_gen_relational (code, mode, cmp_mode, x, c);
    }

  /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
     the same as (zero_extract:SI FOO (const_int 1) BAR).  */
  if (code == NE
      && op1 == const0_rtx
      && GET_MODE_CLASS (mode) == MODE_INT
      && cmp_mode != VOIDmode
      /* ??? Work-around BImode bugs in the ia64 backend.  */
      && mode != BImode
      && cmp_mode != BImode
      && nonzero_bits (op0, cmp_mode) == 1
      && STORE_FLAG_VALUE == 1)
    /* OP0 is known to be 0 or 1, so it is already the desired flag value;
       just widen or narrow it to MODE.  */
    return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
	   ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
	   : lowpart_subreg (mode, op0, cmp_mode);

  /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y).  */
  if ((code == EQ || code == NE)
      && op1 == const0_rtx
      && op0code == XOR)
    return simplify_gen_relational (code, mode, cmp_mode,
				    XEXP (op0, 0), XEXP (op0, 1));

  /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && rtx_equal_p (XEXP (op0, 0), op1)
      && !side_effects_p (XEXP (op0, 0)))
    return simplify_gen_relational (code, mode, cmp_mode,
				    XEXP (op0, 1), const0_rtx);

  /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && rtx_equal_p (XEXP (op0, 1), op1)
      && !side_effects_p (XEXP (op0, 1)))
    return simplify_gen_relational (code, mode, cmp_mode,
				    XEXP (op0, 0), const0_rtx);

  /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && (GET_CODE (op1) == CONST_INT
	  || GET_CODE (op1) == CONST_DOUBLE)
      && (GET_CODE (XEXP (op0, 1)) == CONST_INT
	  || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE))
    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
				    simplify_gen_binary (XOR, cmp_mode,
							 XEXP (op0, 1), op1));

  return NULL_RTX;
}
3682
/* Check if the given comparison (done in the given MODE) is actually a
   tautology or a contradiction.
   If no simplification is possible, this function returns zero.
   Otherwise, it returns either const_true_rtx or const0_rtx.

   CODE is the comparison applied to OP0 and OP1; MODE is the mode in
   which the comparison is performed (the mode of the operands, not of
   the result).  */

rtx
simplify_const_relational_operation (enum rtx_code code,
				     enum machine_mode mode,
				     rtx op0, rtx op1)
{
  /* These five flags record the full relation between the operands once
     it is known: equality, plus signed and unsigned "less than" in each
     direction.  The final switch derives every comparison from them.  */
  int equal, op0lt, op0ltu, op1lt, op1ltu;
  rtx tem;
  rtx trueop0;
  rtx trueop1;

  gcc_assert (mode != VOIDmode
	      || (GET_MODE (op0) == VOIDmode
		  && GET_MODE (op1) == VOIDmode));

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    {
      op1 = XEXP (op0, 1);
      op0 = XEXP (op0, 0);

      /* Recover the operand mode; with two VOIDmode (constant) operands
	 there is nothing we can deduce.  */
      if (GET_MODE (op0) != VOIDmode)
	mode = GET_MODE (op0);
      else if (GET_MODE (op1) != VOIDmode)
	mode = GET_MODE (op1);
      else
	return 0;
    }

  /* We can't simplify MODE_CC values since we don't know what the
     actual comparison is.  */
  if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
    return 0;

  /* Make sure the constant is second.  */
  if (swap_commutative_operands_p (op0, op1))
    {
      tem = op0, op0 = op1, op1 = tem;
      code = swap_condition (code);
    }

  /* Look through constant-pool references so constants loaded from
     memory can be folded too.  */
  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  /* For integer comparisons of A and B maybe we can simplify A - B and can
     then simplify a comparison of that with zero.  If A and B are both either
     a register or a CONST_INT, this can't help; testing for these cases will
     prevent infinite recursion here and speed things up.

     We can only do this for EQ and NE comparisons as otherwise we may
     lose or introduce overflow which we cannot disregard as undefined as
     we do not know the signedness of the operation on either the left or
     the right hand side of the comparison.  */

  if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
      && (code == EQ || code == NE)
      && ! ((REG_P (op0) || GET_CODE (trueop0) == CONST_INT)
	    && (REG_P (op1) || GET_CODE (trueop1) == CONST_INT))
      && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
      /* We cannot do this if tem is a nonzero address.  */
      && ! nonzero_address_p (tem))
    return simplify_const_relational_operation (signed_condition (code),
						mode, tem, const0_rtx);

  if (! HONOR_NANS (mode) && code == ORDERED)
    return const_true_rtx;

  if (! HONOR_NANS (mode) && code == UNORDERED)
    return const0_rtx;

  /* For modes without NaNs, if the two operands are equal, we know the
     result except if they have side-effects.  */
  if (! HONOR_NANS (GET_MODE (trueop0))
      && rtx_equal_p (trueop0, trueop1)
      && ! side_effects_p (trueop0))
    equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;

  /* If the operands are floating-point constants, see if we can fold
     the result.  */
  else if (GET_CODE (trueop0) == CONST_DOUBLE
	   && GET_CODE (trueop1) == CONST_DOUBLE
	   && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
    {
      REAL_VALUE_TYPE d0, d1;

      REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
      REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);

      /* Comparisons are unordered iff at least one of the values is NaN.  */
      if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
	switch (code)
	  {
	  case UNEQ:
	  case UNLT:
	  case UNGT:
	  case UNLE:
	  case UNGE:
	  case NE:
	  case UNORDERED:
	    return const_true_rtx;
	  case EQ:
	  case LT:
	  case GT:
	  case LE:
	  case GE:
	  case LTGT:
	  case ORDERED:
	    return const0_rtx;
	  default:
	    return 0;
	  }

      equal = REAL_VALUES_EQUAL (d0, d1);
      op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
      op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
    }

  /* Otherwise, see if the operands are both integers.  */
  else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
	   && (GET_CODE (trueop0) == CONST_DOUBLE
	       || GET_CODE (trueop0) == CONST_INT)
	   && (GET_CODE (trueop1) == CONST_DOUBLE
	       || GET_CODE (trueop1) == CONST_INT))
    {
      int width = GET_MODE_BITSIZE (mode);
      /* (low, high) word pairs for each constant; the "s" variants are
	 used for signed comparison, the "u" variants for unsigned.  */
      HOST_WIDE_INT l0s, h0s, l1s, h1s;
      unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;

      /* Get the two words comprising each integer constant.  */
      if (GET_CODE (trueop0) == CONST_DOUBLE)
	{
	  l0u = l0s = CONST_DOUBLE_LOW (trueop0);
	  h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
	}
      else
	{
	  l0u = l0s = INTVAL (trueop0);
	  h0u = h0s = HWI_SIGN_EXTEND (l0s);
	}

      if (GET_CODE (trueop1) == CONST_DOUBLE)
	{
	  l1u = l1s = CONST_DOUBLE_LOW (trueop1);
	  h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
	}
      else
	{
	  l1u = l1s = INTVAL (trueop1);
	  h1u = h1s = HWI_SIGN_EXTEND (l1s);
	}

      /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
	 we have to sign or zero-extend the values.  */
      if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
	{
	  l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
	  l1u &= ((HOST_WIDE_INT) 1 << width) - 1;

	  if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
	    l0s |= ((HOST_WIDE_INT) (-1) << width);

	  if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
	    l1s |= ((HOST_WIDE_INT) (-1) << width);
	}
      /* The value fits in a single word: derive the high word from the
	 (already extended) low word.  */
      if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
	h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);

      /* Double-word comparison: the high words decide; ties fall back to
	 the low words, which are always compared unsigned.  */
      equal = (h0u == h1u && l0u == l1u);
      op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
      op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
      op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
      op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
    }

  /* Otherwise, there are some code-specific tests we can make.  */
  else
    {
      /* Optimize comparisons with upper and lower bounds.  */
      if (SCALAR_INT_MODE_P (mode)
	  && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
	{
	  rtx mmin, mmax;
	  int sign;

	  if (code == GEU
	      || code == LEU
	      || code == GTU
	      || code == LTU)
	    sign = 0;
	  else
	    sign = 1;

	  get_mode_bounds (mode, sign, mode, &mmin, &mmax);

	  tem = NULL_RTX;
	  switch (code)
	    {
	    case GEU:
	    case GE:
	      /* x >= min is always true.  */
	      if (rtx_equal_p (trueop1, mmin))
		tem = const_true_rtx;
	      else
		break;

	      /* Fall through when TEM was set -- harmless, since the
		 LEU/LE case can only set TEM to const_true_rtx again.  */
	    case LEU:
	    case LE:
	      /* x <= max is always true.  */
	      if (rtx_equal_p (trueop1, mmax))
		tem = const_true_rtx;
	      break;

	    case GTU:
	    case GT:
	      /* x > max is always false.  */
	      if (rtx_equal_p (trueop1, mmax))
		tem = const0_rtx;
	      break;

	    case LTU:
	    case LT:
	      /* x < min is always false.  */
	      if (rtx_equal_p (trueop1, mmin))
		tem = const0_rtx;
	      break;

	    default:
	      break;
	    }
	  if (tem == const0_rtx
	      || tem == const_true_rtx)
	    return tem;
	}

      switch (code)
	{
	case EQ:
	  if (trueop1 == const0_rtx && nonzero_address_p (op0))
	    return const0_rtx;
	  break;

	case NE:
	  if (trueop1 == const0_rtx && nonzero_address_p (op0))
	    return const_true_rtx;
	  break;

	case LT:
	  /* Optimize abs(x) < 0.0.  */
	  if (trueop1 == CONST0_RTX (mode)
	      && !HONOR_SNANS (mode)
	      && (!INTEGRAL_MODE_P (mode)
		  || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
	    {
	      tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
						       : trueop0;
	      if (GET_CODE (tem) == ABS)
		{
		  /* For integers this relies on abs (INT_MIN) not
		     wrapping, i.e. on signed overflow being undefined;
		     warn if requested.  */
		  if (INTEGRAL_MODE_P (mode)
		      && (issue_strict_overflow_warning
			  (WARN_STRICT_OVERFLOW_CONDITIONAL)))
		    warning (OPT_Wstrict_overflow,
			     ("assuming signed overflow does not occur when "
			      "assuming abs (x) < 0 is false"));
		  return const0_rtx;
		}
	    }
	  break;

	case GE:
	  /* Optimize abs(x) >= 0.0.  */
	  if (trueop1 == CONST0_RTX (mode)
	      && !HONOR_NANS (mode)
	      && (!INTEGRAL_MODE_P (mode)
		  || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
	    {
	      tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
						       : trueop0;
	      if (GET_CODE (tem) == ABS)
		{
		  if (INTEGRAL_MODE_P (mode)
		      && (issue_strict_overflow_warning
			  (WARN_STRICT_OVERFLOW_CONDITIONAL)))
		    warning (OPT_Wstrict_overflow,
			     ("assuming signed overflow does not occur when "
			      "assuming abs (x) >= 0 is true"));
		  return const_true_rtx;
		}
	    }
	  break;

	case UNGE:
	  /* Optimize ! (abs(x) < 0.0).  */
	  if (trueop1 == CONST0_RTX (mode))
	    {
	      tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
						       : trueop0;
	      if (GET_CODE (tem) == ABS)
		return const_true_rtx;
	    }
	  break;

	default:
	  break;
	}

      return 0;
    }

  /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
     as appropriate.  */
  switch (code)
    {
    case EQ:
    case UNEQ:
      return equal ? const_true_rtx : const0_rtx;
    case NE:
    case LTGT:
      return ! equal ? const_true_rtx : const0_rtx;
    case LT:
    case UNLT:
      return op0lt ? const_true_rtx : const0_rtx;
    case GT:
    case UNGT:
      return op1lt ? const_true_rtx : const0_rtx;
    case LTU:
      return op0ltu ? const_true_rtx : const0_rtx;
    case GTU:
      return op1ltu ? const_true_rtx : const0_rtx;
    case LE:
    case UNLE:
      return equal || op0lt ? const_true_rtx : const0_rtx;
    case GE:
    case UNGE:
      return equal || op1lt ? const_true_rtx : const0_rtx;
    case LEU:
      return equal || op0ltu ? const_true_rtx : const0_rtx;
    case GEU:
      return equal || op1ltu ? const_true_rtx : const0_rtx;
    case ORDERED:
      return const_true_rtx;
    case UNORDERED:
      return const0_rtx;
    default:
      gcc_unreachable ();
    }
}
4033
/* Simplify CODE, an operation with result mode MODE and three operands,
   OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
   a constant.  Return 0 if no simplifications is possible.

   Handles SIGN_EXTRACT/ZERO_EXTRACT of constants, IF_THEN_ELSE folding,
   and VEC_MERGE with a constant selector.  */

rtx
simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
			    enum machine_mode op0_mode, rtx op0, rtx op1,
			    rtx op2)
{
  unsigned int width = GET_MODE_BITSIZE (mode);

  /* VOIDmode means "infinite" precision.  */
  if (width == 0)
    width = HOST_BITS_PER_WIDE_INT;

  switch (code)
    {
    case SIGN_EXTRACT:
    case ZERO_EXTRACT:
      /* OP0 is the value, OP1 the field width, OP2 the starting bit.
	 Only fold when the field lies entirely within the mode.  */
      if (GET_CODE (op0) == CONST_INT
	  && GET_CODE (op1) == CONST_INT
	  && GET_CODE (op2) == CONST_INT
	  && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
	  && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
	{
	  /* Extracting a bit-field from a constant */
	  HOST_WIDE_INT val = INTVAL (op0);

	  /* OP2 counts from the most significant bit when
	     BITS_BIG_ENDIAN; convert to a shift from the LSB.  */
	  if (BITS_BIG_ENDIAN)
	    val >>= (GET_MODE_BITSIZE (op0_mode)
		     - INTVAL (op2) - INTVAL (op1));
	  else
	    val >>= INTVAL (op2);

	  if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
	    {
	      /* First zero-extend.  */
	      val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
	      /* If desired, propagate sign bit.  */
	      if (code == SIGN_EXTRACT
		  && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
		val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
	    }

	  /* Clear the bits that don't belong in our mode,
	     unless they and our sign bit are all one.
	     So we get either a reasonable negative value or a reasonable
	     unsigned value for this mode.  */
	  if (width < HOST_BITS_PER_WIDE_INT
	      && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
		  != ((HOST_WIDE_INT) (-1) << (width - 1))))
	    val &= ((HOST_WIDE_INT) 1 << width) - 1;

	  return gen_int_mode (val, mode);
	}
      break;

    case IF_THEN_ELSE:
      /* A constant condition selects one arm outright.  */
      if (GET_CODE (op0) == CONST_INT)
	return op0 != const0_rtx ? op1 : op2;

      /* Convert c ? a : a into "a".  */
      if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
	return op1;

      /* Convert a != b ? a : b into "a".  */
      if (GET_CODE (op0) == NE
	  && ! side_effects_p (op0)
	  && ! HONOR_NANS (mode)
	  && ! HONOR_SIGNED_ZEROS (mode)
	  && ((rtx_equal_p (XEXP (op0, 0), op1)
	       && rtx_equal_p (XEXP (op0, 1), op2))
	      || (rtx_equal_p (XEXP (op0, 0), op2)
		  && rtx_equal_p (XEXP (op0, 1), op1))))
	return op1;

      /* Convert a == b ? a : b into "b".  */
      if (GET_CODE (op0) == EQ
	  && ! side_effects_p (op0)
	  && ! HONOR_NANS (mode)
	  && ! HONOR_SIGNED_ZEROS (mode)
	  && ((rtx_equal_p (XEXP (op0, 0), op1)
	       && rtx_equal_p (XEXP (op0, 1), op2))
	      || (rtx_equal_p (XEXP (op0, 0), op2)
		  && rtx_equal_p (XEXP (op0, 1), op1))))
	return op2;

      if (COMPARISON_P (op0) && ! side_effects_p (op0))
	{
	  enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
					? GET_MODE (XEXP (op0, 1))
					: GET_MODE (XEXP (op0, 0)));
	  rtx temp;

	  /* Look for happy constants in op1 and op2.  */
	  if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
	    {
	      HOST_WIDE_INT t = INTVAL (op1);
	      HOST_WIDE_INT f = INTVAL (op2);

	      /* When the arms are STORE_FLAG_VALUE and 0, the whole
		 IF_THEN_ELSE collapses to the comparison itself (or its
		 reverse, when the arms are swapped).  */
	      if (t == STORE_FLAG_VALUE && f == 0)
		code = GET_CODE (op0);
	      else if (t == 0 && f == STORE_FLAG_VALUE)
		{
		  enum rtx_code tmp;
		  tmp = reversed_comparison_code (op0, NULL_RTX);
		  if (tmp == UNKNOWN)
		    break;
		  code = tmp;
		}
	      else
		break;

	      return simplify_gen_relational (code, mode, cmp_mode,
					      XEXP (op0, 0), XEXP (op0, 1));
	    }

	  if (cmp_mode == VOIDmode)
	    cmp_mode = op0_mode;
	  temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
			  			cmp_mode, XEXP (op0, 0),
						XEXP (op0, 1));

	  /* See if any simplifications were possible.  */
	  if (temp)
	    {
	      if (GET_CODE (temp) == CONST_INT)
		return temp == const0_rtx ? op2 : op1;
	      else if (temp)
		return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
	    }
	}
      break;

    case VEC_MERGE:
      gcc_assert (GET_MODE (op0) == mode);
      gcc_assert (GET_MODE (op1) == mode);
      gcc_assert (VECTOR_MODE_P (mode));
      op2 = avoid_constant_pool_reference (op2);
      if (GET_CODE (op2) == CONST_INT)
	{
	  int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
	  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	  int mask = (1 << n_elts) - 1;

	  /* Bit I of OP2 selects element I from OP0, otherwise from OP1;
	     an all-zero or all-one selector picks a whole operand.  */
	  if (!(INTVAL (op2) & mask))
	    return op1;
	  if ((INTVAL (op2) & mask) == mask)
	    return op0;

	  op0 = avoid_constant_pool_reference (op0);
	  op1 = avoid_constant_pool_reference (op1);
	  if (GET_CODE (op0) == CONST_VECTOR
	      && GET_CODE (op1) == CONST_VECTOR)
	    {
	      rtvec v = rtvec_alloc (n_elts);
	      unsigned int i;

	      for (i = 0; i < n_elts; i++)
		RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
				    ? CONST_VECTOR_ELT (op0, i)
				    : CONST_VECTOR_ELT (op1, i));
	      return gen_rtx_CONST_VECTOR (mode, v);
	    }
	}
      break;

    default:
      gcc_unreachable ();
    }

  return 0;
}
4207
/* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_VECTOR,
   returning another CONST_INT or CONST_DOUBLE or CONST_VECTOR.

   Works by unpacking OP into a collection of 8-bit values
   represented as a little-endian array of 'unsigned char', selecting by BYTE,
   and then repacking them again for OUTERMODE.

   OUTERMODE is the mode of the subreg result, INNERMODE the mode of OP,
   and BYTE the subreg byte offset.  Returns NULL_RTX when the conversion
   cannot be represented (complex modes, or integers wider than two
   host words).  */

static rtx
simplify_immed_subreg (enum machine_mode outermode, rtx op,
		       enum machine_mode innermode, unsigned int byte)
{
  /* We support up to 512-bit values (for V8DFmode).  */
  enum {
    max_bitsize = 512,
    value_bit = 8,
    value_mask = (1 << value_bit) - 1
  };
  /* Scratch buffer holding the unpacked value as little-endian
     VALUE_BIT-sized chunks.  */
  unsigned char value[max_bitsize / value_bit];
  int value_start;
  int i;
  int elem;

  int num_elem;
  rtx * elems;
  int elem_bitsize;
  rtx result_s;
  rtvec result_v = NULL;
  enum mode_class outer_class;
  enum machine_mode outer_submode;

  /* Some ports misuse CCmode.  */
  if (GET_MODE_CLASS (outermode) == MODE_CC && GET_CODE (op) == CONST_INT)
    return op;

  /* We have no way to represent a complex constant at the rtl level.  */
  if (COMPLEX_MODE_P (outermode))
    return NULL_RTX;

  /* Unpack the value.  */

  if (GET_CODE (op) == CONST_VECTOR)
    {
      num_elem = CONST_VECTOR_NUNITS (op);
      elems = &CONST_VECTOR_ELT (op, 0);
      elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
    }
  else
    {
      num_elem = 1;
      elems = &op;
      elem_bitsize = max_bitsize;
    }
  /* If this asserts, it is too complicated; reducing value_bit may help.  */
  gcc_assert (BITS_PER_UNIT % value_bit == 0);
  /* I don't know how to handle endianness of sub-units.  */
  gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);

  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char * vp;
      rtx el = elems[elem];

      /* Vectors are kept in target memory order.  (This is probably
	 a mistake.)  */
      {
	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
			  / BITS_PER_UNIT);
	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
	unsigned bytele = (subword_byte % UNITS_PER_WORD
			 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
	vp = value + (bytele * BITS_PER_UNIT) / value_bit;
      }

      switch (GET_CODE (el))
	{
	case CONST_INT:
	  for (i = 0;
	       i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
	       i += value_bit)
	    *vp++ = INTVAL (el) >> i;
	  /* CONST_INTs are always logically sign-extended.  */
	  for (; i < elem_bitsize; i += value_bit)
	    *vp++ = INTVAL (el) < 0 ? -1 : 0;
	  break;

	case CONST_DOUBLE:
	  if (GET_MODE (el) == VOIDmode)
	    {
	      /* A VOIDmode CONST_DOUBLE is a two-word integer.  If this
		 triggers, someone should have generated a
		 CONST_INT instead.  */
	      gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);

	      for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
		*vp++ = CONST_DOUBLE_LOW (el) >> i;
	      while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
		{
		  *vp++
		    = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
		  i += value_bit;
		}
	      /* It shouldn't matter what's done here, so fill it with
		 zero.  */
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = 0;
	    }
	  else
	    {
	      long tmp[max_bitsize / 32];
	      int bitsize = GET_MODE_BITSIZE (GET_MODE (el));

	      gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
	      gcc_assert (bitsize <= elem_bitsize);
	      gcc_assert (bitsize % value_bit == 0);

	      real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
			      GET_MODE (el));

	      /* real_to_target produces its result in words affected by
		 FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
		 and use WORDS_BIG_ENDIAN instead; see the documentation
	         of SUBREG in rtl.texi.  */
	      for (i = 0; i < bitsize; i += value_bit)
		{
		  int ibase;
		  if (WORDS_BIG_ENDIAN)
		    ibase = bitsize - 1 - i;
		  else
		    ibase = i;
		  *vp++ = tmp[ibase / 32] >> i % 32;
		}

	      /* It shouldn't matter what's done here, so fill it with
		 zero.  */
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = 0;
	    }
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  /* Now, pick the right byte to start with.  */
  /* Renumber BYTE so that the least-significant byte is byte 0.  A special
     case is paradoxical SUBREGs, which shouldn't be adjusted since they
     will already have offset 0.  */
  if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
    {
      unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
			- byte);
      unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
      unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
      byte = (subword_byte % UNITS_PER_WORD
	      + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
    }

  /* BYTE should still be inside OP.  (Note that BYTE is unsigned,
     so if it's become negative it will instead be very large.)  */
  gcc_assert (byte < GET_MODE_SIZE (innermode));

  /* Convert from bytes to chunks of size value_bit.  */
  value_start = byte * (BITS_PER_UNIT / value_bit);

  /* Re-pack the value.  */

  if (VECTOR_MODE_P (outermode))
    {
      num_elem = GET_MODE_NUNITS (outermode);
      result_v = rtvec_alloc (num_elem);
      elems = &RTVEC_ELT (result_v, 0);
      outer_submode = GET_MODE_INNER (outermode);
    }
  else
    {
      num_elem = 1;
      elems = &result_s;
      outer_submode = outermode;
    }

  outer_class = GET_MODE_CLASS (outer_submode);
  elem_bitsize = GET_MODE_BITSIZE (outer_submode);

  gcc_assert (elem_bitsize % value_bit == 0);
  gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);

  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char *vp;

      /* Vectors are stored in target memory order.  (This is probably
	 a mistake.)  */
      {
	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
			  / BITS_PER_UNIT);
	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
	unsigned bytele = (subword_byte % UNITS_PER_WORD
			 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
	vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
      }

      switch (outer_class)
	{
	case MODE_INT:
	case MODE_PARTIAL_INT:
	  {
	    unsigned HOST_WIDE_INT hi = 0, lo = 0;

	    /* Assemble the low and high host words from the unpacked
	       chunks.  */
	    for (i = 0;
		 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
		 i += value_bit)
	      lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
	    for (; i < elem_bitsize; i += value_bit)
	      hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
		     << (i - HOST_BITS_PER_WIDE_INT));

	    /* immed_double_const doesn't call trunc_int_for_mode.  I don't
	       know why.  */
	    if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
	      elems[elem] = gen_int_mode (lo, outer_submode);
	    else if (elem_bitsize <= 2 * HOST_BITS_PER_WIDE_INT)
	      elems[elem] = immed_double_const (lo, hi, outer_submode);
	    else
	      return NULL_RTX;
	  }
	  break;

	case MODE_FLOAT:
	case MODE_DECIMAL_FLOAT:
	  {
	    REAL_VALUE_TYPE r;
	    long tmp[max_bitsize / 32];

	    /* real_from_target wants its input in words affected by
	       FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
	       and use WORDS_BIG_ENDIAN instead; see the documentation
	       of SUBREG in rtl.texi.  */
	    for (i = 0; i < max_bitsize / 32; i++)
	      tmp[i] = 0;
	    for (i = 0; i < elem_bitsize; i += value_bit)
	      {
		int ibase;
		if (WORDS_BIG_ENDIAN)
		  ibase = elem_bitsize - 1 - i;
		else
		  ibase = i;
		tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
	      }

	    real_from_target (&r, tmp, outer_submode);
	    elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
	  }
	  break;

	default:
	  gcc_unreachable ();
	}
    }
  if (VECTOR_MODE_P (outermode))
    return gen_rtx_CONST_VECTOR (outermode, result_v);
  else
    return result_s;
}
4475
4476/* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
4477 Return 0 if no simplifications are possible. */
4478rtx
4479simplify_subreg (enum machine_mode outermode, rtx op,
4480 enum machine_mode innermode, unsigned int byte)
4481{
4482 /* Little bit of sanity checking. */
4483 gcc_assert (innermode != VOIDmode);
4484 gcc_assert (outermode != VOIDmode);
4485 gcc_assert (innermode != BLKmode);
4486 gcc_assert (outermode != BLKmode);
4487
4488 gcc_assert (GET_MODE (op) == innermode
4489 || GET_MODE (op) == VOIDmode);
4490
4491 gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
4492 gcc_assert (byte < GET_MODE_SIZE (innermode));
4493
4494 if (outermode == innermode && !byte)
4495 return op;
4496
4497 if (GET_CODE (op) == CONST_INT
4498 || GET_CODE (op) == CONST_DOUBLE
4499 || GET_CODE (op) == CONST_VECTOR)
4500 return simplify_immed_subreg (outermode, op, innermode, byte);
4501
4502 /* Changing mode twice with SUBREG => just change it once,
4503 or not at all if changing back op starting mode. */
4504 if (GET_CODE (op) == SUBREG)
4505 {
4506 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
4507 int final_offset = byte + SUBREG_BYTE (op);
4508 rtx newx;
4509
4510 if (outermode == innermostmode
4511 && byte == 0 && SUBREG_BYTE (op) == 0)
4512 return SUBREG_REG (op);
4513
4514 /* The SUBREG_BYTE represents offset, as if the value were stored
4515 in memory. Irritating exception is paradoxical subreg, where
4516 we define SUBREG_BYTE to be 0. On big endian machines, this
4517 value should be negative. For a moment, undo this exception. */
4518 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
4519 {
4520 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
4521 if (WORDS_BIG_ENDIAN)
4522 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4523 if (BYTES_BIG_ENDIAN)
4524 final_offset += difference % UNITS_PER_WORD;
4525 }
4526 if (SUBREG_BYTE (op) == 0
4527 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
4528 {
4529 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
4530 if (WORDS_BIG_ENDIAN)
4531 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4532 if (BYTES_BIG_ENDIAN)
4533 final_offset += difference % UNITS_PER_WORD;
4534 }
4535
4536 /* See whether resulting subreg will be paradoxical. */
4537 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
4538 {
4539 /* In nonparadoxical subregs we can't handle negative offsets. */
4540 if (final_offset < 0)
4541 return NULL_RTX;
4542 /* Bail out in case resulting subreg would be incorrect. */
4543 if (final_offset % GET_MODE_SIZE (outermode)
4544 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
4545 return NULL_RTX;
4546 }
4547 else
4548 {
4549 int offset = 0;
4550 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
4551
4552 /* In paradoxical subreg, see if we are still looking on lower part.
4553 If so, our SUBREG_BYTE will be 0. */
4554 if (WORDS_BIG_ENDIAN)
4555 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4556 if (BYTES_BIG_ENDIAN)
4557 offset += difference % UNITS_PER_WORD;
4558 if (offset == final_offset)
4559 final_offset = 0;
4560 else
4561 return NULL_RTX;
4562 }
4563
4564 /* Recurse for further possible simplifications. */
4565 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
4566 final_offset);
4567 if (newx)
4568 return newx;
4569 if (validate_subreg (outermode, innermostmode,
4570 SUBREG_REG (op), final_offset))
4571 return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
4572 return NULL_RTX;
4573 }
4574
4575 /* Merge implicit and explicit truncations. */
4576
4577 if (GET_CODE (op) == TRUNCATE
4578 && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (innermode)
4579 && subreg_lowpart_offset (outermode, innermode) == byte)
4580 return simplify_gen_unary (TRUNCATE, outermode, XEXP (op, 0),
4581 GET_MODE (XEXP (op, 0)));
4582
4583 /* SUBREG of a hard register => just change the register number
4584 and/or mode. If the hard register is not valid in that mode,
4585 suppress this simplification. If the hard register is the stack,
4586 frame, or argument pointer, leave this as a SUBREG. */
4587
4588 if (REG_P (op)
4589 && REGNO (op) < FIRST_PSEUDO_REGISTER
4590#ifdef CANNOT_CHANGE_MODE_CLASS
4591 && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
4592 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
4593 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
4594#endif
4595 && ((reload_completed && !frame_pointer_needed)
4596 || (REGNO (op) != FRAME_POINTER_REGNUM
4597#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
4598 && REGNO (op) != HARD_FRAME_POINTER_REGNUM
4599#endif
4600 ))
4601#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
4602 && REGNO (op) != ARG_POINTER_REGNUM
4603#endif
4604 && REGNO (op) != STACK_POINTER_REGNUM
4605 && subreg_offset_representable_p (REGNO (op), innermode,
4606 byte, outermode))
4607 {
4608 unsigned int regno = REGNO (op);
4609 unsigned int final_regno
4610 = regno + subreg_regno_offset (regno, innermode, byte, outermode);
4611
4612 /* ??? We do allow it if the current REG is not valid for
4613 its mode. This is a kludge to work around how float/complex
4614 arguments are passed on 32-bit SPARC and should be fixed. */
4615 if (HARD_REGNO_MODE_OK (final_regno, outermode)
4616 || ! HARD_REGNO_MODE_OK (regno, innermode))
4617 {
4618 rtx x;
4619 int final_offset = byte;
4620
4621 /* Adjust offset for paradoxical subregs. */
4622 if (byte == 0
4623 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
4624 {
4625 int difference = (GET_MODE_SIZE (innermode)
4626 - GET_MODE_SIZE (outermode));
4627 if (WORDS_BIG_ENDIAN)
4628 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4629 if (BYTES_BIG_ENDIAN)
4630 final_offset += difference % UNITS_PER_WORD;
4631 }
4632
4633 x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
4634
4635 /* Propagate original regno. We don't have any way to specify
4636 the offset inside original regno, so do so only for lowpart.
	     The information is used only by alias analysis that cannot
	     grok a partial register anyway.  */
4639
4640 if (subreg_lowpart_offset (outermode, innermode) == byte)
4641 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
4642 return x;
4643 }
4644 }
4645
4646 /* If we have a SUBREG of a register that we are replacing and we are
4647 replacing it with a MEM, make a new MEM and try replacing the
4648 SUBREG with it. Don't do this if the MEM has a mode-dependent address
4649 or if we would be widening it. */
4650
4651 if (MEM_P (op)
4652 && ! mode_dependent_address_p (XEXP (op, 0))
4653 /* Allow splitting of volatile memory references in case we don't
4654 have instruction to move the whole thing. */
4655 && (! MEM_VOLATILE_P (op)
4656 || ! have_insn_for (SET, innermode))
4657 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
4658 return adjust_address_nv (op, outermode, byte);
4659
4660 /* Handle complex values represented as CONCAT
4661 of real and imaginary part. */
4662 if (GET_CODE (op) == CONCAT)
4663 {
4664 unsigned int inner_size, final_offset;
4665 rtx part, res;
4666
4667 inner_size = GET_MODE_UNIT_SIZE (innermode);
4668 part = byte < inner_size ? XEXP (op, 0) : XEXP (op, 1);
4669 final_offset = byte % inner_size;
4670 if (final_offset + GET_MODE_SIZE (outermode) > inner_size)
4671 return NULL_RTX;
4672
4673 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
4674 if (res)
4675 return res;
4676 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
4677 return gen_rtx_SUBREG (outermode, part, final_offset);
4678 return NULL_RTX;
4679 }
4680
4681 /* Optimize SUBREG truncations of zero and sign extended values. */
4682 if ((GET_CODE (op) == ZERO_EXTEND
4683 || GET_CODE (op) == SIGN_EXTEND)
4684 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
4685 {
4686 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
4687
4688 /* If we're requesting the lowpart of a zero or sign extension,
4689 there are three possibilities. If the outermode is the same
4690 as the origmode, we can omit both the extension and the subreg.
4691 If the outermode is not larger than the origmode, we can apply
4692 the truncation without the extension. Finally, if the outermode
4693 is larger than the origmode, but both are integer modes, we
4694 can just extend to the appropriate mode. */
4695 if (bitpos == 0)
4696 {
4697 enum machine_mode origmode = GET_MODE (XEXP (op, 0));
4698 if (outermode == origmode)
4699 return XEXP (op, 0);
4700 if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
4701 return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
4702 subreg_lowpart_offset (outermode,
4703 origmode));
4704 if (SCALAR_INT_MODE_P (outermode))
4705 return simplify_gen_unary (GET_CODE (op), outermode,
4706 XEXP (op, 0), origmode);
4707 }
4708
4709 /* A SUBREG resulting from a zero extension may fold to zero if
	 it extracts higher bits than the ZERO_EXTEND's source bits.  */
4711 if (GET_CODE (op) == ZERO_EXTEND
4712 && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
4713 return CONST0_RTX (outermode);
4714 }
4715
  /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0)
4717 to (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
4718 the outer subreg is effectively a truncation to the original mode. */
4719 if ((GET_CODE (op) == LSHIFTRT
4720 || GET_CODE (op) == ASHIFTRT)
4721 && SCALAR_INT_MODE_P (outermode)
4722 /* Ensure that OUTERMODE is at least twice as wide as the INNERMODE
4723 to avoid the possibility that an outer LSHIFTRT shifts by more
4724 than the sign extension's sign_bit_copies and introduces zeros
4725 into the high bits of the result. */
4726 && (2 * GET_MODE_BITSIZE (outermode)) <= GET_MODE_BITSIZE (innermode)
4727 && GET_CODE (XEXP (op, 1)) == CONST_INT
4728 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
4729 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
4730 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
4731 && subreg_lsb_1 (outermode, innermode, byte) == 0)
4732 return simplify_gen_binary (ASHIFTRT, outermode,
4733 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
4734
  /* Likewise (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0)
4736 to (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
4737 the outer subreg is effectively a truncation to the original mode. */
4738 if ((GET_CODE (op) == LSHIFTRT
4739 || GET_CODE (op) == ASHIFTRT)
4740 && SCALAR_INT_MODE_P (outermode)
4741 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
4742 && GET_CODE (XEXP (op, 1)) == CONST_INT
4743 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
4744 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
4745 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
4746 && subreg_lsb_1 (outermode, innermode, byte) == 0)
4747 return simplify_gen_binary (LSHIFTRT, outermode,
4748 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
4749
  /* Likewise (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0)
4751 to (ashift:QI (x:QI) C), where C is a suitable small constant and
4752 the outer subreg is effectively a truncation to the original mode. */
4753 if (GET_CODE (op) == ASHIFT
4754 && SCALAR_INT_MODE_P (outermode)
4755 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
4756 && GET_CODE (XEXP (op, 1)) == CONST_INT
4757 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
4758 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
4759 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
4760 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
4761 && subreg_lsb_1 (outermode, innermode, byte) == 0)
4762 return simplify_gen_binary (ASHIFT, outermode,
4763 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
4764
4765 return NULL_RTX;
4766}
4767
4768/* Make a SUBREG operation or equivalent if it folds. */
4769
4770rtx
4771simplify_gen_subreg (enum machine_mode outermode, rtx op,
4772 enum machine_mode innermode, unsigned int byte)
4773{
4774 rtx newx;
4775
4776 newx = simplify_subreg (outermode, op, innermode, byte);
4777 if (newx)
4778 return newx;
4779
4780 if (GET_CODE (op) == SUBREG
4781 || GET_CODE (op) == CONCAT
4782 || GET_MODE (op) == VOIDmode)
4783 return NULL_RTX;
4784
4785 if (validate_subreg (outermode, innermode, op, byte))
4786 return gen_rtx_SUBREG (outermode, op, byte);
4787
4788 return NULL_RTX;
4789}
4790
4791/* Simplify X, an rtx expression.
4792
4793 Return the simplified expression or NULL if no simplifications
4794 were possible.
4795
4796 This is the preferred entry point into the simplification routines;
4797 however, we still allow passes to call the more specific routines.
4798
4799 Right now GCC has three (yes, three) major bodies of RTL simplification
4800 code that need to be unified.
4801
4802 1. fold_rtx in cse.c. This code uses various CSE specific
4803 information to aid in RTL simplification.
4804
4805 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
4806 it uses combine specific information to aid in RTL
4807 simplification.
4808
4809 3. The routines in this file.
4810
4811
4812 Long term we want to only have one body of simplification code; to
4813 get to that state I recommend the following steps:
4814
   1. Pore over fold_rtx & simplify_rtx and move any simplifications
4816 which are not pass dependent state into these routines.
4817
4818 2. As code is moved by #1, change fold_rtx & simplify_rtx to
4819 use this routine whenever possible.
4820
4821 3. Allow for pass dependent state to be provided to these
4822 routines and add simplifications based on the pass dependent
4823 state. Remove code from cse.c & combine.c that becomes
4824 redundant/dead.
4825
4826 It will take time, but ultimately the compiler will be easier to
4827 maintain and improve. It's totally silly that when we add a
4828 simplification that it needs to be added to 4 places (3 for RTL
   simplification and 1 for tree simplification).  */
4830
4831rtx
4832simplify_rtx (rtx x)
4833{
4834 enum rtx_code code = GET_CODE (x);
4835 enum machine_mode mode = GET_MODE (x);
4836
4837 switch (GET_RTX_CLASS (code))
4838 {
4839 case RTX_UNARY:
4840 return simplify_unary_operation (code, mode,
4841 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
4842 case RTX_COMM_ARITH:
4843 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
4844 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
4845
4846 /* Fall through.... */
4847
4848 case RTX_BIN_ARITH:
4849 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
4850
4851 case RTX_TERNARY:
4852 case RTX_BITFIELD_OPS:
4853 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
4854 XEXP (x, 0), XEXP (x, 1),
4855 XEXP (x, 2));
4856
4857 case RTX_COMPARE:
4858 case RTX_COMM_COMPARE:
4859 return simplify_relational_operation (code, mode,
4860 ((GET_MODE (XEXP (x, 0))
4861 != VOIDmode)
4862 ? GET_MODE (XEXP (x, 0))
4863 : GET_MODE (XEXP (x, 1))),
4864 XEXP (x, 0),
4865 XEXP (x, 1));
4866
4867 case RTX_EXTRA:
4868 if (code == SUBREG)
4869 return simplify_gen_subreg (mode, SUBREG_REG (x),
4870 GET_MODE (SUBREG_REG (x)),
4871 SUBREG_BYTE (x));
4872 break;
4873
4874 case RTX_OBJ:
4875 if (code == LO_SUM)
4876 {
4877 /* Convert (lo_sum (high FOO) FOO) to FOO. */
4878 if (GET_CODE (XEXP (x, 0)) == HIGH
4879 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
4880 return XEXP (x, 1);
4881 }
4882 break;
4883
4884 default:
4885 break;
4886 }
4887 return NULL;
4888}
4889
594 {
595 enum machine_mode inner = GET_MODE (XEXP (op, 0));
596 int isize = GET_MODE_BITSIZE (inner);
597 if (STORE_FLAG_VALUE == 1)
598 {
599 temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
600 GEN_INT (isize - 1));
601 if (mode == inner)
602 return temp;
603 if (GET_MODE_BITSIZE (mode) > isize)
604 return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
605 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
606 }
607 else if (STORE_FLAG_VALUE == -1)
608 {
609 temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
610 GEN_INT (isize - 1));
611 if (mode == inner)
612 return temp;
613 if (GET_MODE_BITSIZE (mode) > isize)
614 return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
615 return simplify_gen_unary (TRUNCATE, mode, temp, inner);
616 }
617 }
618 break;
619
620 case TRUNCATE:
621 /* We can't handle truncation to a partial integer mode here
622 because we don't know the real bitsize of the partial
623 integer mode. */
624 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
625 break;
626
627 /* (truncate:SI ({sign,zero}_extend:DI foo:SI)) == foo:SI. */
628 if ((GET_CODE (op) == SIGN_EXTEND
629 || GET_CODE (op) == ZERO_EXTEND)
630 && GET_MODE (XEXP (op, 0)) == mode)
631 return XEXP (op, 0);
632
633 /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
634 (OP:SI foo:SI) if OP is NEG or ABS. */
635 if ((GET_CODE (op) == ABS
636 || GET_CODE (op) == NEG)
637 && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
638 || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
639 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
640 return simplify_gen_unary (GET_CODE (op), mode,
641 XEXP (XEXP (op, 0), 0), mode);
642
643 /* (truncate:A (subreg:B (truncate:C X) 0)) is
644 (truncate:A X). */
645 if (GET_CODE (op) == SUBREG
646 && GET_CODE (SUBREG_REG (op)) == TRUNCATE
647 && subreg_lowpart_p (op))
648 return simplify_gen_unary (TRUNCATE, mode, XEXP (SUBREG_REG (op), 0),
649 GET_MODE (XEXP (SUBREG_REG (op), 0)));
650
651 /* If we know that the value is already truncated, we can
652 replace the TRUNCATE with a SUBREG. Note that this is also
653 valid if TRULY_NOOP_TRUNCATION is false for the corresponding
654 modes we just have to apply a different definition for
655 truncation. But don't do this for an (LSHIFTRT (MULT ...))
656 since this will cause problems with the umulXi3_highpart
657 patterns. */
658 if ((TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
659 GET_MODE_BITSIZE (GET_MODE (op)))
660 ? (num_sign_bit_copies (op, GET_MODE (op))
661 > (unsigned int) (GET_MODE_BITSIZE (GET_MODE (op))
662 - GET_MODE_BITSIZE (mode)))
663 : truncated_to_mode (mode, op))
664 && ! (GET_CODE (op) == LSHIFTRT
665 && GET_CODE (XEXP (op, 0)) == MULT))
666 return rtl_hooks.gen_lowpart_no_emit (mode, op);
667
668 /* A truncate of a comparison can be replaced with a subreg if
669 STORE_FLAG_VALUE permits. This is like the previous test,
670 but it works even if the comparison is done in a mode larger
671 than HOST_BITS_PER_WIDE_INT. */
672 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
673 && COMPARISON_P (op)
674 && ((HOST_WIDE_INT) STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
675 return rtl_hooks.gen_lowpart_no_emit (mode, op);
676 break;
677
678 case FLOAT_TRUNCATE:
679 if (DECIMAL_FLOAT_MODE_P (mode))
680 break;
681
682 /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
683 if (GET_CODE (op) == FLOAT_EXTEND
684 && GET_MODE (XEXP (op, 0)) == mode)
685 return XEXP (op, 0);
686
687 /* (float_truncate:SF (float_truncate:DF foo:XF))
688 = (float_truncate:SF foo:XF).
689 This may eliminate double rounding, so it is unsafe.
690
691 (float_truncate:SF (float_extend:XF foo:DF))
692 = (float_truncate:SF foo:DF).
693
694 (float_truncate:DF (float_extend:XF foo:SF))
695 = (float_extend:SF foo:DF). */
696 if ((GET_CODE (op) == FLOAT_TRUNCATE
697 && flag_unsafe_math_optimizations)
698 || GET_CODE (op) == FLOAT_EXTEND)
699 return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
700 0)))
701 > GET_MODE_SIZE (mode)
702 ? FLOAT_TRUNCATE : FLOAT_EXTEND,
703 mode,
704 XEXP (op, 0), mode);
705
706 /* (float_truncate (float x)) is (float x) */
707 if (GET_CODE (op) == FLOAT
708 && (flag_unsafe_math_optimizations
709 || ((unsigned)significand_size (GET_MODE (op))
710 >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
711 - num_sign_bit_copies (XEXP (op, 0),
712 GET_MODE (XEXP (op, 0)))))))
713 return simplify_gen_unary (FLOAT, mode,
714 XEXP (op, 0),
715 GET_MODE (XEXP (op, 0)));
716
717 /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
718 (OP:SF foo:SF) if OP is NEG or ABS. */
719 if ((GET_CODE (op) == ABS
720 || GET_CODE (op) == NEG)
721 && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
722 && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
723 return simplify_gen_unary (GET_CODE (op), mode,
724 XEXP (XEXP (op, 0), 0), mode);
725
726 /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
727 is (float_truncate:SF x). */
728 if (GET_CODE (op) == SUBREG
729 && subreg_lowpart_p (op)
730 && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
731 return SUBREG_REG (op);
732 break;
733
734 case FLOAT_EXTEND:
735 if (DECIMAL_FLOAT_MODE_P (mode))
736 break;
737
738 /* (float_extend (float_extend x)) is (float_extend x)
739
740 (float_extend (float x)) is (float x) assuming that double
741 rounding can't happen.
742 */
743 if (GET_CODE (op) == FLOAT_EXTEND
744 || (GET_CODE (op) == FLOAT
745 && ((unsigned)significand_size (GET_MODE (op))
746 >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
747 - num_sign_bit_copies (XEXP (op, 0),
748 GET_MODE (XEXP (op, 0)))))))
749 return simplify_gen_unary (GET_CODE (op), mode,
750 XEXP (op, 0),
751 GET_MODE (XEXP (op, 0)));
752
753 break;
754
755 case ABS:
756 /* (abs (neg <foo>)) -> (abs <foo>) */
757 if (GET_CODE (op) == NEG)
758 return simplify_gen_unary (ABS, mode, XEXP (op, 0),
759 GET_MODE (XEXP (op, 0)));
760
761 /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
762 do nothing. */
763 if (GET_MODE (op) == VOIDmode)
764 break;
765
766 /* If operand is something known to be positive, ignore the ABS. */
767 if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
768 || ((GET_MODE_BITSIZE (GET_MODE (op))
769 <= HOST_BITS_PER_WIDE_INT)
770 && ((nonzero_bits (op, GET_MODE (op))
771 & ((HOST_WIDE_INT) 1
772 << (GET_MODE_BITSIZE (GET_MODE (op)) - 1)))
773 == 0)))
774 return op;
775
776 /* If operand is known to be only -1 or 0, convert ABS to NEG. */
777 if (num_sign_bit_copies (op, mode) == GET_MODE_BITSIZE (mode))
778 return gen_rtx_NEG (mode, op);
779
780 break;
781
782 case FFS:
783 /* (ffs (*_extend <X>)) = (ffs <X>) */
784 if (GET_CODE (op) == SIGN_EXTEND
785 || GET_CODE (op) == ZERO_EXTEND)
786 return simplify_gen_unary (FFS, mode, XEXP (op, 0),
787 GET_MODE (XEXP (op, 0)));
788 break;
789
790 case POPCOUNT:
791 case PARITY:
792 /* (pop* (zero_extend <X>)) = (pop* <X>) */
793 if (GET_CODE (op) == ZERO_EXTEND)
794 return simplify_gen_unary (code, mode, XEXP (op, 0),
795 GET_MODE (XEXP (op, 0)));
796 break;
797
798 case FLOAT:
799 /* (float (sign_extend <X>)) = (float <X>). */
800 if (GET_CODE (op) == SIGN_EXTEND)
801 return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
802 GET_MODE (XEXP (op, 0)));
803 break;
804
805 case SIGN_EXTEND:
806 /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
807 becomes just the MINUS if its mode is MODE. This allows
808 folding switch statements on machines using casesi (such as
809 the VAX). */
810 if (GET_CODE (op) == TRUNCATE
811 && GET_MODE (XEXP (op, 0)) == mode
812 && GET_CODE (XEXP (op, 0)) == MINUS
813 && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
814 && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
815 return XEXP (op, 0);
816
817 /* Check for a sign extension of a subreg of a promoted
818 variable, where the promotion is sign-extended, and the
819 target mode is the same as the variable's promotion. */
820 if (GET_CODE (op) == SUBREG
821 && SUBREG_PROMOTED_VAR_P (op)
822 && ! SUBREG_PROMOTED_UNSIGNED_P (op)
823 && GET_MODE (XEXP (op, 0)) == mode)
824 return XEXP (op, 0);
825
826#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
827 if (! POINTERS_EXTEND_UNSIGNED
828 && mode == Pmode && GET_MODE (op) == ptr_mode
829 && (CONSTANT_P (op)
830 || (GET_CODE (op) == SUBREG
831 && REG_P (SUBREG_REG (op))
832 && REG_POINTER (SUBREG_REG (op))
833 && GET_MODE (SUBREG_REG (op)) == Pmode)))
834 return convert_memory_address (Pmode, op);
835#endif
836 break;
837
838 case ZERO_EXTEND:
839 /* Check for a zero extension of a subreg of a promoted
840 variable, where the promotion is zero-extended, and the
841 target mode is the same as the variable's promotion. */
842 if (GET_CODE (op) == SUBREG
843 && SUBREG_PROMOTED_VAR_P (op)
844 && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
845 && GET_MODE (XEXP (op, 0)) == mode)
846 return XEXP (op, 0);
847
848#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
849 if (POINTERS_EXTEND_UNSIGNED > 0
850 && mode == Pmode && GET_MODE (op) == ptr_mode
851 && (CONSTANT_P (op)
852 || (GET_CODE (op) == SUBREG
853 && REG_P (SUBREG_REG (op))
854 && REG_POINTER (SUBREG_REG (op))
855 && GET_MODE (SUBREG_REG (op)) == Pmode)))
856 return convert_memory_address (Pmode, op);
857#endif
858 break;
859
860 default:
861 break;
862 }
863
864 return 0;
865}
866
867/* Try to compute the value of a unary operation CODE whose output mode is to
868 be MODE with input operand OP whose mode was originally OP_MODE.
869 Return zero if the value cannot be computed. */
rtx
simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
				rtx op, enum machine_mode op_mode)
{
  /* Bit width of the result mode; several branches below only handle
     results that fit in one or two host words.  */
  unsigned int width = GET_MODE_BITSIZE (mode);

  /* (vec_duplicate <const>) folds directly to a CONST_VECTOR that
     repeats the scalar (or tiles a smaller constant vector).  */
  if (code == VEC_DUPLICATE)
    {
      gcc_assert (VECTOR_MODE_P (mode));
      if (GET_MODE (op) != VOIDmode)
      {
	if (!VECTOR_MODE_P (GET_MODE (op)))
	  gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
	else
	  gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
						(GET_MODE (op)));
      }
      if (GET_CODE (op) == CONST_INT || GET_CODE (op) == CONST_DOUBLE
	  || GET_CODE (op) == CONST_VECTOR)
	{
          int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
          unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	  rtvec v = rtvec_alloc (n_elts);
	  unsigned int i;

	  if (GET_CODE (op) != CONST_VECTOR)
	    /* Scalar constant: every element is a copy of OP.  */
	    for (i = 0; i < n_elts; i++)
	      RTVEC_ELT (v, i) = op;
	  else
	    {
	      /* Smaller constant vector: tile its elements cyclically.  */
	      enum machine_mode inmode = GET_MODE (op);
              int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
              unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);

	      gcc_assert (in_n_elts < n_elts);
	      gcc_assert ((n_elts % in_n_elts) == 0);
	      for (i = 0; i < n_elts; i++)
		RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
	    }
	  return gen_rtx_CONST_VECTOR (mode, v);
	}
    }

  /* Other unary ops on a CONST_VECTOR fold element-wise; if any
     element fails to fold, the whole operation fails.  */
  if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode opmode = GET_MODE (op);
      int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
      unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
	{
	  rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
					    CONST_VECTOR_ELT (op, i),
					    GET_MODE_INNER (opmode));
	  if (!x)
	    return 0;
	  RTVEC_ELT (v, i) = x;
	}
      return gen_rtx_CONST_VECTOR (mode, v);
    }

  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  /* (float <int const>): convert a signed double-word integer to the
     real result mode.  */
  if (code == FLOAT && GET_MODE (op) == VOIDmode
      && (GET_CODE (op) == CONST_DOUBLE || GET_CODE (op) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (op) == CONST_INT)
	lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
	lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);

      REAL_VALUE_FROM_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  /* (unsigned_float <int const>): as above but the integer is
     interpreted as unsigned, after masking to OP_MODE when known.  */
  else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
	   && (GET_CODE (op) == CONST_DOUBLE
	       || GET_CODE (op) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (op) == CONST_INT)
	lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
	lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);

      if (op_mode == VOIDmode)
	{
	  /* We don't know how to interpret negative-looking numbers in
	     this case, so don't try to fold those.  */
	  if (hv < 0)
	    return 0;
	}
      else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
	;
      else
	hv = 0, lv &= GET_MODE_MASK (op_mode);

      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }

  /* Single-word integer constant folding: the result fits in one
     HOST_WIDE_INT, so operate directly on INTVAL.  */
  if (GET_CODE (op) == CONST_INT
      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT arg0 = INTVAL (op);
      HOST_WIDE_INT val;

      switch (code)
	{
	case NOT:
	  val = ~ arg0;
	  break;

	case NEG:
	  val = - arg0;
	  break;

	case ABS:
	  val = (arg0 >= 0 ? arg0 : - arg0);
	  break;

	case FFS:
	  /* Don't use ffs here.  Instead, get low order bit and then its
	     number.  If arg0 is zero, this will return 0, as desired.  */
	  arg0 &= GET_MODE_MASK (mode);
	  val = exact_log2 (arg0 & (- arg0)) + 1;
	  break;

	case CLZ:
	  arg0 &= GET_MODE_MASK (mode);
	  /* CLZ_DEFINED_VALUE_AT_ZERO sets VAL as a side effect when it
	     returns nonzero; otherwise count from the top bit.  */
	  if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
	    ;
	  else
	    val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
	  break;

	case CTZ:
	  arg0 &= GET_MODE_MASK (mode);
	  if (arg0 == 0)
	    {
	      /* Even if the value at zero is undefined, we have to come
		 up with some replacement.  Seems good enough.  */
	      if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
		val = GET_MODE_BITSIZE (mode);
	    }
	  else
	    val = exact_log2 (arg0 & -arg0);
	  break;

	case POPCOUNT:
	  /* Kernighan's bit-clearing loop: each step drops the lowest
	     set bit.  */
	  arg0 &= GET_MODE_MASK (mode);
	  val = 0;
	  while (arg0)
	    val++, arg0 &= arg0 - 1;
	  break;

	case PARITY:
	  arg0 &= GET_MODE_MASK (mode);
	  val = 0;
	  while (arg0)
	    val++, arg0 &= arg0 - 1;
	  val &= 1;
	  break;

	case TRUNCATE:
	  /* gen_int_mode below performs the actual mode masking.  */
	  val = arg0;
	  break;

	case ZERO_EXTEND:
	  /* When zero-extending a CONST_INT, we need to know its
	     original mode.  */
	  gcc_assert (op_mode != VOIDmode);
	  if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
	    {
	      /* If we were really extending the mode,
		 we would have to distinguish between zero-extension
		 and sign-extension.  */
	      gcc_assert (width == GET_MODE_BITSIZE (op_mode));
	      val = arg0;
	    }
	  else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
	    val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
	  else
	    return 0;
	  break;

	case SIGN_EXTEND:
	  if (op_mode == VOIDmode)
	    op_mode = mode;
	  if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
	    {
	      /* If we were really extending the mode,
		 we would have to distinguish between zero-extension
		 and sign-extension.  */
	      gcc_assert (width == GET_MODE_BITSIZE (op_mode));
	      val = arg0;
	    }
	  else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
	    {
	      /* Mask to OP_MODE, then subtract 2^bitsize if the sign
		 bit of OP_MODE is set, yielding the sign-extended value.  */
	      val
		= arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
	      if (val
		  & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
		val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
	    }
	  else
	    return 0;
	  break;

	case SQRT:
	case FLOAT_EXTEND:
	case FLOAT_TRUNCATE:
	case SS_TRUNCATE:
	case US_TRUNCATE:
	case SS_NEG:
	  /* Not foldable on an integer constant.  */
	  return 0;

	default:
	  gcc_unreachable ();
	}

      return gen_int_mode (val, mode);
    }

  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
     for a DImode operation on a CONST_INT.  */
  else if (GET_MODE (op) == VOIDmode
	   && width <= HOST_BITS_PER_WIDE_INT * 2
	   && (GET_CODE (op) == CONST_DOUBLE
	       || GET_CODE (op) == CONST_INT))
    {
      /* (l1, h1) is the input as a low/high word pair; (lv, hv) the
	 result pair handed to immed_double_const.  */
      unsigned HOST_WIDE_INT l1, lv;
      HOST_WIDE_INT h1, hv;

      if (GET_CODE (op) == CONST_DOUBLE)
	l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
      else
	l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);

      switch (code)
	{
	case NOT:
	  lv = ~ l1;
	  hv = ~ h1;
	  break;

	case NEG:
	  neg_double (l1, h1, &lv, &hv);
	  break;

	case ABS:
	  if (h1 < 0)
	    neg_double (l1, h1, &lv, &hv);
	  else
	    lv = l1, hv = h1;
	  break;

	case FFS:
	  /* Find the first set bit across the low word, then the high
	     word; zero input yields zero.  */
	  hv = 0;
	  if (l1 == 0)
	    {
	      if (h1 == 0)
		lv = 0;
	      else
		lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
	    }
	  else
	    lv = exact_log2 (l1 & -l1) + 1;
	  break;

	case CLZ:
	  hv = 0;
	  if (h1 != 0)
	    lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
	      - HOST_BITS_PER_WIDE_INT;
	  else if (l1 != 0)
	    lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
	  else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
	    lv = GET_MODE_BITSIZE (mode);
	  break;

	case CTZ:
	  hv = 0;
	  if (l1 != 0)
	    lv = exact_log2 (l1 & -l1);
	  else if (h1 != 0)
	    lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
	  else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
	    lv = GET_MODE_BITSIZE (mode);
	  break;

	case POPCOUNT:
	  /* Count set bits of both words with Kernighan's loop.  */
	  hv = 0;
	  lv = 0;
	  while (l1)
	    lv++, l1 &= l1 - 1;
	  while (h1)
	    lv++, h1 &= h1 - 1;
	  break;

	case PARITY:
	  hv = 0;
	  lv = 0;
	  while (l1)
	    lv++, l1 &= l1 - 1;
	  while (h1)
	    lv++, h1 &= h1 - 1;
	  lv &= 1;
	  break;

	case TRUNCATE:
	  /* This is just a change-of-mode, so do nothing.  */
	  lv = l1, hv = h1;
	  break;

	case ZERO_EXTEND:
	  gcc_assert (op_mode != VOIDmode);

	  if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
	    return 0;

	  hv = 0;
	  lv = l1 & GET_MODE_MASK (op_mode);
	  break;

	case SIGN_EXTEND:
	  if (op_mode == VOIDmode
	      || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
	    return 0;
	  else
	    {
	      lv = l1 & GET_MODE_MASK (op_mode);
	      if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
		  && (lv & ((HOST_WIDE_INT) 1
			    << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
		lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);

	      hv = HWI_SIGN_EXTEND (lv);
	    }
	  break;

	case SQRT:
	  return 0;

	default:
	  return 0;
	}

      return immed_double_const (lv, hv, mode);
    }

  /* Floating-point constant with a floating-point result: fold with
     the REAL_VALUE machinery.  */
  else if (GET_CODE (op) == CONST_DOUBLE
	   && SCALAR_FLOAT_MODE_P (mode))
    {
      REAL_VALUE_TYPE d, t;
      REAL_VALUE_FROM_CONST_DOUBLE (d, op);

      switch (code)
	{
	case SQRT:
	  /* Folding a signaling NaN would lose the trap.  */
	  if (HONOR_SNANS (mode) && real_isnan (&d))
	    return 0;
	  real_sqrt (&t, mode, &d);
	  d = t;
	  break;
	case ABS:
	  d = REAL_VALUE_ABS (d);
	  break;
	case NEG:
	  d = REAL_VALUE_NEGATE (d);
	  break;
	case FLOAT_TRUNCATE:
	  d = real_value_truncate (mode, d);
	  break;
	case FLOAT_EXTEND:
	  /* All this does is change the mode.  */
	  break;
	case FIX:
	  real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
	  break;
	case NOT:
	  {
	    /* Bitwise NOT of a float: complement its target
	       representation and read it back.  */
	    long tmp[4];
	    int i;

	    real_to_target (tmp, &d, GET_MODE (op));
	    for (i = 0; i < 4; i++)
	      tmp[i] = ~tmp[i];
	    real_from_target (&d, tmp, mode);
	    break;
	  }
	default:
	  gcc_unreachable ();
	}
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }

  /* Floating-point constant converted to an integer mode
     (FIX / UNSIGNED_FIX), with saturation at the mode's bounds.  */
  else if (GET_CODE (op) == CONST_DOUBLE
	   && SCALAR_FLOAT_MODE_P (GET_MODE (op))
	   && GET_MODE_CLASS (mode) == MODE_INT
	   && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
    {
      /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
	 operators are intentionally left unspecified (to ease implementation
	 by target backends), for consistency, this routine implements the
	 same semantics for constant folding as used by the middle-end.  */

      /* This was formerly used only for non-IEEE float.
	 eggert@twinsun.com says it is safe for IEEE also.  */
      HOST_WIDE_INT xh, xl, th, tl;
      REAL_VALUE_TYPE x, t;
      REAL_VALUE_FROM_CONST_DOUBLE (x, op);
      switch (code)
	{
	case FIX:
	  if (REAL_VALUE_ISNAN (x))
	    return const0_rtx;

	  /* Test against the signed upper bound.  */
	  if (width > HOST_BITS_PER_WIDE_INT)
	    {
	      th = ((unsigned HOST_WIDE_INT) 1
		    << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
	      tl = -1;
	    }
	  else
	    {
	      th = 0;
	      tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
	    }
	  real_from_integer (&t, VOIDmode, tl, th, 0);
	  if (REAL_VALUES_LESS (t, x))
	    {
	      xh = th;
	      xl = tl;
	      break;
	    }

	  /* Test against the signed lower bound.  */
	  if (width > HOST_BITS_PER_WIDE_INT)
	    {
	      th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
	      tl = 0;
	    }
	  else
	    {
	      th = -1;
	      tl = (HOST_WIDE_INT) -1 << (width - 1);
	    }
	  real_from_integer (&t, VOIDmode, tl, th, 0);
	  if (REAL_VALUES_LESS (x, t))
	    {
	      xh = th;
	      xl = tl;
	      break;
	    }
	  REAL_VALUE_TO_INT (&xl, &xh, x);
	  break;

	case UNSIGNED_FIX:
	  if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
	    return const0_rtx;

	  /* Test against the unsigned upper bound.  */
	  if (width == 2*HOST_BITS_PER_WIDE_INT)
	    {
	      th = -1;
	      tl = -1;
	    }
	  else if (width >= HOST_BITS_PER_WIDE_INT)
	    {
	      th = ((unsigned HOST_WIDE_INT) 1
		    << (width - HOST_BITS_PER_WIDE_INT)) - 1;
	      tl = -1;
	    }
	  else
	    {
	      th = 0;
	      tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
	    }
	  real_from_integer (&t, VOIDmode, tl, th, 1);
	  if (REAL_VALUES_LESS (t, x))
	    {
	      xh = th;
	      xl = tl;
	      break;
	    }

	  REAL_VALUE_TO_INT (&xl, &xh, x);
	  break;

	default:
	  gcc_unreachable ();
	}
      return immed_double_const (xl, xh, mode);
    }

  return NULL_RTX;
}
1382
1383/* Subroutine of simplify_binary_operation to simplify a commutative,
1384 associative binary operation CODE with result mode MODE, operating
1385 on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
1386 SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
1387 canonicalization is possible. */
1388
1389static rtx
1390simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
1391 rtx op0, rtx op1)
1392{
1393 rtx tem;
1394
1395 /* Linearize the operator to the left. */
1396 if (GET_CODE (op1) == code)
1397 {
1398 /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
1399 if (GET_CODE (op0) == code)
1400 {
1401 tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
1402 return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
1403 }
1404
1405 /* "a op (b op c)" becomes "(b op c) op a". */
1406 if (! swap_commutative_operands_p (op1, op0))
1407 return simplify_gen_binary (code, mode, op1, op0);
1408
1409 tem = op0;
1410 op0 = op1;
1411 op1 = tem;
1412 }
1413
1414 if (GET_CODE (op0) == code)
1415 {
1416 /* Canonicalize "(x op c) op y" as "(x op y) op c". */
1417 if (swap_commutative_operands_p (XEXP (op0, 1), op1))
1418 {
1419 tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
1420 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1421 }
1422
1423 /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
1424 tem = swap_commutative_operands_p (XEXP (op0, 1), op1)
1425 ? simplify_binary_operation (code, mode, op1, XEXP (op0, 1))
1426 : simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
1427 if (tem != 0)
1428 return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
1429
1430 /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
1431 tem = swap_commutative_operands_p (XEXP (op0, 0), op1)
1432 ? simplify_binary_operation (code, mode, op1, XEXP (op0, 0))
1433 : simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
1434 if (tem != 0)
1435 return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
1436 }
1437
1438 return 0;
1439}
1440
1441
1442/* Simplify a binary operation CODE with result mode MODE, operating on OP0
1443 and OP1. Return 0 if no simplification is possible.
1444
1445 Don't use this for relational operations such as EQ or LT.
1446 Use simplify_relational_operation instead. */
1447rtx
1448simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
1449 rtx op0, rtx op1)
1450{
1451 rtx trueop0, trueop1;
1452 rtx tem;
1453
1454 /* Relational operations don't work here. We must know the mode
1455 of the operands in order to do the comparison correctly.
1456 Assuming a full word can give incorrect results.
1457 Consider comparing 128 with -128 in QImode. */
1458 gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
1459 gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
1460
1461 /* Make sure the constant is second. */
1462 if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
1463 && swap_commutative_operands_p (op0, op1))
1464 {
1465 tem = op0, op0 = op1, op1 = tem;
1466 }
1467
1468 trueop0 = avoid_constant_pool_reference (op0);
1469 trueop1 = avoid_constant_pool_reference (op1);
1470
1471 tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
1472 if (tem)
1473 return tem;
1474 return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
1475}
1476
1477/* Subroutine of simplify_binary_operation. Simplify a binary operation
1478 CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
1479 OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
1480 actual constants. */
1481
1482static rtx
1483simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
1484 rtx op0, rtx op1, rtx trueop0, rtx trueop1)
1485{
1486 rtx tem, reversed, opleft, opright;
1487 HOST_WIDE_INT val;
1488 unsigned int width = GET_MODE_BITSIZE (mode);
1489
1490 /* Even if we can't compute a constant result,
1491 there are some cases worth simplifying. */
1492
1493 switch (code)
1494 {
1495 case PLUS:
1496 /* Maybe simplify x + 0 to x. The two expressions are equivalent
1497 when x is NaN, infinite, or finite and nonzero. They aren't
1498 when x is -0 and the rounding mode is not towards -infinity,
1499 since (-0) + 0 is then 0. */
1500 if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
1501 return op0;
1502
1503 /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
1504 transformations are safe even for IEEE. */
1505 if (GET_CODE (op0) == NEG)
1506 return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
1507 else if (GET_CODE (op1) == NEG)
1508 return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
1509
1510 /* (~a) + 1 -> -a */
1511 if (INTEGRAL_MODE_P (mode)
1512 && GET_CODE (op0) == NOT
1513 && trueop1 == const1_rtx)
1514 return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
1515
1516 /* Handle both-operands-constant cases. We can only add
1517 CONST_INTs to constants since the sum of relocatable symbols
1518 can't be handled by most assemblers. Don't add CONST_INT
1519 to CONST_INT since overflow won't be computed properly if wider
1520 than HOST_BITS_PER_WIDE_INT. */
1521
1522 if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
1523 && GET_CODE (op1) == CONST_INT)
1524 return plus_constant (op0, INTVAL (op1));
1525 else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
1526 && GET_CODE (op0) == CONST_INT)
1527 return plus_constant (op1, INTVAL (op0));
1528
1529 /* See if this is something like X * C - X or vice versa or
1530 if the multiplication is written as a shift. If so, we can
1531 distribute and make a new multiply, shift, or maybe just
1532 have X (if C is 2 in the example above). But don't make
1533 something more expensive than we had before. */
1534
1535 if (SCALAR_INT_MODE_P (mode))
1536 {
1537 HOST_WIDE_INT coeff0h = 0, coeff1h = 0;
1538 unsigned HOST_WIDE_INT coeff0l = 1, coeff1l = 1;
1539 rtx lhs = op0, rhs = op1;
1540
1541 if (GET_CODE (lhs) == NEG)
1542 {
1543 coeff0l = -1;
1544 coeff0h = -1;
1545 lhs = XEXP (lhs, 0);
1546 }
1547 else if (GET_CODE (lhs) == MULT
1548 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1549 {
1550 coeff0l = INTVAL (XEXP (lhs, 1));
1551 coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
1552 lhs = XEXP (lhs, 0);
1553 }
1554 else if (GET_CODE (lhs) == ASHIFT
1555 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1556 && INTVAL (XEXP (lhs, 1)) >= 0
1557 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1558 {
1559 coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1560 coeff0h = 0;
1561 lhs = XEXP (lhs, 0);
1562 }
1563
1564 if (GET_CODE (rhs) == NEG)
1565 {
1566 coeff1l = -1;
1567 coeff1h = -1;
1568 rhs = XEXP (rhs, 0);
1569 }
1570 else if (GET_CODE (rhs) == MULT
1571 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1572 {
1573 coeff1l = INTVAL (XEXP (rhs, 1));
1574 coeff1h = INTVAL (XEXP (rhs, 1)) < 0 ? -1 : 0;
1575 rhs = XEXP (rhs, 0);
1576 }
1577 else if (GET_CODE (rhs) == ASHIFT
1578 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1579 && INTVAL (XEXP (rhs, 1)) >= 0
1580 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1581 {
1582 coeff1l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
1583 coeff1h = 0;
1584 rhs = XEXP (rhs, 0);
1585 }
1586
1587 if (rtx_equal_p (lhs, rhs))
1588 {
1589 rtx orig = gen_rtx_PLUS (mode, op0, op1);
1590 rtx coeff;
1591 unsigned HOST_WIDE_INT l;
1592 HOST_WIDE_INT h;
1593
1594 add_double (coeff0l, coeff0h, coeff1l, coeff1h, &l, &h);
1595 coeff = immed_double_const (l, h, mode);
1596
1597 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
1598 return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
1599 ? tem : 0;
1600 }
1601 }
1602
1603 /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
1604 if ((GET_CODE (op1) == CONST_INT
1605 || GET_CODE (op1) == CONST_DOUBLE)
1606 && GET_CODE (op0) == XOR
1607 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
1608 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
1609 && mode_signbit_p (mode, op1))
1610 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
1611 simplify_gen_binary (XOR, mode, op1,
1612 XEXP (op0, 1)));
1613
1614 /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
1615 if (GET_CODE (op0) == MULT
1616 && GET_CODE (XEXP (op0, 0)) == NEG)
1617 {
1618 rtx in1, in2;
1619
1620 in1 = XEXP (XEXP (op0, 0), 0);
1621 in2 = XEXP (op0, 1);
1622 return simplify_gen_binary (MINUS, mode, op1,
1623 simplify_gen_binary (MULT, mode,
1624 in1, in2));
1625 }
1626
1627 /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
1628 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
1629 is 1. */
1630 if (COMPARISON_P (op0)
1631 && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
1632 || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
1633 && (reversed = reversed_comparison (op0, mode)))
1634 return
1635 simplify_gen_unary (NEG, mode, reversed, mode);
1636
1637 /* If one of the operands is a PLUS or a MINUS, see if we can
1638 simplify this by the associative law.
1639 Don't use the associative law for floating point.
1640 The inaccuracy makes it nonassociative,
1641 and subtle programs can break if operations are associated. */
1642
1643 if (INTEGRAL_MODE_P (mode)
1644 && (plus_minus_operand_p (op0)
1645 || plus_minus_operand_p (op1))
1646 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
1647 return tem;
1648
1649 /* Reassociate floating point addition only when the user
1650 specifies unsafe math optimizations. */
1651 if (FLOAT_MODE_P (mode)
1652 && flag_unsafe_math_optimizations)
1653 {
1654 tem = simplify_associative_operation (code, mode, op0, op1);
1655 if (tem)
1656 return tem;
1657 }
1658 break;
1659
1660 case COMPARE:
1661#ifdef HAVE_cc0
1662 /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
1663 using cc0, in which case we want to leave it as a COMPARE
1664 so we can distinguish it from a register-register-copy.
1665
1666 In IEEE floating point, x-0 is not the same as x. */
1667
1668 if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
1669 || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
1670 && trueop1 == CONST0_RTX (mode))
1671 return op0;
1672#endif
1673
1674 /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags). */
1675 if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
1676 || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
1677 && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
1678 {
1679 rtx xop00 = XEXP (op0, 0);
1680 rtx xop10 = XEXP (op1, 0);
1681
1682#ifdef HAVE_cc0
1683 if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
1684#else
1685 if (REG_P (xop00) && REG_P (xop10)
1686 && GET_MODE (xop00) == GET_MODE (xop10)
1687 && REGNO (xop00) == REGNO (xop10)
1688 && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
1689 && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
1690#endif
1691 return xop00;
1692 }
1693 break;
1694
1695 case MINUS:
1696 /* We can't assume x-x is 0 even with non-IEEE floating point,
1697 but since it is zero except in very strange circumstances, we
1698 will treat it as zero with -funsafe-math-optimizations. */
1699 if (rtx_equal_p (trueop0, trueop1)
1700 && ! side_effects_p (op0)
1701 && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
1702 return CONST0_RTX (mode);
1703
1704 /* Change subtraction from zero into negation. (0 - x) is the
1705 same as -x when x is NaN, infinite, or finite and nonzero.
1706 But if the mode has signed zeros, and does not round towards
1707 -infinity, then 0 - 0 is 0, not -0. */
1708 if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
1709 return simplify_gen_unary (NEG, mode, op1, mode);
1710
1711 /* (-1 - a) is ~a. */
1712 if (trueop0 == constm1_rtx)
1713 return simplify_gen_unary (NOT, mode, op1, mode);
1714
1715 /* Subtracting 0 has no effect unless the mode has signed zeros
1716 and supports rounding towards -infinity. In such a case,
1717 0 - 0 is -0. */
1718 if (!(HONOR_SIGNED_ZEROS (mode)
1719 && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1720 && trueop1 == CONST0_RTX (mode))
1721 return op0;
1722
1723 /* See if this is something like X * C - X or vice versa or
1724 if the multiplication is written as a shift. If so, we can
1725 distribute and make a new multiply, shift, or maybe just
1726 have X (if C is 2 in the example above). But don't make
1727 something more expensive than we had before. */
1728
1729 if (SCALAR_INT_MODE_P (mode))
1730 {
1731 HOST_WIDE_INT coeff0h = 0, negcoeff1h = -1;
1732 unsigned HOST_WIDE_INT coeff0l = 1, negcoeff1l = -1;
1733 rtx lhs = op0, rhs = op1;
1734
1735 if (GET_CODE (lhs) == NEG)
1736 {
1737 coeff0l = -1;
1738 coeff0h = -1;
1739 lhs = XEXP (lhs, 0);
1740 }
1741 else if (GET_CODE (lhs) == MULT
1742 && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
1743 {
1744 coeff0l = INTVAL (XEXP (lhs, 1));
1745 coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
1746 lhs = XEXP (lhs, 0);
1747 }
1748 else if (GET_CODE (lhs) == ASHIFT
1749 && GET_CODE (XEXP (lhs, 1)) == CONST_INT
1750 && INTVAL (XEXP (lhs, 1)) >= 0
1751 && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
1752 {
1753 coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
1754 coeff0h = 0;
1755 lhs = XEXP (lhs, 0);
1756 }
1757
1758 if (GET_CODE (rhs) == NEG)
1759 {
1760 negcoeff1l = 1;
1761 negcoeff1h = 0;
1762 rhs = XEXP (rhs, 0);
1763 }
1764 else if (GET_CODE (rhs) == MULT
1765 && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
1766 {
1767 negcoeff1l = -INTVAL (XEXP (rhs, 1));
1768 negcoeff1h = INTVAL (XEXP (rhs, 1)) <= 0 ? 0 : -1;
1769 rhs = XEXP (rhs, 0);
1770 }
1771 else if (GET_CODE (rhs) == ASHIFT
1772 && GET_CODE (XEXP (rhs, 1)) == CONST_INT
1773 && INTVAL (XEXP (rhs, 1)) >= 0
1774 && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
1775 {
1776 negcoeff1l = -(((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1)));
1777 negcoeff1h = -1;
1778 rhs = XEXP (rhs, 0);
1779 }
1780
1781 if (rtx_equal_p (lhs, rhs))
1782 {
1783 rtx orig = gen_rtx_MINUS (mode, op0, op1);
1784 rtx coeff;
1785 unsigned HOST_WIDE_INT l;
1786 HOST_WIDE_INT h;
1787
1788 add_double (coeff0l, coeff0h, negcoeff1l, negcoeff1h, &l, &h);
1789 coeff = immed_double_const (l, h, mode);
1790
1791 tem = simplify_gen_binary (MULT, mode, lhs, coeff);
1792 return rtx_cost (tem, SET) <= rtx_cost (orig, SET)
1793 ? tem : 0;
1794 }
1795 }
1796
1797 /* (a - (-b)) -> (a + b). True even for IEEE. */
1798 if (GET_CODE (op1) == NEG)
1799 return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
1800
1801 /* (-x - c) may be simplified as (-c - x). */
1802 if (GET_CODE (op0) == NEG
1803 && (GET_CODE (op1) == CONST_INT
1804 || GET_CODE (op1) == CONST_DOUBLE))
1805 {
1806 tem = simplify_unary_operation (NEG, mode, op1, mode);
1807 if (tem)
1808 return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
1809 }
1810
1811 /* Don't let a relocatable value get a negative coeff. */
1812 if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
1813 return simplify_gen_binary (PLUS, mode,
1814 op0,
1815 neg_const_int (mode, op1));
1816
1817 /* (x - (x & y)) -> (x & ~y) */
1818 if (GET_CODE (op1) == AND)
1819 {
1820 if (rtx_equal_p (op0, XEXP (op1, 0)))
1821 {
1822 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
1823 GET_MODE (XEXP (op1, 1)));
1824 return simplify_gen_binary (AND, mode, op0, tem);
1825 }
1826 if (rtx_equal_p (op0, XEXP (op1, 1)))
1827 {
1828 tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
1829 GET_MODE (XEXP (op1, 0)));
1830 return simplify_gen_binary (AND, mode, op0, tem);
1831 }
1832 }
1833
1834 /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
1835 by reversing the comparison code if valid. */
1836 if (STORE_FLAG_VALUE == 1
1837 && trueop0 == const1_rtx
1838 && COMPARISON_P (op1)
1839 && (reversed = reversed_comparison (op1, mode)))
1840 return reversed;
1841
1842 /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
1843 if (GET_CODE (op1) == MULT
1844 && GET_CODE (XEXP (op1, 0)) == NEG)
1845 {
1846 rtx in1, in2;
1847
1848 in1 = XEXP (XEXP (op1, 0), 0);
1849 in2 = XEXP (op1, 1);
1850 return simplify_gen_binary (PLUS, mode,
1851 simplify_gen_binary (MULT, mode,
1852 in1, in2),
1853 op0);
1854 }
1855
1856 /* Canonicalize (minus (neg A) (mult B C)) to
1857 (minus (mult (neg B) C) A). */
1858 if (GET_CODE (op1) == MULT
1859 && GET_CODE (op0) == NEG)
1860 {
1861 rtx in1, in2;
1862
1863 in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
1864 in2 = XEXP (op1, 1);
1865 return simplify_gen_binary (MINUS, mode,
1866 simplify_gen_binary (MULT, mode,
1867 in1, in2),
1868 XEXP (op0, 0));
1869 }
1870
1871 /* If one of the operands is a PLUS or a MINUS, see if we can
1872 simplify this by the associative law. This will, for example,
1873 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
1874 Don't use the associative law for floating point.
1875 The inaccuracy makes it nonassociative,
1876 and subtle programs can break if operations are associated. */
1877
1878 if (INTEGRAL_MODE_P (mode)
1879 && (plus_minus_operand_p (op0)
1880 || plus_minus_operand_p (op1))
1881 && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
1882 return tem;
1883 break;
1884
1885 case MULT:
1886 if (trueop1 == constm1_rtx)
1887 return simplify_gen_unary (NEG, mode, op0, mode);
1888
1889 /* Maybe simplify x * 0 to 0. The reduction is not valid if
1890 x is NaN, since x * 0 is then also NaN. Nor is it valid
1891 when the mode has signed zeros, since multiplying a negative
1892 number by 0 will give -0, not 0. */
1893 if (!HONOR_NANS (mode)
1894 && !HONOR_SIGNED_ZEROS (mode)
1895 && trueop1 == CONST0_RTX (mode)
1896 && ! side_effects_p (op0))
1897 return op1;
1898
1899 /* In IEEE floating point, x*1 is not equivalent to x for
1900 signalling NaNs. */
1901 if (!HONOR_SNANS (mode)
1902 && trueop1 == CONST1_RTX (mode))
1903 return op0;
1904
1905 /* Convert multiply by constant power of two into shift unless
1906 we are still generating RTL. This test is a kludge. */
1907 if (GET_CODE (trueop1) == CONST_INT
1908 && (val = exact_log2 (INTVAL (trueop1))) >= 0
1909 /* If the mode is larger than the host word size, and the
1910 uppermost bit is set, then this isn't a power of two due
1911 to implicit sign extension. */
1912 && (width <= HOST_BITS_PER_WIDE_INT
1913 || val != HOST_BITS_PER_WIDE_INT - 1))
1914 return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
1915
1916 /* Likewise for multipliers wider than a word. */
1917 if (GET_CODE (trueop1) == CONST_DOUBLE
1918 && (GET_MODE (trueop1) == VOIDmode
1919 || GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_INT)
1920 && GET_MODE (op0) == mode
1921 && CONST_DOUBLE_LOW (trueop1) == 0
1922 && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0)
1923 return simplify_gen_binary (ASHIFT, mode, op0,
1924 GEN_INT (val + HOST_BITS_PER_WIDE_INT));
1925
1926 /* x*2 is x+x and x*(-1) is -x */
1927 if (GET_CODE (trueop1) == CONST_DOUBLE
1928 && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
1929 && GET_MODE (op0) == mode)
1930 {
1931 REAL_VALUE_TYPE d;
1932 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
1933
1934 if (REAL_VALUES_EQUAL (d, dconst2))
1935 return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
1936
1937 if (!HONOR_SNANS (mode)
1938 && REAL_VALUES_EQUAL (d, dconstm1))
1939 return simplify_gen_unary (NEG, mode, op0, mode);
1940 }
1941
1942 /* Optimize -x * -x as x * x. */
1943 if (FLOAT_MODE_P (mode)
1944 && GET_CODE (op0) == NEG
1945 && GET_CODE (op1) == NEG
1946 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
1947 && !side_effects_p (XEXP (op0, 0)))
1948 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
1949
1950 /* Likewise, optimize abs(x) * abs(x) as x * x. */
1951 if (SCALAR_FLOAT_MODE_P (mode)
1952 && GET_CODE (op0) == ABS
1953 && GET_CODE (op1) == ABS
1954 && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
1955 && !side_effects_p (XEXP (op0, 0)))
1956 return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
1957
1958 /* Reassociate multiplication, but for floating point MULTs
1959 only when the user specifies unsafe math optimizations. */
1960 if (! FLOAT_MODE_P (mode)
1961 || flag_unsafe_math_optimizations)
1962 {
1963 tem = simplify_associative_operation (code, mode, op0, op1);
1964 if (tem)
1965 return tem;
1966 }
1967 break;
1968
1969 case IOR:
1970 if (trueop1 == const0_rtx)
1971 return op0;
1972 if (GET_CODE (trueop1) == CONST_INT
1973 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
1974 == GET_MODE_MASK (mode)))
1975 return op1;
1976 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
1977 return op0;
1978 /* A | (~A) -> -1 */
1979 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
1980 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
1981 && ! side_effects_p (op0)
1982 && SCALAR_INT_MODE_P (mode))
1983 return constm1_rtx;
1984
1985 /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
1986 if (GET_CODE (op1) == CONST_INT
1987 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
1988 && (nonzero_bits (op0, mode) & ~INTVAL (op1)) == 0)
1989 return op1;
1990
1991 /* Convert (A & B) | A to A. */
1992 if (GET_CODE (op0) == AND
1993 && (rtx_equal_p (XEXP (op0, 0), op1)
1994 || rtx_equal_p (XEXP (op0, 1), op1))
1995 && ! side_effects_p (XEXP (op0, 0))
1996 && ! side_effects_p (XEXP (op0, 1)))
1997 return op1;
1998
1999 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2000 mode size to (rotate A CX). */
2001
2002 if (GET_CODE (op1) == ASHIFT
2003 || GET_CODE (op1) == SUBREG)
2004 {
2005 opleft = op1;
2006 opright = op0;
2007 }
2008 else
2009 {
2010 opright = op1;
2011 opleft = op0;
2012 }
2013
2014 if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
2015 && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
2016 && GET_CODE (XEXP (opleft, 1)) == CONST_INT
2017 && GET_CODE (XEXP (opright, 1)) == CONST_INT
2018 && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
2019 == GET_MODE_BITSIZE (mode)))
2020 return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
2021
2022 /* Same, but for ashift that has been "simplified" to a wider mode
2023 by simplify_shift_const. */
2024
2025 if (GET_CODE (opleft) == SUBREG
2026 && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2027 && GET_CODE (opright) == LSHIFTRT
2028 && GET_CODE (XEXP (opright, 0)) == SUBREG
2029 && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
2030 && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
2031 && (GET_MODE_SIZE (GET_MODE (opleft))
2032 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
2033 && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2034 SUBREG_REG (XEXP (opright, 0)))
2035 && GET_CODE (XEXP (SUBREG_REG (opleft), 1)) == CONST_INT
2036 && GET_CODE (XEXP (opright, 1)) == CONST_INT
2037 && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
2038 == GET_MODE_BITSIZE (mode)))
2039 return gen_rtx_ROTATE (mode, XEXP (opright, 0),
2040 XEXP (SUBREG_REG (opleft), 1));
2041
2042 /* If we have (ior (and (X C1) C2)), simplify this by making
2043 C1 as small as possible if C1 actually changes. */
2044 if (GET_CODE (op1) == CONST_INT
2045 && (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2046 || INTVAL (op1) > 0)
2047 && GET_CODE (op0) == AND
2048 && GET_CODE (XEXP (op0, 1)) == CONST_INT
2049 && GET_CODE (op1) == CONST_INT
2050 && (INTVAL (XEXP (op0, 1)) & INTVAL (op1)) != 0)
2051 return simplify_gen_binary (IOR, mode,
2052 simplify_gen_binary
2053 (AND, mode, XEXP (op0, 0),
2054 GEN_INT (INTVAL (XEXP (op0, 1))
2055 & ~INTVAL (op1))),
2056 op1);
2057
2058 /* If OP0 is (ashiftrt (plus ...) C), it might actually be
2059 a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
2060 the PLUS does not affect any of the bits in OP1: then we can do
2061 the IOR as a PLUS and we can associate. This is valid if OP1
2062 can be safely shifted left C bits. */
2063 if (GET_CODE (trueop1) == CONST_INT && GET_CODE (op0) == ASHIFTRT
2064 && GET_CODE (XEXP (op0, 0)) == PLUS
2065 && GET_CODE (XEXP (XEXP (op0, 0), 1)) == CONST_INT
2066 && GET_CODE (XEXP (op0, 1)) == CONST_INT
2067 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
2068 {
2069 int count = INTVAL (XEXP (op0, 1));
2070 HOST_WIDE_INT mask = INTVAL (trueop1) << count;
2071
2072 if (mask >> count == INTVAL (trueop1)
2073 && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
2074 return simplify_gen_binary (ASHIFTRT, mode,
2075 plus_constant (XEXP (op0, 0), mask),
2076 XEXP (op0, 1));
2077 }
2078
2079 tem = simplify_associative_operation (code, mode, op0, op1);
2080 if (tem)
2081 return tem;
2082 break;
2083
2084 case XOR:
2085 if (trueop1 == const0_rtx)
2086 return op0;
2087 if (GET_CODE (trueop1) == CONST_INT
2088 && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
2089 == GET_MODE_MASK (mode)))
2090 return simplify_gen_unary (NOT, mode, op0, mode);
2091 if (rtx_equal_p (trueop0, trueop1)
2092 && ! side_effects_p (op0)
2093 && GET_MODE_CLASS (mode) != MODE_CC)
2094 return CONST0_RTX (mode);
2095
2096 /* Canonicalize XOR of the most significant bit to PLUS. */
2097 if ((GET_CODE (op1) == CONST_INT
2098 || GET_CODE (op1) == CONST_DOUBLE)
2099 && mode_signbit_p (mode, op1))
2100 return simplify_gen_binary (PLUS, mode, op0, op1);
2101 /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
2102 if ((GET_CODE (op1) == CONST_INT
2103 || GET_CODE (op1) == CONST_DOUBLE)
2104 && GET_CODE (op0) == PLUS
2105 && (GET_CODE (XEXP (op0, 1)) == CONST_INT
2106 || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
2107 && mode_signbit_p (mode, XEXP (op0, 1)))
2108 return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
2109 simplify_gen_binary (XOR, mode, op1,
2110 XEXP (op0, 1)));
2111
2112 /* If we are XORing two things that have no bits in common,
2113 convert them into an IOR. This helps to detect rotation encoded
2114 using those methods and possibly other simplifications. */
2115
2116 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2117 && (nonzero_bits (op0, mode)
2118 & nonzero_bits (op1, mode)) == 0)
2119 return (simplify_gen_binary (IOR, mode, op0, op1));
2120
2121 /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
2122 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
2123 (NOT y). */
2124 {
2125 int num_negated = 0;
2126
2127 if (GET_CODE (op0) == NOT)
2128 num_negated++, op0 = XEXP (op0, 0);
2129 if (GET_CODE (op1) == NOT)
2130 num_negated++, op1 = XEXP (op1, 0);
2131
2132 if (num_negated == 2)
2133 return simplify_gen_binary (XOR, mode, op0, op1);
2134 else if (num_negated == 1)
2135 return simplify_gen_unary (NOT, mode,
2136 simplify_gen_binary (XOR, mode, op0, op1),
2137 mode);
2138 }
2139
2140 /* Convert (xor (and A B) B) to (and (not A) B). The latter may
2141 correspond to a machine insn or result in further simplifications
2142 if B is a constant. */
2143
2144 if (GET_CODE (op0) == AND
2145 && rtx_equal_p (XEXP (op0, 1), op1)
2146 && ! side_effects_p (op1))
2147 return simplify_gen_binary (AND, mode,
2148 simplify_gen_unary (NOT, mode,
2149 XEXP (op0, 0), mode),
2150 op1);
2151
2152 else if (GET_CODE (op0) == AND
2153 && rtx_equal_p (XEXP (op0, 0), op1)
2154 && ! side_effects_p (op1))
2155 return simplify_gen_binary (AND, mode,
2156 simplify_gen_unary (NOT, mode,
2157 XEXP (op0, 1), mode),
2158 op1);
2159
2160 /* (xor (comparison foo bar) (const_int 1)) can become the reversed
2161 comparison if STORE_FLAG_VALUE is 1. */
2162 if (STORE_FLAG_VALUE == 1
2163 && trueop1 == const1_rtx
2164 && COMPARISON_P (op0)
2165 && (reversed = reversed_comparison (op0, mode)))
2166 return reversed;
2167
2168 /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
2169 is (lt foo (const_int 0)), so we can perform the above
2170 simplification if STORE_FLAG_VALUE is 1. */
2171
2172 if (STORE_FLAG_VALUE == 1
2173 && trueop1 == const1_rtx
2174 && GET_CODE (op0) == LSHIFTRT
2175 && GET_CODE (XEXP (op0, 1)) == CONST_INT
2176 && INTVAL (XEXP (op0, 1)) == GET_MODE_BITSIZE (mode) - 1)
2177 return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);
2178
2179 /* (xor (comparison foo bar) (const_int sign-bit))
2180 when STORE_FLAG_VALUE is the sign bit. */
2181 if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2182 && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
2183 == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1))
2184 && trueop1 == const_true_rtx
2185 && COMPARISON_P (op0)
2186 && (reversed = reversed_comparison (op0, mode)))
2187 return reversed;
2188
2189 break;
2190
2191 tem = simplify_associative_operation (code, mode, op0, op1);
2192 if (tem)
2193 return tem;
2194 break;
2195
2196 case AND:
2197 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2198 return trueop1;
2199 /* If we are turning off bits already known off in OP0, we need
2200 not do an AND. */
2201 if (GET_CODE (trueop1) == CONST_INT
2202 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2203 && (nonzero_bits (trueop0, mode) & ~INTVAL (trueop1)) == 0)
2204 return op0;
2205 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
2206 && GET_MODE_CLASS (mode) != MODE_CC)
2207 return op0;
2208 /* A & (~A) -> 0 */
2209 if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
2210 || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
2211 && ! side_effects_p (op0)
2212 && GET_MODE_CLASS (mode) != MODE_CC)
2213 return CONST0_RTX (mode);
2214
2215 /* Transform (and (extend X) C) into (zero_extend (and X C)) if
2216 there are no nonzero bits of C outside of X's mode. */
2217 if ((GET_CODE (op0) == SIGN_EXTEND
2218 || GET_CODE (op0) == ZERO_EXTEND)
2219 && GET_CODE (trueop1) == CONST_INT
2220 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2221 && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
2222 & INTVAL (trueop1)) == 0)
2223 {
2224 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2225 tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
2226 gen_int_mode (INTVAL (trueop1),
2227 imode));
2228 return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
2229 }
2230
2231 /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
2232 insn (and may simplify more). */
2233 if (GET_CODE (op0) == XOR
2234 && rtx_equal_p (XEXP (op0, 0), op1)
2235 && ! side_effects_p (op1))
2236 return simplify_gen_binary (AND, mode,
2237 simplify_gen_unary (NOT, mode,
2238 XEXP (op0, 1), mode),
2239 op1);
2240
2241 if (GET_CODE (op0) == XOR
2242 && rtx_equal_p (XEXP (op0, 1), op1)
2243 && ! side_effects_p (op1))
2244 return simplify_gen_binary (AND, mode,
2245 simplify_gen_unary (NOT, mode,
2246 XEXP (op0, 0), mode),
2247 op1);
2248
2249 /* Similarly for (~(A ^ B)) & A. */
2250 if (GET_CODE (op0) == NOT
2251 && GET_CODE (XEXP (op0, 0)) == XOR
2252 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
2253 && ! side_effects_p (op1))
2254 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
2255
2256 if (GET_CODE (op0) == NOT
2257 && GET_CODE (XEXP (op0, 0)) == XOR
2258 && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
2259 && ! side_effects_p (op1))
2260 return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
2261
2262 /* Convert (A | B) & A to A. */
2263 if (GET_CODE (op0) == IOR
2264 && (rtx_equal_p (XEXP (op0, 0), op1)
2265 || rtx_equal_p (XEXP (op0, 1), op1))
2266 && ! side_effects_p (XEXP (op0, 0))
2267 && ! side_effects_p (XEXP (op0, 1)))
2268 return op1;
2269
2270 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
2271 ((A & N) + B) & M -> (A + B) & M
2272 Similarly if (N & M) == 0,
2273 ((A | N) + B) & M -> (A + B) & M
2274 and for - instead of + and/or ^ instead of |. */
2275 if (GET_CODE (trueop1) == CONST_INT
2276 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2277 && ~INTVAL (trueop1)
2278 && (INTVAL (trueop1) & (INTVAL (trueop1) + 1)) == 0
2279 && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
2280 {
2281 rtx pmop[2];
2282 int which;
2283
2284 pmop[0] = XEXP (op0, 0);
2285 pmop[1] = XEXP (op0, 1);
2286
2287 for (which = 0; which < 2; which++)
2288 {
2289 tem = pmop[which];
2290 switch (GET_CODE (tem))
2291 {
2292 case AND:
2293 if (GET_CODE (XEXP (tem, 1)) == CONST_INT
2294 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1))
2295 == INTVAL (trueop1))
2296 pmop[which] = XEXP (tem, 0);
2297 break;
2298 case IOR:
2299 case XOR:
2300 if (GET_CODE (XEXP (tem, 1)) == CONST_INT
2301 && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == 0)
2302 pmop[which] = XEXP (tem, 0);
2303 break;
2304 default:
2305 break;
2306 }
2307 }
2308
2309 if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
2310 {
2311 tem = simplify_gen_binary (GET_CODE (op0), mode,
2312 pmop[0], pmop[1]);
2313 return simplify_gen_binary (code, mode, tem, op1);
2314 }
2315 }
2316 tem = simplify_associative_operation (code, mode, op0, op1);
2317 if (tem)
2318 return tem;
2319 break;
2320
2321 case UDIV:
2322 /* 0/x is 0 (or x&0 if x has side-effects). */
2323 if (trueop0 == CONST0_RTX (mode))
2324 {
2325 if (side_effects_p (op1))
2326 return simplify_gen_binary (AND, mode, op1, trueop0);
2327 return trueop0;
2328 }
2329 /* x/1 is x. */
2330 if (trueop1 == CONST1_RTX (mode))
2331 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2332 /* Convert divide by power of two into shift. */
2333 if (GET_CODE (trueop1) == CONST_INT
2334 && (val = exact_log2 (INTVAL (trueop1))) > 0)
2335 return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
2336 break;
2337
2338 case DIV:
2339 /* Handle floating point and integers separately. */
2340 if (SCALAR_FLOAT_MODE_P (mode))
2341 {
2342 /* Maybe change 0.0 / x to 0.0. This transformation isn't
2343 safe for modes with NaNs, since 0.0 / 0.0 will then be
2344 NaN rather than 0.0. Nor is it safe for modes with signed
2345 zeros, since dividing 0 by a negative number gives -0.0 */
2346 if (trueop0 == CONST0_RTX (mode)
2347 && !HONOR_NANS (mode)
2348 && !HONOR_SIGNED_ZEROS (mode)
2349 && ! side_effects_p (op1))
2350 return op0;
2351 /* x/1.0 is x. */
2352 if (trueop1 == CONST1_RTX (mode)
2353 && !HONOR_SNANS (mode))
2354 return op0;
2355
2356 if (GET_CODE (trueop1) == CONST_DOUBLE
2357 && trueop1 != CONST0_RTX (mode))
2358 {
2359 REAL_VALUE_TYPE d;
2360 REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);
2361
2362 /* x/-1.0 is -x. */
2363 if (REAL_VALUES_EQUAL (d, dconstm1)
2364 && !HONOR_SNANS (mode))
2365 return simplify_gen_unary (NEG, mode, op0, mode);
2366
2367 /* Change FP division by a constant into multiplication.
2368 Only do this with -funsafe-math-optimizations. */
2369 if (flag_unsafe_math_optimizations
2370 && !REAL_VALUES_EQUAL (d, dconst0))
2371 {
2372 REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
2373 tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
2374 return simplify_gen_binary (MULT, mode, op0, tem);
2375 }
2376 }
2377 }
2378 else
2379 {
2380 /* 0/x is 0 (or x&0 if x has side-effects). */
2381 if (trueop0 == CONST0_RTX (mode))
2382 {
2383 if (side_effects_p (op1))
2384 return simplify_gen_binary (AND, mode, op1, trueop0);
2385 return trueop0;
2386 }
2387 /* x/1 is x. */
2388 if (trueop1 == CONST1_RTX (mode))
2389 return rtl_hooks.gen_lowpart_no_emit (mode, op0);
2390 /* x/-1 is -x. */
2391 if (trueop1 == constm1_rtx)
2392 {
2393 rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
2394 return simplify_gen_unary (NEG, mode, x, mode);
2395 }
2396 }
2397 break;
2398
2399 case UMOD:
2400 /* 0%x is 0 (or x&0 if x has side-effects). */
2401 if (trueop0 == CONST0_RTX (mode))
2402 {
2403 if (side_effects_p (op1))
2404 return simplify_gen_binary (AND, mode, op1, trueop0);
2405 return trueop0;
2406 }
2407 /* x%1 is 0 (of x&0 if x has side-effects). */
2408 if (trueop1 == CONST1_RTX (mode))
2409 {
2410 if (side_effects_p (op0))
2411 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2412 return CONST0_RTX (mode);
2413 }
2414 /* Implement modulus by power of two as AND. */
2415 if (GET_CODE (trueop1) == CONST_INT
2416 && exact_log2 (INTVAL (trueop1)) > 0)
2417 return simplify_gen_binary (AND, mode, op0,
2418 GEN_INT (INTVAL (op1) - 1));
2419 break;
2420
2421 case MOD:
2422 /* 0%x is 0 (or x&0 if x has side-effects). */
2423 if (trueop0 == CONST0_RTX (mode))
2424 {
2425 if (side_effects_p (op1))
2426 return simplify_gen_binary (AND, mode, op1, trueop0);
2427 return trueop0;
2428 }
2429 /* x%1 and x%-1 is 0 (or x&0 if x has side-effects). */
2430 if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
2431 {
2432 if (side_effects_p (op0))
2433 return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
2434 return CONST0_RTX (mode);
2435 }
2436 break;
2437
2438 case ROTATERT:
2439 case ROTATE:
2440 case ASHIFTRT:
2441 if (trueop1 == CONST0_RTX (mode))
2442 return op0;
2443 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2444 return op0;
2445 /* Rotating ~0 always results in ~0. */
2446 if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
2447 && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
2448 && ! side_effects_p (op1))
2449 return op0;
2450 break;
2451
2452 case ASHIFT:
2453 case SS_ASHIFT:
2454 if (trueop1 == CONST0_RTX (mode))
2455 return op0;
2456 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2457 return op0;
2458 break;
2459
2460 case LSHIFTRT:
2461 if (trueop1 == CONST0_RTX (mode))
2462 return op0;
2463 if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
2464 return op0;
2465 /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
2466 if (GET_CODE (op0) == CLZ
2467 && GET_CODE (trueop1) == CONST_INT
2468 && STORE_FLAG_VALUE == 1
2469 && INTVAL (trueop1) < (HOST_WIDE_INT)width)
2470 {
2471 enum machine_mode imode = GET_MODE (XEXP (op0, 0));
2472 unsigned HOST_WIDE_INT zero_val = 0;
2473
2474 if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
2475 && zero_val == GET_MODE_BITSIZE (imode)
2476 && INTVAL (trueop1) == exact_log2 (zero_val))
2477 return simplify_gen_relational (EQ, mode, imode,
2478 XEXP (op0, 0), const0_rtx);
2479 }
2480 break;
2481
2482 case SMIN:
2483 if (width <= HOST_BITS_PER_WIDE_INT
2484 && GET_CODE (trueop1) == CONST_INT
2485 && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width -1)
2486 && ! side_effects_p (op0))
2487 return op1;
2488 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2489 return op0;
2490 tem = simplify_associative_operation (code, mode, op0, op1);
2491 if (tem)
2492 return tem;
2493 break;
2494
2495 case SMAX:
2496 if (width <= HOST_BITS_PER_WIDE_INT
2497 && GET_CODE (trueop1) == CONST_INT
2498 && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
2499 == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
2500 && ! side_effects_p (op0))
2501 return op1;
2502 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2503 return op0;
2504 tem = simplify_associative_operation (code, mode, op0, op1);
2505 if (tem)
2506 return tem;
2507 break;
2508
2509 case UMIN:
2510 if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
2511 return op1;
2512 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2513 return op0;
2514 tem = simplify_associative_operation (code, mode, op0, op1);
2515 if (tem)
2516 return tem;
2517 break;
2518
2519 case UMAX:
2520 if (trueop1 == constm1_rtx && ! side_effects_p (op0))
2521 return op1;
2522 if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
2523 return op0;
2524 tem = simplify_associative_operation (code, mode, op0, op1);
2525 if (tem)
2526 return tem;
2527 break;
2528
2529 case SS_PLUS:
2530 case US_PLUS:
2531 case SS_MINUS:
2532 case US_MINUS:
2533 /* ??? There are simplifications that can be done. */
2534 return 0;
2535
2536 case VEC_SELECT:
2537 if (!VECTOR_MODE_P (mode))
2538 {
2539 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2540 gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
2541 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2542 gcc_assert (XVECLEN (trueop1, 0) == 1);
2543 gcc_assert (GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT);
2544
2545 if (GET_CODE (trueop0) == CONST_VECTOR)
2546 return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
2547 (trueop1, 0, 0)));
2548 }
2549 else
2550 {
2551 gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
2552 gcc_assert (GET_MODE_INNER (mode)
2553 == GET_MODE_INNER (GET_MODE (trueop0)));
2554 gcc_assert (GET_CODE (trueop1) == PARALLEL);
2555
2556 if (GET_CODE (trueop0) == CONST_VECTOR)
2557 {
2558 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2559 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2560 rtvec v = rtvec_alloc (n_elts);
2561 unsigned int i;
2562
2563 gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
2564 for (i = 0; i < n_elts; i++)
2565 {
2566 rtx x = XVECEXP (trueop1, 0, i);
2567
2568 gcc_assert (GET_CODE (x) == CONST_INT);
2569 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
2570 INTVAL (x));
2571 }
2572
2573 return gen_rtx_CONST_VECTOR (mode, v);
2574 }
2575 }
2576
2577 if (XVECLEN (trueop1, 0) == 1
2578 && GET_CODE (XVECEXP (trueop1, 0, 0)) == CONST_INT
2579 && GET_CODE (trueop0) == VEC_CONCAT)
2580 {
2581 rtx vec = trueop0;
2582 int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
2583
2584 /* Try to find the element in the VEC_CONCAT. */
2585 while (GET_MODE (vec) != mode
2586 && GET_CODE (vec) == VEC_CONCAT)
2587 {
2588 HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
2589 if (offset < vec_size)
2590 vec = XEXP (vec, 0);
2591 else
2592 {
2593 offset -= vec_size;
2594 vec = XEXP (vec, 1);
2595 }
2596 vec = avoid_constant_pool_reference (vec);
2597 }
2598
2599 if (GET_MODE (vec) == mode)
2600 return vec;
2601 }
2602
2603 return 0;
2604 case VEC_CONCAT:
2605 {
2606 enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
2607 ? GET_MODE (trueop0)
2608 : GET_MODE_INNER (mode));
2609 enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
2610 ? GET_MODE (trueop1)
2611 : GET_MODE_INNER (mode));
2612
2613 gcc_assert (VECTOR_MODE_P (mode));
2614 gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
2615 == GET_MODE_SIZE (mode));
2616
2617 if (VECTOR_MODE_P (op0_mode))
2618 gcc_assert (GET_MODE_INNER (mode)
2619 == GET_MODE_INNER (op0_mode));
2620 else
2621 gcc_assert (GET_MODE_INNER (mode) == op0_mode);
2622
2623 if (VECTOR_MODE_P (op1_mode))
2624 gcc_assert (GET_MODE_INNER (mode)
2625 == GET_MODE_INNER (op1_mode));
2626 else
2627 gcc_assert (GET_MODE_INNER (mode) == op1_mode);
2628
2629 if ((GET_CODE (trueop0) == CONST_VECTOR
2630 || GET_CODE (trueop0) == CONST_INT
2631 || GET_CODE (trueop0) == CONST_DOUBLE)
2632 && (GET_CODE (trueop1) == CONST_VECTOR
2633 || GET_CODE (trueop1) == CONST_INT
2634 || GET_CODE (trueop1) == CONST_DOUBLE))
2635 {
2636 int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
2637 unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
2638 rtvec v = rtvec_alloc (n_elts);
2639 unsigned int i;
2640 unsigned in_n_elts = 1;
2641
2642 if (VECTOR_MODE_P (op0_mode))
2643 in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
2644 for (i = 0; i < n_elts; i++)
2645 {
2646 if (i < in_n_elts)
2647 {
2648 if (!VECTOR_MODE_P (op0_mode))
2649 RTVEC_ELT (v, i) = trueop0;
2650 else
2651 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
2652 }
2653 else
2654 {
2655 if (!VECTOR_MODE_P (op1_mode))
2656 RTVEC_ELT (v, i) = trueop1;
2657 else
2658 RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
2659 i - in_n_elts);
2660 }
2661 }
2662
2663 return gen_rtx_CONST_VECTOR (mode, v);
2664 }
2665 }
2666 return 0;
2667
2668 default:
2669 gcc_unreachable ();
2670 }
2671
2672 return 0;
2673}
2674
/* Try to compute the result of applying binary operation CODE to the
   constant operands OP0 and OP1 in mode MODE.  Return the folded
   constant rtx, or 0/NULL_RTX when the operands are not constants of a
   handled kind or when folding would be unsafe (division by zero,
   trapping FP operations, out-of-range shifts, ...).  The four sections
   below handle, in order: vector constants, floating-point constants,
   double-word integers and single-word integers.  */
rtx
simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
				 rtx op0, rtx op1)
{
  HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_BITSIZE (mode);

  /* Fold element-wise operations on two CONST_VECTORs by folding each
     pair of elements in the inner mode.  */
  if (VECTOR_MODE_P (mode)
      && code != VEC_CONCAT
      && GET_CODE (op0) == CONST_VECTOR
      && GET_CODE (op1) == CONST_VECTOR)
    {
      unsigned n_elts = GET_MODE_NUNITS (mode);
      enum machine_mode op0mode = GET_MODE (op0);
      unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
      enum machine_mode op1mode = GET_MODE (op1);
      unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op0_n_elts == n_elts);
      gcc_assert (op1_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
	{
	  rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
					     CONST_VECTOR_ELT (op0, i),
					     CONST_VECTOR_ELT (op1, i));
	  /* If any element fails to fold, give up on the whole vector.  */
	  if (!x)
	    return 0;
	  RTVEC_ELT (v, i) = x;
	}

      return gen_rtx_CONST_VECTOR (mode, v);
    }

  /* Fold VEC_CONCAT of two constants into a single CONST_VECTOR.  */
  if (VECTOR_MODE_P (mode)
      && code == VEC_CONCAT
      && CONSTANT_P (op0) && CONSTANT_P (op1))
    {
      unsigned n_elts = GET_MODE_NUNITS (mode);
      rtvec v = rtvec_alloc (n_elts);

      gcc_assert (n_elts >= 2);
      if (n_elts == 2)
	{
	  /* Two scalar constants concatenated into a two-element vector.  */
	  gcc_assert (GET_CODE (op0) != CONST_VECTOR);
	  gcc_assert (GET_CODE (op1) != CONST_VECTOR);

	  RTVEC_ELT (v, 0) = op0;
	  RTVEC_ELT (v, 1) = op1;
	}
      else
	{
	  /* Two constant vectors concatenated element by element.  */
	  unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
	  unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
	  unsigned i;

	  gcc_assert (GET_CODE (op0) == CONST_VECTOR);
	  gcc_assert (GET_CODE (op1) == CONST_VECTOR);
	  gcc_assert (op0_n_elts + op1_n_elts == n_elts);

	  for (i = 0; i < op0_n_elts; ++i)
	    RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
	  for (i = 0; i < op1_n_elts; ++i)
	    RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
	}

      return gen_rtx_CONST_VECTOR (mode, v);
    }

  /* Fold operations on floating-point CONST_DOUBLE operands.  */
  if (SCALAR_FLOAT_MODE_P (mode)
      && GET_CODE (op0) == CONST_DOUBLE
      && GET_CODE (op1) == CONST_DOUBLE
      && mode == GET_MODE (op0) && mode == GET_MODE (op1))
    {
      /* Logical operations are applied to the target bit-image of the
	 floating-point value, not its numeric value.  */
      if (code == AND
	  || code == IOR
	  || code == XOR)
	{
	  long tmp0[4];
	  long tmp1[4];
	  REAL_VALUE_TYPE r;
	  int i;

	  real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
			  GET_MODE (op0));
	  real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
			  GET_MODE (op1));
	  for (i = 0; i < 4; i++)
	    {
	      switch (code)
		{
		case AND:
		  tmp0[i] &= tmp1[i];
		  break;
		case IOR:
		  tmp0[i] |= tmp1[i];
		  break;
		case XOR:
		  tmp0[i] ^= tmp1[i];
		  break;
		default:
		  gcc_unreachable ();
		}
	    }
	   real_from_target (&r, tmp0, mode);
	   return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
	}
      else
	{
	  REAL_VALUE_TYPE f0, f1, value, result;
	  bool inexact;

	  REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
	  REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
	  real_convert (&f0, mode, &f0);
	  real_convert (&f1, mode, &f1);

	  /* Folding a signaling NaN would lose the trap.  */
	  if (HONOR_SNANS (mode)
	      && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
	    return 0;

	  /* x / 0.0 either traps or needs an infinity to represent the
	     result; don't fold unless both concerns are ruled out.  */
	  if (code == DIV
	      && REAL_VALUES_EQUAL (f1, dconst0)
	      && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
	    return 0;

	  /* Inf-with-Inf combinations that produce NaN also raise an
	     exception; preserve it under -ftrapping-math.  */
	  if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
	      && flag_trapping_math
	      && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
	    {
	      int s0 = REAL_VALUE_NEGATIVE (f0);
	      int s1 = REAL_VALUE_NEGATIVE (f1);

	      switch (code)
		{
		case PLUS:
		  /* Inf + -Inf = NaN plus exception.  */
		  if (s0 != s1)
		    return 0;
		  break;
		case MINUS:
		  /* Inf - Inf = NaN plus exception.  */
		  if (s0 == s1)
		    return 0;
		  break;
		case DIV:
		  /* Inf / Inf = NaN plus exception.  */
		  return 0;
		default:
		  break;
		}
	    }

	  if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
	      && flag_trapping_math
	      && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
		  || (REAL_VALUE_ISINF (f1)
		      && REAL_VALUES_EQUAL (f0, dconst0))))
	    /* Inf * 0 = NaN plus exception.  */
	    return 0;

	  inexact = real_arithmetic (&value, rtx_to_tree_code (code),
				     &f0, &f1);
	  real_convert (&result, mode, &value);

	  /* Don't constant fold this floating point operation if
	     the result has overflowed and flag_trapping_math.  */

	  if (flag_trapping_math
	      && MODE_HAS_INFINITIES (mode)
	      && REAL_VALUE_ISINF (result)
	      && !REAL_VALUE_ISINF (f0)
	      && !REAL_VALUE_ISINF (f1))
	    /* Overflow plus exception.  */
	    return 0;

	  /* Don't constant fold this floating point operation if the
	     result may dependent upon the run-time rounding mode and
	     flag_rounding_math is set, or if GCC's software emulation
	     is unable to accurately represent the result.  */

	  if ((flag_rounding_math
	       || (REAL_MODE_FORMAT_COMPOSITE_P (mode)
		   && !flag_unsafe_math_optimizations))
	      && (inexact || !real_identical (&result, &value)))
	    return NULL_RTX;

	  return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
	}
    }

  /* We can fold some multi-word operations.  */
  if (GET_MODE_CLASS (mode) == MODE_INT
      && width == HOST_BITS_PER_WIDE_INT * 2
      && (GET_CODE (op0) == CONST_DOUBLE || GET_CODE (op0) == CONST_INT)
      && (GET_CODE (op1) == CONST_DOUBLE || GET_CODE (op1) == CONST_INT))
    {
      /* Each operand is represented as a (low, high) word pair;
	 CONST_INTs are sign-extended into the high word.  */
      unsigned HOST_WIDE_INT l1, l2, lv, lt;
      HOST_WIDE_INT h1, h2, hv, ht;

      if (GET_CODE (op0) == CONST_DOUBLE)
	l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0);
      else
	l1 = INTVAL (op0), h1 = HWI_SIGN_EXTEND (l1);

      if (GET_CODE (op1) == CONST_DOUBLE)
	l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1);
      else
	l2 = INTVAL (op1), h2 = HWI_SIGN_EXTEND (l2);

      switch (code)
	{
	case MINUS:
	  /* A - B == A + (-B).  */
	  neg_double (l2, h2, &lv, &hv);
	  l2 = lv, h2 = hv;

	  /* Fall through....  */

	case PLUS:
	  add_double (l1, h1, l2, h2, &lv, &hv);
	  break;

	case MULT:
	  mul_double (l1, h1, l2, h2, &lv, &hv);
	  break;

	case DIV:
	  /* div_and_round_double returns nonzero on overflow, in which
	     case the fold is abandoned.  */
	  if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
				    &lv, &hv, &lt, &ht))
	    return 0;
	  break;

	case MOD:
	  if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
				    &lt, &ht, &lv, &hv))
	    return 0;
	  break;

	case UDIV:
	  if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
				    &lv, &hv, &lt, &ht))
	    return 0;
	  break;

	case UMOD:
	  if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
				    &lt, &ht, &lv, &hv))
	    return 0;
	  break;

	case AND:
	  lv = l1 & l2, hv = h1 & h2;
	  break;

	case IOR:
	  lv = l1 | l2, hv = h1 | h2;
	  break;

	case XOR:
	  lv = l1 ^ l2, hv = h1 ^ h2;
	  break;

	case SMIN:
	  /* Signed comparison: high words signed, low words unsigned.  */
	  if (h1 < h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      < (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case SMAX:
	  if (h1 > h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      > (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case UMIN:
	  if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      < (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case UMAX:
	  if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      > (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case LSHIFTRT:   case ASHIFTRT:
	case ASHIFT:
	case ROTATE:     case ROTATERT:
	  if (SHIFT_COUNT_TRUNCATED)
	    l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;

	  /* Shift counts at or beyond the mode width are not folded.  */
	  if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
	    return 0;

	  if (code == LSHIFTRT || code == ASHIFTRT)
	    rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
			   code == ASHIFTRT);
	  else if (code == ASHIFT)
	    lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
	  else if (code == ROTATE)
	    lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
	  else /* code == ROTATERT */
	    rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
	  break;

	default:
	  return 0;
	}

      return immed_double_const (lv, hv, mode);
    }

  /* Fold single-word integer operations.  */
  if (GET_CODE (op0) == CONST_INT && GET_CODE (op1) == CONST_INT
      && width <= HOST_BITS_PER_WIDE_INT && width != 0)
    {
      /* Get the integer argument values in two forms:
	 zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S.  */

      arg0 = INTVAL (op0);
      arg1 = INTVAL (op1);

      if (width < HOST_BITS_PER_WIDE_INT)
	{
	  arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
	  arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;

	  arg0s = arg0;
	  if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
	    arg0s |= ((HOST_WIDE_INT) (-1) << width);

	  arg1s = arg1;
	  if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
	    arg1s |= ((HOST_WIDE_INT) (-1) << width);
	}
      else
	{
	  arg0s = arg0;
	  arg1s = arg1;
	}

      /* Compute the value of the arithmetic.  */

      switch (code)
	{
	case PLUS:
	  val = arg0s + arg1s;
	  break;

	case MINUS:
	  val = arg0s - arg1s;
	  break;

	case MULT:
	  val = arg0s * arg1s;
	  break;

	case DIV:
	  /* Punt on division by zero and on the overflowing
	     most-negative-value / -1 case.  */
	  if (arg1s == 0
	      || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
		  && arg1s == -1))
	    return 0;
	  val = arg0s / arg1s;
	  break;

	case MOD:
	  if (arg1s == 0
	      || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
		  && arg1s == -1))
	    return 0;
	  val = arg0s % arg1s;
	  break;

	case UDIV:
	  if (arg1 == 0
	      || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
		  && arg1s == -1))
	    return 0;
	  val = (unsigned HOST_WIDE_INT) arg0 / arg1;
	  break;

	case UMOD:
	  if (arg1 == 0
	      || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
		  && arg1s == -1))
	    return 0;
	  val = (unsigned HOST_WIDE_INT) arg0 % arg1;
	  break;

	case AND:
	  val = arg0 & arg1;
	  break;

	case IOR:
	  val = arg0 | arg1;
	  break;

	case XOR:
	  val = arg0 ^ arg1;
	  break;

	case LSHIFTRT:
	case ASHIFT:
	case ASHIFTRT:
	  /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
	     the value is in range.  We can't return any old value for
	     out-of-range arguments because either the middle-end (via
	     shift_truncation_mask) or the back-end might be relying on
	     target-specific knowledge.  Nor can we rely on
	     shift_truncation_mask, since the shift might not be part of an
	     ashlM3, lshrM3 or ashrM3 instruction.  */
	  if (SHIFT_COUNT_TRUNCATED)
	    arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
	  else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
	    return 0;

	  val = (code == ASHIFT
		 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
		 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);

	  /* Sign-extend the result for arithmetic right shifts.  */
	  if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
	    val |= ((HOST_WIDE_INT) -1) << (width - arg1);
	  break;

	case ROTATERT:
	  if (arg1 < 0)
	    return 0;

	  arg1 %= width;
	  val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
		 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
	  break;

	case ROTATE:
	  if (arg1 < 0)
	    return 0;

	  arg1 %= width;
	  val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
		 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
	  break;

	case COMPARE:
	  /* Do nothing here.  */
	  return 0;

	case SMIN:
	  val = arg0s <= arg1s ? arg0s : arg1s;
	  break;

	case UMIN:
	  val = ((unsigned HOST_WIDE_INT) arg0
		 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
	  break;

	case SMAX:
	  val = arg0s > arg1s ? arg0s : arg1s;
	  break;

	case UMAX:
	  val = ((unsigned HOST_WIDE_INT) arg0
		 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
	  break;

	case SS_PLUS:
	case US_PLUS:
	case SS_MINUS:
	case US_MINUS:
	case SS_ASHIFT:
	  /* ??? There are simplifications that can be done.  */
	  return 0;

	default:
	  gcc_unreachable ();
	}

      /* gen_int_mode masks/sign-extends VAL back to MODE's width.  */
      return gen_int_mode (val, mode);
    }

  return NULL_RTX;
}
3176
3177
3178
3179/* Simplify a PLUS or MINUS, at least one of whose operands may be another
3180 PLUS or MINUS.
3181
3182 Rather than test for specific case, we do this by a brute-force method
3183 and do all possible simplifications until no more changes occur. Then
3184 we rebuild the operation. */
3185
/* One term of a flattened PLUS/MINUS expression.  */
struct simplify_plus_minus_op_data
{
  rtx op;	/* The operand itself.  */
  short neg;	/* Nonzero if this operand is negated (subtracted).  */
};
3191
3192static int
3193simplify_plus_minus_op_data_cmp (const void *p1, const void *p2)
3194{
3195 const struct simplify_plus_minus_op_data *d1 = p1;
3196 const struct simplify_plus_minus_op_data *d2 = p2;
3197 int result;
3198
3199 result = (commutative_operand_precedence (d2->op)
3200 - commutative_operand_precedence (d1->op));
3201 if (result)
3202 return result;
3203
3204 /* Group together equal REGs to do more simplification. */
3205 if (REG_P (d1->op) && REG_P (d2->op))
3206 return REGNO (d1->op) - REGNO (d2->op);
3207 else
3208 return 0;
3209}
3210
/* Simplify CODE (a PLUS or MINUS) of OP0 and OP1 in MODE.  The
   expression tree is flattened into an array of up to 8 (operand, neg)
   terms; nested PLUS/MINUS/NEG/NOT/CONST subexpressions are expanded,
   pairs of terms are combined where possible, and a canonical sum is
   rebuilt.  Returns the simplified rtx, or NULL_RTX when no
   simplification was found.  */
static rtx
simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
		     rtx op1)
{
  struct simplify_plus_minus_op_data ops[8];
  rtx result, tem;
  int n_ops = 2, input_ops = 2;
  /* NOTE(review): input_ops is only ever incremented, never read.  */
  int changed, n_constants = 0, canonicalized = 0;
  int i, j;

  memset (ops, 0, sizeof ops);

  /* Set up the two operands and then expand them until nothing has been
     changed.  If we run out of room in our array, give up; this should
     almost never happen.  */

  ops[0].op = op0;
  ops[0].neg = 0;
  ops[1].op = op1;
  ops[1].neg = (code == MINUS);

  do
    {
      changed = 0;

      for (i = 0; i < n_ops; i++)
	{
	  rtx this_op = ops[i].op;
	  int this_neg = ops[i].neg;
	  enum rtx_code this_code = GET_CODE (this_op);

	  switch (this_code)
	    {
	    case PLUS:
	    case MINUS:
	      /* Split A +/- B into two entries; B's negation flag is
		 flipped for MINUS.  */
	      if (n_ops == 7)
		return NULL_RTX;

	      ops[n_ops].op = XEXP (this_op, 1);
	      ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
	      n_ops++;

	      ops[i].op = XEXP (this_op, 0);
	      input_ops++;
	      changed = 1;
	      canonicalized |= this_neg;
	      break;

	    case NEG:
	      /* -A: absorb the negation into the flag.  */
	      ops[i].op = XEXP (this_op, 0);
	      ops[i].neg = ! this_neg;
	      changed = 1;
	      canonicalized = 1;
	      break;

	    case CONST:
	      /* (const (plus A B)): split into its two constant halves.  */
	      if (n_ops < 7
		  && GET_CODE (XEXP (this_op, 0)) == PLUS
		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
		{
		  ops[i].op = XEXP (XEXP (this_op, 0), 0);
		  ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
		  ops[n_ops].neg = this_neg;
		  n_ops++;
		  changed = 1;
		  canonicalized = 1;
		}
	      break;

	    case NOT:
	      /* ~a -> (-a - 1) */
	      if (n_ops != 7)
		{
		  ops[n_ops].op = constm1_rtx;
		  ops[n_ops++].neg = this_neg;
		  ops[i].op = XEXP (this_op, 0);
		  ops[i].neg = !this_neg;
		  changed = 1;
		  canonicalized = 1;
		}
	      break;

	    case CONST_INT:
	      /* Fold a pending negation directly into the constant.  */
	      n_constants++;
	      if (this_neg)
		{
		  ops[i].op = neg_const_int (mode, this_op);
		  ops[i].neg = 0;
		  changed = 1;
		  canonicalized = 1;
		}
	      break;

	    default:
	      break;
	    }
	}
    }
  while (changed);

  /* Two or more constants can be merged, so make sure the combination
     loop below runs.  */
  if (n_constants > 1)
    canonicalized = 1;

  gcc_assert (n_ops >= 2);

  /* If we only have two operands, we can avoid the loops.  */
  if (n_ops == 2)
    {
      enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
      rtx lhs, rhs;

      /* Get the two operands.  Be careful with the order, especially for
	 the cases where code == MINUS.  */
      if (ops[0].neg && ops[1].neg)
	{
	  lhs = gen_rtx_NEG (mode, ops[0].op);
	  rhs = ops[1].op;
	}
      else if (ops[0].neg)
	{
	  lhs = ops[1].op;
	  rhs = ops[0].op;
	}
      else
	{
	  lhs = ops[0].op;
	  rhs = ops[1].op;
	}

      return simplify_const_binary_operation (code, mode, lhs, rhs);
    }

  /* Now simplify each pair of operands until nothing changes.  */
  do
    {
      /* Insertion sort is good enough for an eight-element array.  */
      for (i = 1; i < n_ops; i++)
        {
          struct simplify_plus_minus_op_data save;
          j = i - 1;
          if (simplify_plus_minus_op_data_cmp (&ops[j], &ops[i]) < 0)
	    continue;

          canonicalized = 1;
          save = ops[i];
          do
	    ops[j + 1] = ops[j];
          while (j-- && simplify_plus_minus_op_data_cmp (&ops[j], &save) > 0);
          ops[j + 1] = save;
        }

      /* This is only useful the first time through.  */
      if (!canonicalized)
        return NULL_RTX;

      /* Try to combine each pair of surviving operands.  */
      changed = 0;
      for (i = n_ops - 1; i > 0; i--)
	for (j = i - 1; j >= 0; j--)
	  {
	    rtx lhs = ops[j].op, rhs = ops[i].op;
	    int lneg = ops[j].neg, rneg = ops[i].neg;

	    if (lhs != 0 && rhs != 0)
	      {
		enum rtx_code ncode = PLUS;

		if (lneg != rneg)
		  {
		    ncode = MINUS;
		    if (lneg)
		      tem = lhs, lhs = rhs, rhs = tem;
		  }
		else if (swap_commutative_operands_p (lhs, rhs))
		  tem = lhs, lhs = rhs, rhs = tem;

		if ((GET_CODE (lhs) == CONST || GET_CODE (lhs) == CONST_INT)
		    && (GET_CODE (rhs) == CONST || GET_CODE (rhs) == CONST_INT))
		  {
		    /* Strip any CONST wrappers before folding, then
		       re-wrap a non-constant result.  */
		    rtx tem_lhs, tem_rhs;

		    tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
		    tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
		    tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);

		    if (tem && !CONSTANT_P (tem))
		      tem = gen_rtx_CONST (GET_MODE (tem), tem);
		  }
		else
		  tem = simplify_binary_operation (ncode, mode, lhs, rhs);

		/* Reject "simplifications" that just wrap the two
		   arguments in a CONST.  Failure to do so can result
		   in infinite recursion with simplify_binary_operation
		   when it calls us to simplify CONST operations.  */
		if (tem
		    && ! (GET_CODE (tem) == CONST
			  && GET_CODE (XEXP (tem, 0)) == ncode
			  && XEXP (XEXP (tem, 0), 0) == lhs
			  && XEXP (XEXP (tem, 0), 1) == rhs))
		  {
		    lneg &= rneg;
		    if (GET_CODE (tem) == NEG)
		      tem = XEXP (tem, 0), lneg = !lneg;
		    if (GET_CODE (tem) == CONST_INT && lneg)
		      tem = neg_const_int (mode, tem), lneg = 0;

		    ops[i].op = tem;
		    ops[i].neg = lneg;
		    ops[j].op = NULL_RTX;
		    changed = 1;
		  }
	      }
	  }

      /* Pack all the operands to the lower-numbered entries.  */
      for (i = 0, j = 0; j < n_ops; j++)
        if (ops[j].op)
	  {
	    ops[i] = ops[j];
	    i++;
	  }
      n_ops = i;
    }
  while (changed);

  /* Create (minus -C X) instead of (neg (const (plus X C))).  */
  if (n_ops == 2
      && GET_CODE (ops[1].op) == CONST_INT
      && CONSTANT_P (ops[0].op)
      && ops[0].neg)
    return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);

  /* We suppressed creation of trivial CONST expressions in the
     combination loop to avoid recursion.  Create one manually now.
     The combination loop should have ensured that there is exactly
     one CONST_INT, and the sort will have ensured that it is last
     in the array and that any other constant will be next-to-last.  */

  if (n_ops > 1
      && GET_CODE (ops[n_ops - 1].op) == CONST_INT
      && CONSTANT_P (ops[n_ops - 2].op))
    {
      rtx value = ops[n_ops - 1].op;
      if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
	value = neg_const_int (mode, value);
      ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
      n_ops--;
    }

  /* Put a non-negated operand first, if possible.  */

  for (i = 0; i < n_ops && ops[i].neg; i++)
    continue;
  if (i == n_ops)
    ops[0].op = gen_rtx_NEG (mode, ops[0].op);
  else if (i != 0)
    {
      tem = ops[0].op;
      ops[0] = ops[i];
      ops[i].op = tem;
      ops[i].neg = 1;
    }

  /* Now make the result by performing the requested operations.  */
  result = ops[0].op;
  for (i = 1; i < n_ops; i++)
    result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
			     mode, result, ops[i].op);

  return result;
}
3483
3484/* Check whether an operand is suitable for calling simplify_plus_minus. */
3485static bool
3486plus_minus_operand_p (rtx x)
3487{
3488 return GET_CODE (x) == PLUS
3489 || GET_CODE (x) == MINUS
3490 || (GET_CODE (x) == CONST
3491 && GET_CODE (XEXP (x, 0)) == PLUS
3492 && CONSTANT_P (XEXP (XEXP (x, 0), 0))
3493 && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
3494}
3495
/* Like simplify_binary_operation except used for relational operators.
   MODE is the mode of the result.  If MODE is VOIDmode, both operands must
   not also be VOIDmode.

   CMP_MODE specifies in which mode the comparison is done in, so it is
   the mode of the operands.  If CMP_MODE is VOIDmode, it is taken from
   the operands or, if both are VOIDmode, the operands are compared in
   "infinite precision".  Returns the simplified rtx or NULL_RTX.  */
rtx
simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
			       enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem, trueop0, trueop1;

  /* Infer the comparison mode from the operands when it wasn't given.  */
  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op0);
  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op1);

  /* First try to fold the comparison to a constant outright.  */
  tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
  if (tem)
    {
      if (SCALAR_FLOAT_MODE_P (mode))
	{
	  /* A false result folds to floating-point zero; a true one
	     folds to the target's FLOAT_STORE_FLAG_VALUE when defined,
	     otherwise we must give up.  */
	  if (tem == const0_rtx)
	    return CONST0_RTX (mode);
#ifdef FLOAT_STORE_FLAG_VALUE
	  {
	    REAL_VALUE_TYPE val;
	    val = FLOAT_STORE_FLAG_VALUE (mode);
	    return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
	  }
#else
	  return NULL_RTX;
#endif
	}
      if (VECTOR_MODE_P (mode))
	{
	  /* A false result is the zero vector; a true one is a vector
	     with VECTOR_STORE_FLAG_VALUE replicated in every element,
	     when the target defines that value.  */
	  if (tem == const0_rtx)
	    return CONST0_RTX (mode);
#ifdef VECTOR_STORE_FLAG_VALUE
	  {
	    int i, units;
	    rtvec v;

	    rtx val = VECTOR_STORE_FLAG_VALUE (mode);
	    if (val == NULL_RTX)
	      return NULL_RTX;
	    if (val == const1_rtx)
	      return CONST1_RTX (mode);

	    units = GET_MODE_NUNITS (mode);
	    v = rtvec_alloc (units);
	    for (i = 0; i < units; i++)
	      RTVEC_ELT (v, i) = val;
	    return gen_rtx_raw_CONST_VECTOR (mode, v);
	  }
#else
	  return NULL_RTX;
#endif
	}

      return tem;
    }

  /* For the following tests, ensure const0_rtx is op1.  */
  if (swap_commutative_operands_p (op0, op1)
      || (op0 == const0_rtx && op1 != const0_rtx))
    tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    return simplify_relational_operation (code, mode, VOIDmode,
					  XEXP (op0, 0), XEXP (op0, 1));

  /* CC-mode and cc0 comparisons carry opaque condition-code state and
     cannot be analyzed further here.  */
  if (GET_MODE_CLASS (cmp_mode) == MODE_CC
      || CC0_P (op0))
    return NULL_RTX;

  /* Look through constant-pool references before trying the
     structural simplifications.  */
  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);
  return simplify_relational_operation_1 (code, mode, cmp_mode,
		  			  trueop0, trueop1);
}
3580
/* This part of simplify_relational_operation is only used when CMP_MODE
   is not in class MODE_CC (i.e. it is a real comparison).

   MODE is the mode of the result, while CMP_MODE specifies in which
   mode the comparison is done in, so it is the mode of the operands.
   Returns the simplified rtx, or NULL_RTX when no rule applies.  */

static rtx
simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
				 enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  enum rtx_code op0code = GET_CODE (op0);

  if (GET_CODE (op1) == CONST_INT)
    {
      if (INTVAL (op1) == 0 && COMPARISON_P (op0))
	{
	  /* If op0 is a comparison, extract the comparison arguments
	     from it.  */
	  if (code == NE)
	    {
	      /* (ne (cmp x y) 0) is just (cmp x y).  */
	      if (GET_MODE (op0) == mode)
		return simplify_rtx (op0);
	      else
		return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
					        XEXP (op0, 0), XEXP (op0, 1));
	    }
	  else if (code == EQ)
	    {
	      /* (eq (cmp x y) 0) is the reversed comparison, when the
		 reversal is known to be safe (reversed_comparison_code
		 returns UNKNOWN e.g. for NaN-sensitive codes).  */
	      enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
	      if (new_code != UNKNOWN)
	        return simplify_gen_relational (new_code, mode, VOIDmode,
					        XEXP (op0, 0), XEXP (op0, 1));
	    }
	}
    }

  /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
  if ((code == EQ || code == NE)
      && (op0code == PLUS || op0code == MINUS)
      && CONSTANT_P (op1)
      && CONSTANT_P (XEXP (op0, 1))
      && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
    {
      rtx x = XEXP (op0, 0);
      rtx c = XEXP (op0, 1);

      /* Fold the two constants into a single comparison operand.  */
      c = simplify_gen_binary (op0code == PLUS ? MINUS : PLUS,
			       cmp_mode, op1, c);
      return simplify_gen_relational (code, mode, cmp_mode, x, c);
    }

  /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
     the same as (zero_extract:SI FOO (const_int 1) BAR).  */
  if (code == NE
      && op1 == const0_rtx
      && GET_MODE_CLASS (mode) == MODE_INT
      && cmp_mode != VOIDmode
      /* ??? Work-around BImode bugs in the ia64 backend.  */
      && mode != BImode
      && cmp_mode != BImode
      && nonzero_bits (op0, cmp_mode) == 1
      && STORE_FLAG_VALUE == 1)
    /* OP0's single possible nonzero bit already equals the flag value,
       so just convert it to the result mode.  */
    return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
	   ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
	   : lowpart_subreg (mode, op0, cmp_mode);

  /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y).  */
  if ((code == EQ || code == NE)
      && op1 == const0_rtx
      && op0code == XOR)
    return simplify_gen_relational (code, mode, cmp_mode,
				    XEXP (op0, 0), XEXP (op0, 1));

  /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && rtx_equal_p (XEXP (op0, 0), op1)
      && !side_effects_p (XEXP (op0, 0)))
    return simplify_gen_relational (code, mode, cmp_mode,
				    XEXP (op0, 1), const0_rtx);

  /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && rtx_equal_p (XEXP (op0, 1), op1)
      && !side_effects_p (XEXP (op0, 1)))
    return simplify_gen_relational (code, mode, cmp_mode,
				    XEXP (op0, 0), const0_rtx);

  /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && (GET_CODE (op1) == CONST_INT
	  || GET_CODE (op1) == CONST_DOUBLE)
      && (GET_CODE (XEXP (op0, 1)) == CONST_INT
	  || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE))
    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
				    simplify_gen_binary (XOR, cmp_mode,
							 XEXP (op0, 1), op1));

  return NULL_RTX;
}
3683
/* Check if the given comparison (done in the given MODE) is actually a
   tautology or a contradiction.
   If no simplification is possible, this function returns zero.
   Otherwise, it returns either const_true_rtx or const0_rtx.  */

rtx
simplify_const_relational_operation (enum rtx_code code,
				     enum machine_mode mode,
				     rtx op0, rtx op1)
{
  int equal, op0lt, op0ltu, op1lt, op1ltu;
  rtx tem;
  rtx trueop0;
  rtx trueop1;

  gcc_assert (mode != VOIDmode
	      || (GET_MODE (op0) == VOIDmode
		  && GET_MODE (op1) == VOIDmode));

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    {
      op1 = XEXP (op0, 1);
      op0 = XEXP (op0, 0);

      /* Recompute MODE from the COMPARE operands; if neither has a
	 mode there is nothing we can do.  */
      if (GET_MODE (op0) != VOIDmode)
	mode = GET_MODE (op0);
      else if (GET_MODE (op1) != VOIDmode)
	mode = GET_MODE (op1);
      else
	return 0;
    }

  /* We can't simplify MODE_CC values since we don't know what the
     actual comparison is.  */
  if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
    return 0;

  /* Make sure the constant is second.  */
  if (swap_commutative_operands_p (op0, op1))
    {
      tem = op0, op0 = op1, op1 = tem;
      code = swap_condition (code);
    }

  /* Look through constant-pool references.  */
  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  /* For integer comparisons of A and B maybe we can simplify A - B and can
     then simplify a comparison of that with zero.  If A and B are both either
     a register or a CONST_INT, this can't help; testing for these cases will
     prevent infinite recursion here and speed things up.

     We can only do this for EQ and NE comparisons as otherwise we may
     lose or introduce overflow which we cannot disregard as undefined as
     we do not know the signedness of the operation on either the left or
     the right hand side of the comparison.  */

  if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
      && (code == EQ || code == NE)
      && ! ((REG_P (op0) || GET_CODE (trueop0) == CONST_INT)
	    && (REG_P (op1) || GET_CODE (trueop1) == CONST_INT))
      && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
      /* We cannot do this if tem is a nonzero address.  */
      && ! nonzero_address_p (tem))
    return simplify_const_relational_operation (signed_condition (code),
						mode, tem, const0_rtx);

  /* Without NaNs, ORDERED is always true and UNORDERED always false.  */
  if (! HONOR_NANS (mode) && code == ORDERED)
    return const_true_rtx;

  if (! HONOR_NANS (mode) && code == UNORDERED)
    return const0_rtx;

  /* For modes without NaNs, if the two operands are equal, we know the
     result except if they have side-effects.  */
  if (! HONOR_NANS (GET_MODE (trueop0))
      && rtx_equal_p (trueop0, trueop1)
      && ! side_effects_p (trueop0))
    equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;

  /* If the operands are floating-point constants, see if we can fold
     the result.  */
  else if (GET_CODE (trueop0) == CONST_DOUBLE
	   && GET_CODE (trueop1) == CONST_DOUBLE
	   && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
    {
      REAL_VALUE_TYPE d0, d1;

      REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
      REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);

      /* Comparisons are unordered iff at least one of the values is NaN.  */
      if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
	switch (code)
	  {
	  case UNEQ:
	  case UNLT:
	  case UNGT:
	  case UNLE:
	  case UNGE:
	  case NE:
	  case UNORDERED:
	    return const_true_rtx;
	  case EQ:
	  case LT:
	  case GT:
	  case LE:
	  case GE:
	  case LTGT:
	  case ORDERED:
	    return const0_rtx;
	  default:
	    return 0;
	  }

      equal = REAL_VALUES_EQUAL (d0, d1);
      op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
      op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
    }

  /* Otherwise, see if the operands are both integers.  */
  else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
	   && (GET_CODE (trueop0) == CONST_DOUBLE
	       || GET_CODE (trueop0) == CONST_INT)
	   && (GET_CODE (trueop1) == CONST_DOUBLE
	       || GET_CODE (trueop1) == CONST_INT))
    {
      int width = GET_MODE_BITSIZE (mode);
      HOST_WIDE_INT l0s, h0s, l1s, h1s;
      unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;

      /* Get the two words comprising each integer constant.  */
      if (GET_CODE (trueop0) == CONST_DOUBLE)
	{
	  l0u = l0s = CONST_DOUBLE_LOW (trueop0);
	  h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
	}
      else
	{
	  l0u = l0s = INTVAL (trueop0);
	  h0u = h0s = HWI_SIGN_EXTEND (l0s);
	}

      if (GET_CODE (trueop1) == CONST_DOUBLE)
	{
	  l1u = l1s = CONST_DOUBLE_LOW (trueop1);
	  h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
	}
      else
	{
	  l1u = l1s = INTVAL (trueop1);
	  h1u = h1s = HWI_SIGN_EXTEND (l1s);
	}

      /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
	 we have to sign or zero-extend the values.  */
      if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
	{
	  l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
	  l1u &= ((HOST_WIDE_INT) 1 << width) - 1;

	  if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
	    l0s |= ((HOST_WIDE_INT) (-1) << width);

	  if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
	    l1s |= ((HOST_WIDE_INT) (-1) << width);
	}
      if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
	h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);

      /* Double-word comparison: the high words decide, and the low
	 words (always compared unsigned) break ties.  */
      equal = (h0u == h1u && l0u == l1u);
      op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
      op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
      op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
      op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
    }

  /* Otherwise, there are some code-specific tests we can make.  */
  else
    {
      /* Optimize comparisons with upper and lower bounds.  */
      if (SCALAR_INT_MODE_P (mode)
	  && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
	{
	  rtx mmin, mmax;
	  int sign;

	  /* Use the signed or unsigned mode bounds as appropriate
	     for the comparison code.  */
	  if (code == GEU
	      || code == LEU
	      || code == GTU
	      || code == LTU)
	    sign = 0;
	  else
	    sign = 1;

	  get_mode_bounds (mode, sign, mode, &mmin, &mmax);

	  tem = NULL_RTX;
	  switch (code)
	    {
	    case GEU:
	    case GE:
	      /* x >= min is always true.  */
	      if (rtx_equal_p (trueop1, mmin))
		tem = const_true_rtx;
	      else
		break;
	      /* FALLTHRU -- when TEM was set we fall into the LE case,
		 which can only re-set TEM to the same value.  */

	    case LEU:
	    case LE:
	      /* x <= max is always true.  */
	      if (rtx_equal_p (trueop1, mmax))
		tem = const_true_rtx;
	      break;

	    case GTU:
	    case GT:
	      /* x > max is always false.  */
	      if (rtx_equal_p (trueop1, mmax))
		tem = const0_rtx;
	      break;

	    case LTU:
	    case LT:
	      /* x < min is always false.  */
	      if (rtx_equal_p (trueop1, mmin))
		tem = const0_rtx;
	      break;

	    default:
	      break;
	    }
	  if (tem == const0_rtx
	      || tem == const_true_rtx)
	    return tem;
	}

      switch (code)
	{
	case EQ:
	  /* An address known to be nonzero can never equal zero.  */
	  if (trueop1 == const0_rtx && nonzero_address_p (op0))
	    return const0_rtx;
	  break;

	case NE:
	  if (trueop1 == const0_rtx && nonzero_address_p (op0))
	    return const_true_rtx;
	  break;

	case LT:
	  /* Optimize abs(x) < 0.0.  */
	  if (trueop1 == CONST0_RTX (mode)
	      && !HONOR_SNANS (mode)
	      && (!INTEGRAL_MODE_P (mode)
		  || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
	    {
	      tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
						       : trueop0;
	      if (GET_CODE (tem) == ABS)
		{
		  /* For integer modes this relies on signed overflow
		     being undefined (abs (INT_MIN) would otherwise be
		     negative), hence the -Wstrict-overflow warning.  */
		  if (INTEGRAL_MODE_P (mode)
		      && (issue_strict_overflow_warning
			  (WARN_STRICT_OVERFLOW_CONDITIONAL)))
		    warning (OPT_Wstrict_overflow,
			     ("assuming signed overflow does not occur when "
			      "assuming abs (x) < 0 is false"));
		  return const0_rtx;
		}
	    }
	  break;

	case GE:
	  /* Optimize abs(x) >= 0.0.  */
	  if (trueop1 == CONST0_RTX (mode)
	      && !HONOR_NANS (mode)
	      && (!INTEGRAL_MODE_P (mode)
		  || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
	    {
	      tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
						       : trueop0;
	      if (GET_CODE (tem) == ABS)
		{
		  if (INTEGRAL_MODE_P (mode)
		      && (issue_strict_overflow_warning
			  (WARN_STRICT_OVERFLOW_CONDITIONAL)))
		    warning (OPT_Wstrict_overflow,
			     ("assuming signed overflow does not occur when "
			      "assuming abs (x) >= 0 is true"));
		  return const_true_rtx;
		}
	    }
	  break;

	case UNGE:
	  /* Optimize ! (abs(x) < 0.0).  */
	  if (trueop1 == CONST0_RTX (mode))
	    {
	      tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
						       : trueop0;
	      if (GET_CODE (tem) == ABS)
		return const_true_rtx;
	    }
	  break;

	default:
	  break;
	}

      return 0;
    }

  /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
     as appropriate.  */
  switch (code)
    {
    case EQ:
    case UNEQ:
      return equal ? const_true_rtx : const0_rtx;
    case NE:
    case LTGT:
      return ! equal ? const_true_rtx : const0_rtx;
    case LT:
    case UNLT:
      return op0lt ? const_true_rtx : const0_rtx;
    case GT:
    case UNGT:
      return op1lt ? const_true_rtx : const0_rtx;
    case LTU:
      return op0ltu ? const_true_rtx : const0_rtx;
    case GTU:
      return op1ltu ? const_true_rtx : const0_rtx;
    case LE:
    case UNLE:
      return equal || op0lt ? const_true_rtx : const0_rtx;
    case GE:
    case UNGE:
      return equal || op1lt ? const_true_rtx : const0_rtx;
    case LEU:
      return equal || op0ltu ? const_true_rtx : const0_rtx;
    case GEU:
      return equal || op1ltu ? const_true_rtx : const0_rtx;
    case ORDERED:
      return const_true_rtx;
    case UNORDERED:
      return const0_rtx;
    default:
      gcc_unreachable ();
    }
}
4034
/* Simplify CODE, an operation with result mode MODE and three operands,
   OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
   a constant.  Return 0 if no simplifications is possible.  */

rtx
simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
			    enum machine_mode op0_mode, rtx op0, rtx op1,
			    rtx op2)
{
  unsigned int width = GET_MODE_BITSIZE (mode);

  /* VOIDmode means "infinite" precision.  */
  if (width == 0)
    width = HOST_BITS_PER_WIDE_INT;

  switch (code)
    {
    case SIGN_EXTRACT:
    case ZERO_EXTRACT:
      if (GET_CODE (op0) == CONST_INT
	  && GET_CODE (op1) == CONST_INT
	  && GET_CODE (op2) == CONST_INT
	  && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
	  && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
	{
	  /* Extracting a bit-field from a constant */
	  HOST_WIDE_INT val = INTVAL (op0);

	  /* Shift the requested field (width OP1 at position OP2)
	     down to bit 0; the interpretation of OP2 depends on
	     BITS_BIG_ENDIAN.  */
	  if (BITS_BIG_ENDIAN)
	    val >>= (GET_MODE_BITSIZE (op0_mode)
		     - INTVAL (op2) - INTVAL (op1));
	  else
	    val >>= INTVAL (op2);

	  if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
	    {
	      /* First zero-extend.  */
	      val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
	      /* If desired, propagate sign bit.  */
	      if (code == SIGN_EXTRACT
		  && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
		val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
	    }

	  /* Clear the bits that don't belong in our mode,
	     unless they and our sign bit are all one.
	     So we get either a reasonable negative value or a reasonable
	     unsigned value for this mode.  */
	  if (width < HOST_BITS_PER_WIDE_INT
	      && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
		  != ((HOST_WIDE_INT) (-1) << (width - 1))))
	    val &= ((HOST_WIDE_INT) 1 << width) - 1;

	  return gen_int_mode (val, mode);
	}
      break;

    case IF_THEN_ELSE:
      /* A constant condition selects one arm outright.  */
      if (GET_CODE (op0) == CONST_INT)
	return op0 != const0_rtx ? op1 : op2;

      /* Convert c ? a : a into "a".  */
      if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
	return op1;

      /* Convert a != b ? a : b into "a".  */
      if (GET_CODE (op0) == NE
	  && ! side_effects_p (op0)
	  && ! HONOR_NANS (mode)
	  && ! HONOR_SIGNED_ZEROS (mode)
	  && ((rtx_equal_p (XEXP (op0, 0), op1)
	       && rtx_equal_p (XEXP (op0, 1), op2))
	      || (rtx_equal_p (XEXP (op0, 0), op2)
		  && rtx_equal_p (XEXP (op0, 1), op1))))
	return op1;

      /* Convert a == b ? a : b into "b".  */
      if (GET_CODE (op0) == EQ
	  && ! side_effects_p (op0)
	  && ! HONOR_NANS (mode)
	  && ! HONOR_SIGNED_ZEROS (mode)
	  && ((rtx_equal_p (XEXP (op0, 0), op1)
	       && rtx_equal_p (XEXP (op0, 1), op2))
	      || (rtx_equal_p (XEXP (op0, 0), op2)
		  && rtx_equal_p (XEXP (op0, 1), op1))))
	return op2;

      if (COMPARISON_P (op0) && ! side_effects_p (op0))
	{
	  enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
					? GET_MODE (XEXP (op0, 1))
					: GET_MODE (XEXP (op0, 0)));
	  rtx temp;

	  /* Look for happy constants in op1 and op2.  */
	  if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
	    {
	      HOST_WIDE_INT t = INTVAL (op1);
	      HOST_WIDE_INT f = INTVAL (op2);

	      /* (cond ? STORE_FLAG_VALUE : 0) is just the comparison
		 itself; the swapped pair uses the reversed comparison
		 when it can be reversed safely.  */
	      if (t == STORE_FLAG_VALUE && f == 0)
		code = GET_CODE (op0);
	      else if (t == 0 && f == STORE_FLAG_VALUE)
		{
		  enum rtx_code tmp;
		  tmp = reversed_comparison_code (op0, NULL_RTX);
		  if (tmp == UNKNOWN)
		    break;
		  code = tmp;
		}
	      else
		break;

	      return simplify_gen_relational (code, mode, cmp_mode,
					      XEXP (op0, 0), XEXP (op0, 1));
	    }

	  if (cmp_mode == VOIDmode)
	    cmp_mode = op0_mode;
	  temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
						cmp_mode, XEXP (op0, 0),
						XEXP (op0, 1));

	  /* See if any simplifications were possible.  */
	  if (temp)
	    {
	      if (GET_CODE (temp) == CONST_INT)
		return temp == const0_rtx ? op2 : op1;
	      else if (temp)
		return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
	    }
	}
      break;

    case VEC_MERGE:
      gcc_assert (GET_MODE (op0) == mode);
      gcc_assert (GET_MODE (op1) == mode);
      gcc_assert (VECTOR_MODE_P (mode));
      op2 = avoid_constant_pool_reference (op2);
      if (GET_CODE (op2) == CONST_INT)
	{
	  int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
	  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	  int mask = (1 << n_elts) - 1;

	  /* An all-zero or all-one selector picks one operand whole.  */
	  if (!(INTVAL (op2) & mask))
	    return op1;
	  if ((INTVAL (op2) & mask) == mask)
	    return op0;

	  op0 = avoid_constant_pool_reference (op0);
	  op1 = avoid_constant_pool_reference (op1);
	  if (GET_CODE (op0) == CONST_VECTOR
	      && GET_CODE (op1) == CONST_VECTOR)
	    {
	      /* Merge two constant vectors element by element, bit I of
		 the selector choosing between the operands' I-th
		 elements.  */
	      rtvec v = rtvec_alloc (n_elts);
	      unsigned int i;

	      for (i = 0; i < n_elts; i++)
		RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
				    ? CONST_VECTOR_ELT (op0, i)
				    : CONST_VECTOR_ELT (op1, i));
	      return gen_rtx_CONST_VECTOR (mode, v);
	    }
	}
      break;

    default:
      gcc_unreachable ();
    }

  return 0;
}
4208
/* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_VECTOR,
   returning another CONST_INT or CONST_DOUBLE or CONST_VECTOR.

   Works by unpacking OP into a collection of 8-bit values
   represented as a little-endian array of 'unsigned char', selecting by BYTE,
   and then repacking them again for OUTERMODE.  Returns NULL_RTX when
   the value cannot be represented.  */

static rtx
simplify_immed_subreg (enum machine_mode outermode, rtx op,
		       enum machine_mode innermode, unsigned int byte)
{
  /* We support up to 512-bit values (for V8DFmode).  */
  enum {
    max_bitsize = 512,
    value_bit = 8,
    value_mask = (1 << value_bit) - 1
  };
  unsigned char value[max_bitsize / value_bit];
  int value_start;
  int i;
  int elem;

  int num_elem;
  rtx * elems;
  int elem_bitsize;
  rtx result_s;
  rtvec result_v = NULL;
  enum mode_class outer_class;
  enum machine_mode outer_submode;

  /* Some ports misuse CCmode.  */
  if (GET_MODE_CLASS (outermode) == MODE_CC && GET_CODE (op) == CONST_INT)
    return op;

  /* We have no way to represent a complex constant at the rtl level.  */
  if (COMPLEX_MODE_P (outermode))
    return NULL_RTX;

  /* Unpack the value.  */

  /* A CONST_VECTOR is a sequence of elements; anything else is treated
     as a single element filling the whole buffer.  */
  if (GET_CODE (op) == CONST_VECTOR)
    {
      num_elem = CONST_VECTOR_NUNITS (op);
      elems = &CONST_VECTOR_ELT (op, 0);
      elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
    }
  else
    {
      num_elem = 1;
      elems = &op;
      elem_bitsize = max_bitsize;
    }
  /* If this asserts, it is too complicated; reducing value_bit may help.  */
  gcc_assert (BITS_PER_UNIT % value_bit == 0);
  /* I don't know how to handle endianness of sub-units.  */
  gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);

  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char * vp;
      rtx el = elems[elem];

      /* Vectors are kept in target memory order.  (This is probably
	 a mistake.)  */
      {
	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
			  / BITS_PER_UNIT);
	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
	unsigned bytele = (subword_byte % UNITS_PER_WORD
			 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
	vp = value + (bytele * BITS_PER_UNIT) / value_bit;
      }

      switch (GET_CODE (el))
	{
	case CONST_INT:
	  for (i = 0;
	       i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
	       i += value_bit)
	    *vp++ = INTVAL (el) >> i;
	  /* CONST_INTs are always logically sign-extended.  */
	  for (; i < elem_bitsize; i += value_bit)
	    *vp++ = INTVAL (el) < 0 ? -1 : 0;
	  break;

	case CONST_DOUBLE:
	  if (GET_MODE (el) == VOIDmode)
	    {
	      /* VOIDmode CONST_DOUBLE is a two-word integer constant.
		 If this triggers, someone should have generated a
		 CONST_INT instead.  */
	      gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);

	      for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
		*vp++ = CONST_DOUBLE_LOW (el) >> i;
	      while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
		{
		  *vp++
		    = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
		  i += value_bit;
		}
	      /* It shouldn't matter what's done here, so fill it with
		 zero.  */
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = 0;
	    }
	  else
	    {
	      /* A floating-point CONST_DOUBLE: serialize through
		 real_to_target.  */
	      long tmp[max_bitsize / 32];
	      int bitsize = GET_MODE_BITSIZE (GET_MODE (el));

	      gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
	      gcc_assert (bitsize <= elem_bitsize);
	      gcc_assert (bitsize % value_bit == 0);

	      real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
			      GET_MODE (el));

	      /* real_to_target produces its result in words affected by
		 FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
		 and use WORDS_BIG_ENDIAN instead; see the documentation
		 of SUBREG in rtl.texi.  */
	      for (i = 0; i < bitsize; i += value_bit)
		{
		  int ibase;
		  if (WORDS_BIG_ENDIAN)
		    ibase = bitsize - 1 - i;
		  else
		    ibase = i;
		  *vp++ = tmp[ibase / 32] >> i % 32;
		}

	      /* It shouldn't matter what's done here, so fill it with
		 zero.  */
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = 0;
	    }
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  /* Now, pick the right byte to start with.  */
  /* Renumber BYTE so that the least-significant byte is byte 0.  A special
     case is paradoxical SUBREGs, which shouldn't be adjusted since they
     will already have offset 0.  */
  if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
    {
      unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
			- byte);
      unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
      unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
      byte = (subword_byte % UNITS_PER_WORD
	      + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
    }

  /* BYTE should still be inside OP.  (Note that BYTE is unsigned,
     so if it's become negative it will instead be very large.)  */
  gcc_assert (byte < GET_MODE_SIZE (innermode));

  /* Convert from bytes to chunks of size value_bit.  */
  value_start = byte * (BITS_PER_UNIT / value_bit);

  /* Re-pack the value.  */

  /* A vector result is built element by element into RESULT_V; a
     scalar result goes through RESULT_S.  */
  if (VECTOR_MODE_P (outermode))
    {
      num_elem = GET_MODE_NUNITS (outermode);
      result_v = rtvec_alloc (num_elem);
      elems = &RTVEC_ELT (result_v, 0);
      outer_submode = GET_MODE_INNER (outermode);
    }
  else
    {
      num_elem = 1;
      elems = &result_s;
      outer_submode = outermode;
    }

  outer_class = GET_MODE_CLASS (outer_submode);
  elem_bitsize = GET_MODE_BITSIZE (outer_submode);

  gcc_assert (elem_bitsize % value_bit == 0);
  gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);

  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char *vp;

      /* Vectors are stored in target memory order.  (This is probably
	 a mistake.)  */
      {
	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
			  / BITS_PER_UNIT);
	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
	unsigned bytele = (subword_byte % UNITS_PER_WORD
			 + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
	vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
      }

      switch (outer_class)
	{
	case MODE_INT:
	case MODE_PARTIAL_INT:
	  {
	    unsigned HOST_WIDE_INT hi = 0, lo = 0;

	    /* Reassemble the (up to) two host words from the 8-bit
	       chunks.  */
	    for (i = 0;
		 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
		 i += value_bit)
	      lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
	    for (; i < elem_bitsize; i += value_bit)
	      hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
		     << (i - HOST_BITS_PER_WIDE_INT));

	    /* immed_double_const doesn't call trunc_int_for_mode.  I don't
	       know why.  */
	    if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
	      elems[elem] = gen_int_mode (lo, outer_submode);
	    else if (elem_bitsize <= 2 * HOST_BITS_PER_WIDE_INT)
	      elems[elem] = immed_double_const (lo, hi, outer_submode);
	    else
	      return NULL_RTX;
	  }
	  break;

	case MODE_FLOAT:
	case MODE_DECIMAL_FLOAT:
	  {
	    REAL_VALUE_TYPE r;
	    long tmp[max_bitsize / 32];

	    /* real_from_target wants its input in words affected by
	       FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
	       and use WORDS_BIG_ENDIAN instead; see the documentation
	       of SUBREG in rtl.texi.  */
	    for (i = 0; i < max_bitsize / 32; i++)
	      tmp[i] = 0;
	    for (i = 0; i < elem_bitsize; i += value_bit)
	      {
		int ibase;
		if (WORDS_BIG_ENDIAN)
		  ibase = elem_bitsize - 1 - i;
		else
		  ibase = i;
		tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
	      }

	    real_from_target (&r, tmp, outer_submode);
	    elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
	  }
	  break;

	default:
	  gcc_unreachable ();
	}
    }
  if (VECTOR_MODE_P (outermode))
    return gen_rtx_CONST_VECTOR (outermode, result_v);
  else
    return result_s;
}
4476
4477/* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
4478 Return 0 if no simplifications are possible. */
4479rtx
4480simplify_subreg (enum machine_mode outermode, rtx op,
4481 enum machine_mode innermode, unsigned int byte)
4482{
4483 /* Little bit of sanity checking. */
4484 gcc_assert (innermode != VOIDmode);
4485 gcc_assert (outermode != VOIDmode);
4486 gcc_assert (innermode != BLKmode);
4487 gcc_assert (outermode != BLKmode);
4488
4489 gcc_assert (GET_MODE (op) == innermode
4490 || GET_MODE (op) == VOIDmode);
4491
4492 gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
4493 gcc_assert (byte < GET_MODE_SIZE (innermode));
4494
4495 if (outermode == innermode && !byte)
4496 return op;
4497
4498 if (GET_CODE (op) == CONST_INT
4499 || GET_CODE (op) == CONST_DOUBLE
4500 || GET_CODE (op) == CONST_VECTOR)
4501 return simplify_immed_subreg (outermode, op, innermode, byte);
4502
4503 /* Changing mode twice with SUBREG => just change it once,
4504 or not at all if changing back op starting mode. */
4505 if (GET_CODE (op) == SUBREG)
4506 {
4507 enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
4508 int final_offset = byte + SUBREG_BYTE (op);
4509 rtx newx;
4510
4511 if (outermode == innermostmode
4512 && byte == 0 && SUBREG_BYTE (op) == 0)
4513 return SUBREG_REG (op);
4514
4515 /* The SUBREG_BYTE represents offset, as if the value were stored
4516 in memory. Irritating exception is paradoxical subreg, where
4517 we define SUBREG_BYTE to be 0. On big endian machines, this
4518 value should be negative. For a moment, undo this exception. */
4519 if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
4520 {
4521 int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
4522 if (WORDS_BIG_ENDIAN)
4523 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4524 if (BYTES_BIG_ENDIAN)
4525 final_offset += difference % UNITS_PER_WORD;
4526 }
4527 if (SUBREG_BYTE (op) == 0
4528 && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
4529 {
4530 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
4531 if (WORDS_BIG_ENDIAN)
4532 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4533 if (BYTES_BIG_ENDIAN)
4534 final_offset += difference % UNITS_PER_WORD;
4535 }
4536
4537 /* See whether resulting subreg will be paradoxical. */
4538 if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
4539 {
4540 /* In nonparadoxical subregs we can't handle negative offsets. */
4541 if (final_offset < 0)
4542 return NULL_RTX;
4543 /* Bail out in case resulting subreg would be incorrect. */
4544 if (final_offset % GET_MODE_SIZE (outermode)
4545 || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
4546 return NULL_RTX;
4547 }
4548 else
4549 {
4550 int offset = 0;
4551 int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));
4552
4553 /* In paradoxical subreg, see if we are still looking on lower part.
4554 If so, our SUBREG_BYTE will be 0. */
4555 if (WORDS_BIG_ENDIAN)
4556 offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4557 if (BYTES_BIG_ENDIAN)
4558 offset += difference % UNITS_PER_WORD;
4559 if (offset == final_offset)
4560 final_offset = 0;
4561 else
4562 return NULL_RTX;
4563 }
4564
4565 /* Recurse for further possible simplifications. */
4566 newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
4567 final_offset);
4568 if (newx)
4569 return newx;
4570 if (validate_subreg (outermode, innermostmode,
4571 SUBREG_REG (op), final_offset))
4572 return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
4573 return NULL_RTX;
4574 }
4575
4576 /* Merge implicit and explicit truncations. */
4577
4578 if (GET_CODE (op) == TRUNCATE
4579 && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (innermode)
4580 && subreg_lowpart_offset (outermode, innermode) == byte)
4581 return simplify_gen_unary (TRUNCATE, outermode, XEXP (op, 0),
4582 GET_MODE (XEXP (op, 0)));
4583
4584 /* SUBREG of a hard register => just change the register number
4585 and/or mode. If the hard register is not valid in that mode,
4586 suppress this simplification. If the hard register is the stack,
4587 frame, or argument pointer, leave this as a SUBREG. */
4588
4589 if (REG_P (op)
4590 && REGNO (op) < FIRST_PSEUDO_REGISTER
4591#ifdef CANNOT_CHANGE_MODE_CLASS
4592 && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
4593 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
4594 && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
4595#endif
4596 && ((reload_completed && !frame_pointer_needed)
4597 || (REGNO (op) != FRAME_POINTER_REGNUM
4598#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
4599 && REGNO (op) != HARD_FRAME_POINTER_REGNUM
4600#endif
4601 ))
4602#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
4603 && REGNO (op) != ARG_POINTER_REGNUM
4604#endif
4605 && REGNO (op) != STACK_POINTER_REGNUM
4606 && subreg_offset_representable_p (REGNO (op), innermode,
4607 byte, outermode))
4608 {
4609 unsigned int regno = REGNO (op);
4610 unsigned int final_regno
4611 = regno + subreg_regno_offset (regno, innermode, byte, outermode);
4612
4613 /* ??? We do allow it if the current REG is not valid for
4614 its mode. This is a kludge to work around how float/complex
4615 arguments are passed on 32-bit SPARC and should be fixed. */
4616 if (HARD_REGNO_MODE_OK (final_regno, outermode)
4617 || ! HARD_REGNO_MODE_OK (regno, innermode))
4618 {
4619 rtx x;
4620 int final_offset = byte;
4621
4622 /* Adjust offset for paradoxical subregs. */
4623 if (byte == 0
4624 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
4625 {
4626 int difference = (GET_MODE_SIZE (innermode)
4627 - GET_MODE_SIZE (outermode));
4628 if (WORDS_BIG_ENDIAN)
4629 final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
4630 if (BYTES_BIG_ENDIAN)
4631 final_offset += difference % UNITS_PER_WORD;
4632 }
4633
4634 x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);
4635
4636 /* Propagate original regno. We don't have any way to specify
4637 the offset inside original regno, so do so only for lowpart.
4638 The information is used only by alias analysis that can not
4639 grog partial register anyway. */
4640
4641 if (subreg_lowpart_offset (outermode, innermode) == byte)
4642 ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
4643 return x;
4644 }
4645 }
4646
4647 /* If we have a SUBREG of a register that we are replacing and we are
4648 replacing it with a MEM, make a new MEM and try replacing the
4649 SUBREG with it. Don't do this if the MEM has a mode-dependent address
4650 or if we would be widening it. */
4651
4652 if (MEM_P (op)
4653 && ! mode_dependent_address_p (XEXP (op, 0))
4654 /* Allow splitting of volatile memory references in case we don't
4655 have instruction to move the whole thing. */
4656 && (! MEM_VOLATILE_P (op)
4657 || ! have_insn_for (SET, innermode))
4658 && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
4659 return adjust_address_nv (op, outermode, byte);
4660
4661 /* Handle complex values represented as CONCAT
4662 of real and imaginary part. */
4663 if (GET_CODE (op) == CONCAT)
4664 {
4665 unsigned int inner_size, final_offset;
4666 rtx part, res;
4667
4668 inner_size = GET_MODE_UNIT_SIZE (innermode);
4669 part = byte < inner_size ? XEXP (op, 0) : XEXP (op, 1);
4670 final_offset = byte % inner_size;
4671 if (final_offset + GET_MODE_SIZE (outermode) > inner_size)
4672 return NULL_RTX;
4673
4674 res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
4675 if (res)
4676 return res;
4677 if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
4678 return gen_rtx_SUBREG (outermode, part, final_offset);
4679 return NULL_RTX;
4680 }
4681
4682 /* Optimize SUBREG truncations of zero and sign extended values. */
4683 if ((GET_CODE (op) == ZERO_EXTEND
4684 || GET_CODE (op) == SIGN_EXTEND)
4685 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
4686 {
4687 unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
4688
4689 /* If we're requesting the lowpart of a zero or sign extension,
4690 there are three possibilities. If the outermode is the same
4691 as the origmode, we can omit both the extension and the subreg.
4692 If the outermode is not larger than the origmode, we can apply
4693 the truncation without the extension. Finally, if the outermode
4694 is larger than the origmode, but both are integer modes, we
4695 can just extend to the appropriate mode. */
4696 if (bitpos == 0)
4697 {
4698 enum machine_mode origmode = GET_MODE (XEXP (op, 0));
4699 if (outermode == origmode)
4700 return XEXP (op, 0);
4701 if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
4702 return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
4703 subreg_lowpart_offset (outermode,
4704 origmode));
4705 if (SCALAR_INT_MODE_P (outermode))
4706 return simplify_gen_unary (GET_CODE (op), outermode,
4707 XEXP (op, 0), origmode);
4708 }
4709
4710 /* A SUBREG resulting from a zero extension may fold to zero if
4711 it extracts higher bits that the ZERO_EXTEND's source bits. */
4712 if (GET_CODE (op) == ZERO_EXTEND
4713 && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
4714 return CONST0_RTX (outermode);
4715 }
4716
4717 /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0) into
4718 to (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
4719 the outer subreg is effectively a truncation to the original mode. */
4720 if ((GET_CODE (op) == LSHIFTRT
4721 || GET_CODE (op) == ASHIFTRT)
4722 && SCALAR_INT_MODE_P (outermode)
4723 /* Ensure that OUTERMODE is at least twice as wide as the INNERMODE
4724 to avoid the possibility that an outer LSHIFTRT shifts by more
4725 than the sign extension's sign_bit_copies and introduces zeros
4726 into the high bits of the result. */
4727 && (2 * GET_MODE_BITSIZE (outermode)) <= GET_MODE_BITSIZE (innermode)
4728 && GET_CODE (XEXP (op, 1)) == CONST_INT
4729 && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
4730 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
4731 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
4732 && subreg_lsb_1 (outermode, innermode, byte) == 0)
4733 return simplify_gen_binary (ASHIFTRT, outermode,
4734 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
4735
4736 /* Likewise (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0) into
4737 to (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
4738 the outer subreg is effectively a truncation to the original mode. */
4739 if ((GET_CODE (op) == LSHIFTRT
4740 || GET_CODE (op) == ASHIFTRT)
4741 && SCALAR_INT_MODE_P (outermode)
4742 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
4743 && GET_CODE (XEXP (op, 1)) == CONST_INT
4744 && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
4745 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
4746 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
4747 && subreg_lsb_1 (outermode, innermode, byte) == 0)
4748 return simplify_gen_binary (LSHIFTRT, outermode,
4749 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
4750
4751 /* Likewise (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0) into
4752 to (ashift:QI (x:QI) C), where C is a suitable small constant and
4753 the outer subreg is effectively a truncation to the original mode. */
4754 if (GET_CODE (op) == ASHIFT
4755 && SCALAR_INT_MODE_P (outermode)
4756 && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
4757 && GET_CODE (XEXP (op, 1)) == CONST_INT
4758 && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
4759 || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
4760 && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
4761 && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
4762 && subreg_lsb_1 (outermode, innermode, byte) == 0)
4763 return simplify_gen_binary (ASHIFT, outermode,
4764 XEXP (XEXP (op, 0), 0), XEXP (op, 1));
4765
4766 return NULL_RTX;
4767}
4768
4769/* Make a SUBREG operation or equivalent if it folds. */
4770
4771rtx
4772simplify_gen_subreg (enum machine_mode outermode, rtx op,
4773 enum machine_mode innermode, unsigned int byte)
4774{
4775 rtx newx;
4776
4777 newx = simplify_subreg (outermode, op, innermode, byte);
4778 if (newx)
4779 return newx;
4780
4781 if (GET_CODE (op) == SUBREG
4782 || GET_CODE (op) == CONCAT
4783 || GET_MODE (op) == VOIDmode)
4784 return NULL_RTX;
4785
4786 if (validate_subreg (outermode, innermode, op, byte))
4787 return gen_rtx_SUBREG (outermode, op, byte);
4788
4789 return NULL_RTX;
4790}
4791
4792/* Simplify X, an rtx expression.
4793
4794 Return the simplified expression or NULL if no simplifications
4795 were possible.
4796
4797 This is the preferred entry point into the simplification routines;
4798 however, we still allow passes to call the more specific routines.
4799
4800 Right now GCC has three (yes, three) major bodies of RTL simplification
4801 code that need to be unified.
4802
4803 1. fold_rtx in cse.c. This code uses various CSE specific
4804 information to aid in RTL simplification.
4805
4806 2. simplify_rtx in combine.c. Similar to fold_rtx, except that
4807 it uses combine specific information to aid in RTL
4808 simplification.
4809
4810 3. The routines in this file.
4811
4812
4813 Long term we want to only have one body of simplification code; to
4814 get to that state I recommend the following steps:
4815
4816 1. Pore over fold_rtx & simplify_rtx and move any simplifications
4817 which are not pass dependent state into these routines.
4818
4819 2. As code is moved by #1, change fold_rtx & simplify_rtx to
4820 use this routine whenever possible.
4821
4822 3. Allow for pass dependent state to be provided to these
4823 routines and add simplifications based on the pass dependent
4824 state. Remove code from cse.c & combine.c that becomes
4825 redundant/dead.
4826
4827 It will take time, but ultimately the compiler will be easier to
4828 maintain and improve. It's totally silly that when we add a
4829 simplification it needs to be added to 4 places (3 for RTL
4830 simplification and 1 for tree simplification). */
4831
4832rtx
4833simplify_rtx (rtx x)
4834{
4835 enum rtx_code code = GET_CODE (x);
4836 enum machine_mode mode = GET_MODE (x);
4837
4838 switch (GET_RTX_CLASS (code))
4839 {
4840 case RTX_UNARY:
4841 return simplify_unary_operation (code, mode,
4842 XEXP (x, 0), GET_MODE (XEXP (x, 0)));
4843 case RTX_COMM_ARITH:
4844 if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
4845 return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
4846
4847 /* Fall through.... */
4848
4849 case RTX_BIN_ARITH:
4850 return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
4851
4852 case RTX_TERNARY:
4853 case RTX_BITFIELD_OPS:
4854 return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
4855 XEXP (x, 0), XEXP (x, 1),
4856 XEXP (x, 2));
4857
4858 case RTX_COMPARE:
4859 case RTX_COMM_COMPARE:
4860 return simplify_relational_operation (code, mode,
4861 ((GET_MODE (XEXP (x, 0))
4862 != VOIDmode)
4863 ? GET_MODE (XEXP (x, 0))
4864 : GET_MODE (XEXP (x, 1))),
4865 XEXP (x, 0),
4866 XEXP (x, 1));
4867
4868 case RTX_EXTRA:
4869 if (code == SUBREG)
4870 return simplify_gen_subreg (mode, SUBREG_REG (x),
4871 GET_MODE (SUBREG_REG (x)),
4872 SUBREG_BYTE (x));
4873 break;
4874
4875 case RTX_OBJ:
4876 if (code == LO_SUM)
4877 {
4878 /* Convert (lo_sum (high FOO) FOO) to FOO. */
4879 if (GET_CODE (XEXP (x, 0)) == HIGH
4880 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
4881 return XEXP (x, 1);
4882 }
4883 break;
4884
4885 default:
4886 break;
4887 }
4888 return NULL;
4889}
4890