optabs.c revision 302408
1/* Expand the basic unary and binary arithmetic operations, for GNU compiler. 2 Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007 4 Free Software Foundation, Inc. 5 6This file is part of GCC. 7 8GCC is free software; you can redistribute it and/or modify it under 9the terms of the GNU General Public License as published by the Free 10Software Foundation; either version 2, or (at your option) any later 11version. 12 13GCC is distributed in the hope that it will be useful, but WITHOUT ANY 14WARRANTY; without even the implied warranty of MERCHANTABILITY or 15FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 16for more details. 17 18You should have received a copy of the GNU General Public License 19along with GCC; see the file COPYING. If not, write to the Free 20Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 2102110-1301, USA. */ 22 23 24#include "config.h" 25#include "system.h" 26#include "coretypes.h" 27#include "tm.h" 28#include "toplev.h" 29 30/* Include insn-config.h before expr.h so that HAVE_conditional_move 31 is properly defined. */ 32#include "insn-config.h" 33#include "rtl.h" 34#include "tree.h" 35#include "tm_p.h" 36#include "flags.h" 37#include "function.h" 38#include "except.h" 39#include "expr.h" 40#include "optabs.h" 41#include "libfuncs.h" 42#include "recog.h" 43#include "reload.h" 44#include "ggc.h" 45#include "real.h" 46#include "basic-block.h" 47#include "target.h" 48 49/* Each optab contains info on how this target machine 50 can perform a particular operation 51 for all sizes and kinds of operands. 52 53 The operation to be performed is often specified 54 by passing one of these optabs as an argument. 55 56 See expr.h for documentation of these optabs. */ 57 58optab optab_table[OTI_MAX]; 59 60rtx libfunc_table[LTI_MAX]; 61 62/* Tables of patterns for converting one mode to another. 
*/ 63convert_optab convert_optab_table[COI_MAX]; 64 65/* Contains the optab used for each rtx code. */ 66optab code_to_optab[NUM_RTX_CODE + 1]; 67 68/* Indexed by the rtx-code for a conditional (eg. EQ, LT,...) 69 gives the gen_function to make a branch to test that condition. */ 70 71rtxfun bcc_gen_fctn[NUM_RTX_CODE]; 72 73/* Indexed by the rtx-code for a conditional (eg. EQ, LT,...) 74 gives the insn code to make a store-condition insn 75 to test that condition. */ 76 77enum insn_code setcc_gen_code[NUM_RTX_CODE]; 78 79#ifdef HAVE_conditional_move 80/* Indexed by the machine mode, gives the insn code to make a conditional 81 move insn. This is not indexed by the rtx-code like bcc_gen_fctn and 82 setcc_gen_code to cut down on the number of named patterns. Consider a day 83 when a lot more rtx codes are conditional (eg: for the ARM). */ 84 85enum insn_code movcc_gen_code[NUM_MACHINE_MODES]; 86#endif 87 88/* Indexed by the machine mode, gives the insn code for vector conditional 89 operation. */ 90 91enum insn_code vcond_gen_code[NUM_MACHINE_MODES]; 92enum insn_code vcondu_gen_code[NUM_MACHINE_MODES]; 93 94/* The insn generating function can not take an rtx_code argument. 95 TRAP_RTX is used as an rtx argument. Its code is replaced with 96 the code to be used in the trap insn and all other fields are ignored. 
*/
static GTY(()) rtx trap_rtx;

/* Forward declarations of the static helper routines defined later in
   this file.  */
static int add_equal_note (rtx, rtx, enum rtx_code, rtx, rtx);
static rtx widen_operand (rtx, enum machine_mode, enum machine_mode, int,
			  int);
static void prepare_cmp_insn (rtx *, rtx *, enum rtx_code *, rtx,
			      enum machine_mode *, int *,
			      enum can_compare_purpose);
static enum insn_code can_fix_p (enum machine_mode, enum machine_mode, int,
				 int *);
static enum insn_code can_float_p (enum machine_mode, enum machine_mode, int);
static optab new_optab (void);
static convert_optab new_convert_optab (void);
static inline optab init_optab (enum rtx_code);
static inline optab init_optabv (enum rtx_code);
static inline convert_optab init_convert_optab (enum rtx_code);
static void init_libfuncs (optab, int, int, const char *, int);
static void init_integral_libfuncs (optab, const char *, int);
static void init_floating_libfuncs (optab, const char *, int);
static void init_interclass_conv_libfuncs (convert_optab, const char *,
					   enum mode_class, enum mode_class);
static void init_intraclass_conv_libfuncs (convert_optab, const char *,
					   enum mode_class, bool);
static void emit_cmp_and_jump_insn_1 (rtx, rtx, enum machine_mode,
				      enum rtx_code, int, rtx);
static void prepare_float_lib_cmp (rtx *, rtx *, enum rtx_code *,
				   enum machine_mode *, int *);
static rtx widen_clz (enum machine_mode, rtx, rtx);
static rtx expand_parity (enum machine_mode, rtx, rtx);
static enum rtx_code get_rtx_code (enum tree_code, bool);
static rtx vector_compare_rtx (tree, bool, enum insn_code);

/* Targets without a conditional trap pattern get stub definitions so
   the uses below still compile; the generator must never actually be
   reached on such targets.  */
#ifndef HAVE_conditional_trap
#define HAVE_conditional_trap 0
#define gen_conditional_trap(a,b) (gcc_unreachable (), NULL_RTX)
#endif

/* Add a REG_EQUAL note to the last insn in INSNS.  TARGET is being set to
   the result of operation CODE applied to OP0 (and OP1 if it is a binary
   operation).

   If the last insn does not set TARGET, don't do anything, but return 1.

   If a previous insn sets TARGET and TARGET is one of OP0 or OP1,
   don't add the REG_EQUAL note but return 0.  Our caller can then try
   again, ensuring that TARGET is not one of the operands.  */

static int
add_equal_note (rtx insns, rtx target, enum rtx_code code, rtx op0, rtx op1)
{
  rtx last_insn, insn, set;
  rtx note;

  gcc_assert (insns && INSN_P (insns) && NEXT_INSN (insns));

  /* Only arithmetic, comparison and unary codes can be summarized in a
     REG_EQUAL note; anything else is silently skipped (return 1 means
     "nothing more for the caller to do").  */
  if (GET_RTX_CLASS (code) != RTX_COMM_ARITH
      && GET_RTX_CLASS (code) != RTX_BIN_ARITH
      && GET_RTX_CLASS (code) != RTX_COMM_COMPARE
      && GET_RTX_CLASS (code) != RTX_COMPARE
      && GET_RTX_CLASS (code) != RTX_UNARY)
    return 1;

  /* A ZERO_EXTRACT destination cannot carry a useful REG_EQUAL note.  */
  if (GET_CODE (target) == ZERO_EXTRACT)
    return 1;

  /* Find the last insn of the sequence.  */
  for (last_insn = insns;
       NEXT_INSN (last_insn) != NULL_RTX;
       last_insn = NEXT_INSN (last_insn))
    ;

  set = single_set (last_insn);
  if (set == NULL_RTX)
    return 1;

  if (! rtx_equal_p (SET_DEST (set), target)
      /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside it.  */
      && (GET_CODE (SET_DEST (set)) != STRICT_LOW_PART
	  || ! rtx_equal_p (XEXP (SET_DEST (set), 0), target)))
    return 1;

  /* If TARGET is in OP0 or OP1, check if anything in SEQ sets TARGET
     besides the last insn.  */
  if (reg_overlap_mentioned_p (target, op0)
      || (op1 && reg_overlap_mentioned_p (target, op1)))
    {
      insn = PREV_INSN (last_insn);
      while (insn != NULL_RTX)
	{
	  if (reg_set_p (target, insn))
	    return 0;

	  insn = PREV_INSN (insn);
	}
    }

  if (GET_RTX_CLASS (code) == RTX_UNARY)
    note = gen_rtx_fmt_e (code, GET_MODE (target), copy_rtx (op0));
  else
    note = gen_rtx_fmt_ee (code, GET_MODE (target), copy_rtx (op0), copy_rtx (op1));

  set_unique_reg_note (last_insn, REG_EQUAL, note);

  return 1;
}

/* Widen OP to MODE and return the rtx for the widened operand.  UNSIGNEDP
   says whether OP is signed or unsigned.  NO_EXTEND is nonzero if we need
   not actually do a sign-extend or zero-extend, but can leave the
   higher-order bits of the result rtx undefined, for example, in the case
   of logical operations, but not right shifts.  */

static rtx
widen_operand (rtx op, enum machine_mode mode, enum machine_mode oldmode,
	       int unsignedp, int no_extend)
{
  rtx result;

  /* If we don't have to extend and this is a constant, return it.  */
  if (no_extend && GET_MODE (op) == VOIDmode)
    return op;

  /* If we must extend do so.  If OP is a SUBREG for a promoted object, also
     extend since it will be more efficient to do so unless the signedness of
     a promoted object differs from our extension.  */
  if (! no_extend
      || (GET_CODE (op) == SUBREG && SUBREG_PROMOTED_VAR_P (op)
	  && SUBREG_PROMOTED_UNSIGNED_P (op) == unsignedp))
    return convert_modes (mode, oldmode, op, unsignedp);

  /* If MODE is no wider than a single word, we return a paradoxical
     SUBREG.  */
  if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
    return gen_rtx_SUBREG (mode, force_reg (GET_MODE (op), op), 0);

  /* Otherwise, get an object of MODE, clobber it, and set the low-order
     part to OP.  */

  result = gen_reg_rtx (mode);
  emit_insn (gen_rtx_CLOBBER (VOIDmode, result));
  emit_move_insn (gen_lowpart (GET_MODE (op), result), op);
  return result;
}

/* Return the optab used for computing the operation given by
   the tree code, CODE.  This function is not always usable (for
   example, it cannot give complete results for multiplication
   or division) but probably ought to be relied on more widely
   throughout the expander.
*/
optab
optab_for_tree_code (enum tree_code code, tree type)
{
  bool trapv;

  /* Codes whose optab does not depend on -ftrapv come first.  */
  switch (code)
    {
    case BIT_AND_EXPR:
      return and_optab;

    case BIT_IOR_EXPR:
      return ior_optab;

    case BIT_NOT_EXPR:
      return one_cmpl_optab;

    case BIT_XOR_EXPR:
      return xor_optab;

    case TRUNC_MOD_EXPR:
    case CEIL_MOD_EXPR:
    case FLOOR_MOD_EXPR:
    case ROUND_MOD_EXPR:
      return TYPE_UNSIGNED (type) ? umod_optab : smod_optab;

    case RDIV_EXPR:
    case TRUNC_DIV_EXPR:
    case CEIL_DIV_EXPR:
    case FLOOR_DIV_EXPR:
    case ROUND_DIV_EXPR:
    case EXACT_DIV_EXPR:
      return TYPE_UNSIGNED (type) ? udiv_optab : sdiv_optab;

    case LSHIFT_EXPR:
      return ashl_optab;

    case RSHIFT_EXPR:
      return TYPE_UNSIGNED (type) ? lshr_optab : ashr_optab;

    case LROTATE_EXPR:
      return rotl_optab;

    case RROTATE_EXPR:
      return rotr_optab;

    case MAX_EXPR:
      return TYPE_UNSIGNED (type) ? umax_optab : smax_optab;

    case MIN_EXPR:
      return TYPE_UNSIGNED (type) ? umin_optab : smin_optab;

    case REALIGN_LOAD_EXPR:
      return vec_realign_load_optab;

    case WIDEN_SUM_EXPR:
      return TYPE_UNSIGNED (type) ? usum_widen_optab : ssum_widen_optab;

    case DOT_PROD_EXPR:
      return TYPE_UNSIGNED (type) ? udot_prod_optab : sdot_prod_optab;

    case REDUC_MAX_EXPR:
      return TYPE_UNSIGNED (type) ? reduc_umax_optab : reduc_smax_optab;

    case REDUC_MIN_EXPR:
      return TYPE_UNSIGNED (type) ? reduc_umin_optab : reduc_smin_optab;

    case REDUC_PLUS_EXPR:
      return TYPE_UNSIGNED (type) ? reduc_uplus_optab : reduc_splus_optab;

    case VEC_LSHIFT_EXPR:
      return vec_shl_optab;

    case VEC_RSHIFT_EXPR:
      return vec_shr_optab;

    default:
      break;
    }

  /* For the remaining codes the choice depends on whether overflow
     must trap for this TYPE.  */
  trapv = INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_TRAPS (type);
  switch (code)
    {
    case PLUS_EXPR:
      return trapv ? addv_optab : add_optab;

    case MINUS_EXPR:
      return trapv ? subv_optab : sub_optab;

    case MULT_EXPR:
      return trapv ? smulv_optab : smul_optab;

    case NEGATE_EXPR:
      return trapv ? negv_optab : neg_optab;

    case ABS_EXPR:
      return trapv ? absv_optab : abs_optab;

    default:
      /* No optab corresponds to CODE.  */
      return NULL;
    }
}


/* Expand vector widening operations.

   There are two different classes of operations handled here:
   1) Operations whose result is wider than all the arguments to the operation.
      Examples: VEC_UNPACK_HI/LO_EXPR, VEC_WIDEN_MULT_HI/LO_EXPR
      In this case OP0 and optionally OP1 would be initialized,
      but WIDE_OP wouldn't (not relevant for this case).
   2) Operations whose result is of the same size as the last argument to the
      operation, but wider than all the other arguments to the operation.
      Examples: WIDEN_SUM_EXPR, VEC_DOT_PROD_EXPR.
      In the case WIDE_OP, OP0 and optionally OP1 would be initialized.

   E.g, when called to expand the following operations, this is how
   the arguments will be initialized:
                                nops    OP0     OP1     WIDE_OP
   widening-sum                 2       oprnd0  -       oprnd1
   widening-dot-product         3       oprnd0  oprnd1  oprnd2
   widening-mult                2       oprnd0  oprnd1  -
   type-promotion (vec-unpack)  1       oprnd0  -       -  */

rtx
expand_widen_pattern_expr (tree exp, rtx op0, rtx op1, rtx wide_op, rtx target,
			   int unsignedp)
{
  tree oprnd0, oprnd1, oprnd2;
  enum machine_mode wmode = 0, tmode0, tmode1 = 0;
  optab widen_pattern_optab;
  int icode;
  enum machine_mode xmode0, xmode1 = 0, wxmode = 0;
  rtx temp;
  rtx pat;
  rtx xop0, xop1, wxop;
  int nops = TREE_CODE_LENGTH (TREE_CODE (exp));

  oprnd0 = TREE_OPERAND (exp, 0);
  tmode0 = TYPE_MODE (TREE_TYPE (oprnd0));
  widen_pattern_optab =
    optab_for_tree_code (TREE_CODE (exp), TREE_TYPE (oprnd0));
  icode = (int) widen_pattern_optab->handlers[(int) tmode0].insn_code;
  gcc_assert (icode != CODE_FOR_nothing);
  xmode0 = insn_data[icode].operand[1].mode;

  if (nops >= 2)
    {
      oprnd1 = TREE_OPERAND (exp, 1);
      tmode1 = TYPE_MODE (TREE_TYPE (oprnd1));
      xmode1 = insn_data[icode].operand[2].mode;
    }

  /* The last operand is of a wider mode than the rest of the operands.  */
  if (nops == 2)
    {
      wmode = tmode1;
      wxmode = xmode1;
    }
  else if (nops == 3)
    {
      gcc_assert (tmode1 == tmode0);
      gcc_assert (op1);
      oprnd2 = TREE_OPERAND (exp, 2);
      wmode = TYPE_MODE (TREE_TYPE (oprnd2));
      wxmode = insn_data[icode].operand[3].mode;
    }

  /* Without a WIDE_OP the result mode of the pattern is the wide mode.  */
  if (!wide_op)
    wmode = wxmode = insn_data[icode].operand[0].mode;

  if (!target
      || ! (*insn_data[icode].operand[0].predicate) (target, wmode))
    temp = gen_reg_rtx (wmode);
  else
    temp = target;

  xop0 = op0;
  xop1 = op1;
  wxop = wide_op;

  /* In case the insn wants input operands in modes different from
     those of the actual operands, convert the operands.  It would
     seem that we don't need to convert CONST_INTs, but we do, so
     that they're properly zero-extended, sign-extended or truncated
     for their mode.  */

  if (GET_MODE (op0) != xmode0 && xmode0 != VOIDmode)
    xop0 = convert_modes (xmode0,
			  GET_MODE (op0) != VOIDmode
			  ? GET_MODE (op0)
			  : tmode0,
			  xop0, unsignedp);

  if (op1)
    if (GET_MODE (op1) != xmode1 && xmode1 != VOIDmode)
      xop1 = convert_modes (xmode1,
			    GET_MODE (op1) != VOIDmode
			    ? GET_MODE (op1)
			    : tmode1,
			    xop1, unsignedp);

  if (wide_op)
    if (GET_MODE (wide_op) != wxmode && wxmode != VOIDmode)
      wxop = convert_modes (wxmode,
			    GET_MODE (wide_op) != VOIDmode
			    ? GET_MODE (wide_op)
			    : wmode,
			    wxop, unsignedp);

  /* Now, if insn's predicates don't allow our operands, put them into
     pseudo regs.  */

  if (! (*insn_data[icode].operand[1].predicate) (xop0, xmode0)
      && xmode0 != VOIDmode)
    xop0 = copy_to_mode_reg (xmode0, xop0);

  if (op1)
    {
      if (! (*insn_data[icode].operand[2].predicate) (xop1, xmode1)
	  && xmode1 != VOIDmode)
	xop1 = copy_to_mode_reg (xmode1, xop1);

      if (wide_op)
	{
	  if (! (*insn_data[icode].operand[3].predicate) (wxop, wxmode)
	      && wxmode != VOIDmode)
	    wxop = copy_to_mode_reg (wxmode, wxop);

	  pat = GEN_FCN (icode) (temp, xop0, xop1, wxop);
	}
      else
	pat = GEN_FCN (icode) (temp, xop0, xop1);
    }
  else
    {
      if (wide_op)
	{
	  if (! (*insn_data[icode].operand[2].predicate) (wxop, wxmode)
	      && wxmode != VOIDmode)
	    wxop = copy_to_mode_reg (wxmode, wxop);

	  pat = GEN_FCN (icode) (temp, xop0, wxop);
	}
      else
	pat = GEN_FCN (icode) (temp, xop0);
    }

  emit_insn (pat);
  return temp;
}

/* Generate code to perform an operation specified by TERNARY_OPTAB
   on operands OP0, OP1 and OP2, with result having machine-mode MODE.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   If TARGET is nonzero, the value
   is generated there, if it is convenient to do so.
   In all cases an rtx is returned for the locus of the value;
   this may or may not be TARGET.
*/
rtx
expand_ternary_op (enum machine_mode mode, optab ternary_optab, rtx op0,
		   rtx op1, rtx op2, rtx target, int unsignedp)
{
  int icode = (int) ternary_optab->handlers[(int) mode].insn_code;
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;
  enum machine_mode mode1 = insn_data[icode].operand[2].mode;
  enum machine_mode mode2 = insn_data[icode].operand[3].mode;
  rtx temp;
  rtx pat;
  rtx xop0 = op0, xop1 = op1, xop2 = op2;

  /* The caller must guarantee the target supports the operation.  */
  gcc_assert (ternary_optab->handlers[(int) mode].insn_code
	      != CODE_FOR_nothing);

  if (!target || !insn_data[icode].operand[0].predicate (target, mode))
    temp = gen_reg_rtx (mode);
  else
    temp = target;

  /* In case the insn wants input operands in modes different from
     those of the actual operands, convert the operands.  It would
     seem that we don't need to convert CONST_INTs, but we do, so
     that they're properly zero-extended, sign-extended or truncated
     for their mode.  */

  if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
    xop0 = convert_modes (mode0,
			  GET_MODE (op0) != VOIDmode
			  ? GET_MODE (op0)
			  : mode,
			  xop0, unsignedp);

  if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
    xop1 = convert_modes (mode1,
			  GET_MODE (op1) != VOIDmode
			  ? GET_MODE (op1)
			  : mode,
			  xop1, unsignedp);

  if (GET_MODE (op2) != mode2 && mode2 != VOIDmode)
    xop2 = convert_modes (mode2,
			  GET_MODE (op2) != VOIDmode
			  ? GET_MODE (op2)
			  : mode,
			  xop2, unsignedp);

  /* Now, if insn's predicates don't allow our operands, put them into
     pseudo regs.  */

  if (!insn_data[icode].operand[1].predicate (xop0, mode0)
      && mode0 != VOIDmode)
    xop0 = copy_to_mode_reg (mode0, xop0);

  if (!insn_data[icode].operand[2].predicate (xop1, mode1)
      && mode1 != VOIDmode)
    xop1 = copy_to_mode_reg (mode1, xop1);

  if (!insn_data[icode].operand[3].predicate (xop2, mode2)
      && mode2 != VOIDmode)
    xop2 = copy_to_mode_reg (mode2, xop2);

  pat = GEN_FCN (icode) (temp, xop0, xop1, xop2);

  emit_insn (pat);
  return temp;
}


/* Like expand_binop, but return a constant rtx if the result can be
   calculated at compile time.  The arguments and return value are
   otherwise the same as for expand_binop.  */

static rtx
simplify_expand_binop (enum machine_mode mode, optab binoptab,
		       rtx op0, rtx op1, rtx target, int unsignedp,
		       enum optab_methods methods)
{
  if (CONSTANT_P (op0) && CONSTANT_P (op1))
    {
      rtx x = simplify_binary_operation (binoptab->code, mode, op0, op1);

      if (x)
	return x;
    }

  /* Folding failed (or the operands are not constant); expand insns.  */
  return expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods);
}

/* Like simplify_expand_binop, but always put the result in TARGET.
   Return true if the expansion succeeded.  */

bool
force_expand_binop (enum machine_mode mode, optab binoptab,
		    rtx op0, rtx op1, rtx target, int unsignedp,
		    enum optab_methods methods)
{
  rtx x = simplify_expand_binop (mode, binoptab, op0, op1,
				 target, unsignedp, methods);
  if (x == 0)
    return false;
  if (x != target)
    emit_move_insn (target, x);
  return true;
}

/* Generate insns for VEC_LSHIFT_EXPR, VEC_RSHIFT_EXPR.
*/

rtx
expand_vec_shift_expr (tree vec_shift_expr, rtx target)
{
  enum insn_code icode;
  rtx rtx_op1, rtx_op2;
  enum machine_mode mode1;
  enum machine_mode mode2;
  enum machine_mode mode = TYPE_MODE (TREE_TYPE (vec_shift_expr));
  tree vec_oprnd = TREE_OPERAND (vec_shift_expr, 0);
  tree shift_oprnd = TREE_OPERAND (vec_shift_expr, 1);
  optab shift_optab;
  rtx pat;

  /* Select the whole-vector shift optab matching the tree code.  */
  switch (TREE_CODE (vec_shift_expr))
    {
    case VEC_RSHIFT_EXPR:
      shift_optab = vec_shr_optab;
      break;
    case VEC_LSHIFT_EXPR:
      shift_optab = vec_shl_optab;
      break;
    default:
      gcc_unreachable ();
    }

  icode = (int) shift_optab->handlers[(int) mode].insn_code;
  gcc_assert (icode != CODE_FOR_nothing);

  mode1 = insn_data[icode].operand[1].mode;
  mode2 = insn_data[icode].operand[2].mode;

  /* Expand both operands and force them into the insn's operand modes
     if the predicates do not already accept them.  */
  rtx_op1 = expand_expr (vec_oprnd, NULL_RTX, VOIDmode, EXPAND_NORMAL);
  if (!(*insn_data[icode].operand[1].predicate) (rtx_op1, mode1)
      && mode1 != VOIDmode)
    rtx_op1 = force_reg (mode1, rtx_op1);

  rtx_op2 = expand_expr (shift_oprnd, NULL_RTX, VOIDmode, EXPAND_NORMAL);
  if (!(*insn_data[icode].operand[2].predicate) (rtx_op2, mode2)
      && mode2 != VOIDmode)
    rtx_op2 = force_reg (mode2, rtx_op2);

  if (!target
      || ! (*insn_data[icode].operand[0].predicate) (target, mode))
    target = gen_reg_rtx (mode);

  /* Emit instruction */
  pat = GEN_FCN (icode) (target, rtx_op1, rtx_op2);
  gcc_assert (pat);
  emit_insn (pat);

  return target;
}

/* This subroutine of expand_doubleword_shift handles the cases in which
   the effective shift value is >= BITS_PER_WORD.  The arguments and return
   value are the same as for the parent routine, except that SUPERWORD_OP1
   is the shift count to use when shifting OUTOF_INPUT into INTO_TARGET.
   INTO_TARGET may be null if the caller has decided to calculate it.
*/

static bool
expand_superword_shift (optab binoptab, rtx outof_input, rtx superword_op1,
			rtx outof_target, rtx into_target,
			int unsignedp, enum optab_methods methods)
{
  if (into_target != 0)
    if (!force_expand_binop (word_mode, binoptab, outof_input, superword_op1,
			     into_target, unsignedp, methods))
      return false;

  if (outof_target != 0)
    {
      /* For a signed right shift, we must fill OUTOF_TARGET with copies
	 of the sign bit, otherwise we must fill it with zeros.  */
      if (binoptab != ashr_optab)
	emit_move_insn (outof_target, CONST0_RTX (word_mode));
      else
	if (!force_expand_binop (word_mode, binoptab,
				 outof_input, GEN_INT (BITS_PER_WORD - 1),
				 outof_target, unsignedp, methods))
	  return false;
    }
  return true;
}

/* This subroutine of expand_doubleword_shift handles the cases in which
   the effective shift value is < BITS_PER_WORD.  The arguments and return
   value are the same as for the parent routine.  */

static bool
expand_subword_shift (enum machine_mode op1_mode, optab binoptab,
		      rtx outof_input, rtx into_input, rtx op1,
		      rtx outof_target, rtx into_target,
		      int unsignedp, enum optab_methods methods,
		      unsigned HOST_WIDE_INT shift_mask)
{
  optab reverse_unsigned_shift, unsigned_shift;
  rtx tmp, carries;

  reverse_unsigned_shift = (binoptab == ashl_optab ? lshr_optab : ashl_optab);
  unsigned_shift = (binoptab == ashl_optab ? ashl_optab : lshr_optab);

  /* The low OP1 bits of INTO_TARGET come from the high bits of OUTOF_INPUT.
     We therefore need to shift OUTOF_INPUT by (BITS_PER_WORD - OP1) bits in
     the opposite direction to BINOPTAB.  */
  if (CONSTANT_P (op1) || shift_mask >= BITS_PER_WORD)
    {
      carries = outof_input;
      tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
      tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
				   0, true, methods);
    }
  else
    {
      /* We must avoid shifting by BITS_PER_WORD bits since that is either
	 the same as a zero shift (if shift_mask == BITS_PER_WORD - 1) or
	 has unknown behavior.  Do a single shift first, then shift by the
	 remainder.  It's OK to use ~OP1 as the remainder if shift counts
	 are truncated to the mode size.  */
      carries = expand_binop (word_mode, reverse_unsigned_shift,
			      outof_input, const1_rtx, 0, unsignedp, methods);
      if (shift_mask == BITS_PER_WORD - 1)
	{
	  tmp = immed_double_const (-1, -1, op1_mode);
	  tmp = simplify_expand_binop (op1_mode, xor_optab, op1, tmp,
				       0, true, methods);
	}
      else
	{
	  tmp = immed_double_const (BITS_PER_WORD - 1, 0, op1_mode);
	  tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
				       0, true, methods);
	}
    }
  if (tmp == 0 || carries == 0)
    return false;
  carries = expand_binop (word_mode, reverse_unsigned_shift,
			  carries, tmp, 0, unsignedp, methods);
  if (carries == 0)
    return false;

  /* Shift INTO_INPUT logically by OP1.  This is the last use of INTO_INPUT
     so the result can go directly into INTO_TARGET if convenient.  */
  tmp = expand_binop (word_mode, unsigned_shift, into_input, op1,
		      into_target, unsignedp, methods);
  if (tmp == 0)
    return false;

  /* Now OR in the bits carried over from OUTOF_INPUT.  */
  if (!force_expand_binop (word_mode, ior_optab, tmp, carries,
			   into_target, unsignedp, methods))
    return false;

  /* Use a standard word_mode shift for the out-of half.  */
  if (outof_target != 0)
    if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
			     outof_target, unsignedp, methods))
      return false;

  return true;
}


#ifdef HAVE_conditional_move
/* Try implementing expand_doubleword_shift using conditional moves.
   The shift is by < BITS_PER_WORD if (CMP_CODE CMP1 CMP2) is true,
   otherwise it is by >= BITS_PER_WORD.  SUBWORD_OP1 and SUPERWORD_OP1
   are the shift counts to use in the former and latter case.  All other
   arguments are the same as the parent routine.  */

static bool
expand_doubleword_shift_condmove (enum machine_mode op1_mode, optab binoptab,
				  enum rtx_code cmp_code, rtx cmp1, rtx cmp2,
				  rtx outof_input, rtx into_input,
				  rtx subword_op1, rtx superword_op1,
				  rtx outof_target, rtx into_target,
				  int unsignedp, enum optab_methods methods,
				  unsigned HOST_WIDE_INT shift_mask)
{
  rtx outof_superword, into_superword;

  /* Put the superword version of the output into OUTOF_SUPERWORD and
     INTO_SUPERWORD.  */
  outof_superword = outof_target != 0 ? gen_reg_rtx (word_mode) : 0;
  if (outof_target != 0 && subword_op1 == superword_op1)
    {
      /* The value INTO_TARGET >> SUBWORD_OP1, which we later store in
	 OUTOF_TARGET, is the same as the value of INTO_SUPERWORD.  */
      into_superword = outof_target;
      if (!expand_superword_shift (binoptab, outof_input, superword_op1,
				   outof_superword, 0, unsignedp, methods))
	return false;
    }
  else
    {
      into_superword = gen_reg_rtx (word_mode);
      if (!expand_superword_shift (binoptab, outof_input, superword_op1,
				   outof_superword, into_superword,
				   unsignedp, methods))
	return false;
    }

  /* Put the subword version directly in OUTOF_TARGET and INTO_TARGET.  */
  if (!expand_subword_shift (op1_mode, binoptab,
			     outof_input, into_input, subword_op1,
			     outof_target, into_target,
			     unsignedp, methods, shift_mask))
    return false;

  /* Select between them.  Do the INTO half first because INTO_SUPERWORD
     might be the current value of OUTOF_TARGET.  */
  if (!emit_conditional_move (into_target, cmp_code, cmp1, cmp2, op1_mode,
			      into_target, into_superword, word_mode, false))
    return false;

  if (outof_target != 0)
    if (!emit_conditional_move (outof_target, cmp_code, cmp1, cmp2, op1_mode,
				outof_target, outof_superword,
				word_mode, false))
      return false;

  return true;
}
#endif

/* Expand a doubleword shift (ashl, ashr or lshr) using word-mode shifts.
   OUTOF_INPUT and INTO_INPUT are the two word-sized halves of the first
   input operand; the shift moves bits in the direction OUTOF_INPUT->
   INTO_TARGET.  OUTOF_TARGET and INTO_TARGET are the equivalent words
   of the target.  OP1 is the shift count and OP1_MODE is its mode.
   If OP1 is constant, it will have been truncated as appropriate
   and is known to be nonzero.

   If SHIFT_MASK is zero, the result of word shifts is undefined when the
   shift count is outside the range [0, BITS_PER_WORD).  This routine must
   avoid generating such shifts for OP1s in the range [0, BITS_PER_WORD * 2).

   If SHIFT_MASK is nonzero, all word-mode shift counts are effectively
   masked by it and shifts in the range [BITS_PER_WORD, SHIFT_MASK) will
   fill with zeros or sign bits as appropriate.

   If SHIFT_MASK is BITS_PER_WORD - 1, this routine will synthesize
   a doubleword shift whose equivalent mask is BITS_PER_WORD * 2 - 1.
   Doing this preserves semantics required by SHIFT_COUNT_TRUNCATED.
   In all other cases, shifts by values outside [0, BITS_PER_WORD * 2)
   are undefined.

   BINOPTAB, UNSIGNEDP and METHODS are as for expand_binop.
   This function
   may not use INTO_INPUT after modifying INTO_TARGET, and similarly for
   OUTOF_INPUT and OUTOF_TARGET.  OUTOF_TARGET can be null if the parent
   function wants to calculate it itself.

   Return true if the shift could be successfully synthesized.  */

static bool
expand_doubleword_shift (enum machine_mode op1_mode, optab binoptab,
			 rtx outof_input, rtx into_input, rtx op1,
			 rtx outof_target, rtx into_target,
			 int unsignedp, enum optab_methods methods,
			 unsigned HOST_WIDE_INT shift_mask)
{
  rtx superword_op1, tmp, cmp1, cmp2;
  rtx subword_label, done_label;
  enum rtx_code cmp_code;

  /* See if word-mode shifts by BITS_PER_WORD...BITS_PER_WORD * 2 - 1 will
     fill the result with sign or zero bits as appropriate.  If so, the value
     of OUTOF_TARGET will always be (SHIFT OUTOF_INPUT OP1).   Recursively call
     this routine to calculate INTO_TARGET (which depends on both OUTOF_INPUT
     and INTO_INPUT), then emit code to set up OUTOF_TARGET.

     This isn't worthwhile for constant shifts since the optimizers will
     cope better with in-range shift counts.  */
  if (shift_mask >= BITS_PER_WORD
      && outof_target != 0
      && !CONSTANT_P (op1))
    {
      if (!expand_doubleword_shift (op1_mode, binoptab,
				    outof_input, into_input, op1,
				    0, into_target,
				    unsignedp, methods, shift_mask))
	return false;
      if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
			       outof_target, unsignedp, methods))
	return false;
      return true;
    }

  /* Set CMP_CODE, CMP1 and CMP2 so that the rtx (CMP_CODE CMP1 CMP2)
     is true when the effective shift value is less than BITS_PER_WORD.
     Set SUPERWORD_OP1 to the shift count that should be used to shift
     OUTOF_INPUT into INTO_TARGET when the condition is false.  */
  tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
  if (!CONSTANT_P (op1) && shift_mask == BITS_PER_WORD - 1)
    {
      /* Set CMP1 to OP1 & BITS_PER_WORD.  The result is zero iff OP1
	 is a subword shift count.  */
      cmp1 = simplify_expand_binop (op1_mode, and_optab, op1, tmp,
				    0, true, methods);
      cmp2 = CONST0_RTX (op1_mode);
      cmp_code = EQ;
      superword_op1 = op1;
    }
  else
    {
      /* Set CMP1 to OP1 - BITS_PER_WORD.  */
      cmp1 = simplify_expand_binop (op1_mode, sub_optab, op1, tmp,
				    0, true, methods);
      cmp2 = CONST0_RTX (op1_mode);
      cmp_code = LT;
      superword_op1 = cmp1;
    }
  if (cmp1 == 0)
    return false;

  /* If we can compute the condition at compile time, pick the
     appropriate subroutine.  */
  tmp = simplify_relational_operation (cmp_code, SImode, op1_mode, cmp1, cmp2);
  if (tmp != 0 && GET_CODE (tmp) == CONST_INT)
    {
      if (tmp == const0_rtx)
	return expand_superword_shift (binoptab, outof_input, superword_op1,
				       outof_target, into_target,
				       unsignedp, methods);
      else
	return expand_subword_shift (op1_mode, binoptab,
				     outof_input, into_input, op1,
				     outof_target, into_target,
				     unsignedp, methods, shift_mask);
    }

#ifdef HAVE_conditional_move
  /* Try using conditional moves to generate straight-line code.  */
  {
    rtx start = get_last_insn ();
    if (expand_doubleword_shift_condmove (op1_mode, binoptab,
					  cmp_code, cmp1, cmp2,
					  outof_input, into_input,
					  op1, superword_op1,
					  outof_target, into_target,
					  unsignedp, methods, shift_mask))
      return true;
    /* The conditional-move expansion failed; remove any insns it
       emitted before falling back to branches.  */
    delete_insns_since (start);
  }
#endif

  /* As a last resort, use branches to select the correct alternative.  */
  subword_label = gen_label_rtx ();
  done_label = gen_label_rtx ();

  NO_DEFER_POP;
  do_compare_rtx_and_jump (cmp1, cmp2, cmp_code, false, op1_mode,
			   0, 0, subword_label);
  OK_DEFER_POP;

  if (!expand_superword_shift (binoptab, outof_input, superword_op1,
			       outof_target, into_target,
			       unsignedp, methods))
    return false;

  emit_jump_insn (gen_jump (done_label));
  emit_barrier ();
  emit_label (subword_label);

  if (!expand_subword_shift (op1_mode, binoptab,
			     outof_input, into_input, op1,
			     outof_target, into_target,
			     unsignedp, methods, shift_mask))
    return false;

  emit_label (done_label);
  return true;
}

/* Subroutine of expand_binop.  Perform a double word multiplication of
   operands OP0 and OP1 both of mode MODE, which is exactly twice as wide
   as the target's word_mode.  This function return NULL_RTX if anything
   goes wrong, in which case it may have already emitted instructions
   which need to be deleted.

   If we want to multiply two two-word values and have normal and widening
   multiplies of single-word values, we can do this with three smaller
   multiplications.  Note that we do not make a REG_NO_CONFLICT block here
   because we are not operating on one word at a time.

   The multiplication proceeds as follows:
			   _______________________
			  [__op0_high_|__op0_low__]
			   _______________________
        *		  [__op1_high_|__op1_low__]
	_______________________________________________
			   _______________________
    (1)			  [__op0_low__*__op1_low__]
		     _______________________
    (2a)	    [__op0_low__*__op1_high_]
		     _______________________
    (2b)	    [__op0_high_*__op1_low__]
	 _______________________
    (3) [__op0_high_*__op1_high_]


  This gives a 4-word result.  Since we are only interested in the
  lower 2 words, partial result (3) and the upper words of (2a) and
  (2b) don't need to be calculated.
Hence (2a) and (2b) can be
  calculated using non-widening multiplication.

  (1), however, needs to be calculated with an unsigned widening
  multiplication.  If this operation is not directly supported we
  try using a signed widening multiplication and adjust the result.
  This adjustment works as follows:

      If both operands are positive then no adjustment is needed.

      If the operands have different signs, for example op0_low < 0 and
      op1_low >= 0, the instruction treats the most significant bit of
      op0_low as a sign bit instead of a bit with significance
      2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
      with 2**BITS_PER_WORD - op0_low, and two's complements the
      result.  Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
      the result.

      Similarly, if both operands are negative, we need to add
      (op0_low + op1_low) * 2**BITS_PER_WORD.

      We use a trick to adjust quickly.  We logically shift op0_low right
      (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
      op0_high (op1_high) before it is used to calculate 2b (2a).  If no
      logical shift exists, we do an arithmetic right shift and subtract
      the 0 or -1.  */

static rtx
expand_doubleword_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
                        bool umulp, enum optab_methods methods)
{
  int low = (WORDS_BIG_ENDIAN ? 1 : 0);
  int high = (WORDS_BIG_ENDIAN ? 0 : 1);
  /* Shift count BITS_PER_WORD-1 for the sign-adjustment trick above;
     unused (NULL_RTX) when an unsigned widening multiply is available.  */
  rtx wordm1 = umulp ? NULL_RTX : GEN_INT (BITS_PER_WORD - 1);
  rtx product, adjust, product_high, temp;

  rtx op0_high = operand_subword_force (op0, high, mode);
  rtx op0_low = operand_subword_force (op0, low, mode);
  rtx op1_high = operand_subword_force (op1, high, mode);
  rtx op1_low = operand_subword_force (op1, low, mode);

  /* If we're using an unsigned multiply to directly compute the product
     of the low-order words of the operands and perform any required
     adjustments of the operands, we begin by trying two more multiplications
     and then computing the appropriate sum.

     We have checked above that the required addition is provided.
     Full-word addition will normally always succeed, especially if
     it is provided at all, so we don't worry about its failure.  The
     multiplication may well fail, however, so we do handle that.  */

  if (!umulp)
    {
      /* ??? This could be done with emit_store_flag where available.  */
      temp = expand_binop (word_mode, lshr_optab, op0_low, wordm1,
                           NULL_RTX, 1, methods);
      if (temp)
        op0_high = expand_binop (word_mode, add_optab, op0_high, temp,
                                 NULL_RTX, 0, OPTAB_DIRECT);
      else
        {
          /* No logical shift: arithmetic shift gives 0 or -1, which we
             subtract instead of add (see the adjustment comment above).  */
          temp = expand_binop (word_mode, ashr_optab, op0_low, wordm1,
                               NULL_RTX, 0, methods);
          if (!temp)
            return NULL_RTX;
          op0_high = expand_binop (word_mode, sub_optab, op0_high, temp,
                                   NULL_RTX, 0, OPTAB_DIRECT);
        }

      if (!op0_high)
        return NULL_RTX;
    }

  /* Partial product (2b): op0_high * op1_low, non-widening.  */
  adjust = expand_binop (word_mode, smul_optab, op0_high, op1_low,
                         NULL_RTX, 0, OPTAB_DIRECT);
  if (!adjust)
    return NULL_RTX;

  /* OP0_HIGH should now be dead.  */

  if (!umulp)
    {
      /* ??? This could be done with emit_store_flag where available.  */
      temp = expand_binop (word_mode, lshr_optab, op1_low, wordm1,
                           NULL_RTX, 1, methods);
      if (temp)
        op1_high = expand_binop (word_mode, add_optab, op1_high, temp,
                                 NULL_RTX, 0, OPTAB_DIRECT);
      else
        {
          temp = expand_binop (word_mode, ashr_optab, op1_low, wordm1,
                               NULL_RTX, 0, methods);
          if (!temp)
            return NULL_RTX;
          op1_high = expand_binop (word_mode, sub_optab, op1_high, temp,
                                   NULL_RTX, 0, OPTAB_DIRECT);
        }

      if (!op1_high)
        return NULL_RTX;
    }

  /* Partial product (2a): op1_high * op0_low, non-widening.  */
  temp = expand_binop (word_mode, smul_optab, op1_high, op0_low,
                       NULL_RTX, 0, OPTAB_DIRECT);
  if (!temp)
    return NULL_RTX;

  /* OP1_HIGH should now be dead.  */

  adjust = expand_binop (word_mode, add_optab, adjust, temp,
                         adjust, 0, OPTAB_DIRECT);

  if (target && !REG_P (target))
    target = NULL_RTX;

  /* Partial product (1): the widening multiply of the low words.  */
  if (umulp)
    product = expand_binop (mode, umul_widen_optab, op0_low, op1_low,
                            target, 1, OPTAB_DIRECT);
  else
    product = expand_binop (mode, smul_widen_optab, op0_low, op1_low,
                            target, 1, OPTAB_DIRECT);

  if (!product)
    return NULL_RTX;

  /* Fold the accumulated adjustment into the high word of the product.  */
  product_high = operand_subword (product, high, 1, mode);
  adjust = expand_binop (word_mode, add_optab, product_high, adjust,
                         REG_P (product_high) ? product_high : adjust,
                         0, OPTAB_DIRECT);
  emit_move_insn (product_high, adjust);
  return product;
}

/* Wrapper around expand_binop which takes an rtx code to specify
   the operation to perform, not an optab pointer.  All other
   arguments are the same.
*/
rtx
expand_simple_binop (enum machine_mode mode, enum rtx_code code, rtx op0,
                     rtx op1, rtx target, int unsignedp,
                     enum optab_methods methods)
{
  optab binop = code_to_optab[(int) code];
  gcc_assert (binop);

  return expand_binop (mode, binop, op0, op1, target, unsignedp, methods);
}

/* Return whether OP0 and OP1 should be swapped when expanding a commutative
   binop.  Order them according to commutative_operand_precedence and, if
   possible, try to put TARGET or a pseudo first.  */
static bool
swap_commutative_operands_with_target (rtx target, rtx op0, rtx op1)
{
  int op0_prec = commutative_operand_precedence (op0);
  int op1_prec = commutative_operand_precedence (op1);

  if (op0_prec < op1_prec)
    return true;

  if (op0_prec > op1_prec)
    return false;

  /* With equal precedence, both orders are ok, but it is better if the
     first operand is TARGET, or if both TARGET and OP0 are pseudos.  */
  if (target == 0 || REG_P (target))
    return (REG_P (op1) && !REG_P (op0)) || target == op1;
  else
    return rtx_equal_p (op1, target);
}


/* Generate code to perform an operation specified by BINOPTAB
   on operands OP0 and OP1, with result having machine-mode MODE.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   If TARGET is nonzero, the value
   is generated there, if it is convenient to do so.
   In all cases an rtx is returned for the locus of the value;
   this may or may not be TARGET.

   The function tries a cascade of strategies: a direct insn, reversed
   constant rotates, widening multiplies, wider-mode open-coding,
   word-at-a-time synthesis (logical ops, shifts, rotates, add/sub with
   carry, doubleword multiply), a library call, and finally widening
   with a library call -- backtracking with delete_insns_since between
   failed attempts.  */

rtx
expand_binop (enum machine_mode mode, optab binoptab, rtx op0, rtx op1,
              rtx target, int unsignedp, enum optab_methods methods)
{
  /* Methods to use on recursive word-sized sub-expansions: never ask a
     sub-expansion to emit a library call of its own.  */
  enum optab_methods next_methods
    = (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN
       ? OPTAB_WIDEN : methods);
  enum mode_class class;
  enum machine_mode wider_mode;
  rtx temp;
  int commutative_op = 0;
  int shift_op = (binoptab->code == ASHIFT
                  || binoptab->code == ASHIFTRT
                  || binoptab->code == LSHIFTRT
                  || binoptab->code == ROTATE
                  || binoptab->code == ROTATERT);
  rtx entry_last = get_last_insn ();
  rtx last;
  bool first_pass_p = true;

  class = GET_MODE_CLASS (mode);

  /* If subtracting an integer constant, convert this into an addition of
     the negated constant.  */

  if (binoptab == sub_optab && GET_CODE (op1) == CONST_INT)
    {
      op1 = negate_rtx (mode, op1);
      binoptab = add_optab;
    }

  /* If we are inside an appropriately-short loop and we are optimizing,
     force expensive constants into a register.  */
  if (CONSTANT_P (op0) && optimize
      && rtx_cost (op0, binoptab->code) > COSTS_N_INSNS (1))
    {
      if (GET_MODE (op0) != VOIDmode)
        op0 = convert_modes (mode, VOIDmode, op0, unsignedp);
      op0 = force_reg (mode, op0);
    }

  if (CONSTANT_P (op1) && optimize
      && ! shift_op && rtx_cost (op1, binoptab->code) > COSTS_N_INSNS (1))
    {
      if (GET_MODE (op1) != VOIDmode)
        op1 = convert_modes (mode, VOIDmode, op1, unsignedp);
      op1 = force_reg (mode, op1);
    }

  /* Record where to delete back to if we backtrack.  */
  last = get_last_insn ();

  /* If operation is commutative,
     try to make the first operand a register.
     Even better, try to make it the same as the target.
     Also try to make the last operand a constant.  */
  if (GET_RTX_CLASS (binoptab->code) == RTX_COMM_ARITH
      || binoptab == smul_widen_optab
      || binoptab == umul_widen_optab
      || binoptab == smul_highpart_optab
      || binoptab == umul_highpart_optab)
    {
      commutative_op = 1;

      if (swap_commutative_operands_with_target (target, op0, op1))
        {
          temp = op1;
          op1 = op0;
          op0 = temp;
        }
    }

 retry:

  /* If we can do it with a three-operand insn, do so.  */

  if (methods != OPTAB_MUST_WIDEN
      && binoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
    {
      int icode = (int) binoptab->handlers[(int) mode].insn_code;
      enum machine_mode mode0 = insn_data[icode].operand[1].mode;
      enum machine_mode mode1 = insn_data[icode].operand[2].mode;
      rtx pat;
      rtx xop0 = op0, xop1 = op1;

      if (target)
        temp = target;
      else
        temp = gen_reg_rtx (mode);

      /* If it is a commutative operator and the modes would match
         if we would swap the operands, we can save the conversions.  */
      if (commutative_op)
        {
          if (GET_MODE (op0) != mode0 && GET_MODE (op1) != mode1
              && GET_MODE (op0) == mode1 && GET_MODE (op1) == mode0)
            {
              rtx tmp;

              tmp = op0; op0 = op1; op1 = tmp;
              tmp = xop0; xop0 = xop1; xop1 = tmp;
            }
        }

      /* In case the insn wants input operands in modes different from
         those of the actual operands, convert the operands.  It would
         seem that we don't need to convert CONST_INTs, but we do, so
         that they're properly zero-extended, sign-extended or truncated
         for their mode.  */

      if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
        xop0 = convert_modes (mode0,
                              GET_MODE (op0) != VOIDmode
                              ? GET_MODE (op0)
                              : mode,
                              xop0, unsignedp);

      if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
        xop1 = convert_modes (mode1,
                              GET_MODE (op1) != VOIDmode
                              ? GET_MODE (op1)
                              : mode,
                              xop1, unsignedp);

      /* Now, if insn's predicates don't allow our operands, put them into
         pseudo regs.  */

      if (!insn_data[icode].operand[1].predicate (xop0, mode0)
          && mode0 != VOIDmode)
        xop0 = copy_to_mode_reg (mode0, xop0);

      if (!insn_data[icode].operand[2].predicate (xop1, mode1)
          && mode1 != VOIDmode)
        xop1 = copy_to_mode_reg (mode1, xop1);

      if (!insn_data[icode].operand[0].predicate (temp, mode))
        temp = gen_reg_rtx (mode);

      pat = GEN_FCN (icode) (temp, xop0, xop1);
      if (pat)
        {
          /* If PAT is composed of more than one insn, try to add an
             appropriate REG_EQUAL note to it.  If we can't because TEMP
             conflicts with an operand, call ourselves again, this time
             without a target.  */
          if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
              && ! add_equal_note (pat, temp, binoptab->code, xop0, xop1))
            {
              delete_insns_since (last);
              return expand_binop (mode, binoptab, op0, op1, NULL_RTX,
                                   unsignedp, methods);
            }

          emit_insn (pat);
          return temp;
        }
      else
        delete_insns_since (last);
    }

  /* If we were trying to rotate by a constant value, and that didn't
     work, try rotating the other direction before falling back to
     shifts and bitwise-or.  */
  if (first_pass_p
      && (binoptab == rotl_optab || binoptab == rotr_optab)
      && class == MODE_INT
      && GET_CODE (op1) == CONST_INT
      && INTVAL (op1) > 0
      && (unsigned int) INTVAL (op1) < GET_MODE_BITSIZE (mode))
    {
      /* first_pass_p guards against flipping direction more than once.  */
      first_pass_p = false;
      op1 = GEN_INT (GET_MODE_BITSIZE (mode) - INTVAL (op1));
      binoptab = binoptab == rotl_optab ? rotr_optab : rotl_optab;
      goto retry;
    }

  /* If this is a multiply, see if we can do a widening operation that
     takes operands of this mode and makes a wider mode.  */

  if (binoptab == smul_optab
      && GET_MODE_WIDER_MODE (mode) != VOIDmode
      && (((unsignedp ? umul_widen_optab : smul_widen_optab)
           ->handlers[(int) GET_MODE_WIDER_MODE (mode)].insn_code)
          != CODE_FOR_nothing))
    {
      temp = expand_binop (GET_MODE_WIDER_MODE (mode),
                           unsignedp ? umul_widen_optab : smul_widen_optab,
                           op0, op1, NULL_RTX, unsignedp, OPTAB_DIRECT);

      if (temp != 0)
        {
          if (GET_MODE_CLASS (mode) == MODE_INT
              && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
                                        GET_MODE_BITSIZE (GET_MODE (temp))))
            return gen_lowpart (mode, temp);
          else
            return convert_to_mode (mode, temp, unsignedp);
        }
    }

  /* Look for a wider mode of the same class for which we think we
     can open-code the operation.  Check for a widening multiply at the
     wider mode as well.  */

  if (CLASS_HAS_WIDER_MODES_P (class)
      && methods != OPTAB_DIRECT && methods != OPTAB_LIB)
    for (wider_mode = GET_MODE_WIDER_MODE (mode);
         wider_mode != VOIDmode;
         wider_mode = GET_MODE_WIDER_MODE (wider_mode))
      {
        if (binoptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing
            || (binoptab == smul_optab
                && GET_MODE_WIDER_MODE (wider_mode) != VOIDmode
                && (((unsignedp ? umul_widen_optab : smul_widen_optab)
                     ->handlers[(int) GET_MODE_WIDER_MODE (wider_mode)].insn_code)
                    != CODE_FOR_nothing)))
          {
            rtx xop0 = op0, xop1 = op1;
            int no_extend = 0;

            /* For certain integer operations, we need not actually extend
               the narrow operands, as long as we will truncate
               the results to the same narrowness.  */

            if ((binoptab == ior_optab || binoptab == and_optab
                 || binoptab == xor_optab
                 || binoptab == add_optab || binoptab == sub_optab
                 || binoptab == smul_optab || binoptab == ashl_optab)
                && class == MODE_INT)
              no_extend = 1;

            xop0 = widen_operand (xop0, wider_mode, mode, unsignedp, no_extend);

            /* The second operand of a shift must always be extended.  */
            xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
                                  no_extend && binoptab != ashl_optab);

            temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
                                 unsignedp, OPTAB_DIRECT);
            if (temp)
              {
                if (class != MODE_INT
                    || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
                                               GET_MODE_BITSIZE (wider_mode)))
                  {
                    if (target == 0)
                      target = gen_reg_rtx (mode);
                    convert_move (target, temp, 0);
                    return target;
                  }
                else
                  return gen_lowpart (mode, temp);
              }
            else
              delete_insns_since (last);
          }
      }

  /* These can be done a word at a time.  */
  if ((binoptab == and_optab || binoptab == ior_optab || binoptab == xor_optab)
      && class == MODE_INT
      && GET_MODE_SIZE (mode) > UNITS_PER_WORD
      && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
    {
      int i;
      rtx insns;
      rtx equiv_value;

      /* If TARGET is the same as one of the operands, the REG_EQUAL note
         won't be accurate, so use a new target.  */
      if (target == 0 || target == op0 || target == op1)
        target = gen_reg_rtx (mode);

      start_sequence ();

      /* Do the actual arithmetic.  */
      for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
        {
          rtx target_piece = operand_subword (target, i, 1, mode);
          rtx x = expand_binop (word_mode, binoptab,
                                operand_subword_force (op0, i, mode),
                                operand_subword_force (op1, i, mode),
                                target_piece, unsignedp, next_methods);

          if (x == 0)
            break;

          if (target_piece != x)
            emit_move_insn (target_piece, x);
        }

      insns = get_insns ();
      end_sequence ();

      /* Only emit the sequence if every word succeeded.  */
      if (i == GET_MODE_BITSIZE (mode) / BITS_PER_WORD)
        {
          if (binoptab->code != UNKNOWN)
            equiv_value
              = gen_rtx_fmt_ee (binoptab->code, mode,
                                copy_rtx (op0), copy_rtx (op1));
          else
            equiv_value = 0;

          emit_no_conflict_block (insns, target, op0, op1, equiv_value);
          return target;
        }
    }

  /* Synthesize double word shifts from single word shifts.  */
  if ((binoptab == lshr_optab || binoptab == ashl_optab
       || binoptab == ashr_optab)
      && class == MODE_INT
      && (GET_CODE (op1) == CONST_INT || !optimize_size)
      && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
      && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
      && ashl_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
      && lshr_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
    {
      unsigned HOST_WIDE_INT shift_mask, double_shift_mask;
      enum machine_mode op1_mode;

      double_shift_mask = targetm.shift_truncation_mask (mode);
      shift_mask = targetm.shift_truncation_mask (word_mode);
      op1_mode = GET_MODE (op1) != VOIDmode ? GET_MODE (op1) : word_mode;

      /* Apply the truncation to constant shifts.  */
      if (double_shift_mask > 0 && GET_CODE (op1) == CONST_INT)
        op1 = GEN_INT (INTVAL (op1) & double_shift_mask);

      if (op1 == CONST0_RTX (op1_mode))
        return op0;

      /* Make sure that this is a combination that expand_doubleword_shift
         can handle.  See the comments there for details.  */
      if (double_shift_mask == 0
          || (shift_mask == BITS_PER_WORD - 1
              && double_shift_mask == BITS_PER_WORD * 2 - 1))
        {
          rtx insns, equiv_value;
          rtx into_target, outof_target;
          rtx into_input, outof_input;
          int left_shift, outof_word;

          /* If TARGET is the same as one of the operands, the REG_EQUAL note
             won't be accurate, so use a new target.  */
          if (target == 0 || target == op0 || target == op1)
            target = gen_reg_rtx (mode);

          start_sequence ();

          /* OUTOF_* is the word we are shifting bits away from, and
             INTO_* is the word that we are shifting bits towards, thus
             they differ depending on the direction of the shift and
             WORDS_BIG_ENDIAN.  */

          left_shift = binoptab == ashl_optab;
          outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;

          outof_target = operand_subword (target, outof_word, 1, mode);
          into_target = operand_subword (target, 1 - outof_word, 1, mode);

          outof_input = operand_subword_force (op0, outof_word, mode);
          into_input = operand_subword_force (op0, 1 - outof_word, mode);

          if (expand_doubleword_shift (op1_mode, binoptab,
                                       outof_input, into_input, op1,
                                       outof_target, into_target,
                                       unsignedp, next_methods, shift_mask))
            {
              insns = get_insns ();
              end_sequence ();

              equiv_value = gen_rtx_fmt_ee (binoptab->code, mode, op0, op1);
              emit_no_conflict_block (insns, target, op0, op1, equiv_value);
              return target;
            }
          end_sequence ();
        }
    }

  /* Synthesize double word rotates from single word shifts.  */
  if ((binoptab == rotl_optab || binoptab == rotr_optab)
      && class == MODE_INT
      && GET_CODE (op1) == CONST_INT
      && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
      && ashl_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
      && lshr_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
    {
      rtx insns;
      rtx into_target, outof_target;
      rtx into_input, outof_input;
      rtx inter;
      int shift_count, left_shift, outof_word;

      /* If TARGET is the same as one of the operands, the REG_EQUAL note
         won't be accurate, so use a new target.  Do this also if target is
         not a REG, first because having a register instead may open
         optimization opportunities, and second because if target and op0
         happen to be MEMs designating the same location, we would risk
         clobbering it too early in the code sequence we generate below.  */
      if (target == 0 || target == op0 || target == op1 || ! REG_P (target))
        target = gen_reg_rtx (mode);

      start_sequence ();

      shift_count = INTVAL (op1);

      /* OUTOF_* is the word we are shifting bits away from, and
         INTO_* is the word that we are shifting bits towards, thus
         they differ depending on the direction of the shift and
         WORDS_BIG_ENDIAN.  */

      left_shift = (binoptab == rotl_optab);
      outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;

      outof_target = operand_subword (target, outof_word, 1, mode);
      into_target = operand_subword (target, 1 - outof_word, 1, mode);

      outof_input = operand_subword_force (op0, outof_word, mode);
      into_input = operand_subword_force (op0, 1 - outof_word, mode);

      if (shift_count == BITS_PER_WORD)
        {
          /* This is just a word swap.  */
          emit_move_insn (outof_target, into_input);
          emit_move_insn (into_target, outof_input);
          inter = const0_rtx;
        }
      else
        {
          rtx into_temp1, into_temp2, outof_temp1, outof_temp2;
          rtx first_shift_count, second_shift_count;
          optab reverse_unsigned_shift, unsigned_shift;

          reverse_unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
                                    ? lshr_optab : ashl_optab);

          unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
                            ? ashl_optab : lshr_optab);

          if (shift_count > BITS_PER_WORD)
            {
              first_shift_count = GEN_INT (shift_count - BITS_PER_WORD);
              second_shift_count = GEN_INT (2 * BITS_PER_WORD - shift_count);
            }
          else
            {
              first_shift_count = GEN_INT (BITS_PER_WORD - shift_count);
              second_shift_count = GEN_INT (shift_count);
            }

          into_temp1 = expand_binop (word_mode, unsigned_shift,
                                     outof_input, first_shift_count,
                                     NULL_RTX, unsignedp, next_methods);
          into_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
                                     into_input, second_shift_count,
                                     NULL_RTX, unsignedp, next_methods);

          if (into_temp1 != 0 && into_temp2 != 0)
            inter = expand_binop (word_mode, ior_optab, into_temp1, into_temp2,
                                  into_target, unsignedp, next_methods);
          else
            inter = 0;

          if (inter != 0 && inter != into_target)
            emit_move_insn (into_target, inter);

          outof_temp1 = expand_binop (word_mode, unsigned_shift,
                                      into_input, first_shift_count,
                                      NULL_RTX, unsignedp, next_methods);
          outof_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
                                      outof_input, second_shift_count,
                                      NULL_RTX, unsignedp, next_methods);

          if (inter != 0 && outof_temp1 != 0 && outof_temp2 != 0)
            inter = expand_binop (word_mode, ior_optab,
                                  outof_temp1, outof_temp2,
                                  outof_target, unsignedp, next_methods);

          if (inter != 0 && inter != outof_target)
            emit_move_insn (outof_target, inter);
        }

      insns = get_insns ();
      end_sequence ();

      if (inter != 0)
        {
          /* One may be tempted to wrap the insns in a REG_NO_CONFLICT
             block to help the register allocator a bit.  But a multi-word
             rotate will need all the input bits when setting the output
             bits, so there clearly is a conflict between the input and
             output registers.  So we can't use a no-conflict block here.  */
          emit_insn (insns);
          return target;
        }
    }

  /* These can be done a word at a time by propagating carries.  */
  if ((binoptab == add_optab || binoptab == sub_optab)
      && class == MODE_INT
      && GET_MODE_SIZE (mode) >= 2 * UNITS_PER_WORD
      && binoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
    {
      unsigned int i;
      optab otheroptab = binoptab == add_optab ? sub_optab : add_optab;
      const unsigned int nwords = GET_MODE_BITSIZE (mode) / BITS_PER_WORD;
      rtx carry_in = NULL_RTX, carry_out = NULL_RTX;
      rtx xop0, xop1, xtarget;

      /* We can handle either a 1 or -1 value for the carry.  If STORE_FLAG
         value is one of those, use it.  Otherwise, use 1 since it is the
         one easiest to get.  */
#if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
      int normalizep = STORE_FLAG_VALUE;
#else
      int normalizep = 1;
#endif

      /* Prepare the operands.  */
      xop0 = force_reg (mode, op0);
      xop1 = force_reg (mode, op1);

      xtarget = gen_reg_rtx (mode);

      if (target == 0 || !REG_P (target))
        target = xtarget;

      /* Indicate for flow that the entire target reg is being set.  */
      if (REG_P (target))
        emit_insn (gen_rtx_CLOBBER (VOIDmode, xtarget));

      /* Do the actual arithmetic.  */
      for (i = 0; i < nwords; i++)
        {
          /* Process words least-significant first so carries propagate.  */
          int index = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i);
          rtx target_piece = operand_subword (xtarget, index, 1, mode);
          rtx op0_piece = operand_subword_force (xop0, index, mode);
          rtx op1_piece = operand_subword_force (xop1, index, mode);
          rtx x;

          /* Main add/subtract of the input operands.  */
          x = expand_binop (word_mode, binoptab,
                            op0_piece, op1_piece,
                            target_piece, unsignedp, next_methods);
          if (x == 0)
            break;

          if (i + 1 < nwords)
            {
              /* Store carry from main add/subtract.  */
              carry_out = gen_reg_rtx (word_mode);
              carry_out = emit_store_flag_force (carry_out,
                                                 (binoptab == add_optab
                                                  ? LT : GT),
                                                 x, op0_piece,
                                                 word_mode, 1, normalizep);
            }

          if (i > 0)
            {
              rtx newx;

              /* Add/subtract previous carry to main result.  */
              newx = expand_binop (word_mode,
                                   normalizep == 1 ? binoptab : otheroptab,
                                   x, carry_in,
                                   NULL_RTX, 1, next_methods);

              if (i + 1 < nwords)
                {
                  /* Get out carry from adding/subtracting carry in.  */
                  rtx carry_tmp = gen_reg_rtx (word_mode);
                  carry_tmp = emit_store_flag_force (carry_tmp,
                                                     (binoptab == add_optab
                                                      ? LT : GT),
                                                     newx, x,
                                                     word_mode, 1, normalizep);

                  /* Logical-ior the two possible carries together.  */
                  carry_out = expand_binop (word_mode, ior_optab,
                                            carry_out, carry_tmp,
                                            carry_out, 0, next_methods);
                  if (carry_out == 0)
                    break;
                }
              emit_move_insn (target_piece, newx);
            }
          else
            {
              if (x != target_piece)
                emit_move_insn (target_piece, x);
            }

          carry_in = carry_out;
        }

      if (i == GET_MODE_BITSIZE (mode) / (unsigned) BITS_PER_WORD)
        {
          if (mov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing
              || ! rtx_equal_p (target, xtarget))
            {
              rtx temp = emit_move_insn (target, xtarget);

              set_unique_reg_note (temp,
                                   REG_EQUAL,
                                   gen_rtx_fmt_ee (binoptab->code, mode,
                                                   copy_rtx (xop0),
                                                   copy_rtx (xop1)));
            }
          else
            target = xtarget;

          return target;
        }

      else
        delete_insns_since (last);
    }

  /* Attempt to synthesize double word multiplies using a sequence of word
     mode multiplications.  We first attempt to generate a sequence using a
     more efficient unsigned widening multiply, and if that fails we then
     try using a signed widening multiply.  */

  if (binoptab == smul_optab
      && class == MODE_INT
      && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
      && smul_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing
      && add_optab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
    {
      rtx product = NULL_RTX;

      if (umul_widen_optab->handlers[(int) mode].insn_code
          != CODE_FOR_nothing)
        {
          product = expand_doubleword_mult (mode, op0, op1, target,
                                            true, methods);
          if (!product)
            delete_insns_since (last);
        }

      if (product == NULL_RTX
          && smul_widen_optab->handlers[(int) mode].insn_code
             != CODE_FOR_nothing)
        {
          product = expand_doubleword_mult (mode, op0, op1, target,
                                            false, methods);
          if (!product)
            delete_insns_since (last);
        }

      if (product != NULL_RTX)
        {
          if (mov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
            {
              temp = emit_move_insn (target ? target : product, product);
              set_unique_reg_note (temp,
                                   REG_EQUAL,
                                   gen_rtx_fmt_ee (MULT, mode,
                                                   copy_rtx (op0),
                                                   copy_rtx (op1)));
            }
          return product;
        }
    }

  /* It can't be open-coded in this mode.
     Use a library call if one is available and caller says that's ok.  */

  if (binoptab->handlers[(int) mode].libfunc
      && (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN))
    {
      rtx insns;
      rtx op1x = op1;
      enum machine_mode op1_mode = mode;
      rtx value;

      start_sequence ();

      if (shift_op)
        {
          op1_mode = word_mode;
          /* Specify unsigned here,
             since negative shift counts are meaningless.  */
          op1x = convert_to_mode (word_mode, op1, 1);
        }

      if (GET_MODE (op0) != VOIDmode
          && GET_MODE (op0) != mode)
        op0 = convert_to_mode (mode, op0, unsignedp);

      /* Pass 1 for NO_QUEUE so we don't lose any increments
         if the libcall is cse'd or moved.  */
      value = emit_library_call_value (binoptab->handlers[(int) mode].libfunc,
                                       NULL_RTX, LCT_CONST, mode, 2,
                                       op0, mode, op1x, op1_mode);

      insns = get_insns ();
      end_sequence ();

      target = gen_reg_rtx (mode);
      emit_libcall_block (insns, target, value,
                          gen_rtx_fmt_ee (binoptab->code, mode, op0, op1));

      return target;
    }

  delete_insns_since (last);

  /* It can't be done in this mode.  Can we do it in a wider mode?  */

  if (! (methods == OPTAB_WIDEN || methods == OPTAB_LIB_WIDEN
         || methods == OPTAB_MUST_WIDEN))
    {
      /* Caller says, don't even try.  */
      delete_insns_since (entry_last);
      return 0;
    }

  /* Compute the value of METHODS to pass to recursive calls.
     Don't allow widening to be tried recursively.  */

  methods = (methods == OPTAB_LIB_WIDEN ? OPTAB_LIB : OPTAB_DIRECT);

  /* Look for a wider mode of the same class for which it appears we can do
     the operation.  */

  if (CLASS_HAS_WIDER_MODES_P (class))
    {
      for (wider_mode = GET_MODE_WIDER_MODE (mode);
           wider_mode != VOIDmode;
           wider_mode = GET_MODE_WIDER_MODE (wider_mode))
        {
          if ((binoptab->handlers[(int) wider_mode].insn_code
               != CODE_FOR_nothing)
              || (methods == OPTAB_LIB
                  && binoptab->handlers[(int) wider_mode].libfunc))
            {
              rtx xop0 = op0, xop1 = op1;
              int no_extend = 0;

              /* For certain integer operations, we need not actually extend
                 the narrow operands, as long as we will truncate
                 the results to the same narrowness.  */

              if ((binoptab == ior_optab || binoptab == and_optab
                   || binoptab == xor_optab
                   || binoptab == add_optab || binoptab == sub_optab
                   || binoptab == smul_optab || binoptab == ashl_optab)
                  && class == MODE_INT)
                no_extend = 1;

              xop0 = widen_operand (xop0, wider_mode, mode,
                                    unsignedp, no_extend);

              /* The second operand of a shift must always be extended.  */
              xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
                                    no_extend && binoptab != ashl_optab);

              temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
                                   unsignedp, methods);
              if (temp)
                {
                  if (class != MODE_INT
                      || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
                                                 GET_MODE_BITSIZE (wider_mode)))
                    {
                      if (target == 0)
                        target = gen_reg_rtx (mode);
                      convert_move (target, temp, 0);
                      return target;
                    }
                  else
                    return gen_lowpart (mode, temp);
                }
              else
                delete_insns_since (last);
            }
        }
    }

  delete_insns_since (entry_last);
  return 0;
}

/* Expand a binary operator which has both signed and unsigned forms.
   UOPTAB is the optab for unsigned operations, and SOPTAB is for
   signed operations.

   If we widen unsigned operands, we may use a signed wider operation instead
   of an unsigned wider operation, since the result would be the same.
*/ 1989 1990rtx 1991sign_expand_binop (enum machine_mode mode, optab uoptab, optab soptab, 1992 rtx op0, rtx op1, rtx target, int unsignedp, 1993 enum optab_methods methods) 1994{ 1995 rtx temp; 1996 optab direct_optab = unsignedp ? uoptab : soptab; 1997 struct optab wide_soptab; 1998 1999 /* Do it without widening, if possible. */ 2000 temp = expand_binop (mode, direct_optab, op0, op1, target, 2001 unsignedp, OPTAB_DIRECT); 2002 if (temp || methods == OPTAB_DIRECT) 2003 return temp; 2004 2005 /* Try widening to a signed int. Make a fake signed optab that 2006 hides any signed insn for direct use. */ 2007 wide_soptab = *soptab; 2008 wide_soptab.handlers[(int) mode].insn_code = CODE_FOR_nothing; 2009 wide_soptab.handlers[(int) mode].libfunc = 0; 2010 2011 temp = expand_binop (mode, &wide_soptab, op0, op1, target, 2012 unsignedp, OPTAB_WIDEN); 2013 2014 /* For unsigned operands, try widening to an unsigned int. */ 2015 if (temp == 0 && unsignedp) 2016 temp = expand_binop (mode, uoptab, op0, op1, target, 2017 unsignedp, OPTAB_WIDEN); 2018 if (temp || methods == OPTAB_WIDEN) 2019 return temp; 2020 2021 /* Use the right width lib call if that exists. */ 2022 temp = expand_binop (mode, direct_optab, op0, op1, target, unsignedp, OPTAB_LIB); 2023 if (temp || methods == OPTAB_LIB) 2024 return temp; 2025 2026 /* Must widen and use a lib call, use either signed or unsigned. */ 2027 temp = expand_binop (mode, &wide_soptab, op0, op1, target, 2028 unsignedp, methods); 2029 if (temp != 0) 2030 return temp; 2031 if (unsignedp) 2032 return expand_binop (mode, uoptab, op0, op1, target, 2033 unsignedp, methods); 2034 return 0; 2035} 2036 2037/* Generate code to perform an operation specified by UNOPPTAB 2038 on operand OP0, with two results to TARG0 and TARG1. 2039 We assume that the order of the operands for the instruction 2040 is TARG0, TARG1, OP0. 2041 2042 Either TARG0 or TARG1 may be zero, but what that means is that 2043 the result is not actually wanted. 
We will generate it into
   a dummy pseudo-reg and discard it.  They may not both be zero.

   Returns 1 if this operation can be performed; 0 if not.  */

int
expand_twoval_unop (optab unoptab, rtx op0, rtx targ0, rtx targ1,
                    int unsignedp)
{
  /* The result mode is taken from whichever target the caller supplied.  */
  enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
  enum mode_class class;
  enum machine_mode wider_mode;
  /* ENTRY_LAST marks the insn stream on entry: if no strategy works at
     all, everything emitted here is deleted before returning 0.  */
  rtx entry_last = get_last_insn ();
  rtx last;

  class = GET_MODE_CLASS (mode);

  /* An unwanted result still needs somewhere to go; use dummy pseudos.  */
  if (!targ0)
    targ0 = gen_reg_rtx (mode);
  if (!targ1)
    targ1 = gen_reg_rtx (mode);

  /* Record where to go back to if we fail.  */
  last = get_last_insn ();

  if (unoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
    {
      int icode = (int) unoptab->handlers[(int) mode].insn_code;
      /* Operand 2 is the input; operands 0 and 1 are the two outputs.  */
      enum machine_mode mode0 = insn_data[icode].operand[2].mode;
      rtx pat;
      rtx xop0 = op0;

      if (GET_MODE (xop0) != VOIDmode
          && GET_MODE (xop0) != mode0)
        xop0 = convert_to_mode (mode0, xop0, unsignedp);

      /* Now, if insn doesn't accept these operands, put them into pseudos.  */
      if (!insn_data[icode].operand[2].predicate (xop0, mode0))
        xop0 = copy_to_mode_reg (mode0, xop0);

      /* We could handle this, but we should always be called with a pseudo
         for our targets and all insns should take them as outputs.  */
      gcc_assert (insn_data[icode].operand[0].predicate (targ0, mode));
      gcc_assert (insn_data[icode].operand[1].predicate (targ1, mode));

      pat = GEN_FCN (icode) (targ0, targ1, xop0);
      if (pat)
        {
          emit_insn (pat);
          return 1;
        }
      else
        delete_insns_since (last);
    }

  /* It can't be done in this mode.  Can we do it in a wider mode?  */

  if (CLASS_HAS_WIDER_MODES_P (class))
    {
      for (wider_mode = GET_MODE_WIDER_MODE (mode);
           wider_mode != VOIDmode;
           wider_mode = GET_MODE_WIDER_MODE (wider_mode))
        {
          if (unoptab->handlers[(int) wider_mode].insn_code
              != CODE_FOR_nothing)
            {
              /* Recurse in the wider mode, then truncate both results
                 back to MODE via convert_move.  */
              rtx t0 = gen_reg_rtx (wider_mode);
              rtx t1 = gen_reg_rtx (wider_mode);
              rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);

              if (expand_twoval_unop (unoptab, cop0, t0, t1, unsignedp))
                {
                  convert_move (targ0, t0, unsignedp);
                  convert_move (targ1, t1, unsignedp);
                  return 1;
                }
              else
                delete_insns_since (last);
            }
        }
    }

  delete_insns_since (entry_last);
  return 0;
}

/* Generate code to perform an operation specified by BINOPTAB
   on operands OP0 and OP1, with two results to TARG0 and TARG1.
   We assume that the order of the operands for the instruction
   is TARG0, OP0, OP1, TARG1, which would fit a pattern like
   [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].

   Either TARG0 or TARG1 may be zero, but what that means is that
   the result is not actually wanted.  We will generate it into
   a dummy pseudo-reg and discard it.  They may not both be zero.

   Returns 1 if this operation can be performed; 0 if not.  */

int
expand_twoval_binop (optab binoptab, rtx op0, rtx op1, rtx targ0, rtx targ1,
                     int unsignedp)
{
  enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
  enum mode_class class;
  enum machine_mode wider_mode;
  /* Rollback point in case no expansion strategy succeeds.  */
  rtx entry_last = get_last_insn ();
  rtx last;

  class = GET_MODE_CLASS (mode);

  /* If we are inside an appropriately-short loop and we are optimizing,
     force expensive constants into a register.  */
  if (CONSTANT_P (op0) && optimize
      && rtx_cost (op0, binoptab->code) > COSTS_N_INSNS (1))
    op0 = force_reg (mode, op0);

  if (CONSTANT_P (op1) && optimize
      && rtx_cost (op1, binoptab->code) > COSTS_N_INSNS (1))
    op1 = force_reg (mode, op1);

  if (!targ0)
    targ0 = gen_reg_rtx (mode);
  if (!targ1)
    targ1 = gen_reg_rtx (mode);

  /* Record where to go back to if we fail.  */
  last = get_last_insn ();

  if (binoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
    {
      int icode = (int) binoptab->handlers[(int) mode].insn_code;
      /* Operands 1 and 2 are the inputs; 0 and 3 are the two outputs.  */
      enum machine_mode mode0 = insn_data[icode].operand[1].mode;
      enum machine_mode mode1 = insn_data[icode].operand[2].mode;
      rtx pat;
      rtx xop0 = op0, xop1 = op1;

      /* In case the insn wants input operands in modes different from
         those of the actual operands, convert the operands.  It would
         seem that we don't need to convert CONST_INTs, but we do, so
         that they're properly zero-extended, sign-extended or truncated
         for their mode.  */

      if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
        xop0 = convert_modes (mode0,
                              GET_MODE (op0) != VOIDmode
                              ? GET_MODE (op0)
                              : mode,
                              xop0, unsignedp);

      if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
        xop1 = convert_modes (mode1,
                              GET_MODE (op1) != VOIDmode
                              ? GET_MODE (op1)
                              : mode,
                              xop1, unsignedp);

      /* Now, if insn doesn't accept these operands, put them into pseudos.  */
      if (!insn_data[icode].operand[1].predicate (xop0, mode0))
        xop0 = copy_to_mode_reg (mode0, xop0);

      if (!insn_data[icode].operand[2].predicate (xop1, mode1))
        xop1 = copy_to_mode_reg (mode1, xop1);

      /* We could handle this, but we should always be called with a pseudo
         for our targets and all insns should take them as outputs.  */
      gcc_assert (insn_data[icode].operand[0].predicate (targ0, mode));
      gcc_assert (insn_data[icode].operand[3].predicate (targ1, mode));

      pat = GEN_FCN (icode) (targ0, xop0, xop1, targ1);
      if (pat)
        {
          emit_insn (pat);
          return 1;
        }
      else
        delete_insns_since (last);
    }

  /* It can't be done in this mode.  Can we do it in a wider mode?  */

  if (CLASS_HAS_WIDER_MODES_P (class))
    {
      for (wider_mode = GET_MODE_WIDER_MODE (mode);
           wider_mode != VOIDmode;
           wider_mode = GET_MODE_WIDER_MODE (wider_mode))
        {
          if (binoptab->handlers[(int) wider_mode].insn_code
              != CODE_FOR_nothing)
            {
              /* Recurse in the wider mode on widened copies of the
                 operands, then narrow both results back to MODE.  */
              rtx t0 = gen_reg_rtx (wider_mode);
              rtx t1 = gen_reg_rtx (wider_mode);
              rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
              rtx cop1 = convert_modes (wider_mode, mode, op1, unsignedp);

              if (expand_twoval_binop (binoptab, cop0, cop1,
                                       t0, t1, unsignedp))
                {
                  convert_move (targ0, t0, unsignedp);
                  convert_move (targ1, t1, unsignedp);
                  return 1;
                }
              else
                delete_insns_since (last);
            }
        }
    }

  delete_insns_since (entry_last);
  return 0;
}

/* Expand the two-valued library call indicated by BINOPTAB, but
   preserve only one of the values.  If TARG0 is non-NULL, the first
   value is placed into TARG0; otherwise the second value is placed
   into TARG1.  Exactly one of TARG0 and TARG1 must be non-NULL.  The
   value stored into TARG0 or TARG1 is equivalent to (CODE OP0 OP1).
   This routine assumes that the value returned by the library call is
   as if the return value was of an integral mode twice as wide as the
   mode of OP0.  Returns 1 if the call was successful.
*/

bool
expand_twoval_binop_libfunc (optab binoptab, rtx op0, rtx op1,
                             rtx targ0, rtx targ1, enum rtx_code code)
{
  enum machine_mode mode;
  enum machine_mode libval_mode;
  rtx libval;
  rtx insns;

  /* Exactly one of TARG0 or TARG1 should be non-NULL.  */
  gcc_assert (!targ0 != !targ1);

  mode = GET_MODE (op0);
  if (!binoptab->handlers[(int) mode].libfunc)
    return false;

  /* The value returned by the library function will have twice as
     many bits as the nominal MODE.  */
  libval_mode = smallest_mode_for_size (2 * GET_MODE_BITSIZE (mode),
                                        MODE_INT);
  start_sequence ();
  libval = emit_library_call_value (binoptab->handlers[(int) mode].libfunc,
                                    NULL_RTX, LCT_CONST,
                                    libval_mode, 2,
                                    op0, mode,
                                    op1, mode);
  /* Get the part of VAL containing the value that we want: the first
     value lives at subreg offset 0, the second one MODE-size bytes in.  */
  libval = simplify_gen_subreg (mode, libval, libval_mode,
                                targ0 ? 0 : GET_MODE_SIZE (mode));
  insns = get_insns ();
  end_sequence ();
  /* Move the result into the desired location.  */
  emit_libcall_block (insns, targ0 ? targ0 : targ1, libval,
                      gen_rtx_fmt_ee (code, mode, op0, op1));

  return true;
}


/* Wrapper around expand_unop which takes an rtx code to specify
   the operation to perform, not an optab pointer.  All other
   arguments are the same.  */
rtx
expand_simple_unop (enum machine_mode mode, enum rtx_code code, rtx op0,
                    rtx target, int unsignedp)
{
  optab unop = code_to_optab[(int) code];
  gcc_assert (unop);

  return expand_unop (mode, unop, op0, target, unsignedp);
}

/* Try calculating
        (clz:narrow x)
   as
        (clz:wide (zero_extend:wide x)) - ((width wide) - (width narrow)).
*/
static rtx
widen_clz (enum machine_mode mode, rtx op0, rtx target)
{
  enum mode_class class = GET_MODE_CLASS (mode);
  if (CLASS_HAS_WIDER_MODES_P (class))
    {
      enum machine_mode wider_mode;
      for (wider_mode = GET_MODE_WIDER_MODE (mode);
           wider_mode != VOIDmode;
           wider_mode = GET_MODE_WIDER_MODE (wider_mode))
        {
          if (clz_optab->handlers[(int) wider_mode].insn_code
              != CODE_FOR_nothing)
            {
              rtx xop0, temp, last;

              last = get_last_insn ();

              if (target == 0)
                target = gen_reg_rtx (mode);
              /* Zero-extend the operand: the extra high bits are all
                 zero, so the wide clz overcounts by exactly the width
                 difference, which we subtract off below.  */
              xop0 = widen_operand (op0, wider_mode, mode, true, false);
              temp = expand_unop (wider_mode, clz_optab, xop0, NULL_RTX, true);
              if (temp != 0)
                temp = expand_binop (wider_mode, sub_optab, temp,
                                     GEN_INT (GET_MODE_BITSIZE (wider_mode)
                                              - GET_MODE_BITSIZE (mode)),
                                     target, true, OPTAB_DIRECT);
              if (temp == 0)
                delete_insns_since (last);

              /* Note: only the first wider mode with a clz pattern is
                 tried; TEMP (possibly 0 on failure) is returned here
                 rather than continuing the loop.  */
              return temp;
            }
        }
    }
  return 0;
}

/* Try calculating (parity x) as (and (popcount x) 1), where
   popcount can also be done in a wider mode.
*/
static rtx
expand_parity (enum machine_mode mode, rtx op0, rtx target)
{
  enum mode_class class = GET_MODE_CLASS (mode);
  if (CLASS_HAS_WIDER_MODES_P (class))
    {
      enum machine_mode wider_mode;
      /* Unlike widen_clz, this loop starts at MODE itself, so a
         popcount pattern of exactly the right width is used first.  */
      for (wider_mode = mode; wider_mode != VOIDmode;
           wider_mode = GET_MODE_WIDER_MODE (wider_mode))
        {
          if (popcount_optab->handlers[(int) wider_mode].insn_code
              != CODE_FOR_nothing)
            {
              rtx xop0, temp, last;

              last = get_last_insn ();

              if (target == 0)
                target = gen_reg_rtx (mode);
              xop0 = widen_operand (op0, wider_mode, mode, true, false);
              temp = expand_unop (wider_mode, popcount_optab, xop0, NULL_RTX,
                                  true);
              /* parity = popcount & 1.  */
              if (temp != 0)
                temp = expand_binop (wider_mode, and_optab, temp, const1_rtx,
                                     target, true, OPTAB_DIRECT);
              if (temp == 0)
                delete_insns_since (last);

              return temp;
            }
        }
    }
  return 0;
}

/* Extract the OMODE lowpart from VAL, which has IMODE.  Under certain
   conditions, VAL may already be a SUBREG against which we cannot generate
   a further SUBREG.  In this case, we expect forcing the value into a
   register will work around the situation.  */

static rtx
lowpart_subreg_maybe_copy (enum machine_mode omode, rtx val,
                           enum machine_mode imode)
{
  rtx ret;
  ret = lowpart_subreg (omode, val, imode);
  if (ret == NULL)
    {
      /* Forcing VAL into a fresh register makes the lowpart subreg
         representable.  */
      val = force_reg (imode, val);
      ret = lowpart_subreg (omode, val, imode);
      gcc_assert (ret != NULL);
    }
  return ret;
}

/* Expand a floating point absolute value or negation operation via a
   logical operation on the sign bit.
*/

static rtx
expand_absneg_bit (enum rtx_code code, enum machine_mode mode,
                   rtx op0, rtx target)
{
  const struct real_format *fmt;
  int bitpos, word, nwords, i;
  enum machine_mode imode;
  HOST_WIDE_INT hi, lo;
  rtx temp, insns;

  /* The format has to have a simple sign bit.  */
  fmt = REAL_MODE_FORMAT (mode);
  if (fmt == NULL)
    return NULL_RTX;

  bitpos = fmt->signbit_rw;
  if (bitpos < 0)
    return NULL_RTX;

  /* Don't create negative zeros if the format doesn't support them.  */
  if (code == NEG && !fmt->has_signed_zero)
    return NULL_RTX;

  if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
    {
      /* The whole value fits in one integer word; operate on it
         directly via the corresponding integer mode.  */
      imode = int_mode_for_mode (mode);
      if (imode == BLKmode)
        return NULL_RTX;
      word = 0;
      nwords = 1;
    }
  else
    {
      /* Multi-word value: locate the word holding the sign bit and
         rewrite BITPOS relative to that word.  */
      imode = word_mode;

      if (FLOAT_WORDS_BIG_ENDIAN)
        word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
      else
        word = bitpos / BITS_PER_WORD;
      bitpos = bitpos % BITS_PER_WORD;
      nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
    }

  /* Build the sign-bit mask (1 << bitpos) as a LO/HI pair of
     HOST_WIDE_INTs for immed_double_const.  */
  if (bitpos < HOST_BITS_PER_WIDE_INT)
    {
      hi = 0;
      lo = (HOST_WIDE_INT) 1 << bitpos;
    }
  else
    {
      hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
      lo = 0;
    }
  /* ABS clears the sign bit (AND with ~mask); NEG flips it
     (XOR with mask).  */
  if (code == ABS)
    lo = ~lo, hi = ~hi;

  if (target == 0 || target == op0)
    target = gen_reg_rtx (mode);

  if (nwords > 1)
    {
      start_sequence ();

      for (i = 0; i < nwords; ++i)
        {
          rtx targ_piece = operand_subword (target, i, 1, mode);
          rtx op0_piece = operand_subword_force (op0, i, mode);

          if (i == word)
            {
              /* Only the word containing the sign bit is transformed;
                 all other words are copied unchanged.  */
              temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
                                   op0_piece,
                                   immed_double_const (lo, hi, imode),
                                   targ_piece, 1, OPTAB_LIB_WIDEN);
              if (temp != targ_piece)
                emit_move_insn (targ_piece, temp);
            }
          else
            emit_move_insn (targ_piece, op0_piece);
        }

      insns = get_insns ();
      end_sequence ();

      temp = gen_rtx_fmt_e (code, mode, copy_rtx (op0));
      emit_no_conflict_block (insns, target, op0, NULL_RTX, temp);
    }
  else
    {
      temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
                           gen_lowpart (imode, op0),
                           immed_double_const (lo, hi, imode),
                           gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
      target = lowpart_subreg_maybe_copy (mode, temp, imode);

      set_unique_reg_note (get_last_insn (), REG_EQUAL,
                           gen_rtx_fmt_e (code, mode, copy_rtx (op0)));
    }

  return target;
}

/* Generate code to perform an operation specified by UNOPTAB
   on operand OP0, with result having machine-mode MODE.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   If TARGET is nonzero, the value
   is generated there, if it is convenient to do so.
   In all cases an rtx is returned for the locus of the value;
   this may or may not be TARGET.
*/

rtx
expand_unop (enum machine_mode mode, optab unoptab, rtx op0, rtx target,
             int unsignedp)
{
  enum mode_class class;
  enum machine_mode wider_mode;
  rtx temp;
  /* Rollback point: failed attempts delete their insns back to here.  */
  rtx last = get_last_insn ();
  rtx pat;

  class = GET_MODE_CLASS (mode);

  /* Strategy 1: a named insn pattern for MODE itself.  */
  if (unoptab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
    {
      int icode = (int) unoptab->handlers[(int) mode].insn_code;
      enum machine_mode mode0 = insn_data[icode].operand[1].mode;
      rtx xop0 = op0;

      if (target)
        temp = target;
      else
        temp = gen_reg_rtx (mode);

      if (GET_MODE (xop0) != VOIDmode
          && GET_MODE (xop0) != mode0)
        xop0 = convert_to_mode (mode0, xop0, unsignedp);

      /* Now, if insn doesn't accept our operand, put it into a pseudo.  */

      if (!insn_data[icode].operand[1].predicate (xop0, mode0))
        xop0 = copy_to_mode_reg (mode0, xop0);

      if (!insn_data[icode].operand[0].predicate (temp, mode))
        temp = gen_reg_rtx (mode);

      pat = GEN_FCN (icode) (temp, xop0);
      if (pat)
        {
          /* If the pattern expanded to multiple insns and we could not
             attach a REG_EQUAL note, redo the expansion into a fresh
             register so the note can be added.  */
          if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
              && ! add_equal_note (pat, temp, unoptab->code, xop0, NULL_RTX))
            {
              delete_insns_since (last);
              return expand_unop (mode, unoptab, op0, NULL_RTX, unsignedp);
            }

          emit_insn (pat);

          return temp;
        }
      else
        delete_insns_since (last);
    }

  /* It can't be done in this mode.  Can we open-code it in a wider mode?  */

  /* Widening clz needs special treatment.  */
  if (unoptab == clz_optab)
    {
      temp = widen_clz (mode, op0, target);
      if (temp)
        return temp;
      else
        goto try_libcall;
    }

  /* We can't widen a bswap.  */
  if (unoptab == bswap_optab)
    goto try_libcall;

  /* Strategy 2: open-code in a wider mode and truncate the result.  */
  if (CLASS_HAS_WIDER_MODES_P (class))
    for (wider_mode = GET_MODE_WIDER_MODE (mode);
         wider_mode != VOIDmode;
         wider_mode = GET_MODE_WIDER_MODE (wider_mode))
      {
        if (unoptab->handlers[(int) wider_mode].insn_code != CODE_FOR_nothing)
          {
            rtx xop0 = op0;

            /* For certain operations, we need not actually extend
               the narrow operand, as long as we will truncate the
               results to the same narrowness.  */

            xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
                                  (unoptab == neg_optab
                                   || unoptab == one_cmpl_optab)
                                  && class == MODE_INT);

            temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
                                unsignedp);

            if (temp)
              {
                if (class != MODE_INT
                    || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
                                               GET_MODE_BITSIZE (wider_mode)))
                  {
                    if (target == 0)
                      target = gen_reg_rtx (mode);
                    convert_move (target, temp, 0);
                    return target;
                  }
                else
                  return gen_lowpart (mode, temp);
              }
            else
              delete_insns_since (last);
          }
      }

  /* These can be done a word at a time.  */
  if (unoptab == one_cmpl_optab
      && class == MODE_INT
      && GET_MODE_SIZE (mode) > UNITS_PER_WORD
      && unoptab->handlers[(int) word_mode].insn_code != CODE_FOR_nothing)
    {
      int i;
      rtx insns;

      if (target == 0 || target == op0)
        target = gen_reg_rtx (mode);

      start_sequence ();

      /* Do the actual arithmetic.  */
      for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
        {
          rtx target_piece = operand_subword (target, i, 1, mode);
          rtx x = expand_unop (word_mode, unoptab,
                               operand_subword_force (op0, i, mode),
                               target_piece, unsignedp);

          if (target_piece != x)
            emit_move_insn (target_piece, x);
        }

      insns = get_insns ();
      end_sequence ();

      emit_no_conflict_block (insns, target, op0, NULL_RTX,
                              gen_rtx_fmt_e (unoptab->code, mode,
                                             copy_rtx (op0)));
      return target;
    }

  if (unoptab->code == NEG)
    {
      /* Try negating floating point values by flipping the sign bit.  */
      if (SCALAR_FLOAT_MODE_P (mode))
        {
          temp = expand_absneg_bit (NEG, mode, op0, target);
          if (temp)
            return temp;
        }

      /* If there is no negation pattern, and we have no negative zero,
         try subtracting from zero.  */
      if (!HONOR_SIGNED_ZEROS (mode))
        {
          temp = expand_binop (mode, (unoptab == negv_optab
                                      ? subv_optab : sub_optab),
                               CONST0_RTX (mode), op0, target,
                               unsignedp, OPTAB_DIRECT);
          if (temp)
            return temp;
        }
    }

  /* Try calculating parity (x) as popcount (x) % 2.  */
  if (unoptab == parity_optab)
    {
      temp = expand_parity (mode, op0, target);
      if (temp)
        return temp;
    }

 try_libcall:
  /* Now try a library call in this mode.  */
  if (unoptab->handlers[(int) mode].libfunc)
    {
      rtx insns;
      rtx value;
      enum machine_mode outmode = mode;

      /* All of these functions return small values.  Thus we choose to
         have them return something that isn't a double-word.  */
      if (unoptab == ffs_optab || unoptab == clz_optab || unoptab == ctz_optab
          || unoptab == popcount_optab || unoptab == parity_optab)
        outmode
            = GET_MODE (hard_libcall_value (TYPE_MODE (integer_type_node)));

      start_sequence ();

      /* Pass 1 for NO_QUEUE so we don't lose any increments
         if the libcall is cse'd or moved.  */
      value = emit_library_call_value (unoptab->handlers[(int) mode].libfunc,
                                       NULL_RTX, LCT_CONST, outmode,
                                       1, op0, mode);
      insns = get_insns ();
      end_sequence ();

      target = gen_reg_rtx (outmode);
      emit_libcall_block (insns, target, value,
                          gen_rtx_fmt_e (unoptab->code, outmode, op0));

      return target;
    }

  /* It can't be done in this mode.  Can we do it in a wider mode?  */

  /* Strategy 4: wider mode again, this time also accepting a wider-mode
     library function rather than only an insn pattern.  */
  if (CLASS_HAS_WIDER_MODES_P (class))
    {
      for (wider_mode = GET_MODE_WIDER_MODE (mode);
           wider_mode != VOIDmode;
           wider_mode = GET_MODE_WIDER_MODE (wider_mode))
        {
          if ((unoptab->handlers[(int) wider_mode].insn_code
               != CODE_FOR_nothing)
              || unoptab->handlers[(int) wider_mode].libfunc)
            {
              rtx xop0 = op0;

              /* For certain operations, we need not actually extend
                 the narrow operand, as long as we will truncate the
                 results to the same narrowness.  */

              xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
                                    (unoptab == neg_optab
                                     || unoptab == one_cmpl_optab)
                                    && class == MODE_INT);

              temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
                                  unsignedp);

              /* If we are generating clz using wider mode, adjust the
                 result.  */
              if (unoptab == clz_optab && temp != 0)
                temp = expand_binop (wider_mode, sub_optab, temp,
                                     GEN_INT (GET_MODE_BITSIZE (wider_mode)
                                              - GET_MODE_BITSIZE (mode)),
                                     target, true, OPTAB_DIRECT);

              if (temp)
                {
                  if (class != MODE_INT)
                    {
                      if (target == 0)
                        target = gen_reg_rtx (mode);
                      convert_move (target, temp, 0);
                      return target;
                    }
                  else
                    return gen_lowpart (mode, temp);
                }
              else
                delete_insns_since (last);
            }
        }
    }

  /* One final attempt at implementing negation via subtraction,
     this time allowing widening of the operand.  */
  if (unoptab->code == NEG && !HONOR_SIGNED_ZEROS (mode))
    {
      rtx temp;
      temp = expand_binop (mode,
                           unoptab == negv_optab ? subv_optab : sub_optab,
                           CONST0_RTX (mode), op0,
                           target, unsignedp, OPTAB_LIB_WIDEN);
      if (temp)
        return temp;
    }

  return 0;
}

/* Emit code to compute the absolute value of OP0, with result to
   TARGET if convenient.  (TARGET may be 0.)  The return value says
   where the result actually is to be found.

   MODE is the mode of the operand; the mode of the result is
   different but can be deduced from MODE.

 */

rtx
expand_abs_nojump (enum machine_mode mode, rtx op0, rtx target,
                   int result_unsignedp)
{
  rtx temp;

  /* Without -ftrapv there is no distinct trapping variant to use.  */
  if (! flag_trapv)
    result_unsignedp = 1;

  /* First try to do it with a special abs instruction.  */
  temp = expand_unop (mode, result_unsignedp ? abs_optab : absv_optab,
                      op0, target, 0);
  if (temp != 0)
    return temp;

  /* For floating point modes, try clearing the sign bit.  */
  if (SCALAR_FLOAT_MODE_P (mode))
    {
      temp = expand_absneg_bit (ABS, mode, op0, target);
      if (temp)
        return temp;
    }

  /* If we have a MAX insn, we can do this as MAX (x, -x).  */
  if (smax_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing
      && !HONOR_SIGNED_ZEROS (mode))
    {
      rtx last = get_last_insn ();

      temp = expand_unop (mode, neg_optab, op0, NULL_RTX, 0);
      if (temp != 0)
        temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
                             OPTAB_WIDEN);

      if (temp != 0)
        return temp;

      delete_insns_since (last);
    }

  /* If this machine has expensive jumps, we can do integer absolute
     value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
     where W is the width of MODE.  */

  if (GET_MODE_CLASS (mode) == MODE_INT && BRANCH_COST >= 2)
    {
      /* EXTENDED is all-ones if OP0 is negative, all-zeros otherwise.  */
      rtx extended = expand_shift (RSHIFT_EXPR, mode, op0,
                                   size_int (GET_MODE_BITSIZE (mode) - 1),
                                   NULL_RTX, 0);

      temp = expand_binop (mode, xor_optab, extended, op0, target, 0,
                           OPTAB_LIB_WIDEN);
      if (temp != 0)
        temp = expand_binop (mode, result_unsignedp ? sub_optab : subv_optab,
                             temp, extended, target, 0, OPTAB_LIB_WIDEN);

      if (temp != 0)
        return temp;
    }

  return NULL_RTX;
}

rtx
expand_abs (enum machine_mode mode, rtx op0, rtx target,
            int result_unsignedp, int safe)
{
  rtx temp, op1;

  if (! flag_trapv)
    result_unsignedp = 1;

  /* Prefer the branch-free expansions.  */
  temp = expand_abs_nojump (mode, op0, target, result_unsignedp);
  if (temp != 0)
    return temp;

  /* If that does not win, use conditional jump and negate.  */

  /* It is safe to use the target if it is the same
     as the source if this is also a pseudo register */
  if (op0 == target && REG_P (op0)
      && REGNO (op0) >= FIRST_PSEUDO_REGISTER)
    safe = 1;

  /* NOTE: OP1 here is the label jumped to when OP0 >= 0, not an
     operand.  */
  op1 = gen_label_rtx ();
  if (target == 0 || ! safe
      || GET_MODE (target) != mode
      || (MEM_P (target) && MEM_VOLATILE_P (target))
      || (REG_P (target)
          && REGNO (target) < FIRST_PSEUDO_REGISTER))
    target = gen_reg_rtx (mode);

  emit_move_insn (target, op0);
  NO_DEFER_POP;

  /* Skip the negation when the value is already non-negative.  */
  do_compare_rtx_and_jump (target, CONST0_RTX (mode), GE, 0, mode,
                           NULL_RTX, NULL_RTX, op1);

  op0 = expand_unop (mode, result_unsignedp ? neg_optab : negv_optab,
                     target, target, 0);
  if (op0 != target)
    emit_move_insn (target, op0);
  emit_label (op1);
  OK_DEFER_POP;
  return target;
}

/* A subroutine of expand_copysign, perform the copysign operation using the
   abs and neg primitives advertised to exist on the target.  The assumption
   is that we have a split register file, and leaving op0 in fp registers,
   and not playing with subregs so much, will help the register allocator.  */

static rtx
expand_copysign_absneg (enum machine_mode mode, rtx op0, rtx op1, rtx target,
                        int bitpos, bool op0_is_abs)
{
  enum machine_mode imode;
  HOST_WIDE_INT hi, lo;
  int word;
  rtx label;

  if (target == op1)
    target = NULL_RTX;

  if (!op0_is_abs)
    {
      op0 = expand_unop (mode, abs_optab, op0, target, 0);
      if (op0 == NULL)
        return NULL_RTX;
      target = op0;
    }
  else
    {
      if (target == NULL_RTX)
        target = copy_to_reg (op0);
      else
        emit_move_insn (target, op0);
    }

  /* Reduce OP1 to the (word-sized) integer piece holding its sign bit.  */
  if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
    {
      imode = int_mode_for_mode (mode);
      if (imode == BLKmode)
        return NULL_RTX;
      op1 = gen_lowpart (imode, op1);
    }
  else
    {
      imode = word_mode;
      if (FLOAT_WORDS_BIG_ENDIAN)
        word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
      else
        word = bitpos / BITS_PER_WORD;
      bitpos = bitpos % BITS_PER_WORD;
      op1 = operand_subword_force (op1, word, mode);
    }

  /* Sign-bit mask as a LO/HI HOST_WIDE_INT pair.  */
  if (bitpos < HOST_BITS_PER_WIDE_INT)
    {
      hi = 0;
      lo = (HOST_WIDE_INT) 1 << bitpos;
    }
  else
    {
      hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
      lo = 0;
    }

  op1 = expand_binop (imode, and_optab, op1,
                      immed_double_const (lo, hi, imode),
                      NULL_RTX, 1, OPTAB_LIB_WIDEN);

  /* Negate TARGET only when OP1's sign bit is set.  */
  label = gen_label_rtx ();
  emit_cmp_and_jump_insns (op1, const0_rtx, EQ, NULL_RTX, imode, 1, label);

  if (GET_CODE (op0) == CONST_DOUBLE)
    op0 = simplify_unary_operation (NEG, mode, op0, mode);
  else
    op0 = expand_unop (mode, neg_optab, op0, target, 0);
  if (op0 != target)
    emit_move_insn (target, op0);

  emit_label (label);

  return target;
}


/* A subroutine of expand_copysign, perform the entire copysign operation
   with integer bitmasks.  BITPOS is the position of the sign bit; OP0_IS_ABS
   is true if op0 is known to have its sign bit clear.  */

static rtx
expand_copysign_bit (enum machine_mode mode, rtx op0, rtx op1, rtx target,
                     int bitpos, bool op0_is_abs)
{
  enum machine_mode imode;
  HOST_WIDE_INT hi, lo;
  int word, nwords, i;
  rtx temp, insns;

  if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
    {
      imode = int_mode_for_mode (mode);
      if (imode == BLKmode)
        return NULL_RTX;
      word = 0;
      nwords = 1;
    }
  else
    {
      imode = word_mode;

      if (FLOAT_WORDS_BIG_ENDIAN)
        word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
      else
        word = bitpos / BITS_PER_WORD;
      bitpos = bitpos % BITS_PER_WORD;
      nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
    }

  if (bitpos < HOST_BITS_PER_WIDE_INT)
    {
      hi = 0;
      lo = (HOST_WIDE_INT) 1 << bitpos;
    }
  else
    {
      hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
      lo = 0;
    }

  if (target == 0 || target == op0 || target == op1)
    target = gen_reg_rtx (mode);

  if (nwords > 1)
    {
      start_sequence ();

      for (i = 0; i < nwords; ++i)
        {
          rtx targ_piece = operand_subword (target, i, 1, mode);
          rtx op0_piece = operand_subword_force (op0, i, mode);

          if (i == word)
            {
              /* result = (op0 & ~signmask) | (op1 & signmask).  */
              if (!op0_is_abs)
                op0_piece = expand_binop (imode, and_optab, op0_piece,
                                          immed_double_const (~lo, ~hi, imode),
                                          NULL_RTX, 1, OPTAB_LIB_WIDEN);

              op1 = expand_binop (imode, and_optab,
                                  operand_subword_force (op1, i, mode),
                                  immed_double_const (lo, hi, imode),
                                  NULL_RTX, 1, OPTAB_LIB_WIDEN);

              temp = expand_binop (imode, ior_optab, op0_piece, op1,
                                   targ_piece, 1, OPTAB_LIB_WIDEN);
              if (temp != targ_piece)
                emit_move_insn (targ_piece, temp);
            }
          else
            emit_move_insn (targ_piece, op0_piece);
        }

      insns = get_insns ();
      end_sequence ();

      emit_no_conflict_block (insns, target, op0, op1, NULL_RTX);
    }
  else
    {
      op1 = expand_binop (imode, and_optab, gen_lowpart (imode, op1),
                          immed_double_const (lo, hi, imode),
                          NULL_RTX, 1, OPTAB_LIB_WIDEN);

      op0 = gen_lowpart (imode, op0);
      if (!op0_is_abs)
        op0 = expand_binop (imode, and_optab, op0,
                            immed_double_const (~lo, ~hi, imode),
                            NULL_RTX, 1, OPTAB_LIB_WIDEN);

      temp = expand_binop (imode, ior_optab, op0, op1,
                           gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
      target = lowpart_subreg_maybe_copy (mode, temp, imode);
    }

  return target;
}

/* Expand the C99 copysign operation.  OP0 and OP1 must be the same
   scalar floating point mode.  Return NULL if we do not know how to
   expand the operation inline.
*/

rtx
expand_copysign (rtx op0, rtx op1, rtx target)
{
  enum machine_mode mode = GET_MODE (op0);
  const struct real_format *fmt;
  bool op0_is_abs;
  rtx temp;

  gcc_assert (SCALAR_FLOAT_MODE_P (mode));
  gcc_assert (GET_MODE (op1) == mode);

  /* First try to do it with a special instruction.  */
  temp = expand_binop (mode, copysign_optab, op0, op1,
		       target, 0, OPTAB_DIRECT);
  if (temp)
    return temp;

  /* Without signed zeros the sign-bit manipulations below are not
     meaningful, so give up.  */
  fmt = REAL_MODE_FORMAT (mode);
  if (fmt == NULL || !fmt->has_signed_zero)
    return NULL_RTX;

  /* Fold |OP0| at compile time when OP0 is constant.  */
  op0_is_abs = false;
  if (GET_CODE (op0) == CONST_DOUBLE)
    {
      if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
	op0 = simplify_unary_operation (ABS, mode, op0, mode);
      op0_is_abs = true;
    }

  /* Try the abs/neg strategy when the sign bit can be read and the
     target advertises both primitives (or OP0 is constant).  */
  if (fmt->signbit_ro >= 0
      && (GET_CODE (op0) == CONST_DOUBLE
	  || (neg_optab->handlers[mode].insn_code != CODE_FOR_nothing
	      && abs_optab->handlers[mode].insn_code != CODE_FOR_nothing)))
    {
      temp = expand_copysign_absneg (mode, op0, op1, target,
				     fmt->signbit_ro, op0_is_abs);
      if (temp)
	return temp;
    }

  /* Fall back to integer bitmask manipulation of the sign bit.  */
  if (fmt->signbit_rw < 0)
    return NULL_RTX;
  return expand_copysign_bit (mode, op0, op1, target,
			      fmt->signbit_rw, op0_is_abs);
}

/* Generate an instruction whose insn-code is INSN_CODE,
   with two operands: an output TARGET and an input OP0.
   TARGET *must* be nonzero, and the output is always stored there.
   CODE is an rtx code such that (CODE OP0) is an rtx that describes
   the value that is stored into TARGET.  */

void
emit_unop_insn (int icode, rtx target, rtx op0, enum rtx_code code)
{
  rtx temp;
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;
  rtx pat;

  temp = target;

  /* Now, if insn does not accept our operands, put them into pseudos.  */

  if (!insn_data[icode].operand[1].predicate (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);

  if (!insn_data[icode].operand[0].predicate (temp, GET_MODE (temp)))
    temp = gen_reg_rtx (GET_MODE (temp));

  pat = GEN_FCN (icode) (temp, op0);

  /* When the pattern expanded to multiple insns, record the overall
     result (CODE OP0) as a REG_EQUAL note on the last one.  */
  if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX && code != UNKNOWN)
    add_equal_note (pat, temp, code, op0, NULL_RTX);

  emit_insn (pat);

  if (temp != target)
    emit_move_insn (target, temp);
}

/* State shared between emit_no_conflict_block / emit_libcall_block and
   the note_stores callback no_conflict_move_test below.  */
struct no_conflict_data
{
  /* Output register of the block; FIRST/INSN delimit the insns examined
     so far; MUST_STAY is the callback's verdict for INSN.  */
  rtx target, first, insn;
  bool must_stay;
};

/* Called via note_stores by emit_no_conflict_block and emit_libcall_block.
   Set P->must_stay if the currently examined clobber / store has to stay
   in the list of insns that constitute the actual no_conflict block /
   libcall block.  */
static void
no_conflict_move_test (rtx dest, rtx set, void *p0)
{
  struct no_conflict_data *p = p0;

  /* If this insn directly contributes to setting the target, it must stay.  */
  if (reg_overlap_mentioned_p (p->target, dest))
    p->must_stay = true;
  /* If we haven't committed to keeping any other insns in the list yet,
     there is nothing more to check.  */
  else if (p->insn == p->first)
    return;
  /* If this insn sets / clobbers a register that feeds one of the insns
     already in the list, this insn has to stay too.  */
  else if (reg_overlap_mentioned_p (dest, PATTERN (p->first))
	   || (CALL_P (p->first) && (find_reg_fusage (p->first, USE, dest)))
	   || reg_used_between_p (dest, p->first, p->insn)
	   /* Likewise if this insn depends on a register set by a previous
	      insn in the list, or if it sets a result (presumably a hard
	      register) that is set or clobbered by a previous insn.
	      N.B. the modified_*_p (SET_DEST...) tests applied to a MEM
	      SET_DEST perform the former check on the address, and the latter
	      check on the MEM.  */
	   || (GET_CODE (set) == SET
	       && (modified_in_p (SET_SRC (set), p->first)
		   || modified_in_p (SET_DEST (set), p->first)
		   || modified_between_p (SET_SRC (set), p->first, p->insn)
		   || modified_between_p (SET_DEST (set), p->first, p->insn))))
    p->must_stay = true;
}

/* Encapsulate the block starting at FIRST and ending with LAST, which is
   logically equivalent to EQUIV, so it gets manipulated as a unit if it
   is possible to do so.  */

static void
maybe_encapsulate_block (rtx first, rtx last, rtx equiv)
{
  /* Do not encapsulate when EQUIV may trap under non-call exceptions:
     the region could then need internal EH edges.  */
  if (!flag_non_call_exceptions || !may_trap_p (equiv))
    {
      /* We can't attach the REG_LIBCALL and REG_RETVAL notes when the
	 encapsulated region would not be in one basic block, i.e. when
	 there is a control_flow_insn_p insn between FIRST and LAST.  */
      bool attach_libcall_retval_notes = true;
      rtx insn, next = NEXT_INSN (last);

      for (insn = first; insn != next; insn = NEXT_INSN (insn))
	if (control_flow_insn_p (insn))
	  {
	    attach_libcall_retval_notes = false;
	    break;
	  }

      if (attach_libcall_retval_notes)
	{
	  REG_NOTES (first) = gen_rtx_INSN_LIST (REG_LIBCALL, last,
						 REG_NOTES (first));
	  REG_NOTES (last) = gen_rtx_INSN_LIST (REG_RETVAL, first,
						REG_NOTES (last));
	}
    }
}

/* Emit code to perform a series of operations on a multi-word quantity, one
   word at a time.

   Such a block is preceded by a CLOBBER of the output, consists of multiple
   insns, each setting one word of the output, and followed by a SET copying
   the output to itself.

   Each of the insns setting words of the output receives a REG_NO_CONFLICT
   note indicating that it doesn't conflict with the (also multi-word)
   inputs.  The entire block is surrounded by REG_LIBCALL and REG_RETVAL
   notes.

   INSNS is a block of code generated to perform the operation, not including
   the CLOBBER and final copy.
   All insns that compute intermediate values
   are first emitted, followed by the block as described above.

   TARGET, OP0, and OP1 are the output and inputs of the operations,
   respectively.  OP1 may be zero for a unary operation.

   EQUIV, if nonzero, is an expression to be placed into a REG_EQUAL note
   on the last insn.

   If TARGET is not a register, INSNS is simply emitted with no special
   processing.  Likewise if anything in INSNS is not an INSN or if
   there is a libcall block inside INSNS.

   The final insn emitted is returned.  */

rtx
emit_no_conflict_block (rtx insns, rtx target, rtx op0, rtx op1, rtx equiv)
{
  rtx prev, next, first, last, insn;

  /* Bail out to plain emission when TARGET is not a register, during
     reload, or when INSNS contains anything but simple insns or a
     nested libcall block.  */
  if (!REG_P (target) || reload_in_progress)
    return emit_insn (insns);
  else
    for (insn = insns; insn; insn = NEXT_INSN (insn))
      if (!NONJUMP_INSN_P (insn)
	  || find_reg_note (insn, REG_LIBCALL, NULL_RTX))
	return emit_insn (insns);

  /* First emit all insns that do not store into words of the output and
     remove these from the list.  */
  for (insn = insns; insn; insn = next)
    {
      rtx note;
      struct no_conflict_data data;

      next = NEXT_INSN (insn);

      /* Some ports (cris) create libcall regions of their own.  We must
	 avoid any potential nesting of LIBCALLs.  */
      if ((note = find_reg_note (insn, REG_LIBCALL, NULL)) != NULL)
	remove_note (insn, note);
      if ((note = find_reg_note (insn, REG_RETVAL, NULL)) != NULL)
	remove_note (insn, note);

      data.target = target;
      data.first = insns;
      data.insn = insn;
      data.must_stay = 0;
      note_stores (PATTERN (insn), no_conflict_move_test, &data);
      if (! data.must_stay)
	{
	  /* Unlink INSN from the sequence and emit it ahead of the
	     block.  */
	  if (PREV_INSN (insn))
	    NEXT_INSN (PREV_INSN (insn)) = next;
	  else
	    insns = next;

	  if (next)
	    PREV_INSN (next) = PREV_INSN (insn);

	  add_insn (insn);
	}
    }

  prev = get_last_insn ();

  /* Now write the CLOBBER of the output, followed by the setting of each
     of the words, followed by the final copy.  */
  if (target != op0 && target != op1)
    emit_insn (gen_rtx_CLOBBER (VOIDmode, target));

  for (insn = insns; insn; insn = next)
    {
      next = NEXT_INSN (insn);
      add_insn (insn);

      /* Tag each word-setting insn as not conflicting with the inputs.  */
      if (op1 && REG_P (op1))
	REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT, op1,
					      REG_NOTES (insn));

      if (op0 && REG_P (op0))
	REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT, op0,
					      REG_NOTES (insn));
    }

  if (mov_optab->handlers[(int) GET_MODE (target)].insn_code
      != CODE_FOR_nothing)
    {
      /* Final self-copy closes the block; hang EQUIV on it.  */
      last = emit_move_insn (target, target);
      if (equiv)
	set_unique_reg_note (last, REG_EQUAL, equiv);
    }
  else
    {
      last = get_last_insn ();

      /* Remove any existing REG_EQUAL note from "last", or else it will
	 be mistaken for a note referring to the full contents of the
	 alleged libcall value when found together with the REG_RETVAL
	 note added below.  An existing note can come from an insn
	 expansion at "last".  */
      remove_note (last, find_reg_note (last, REG_EQUAL, NULL_RTX));
    }

  if (prev == 0)
    first = get_insns ();
  else
    first = NEXT_INSN (prev);

  maybe_encapsulate_block (first, last, equiv);

  return last;
}

/* Emit code to make a call to a constant function or a library call.

   INSNS is a list containing all insns emitted in the call.
   These insns leave the result in RESULT.  Our block is to copy RESULT
   to TARGET, which is logically equivalent to EQUIV.
   We first emit any insns that set a pseudo on the assumption that these are
   loading constants into registers; doing so allows them to be safely cse'ed
   between blocks.  Then we emit all the other insns in the block, followed by
   an insn to move RESULT to TARGET.  This last insn will have a REG_EQUAL
   note with an operand of EQUIV.

   Moving assignments to pseudos outside of the block is done to improve
   the generated code, but is not required to generate correct code,
   hence being unable to move an assignment is not grounds for not making
   a libcall block.  There are two reasons why it is safe to leave these
   insns inside the block: First, we know that these pseudos cannot be
   used in generated RTL outside the block since they are created for
   temporary purposes within the block.  Second, CSE will not record the
   values of anything set inside a libcall block, so we know they must
   be dead at the end of the block.

   Except for the first group of insns (the ones setting pseudos), the
   block is delimited by REG_RETVAL and REG_LIBCALL notes.  */

void
emit_libcall_block (rtx insns, rtx target, rtx result, rtx equiv)
{
  rtx final_dest = target;
  rtx prev, next, first, last, insn;

  /* If this is a reg with REG_USERVAR_P set, then it could possibly turn
     into a MEM later.  Protect the libcall block from this change.  */
  if (! REG_P (target) || REG_USERVAR_P (target))
    target = gen_reg_rtx (GET_MODE (target));

  /* If we're using non-call exceptions, a libcall corresponding to an
     operation that may trap may also trap.  */
  if (flag_non_call_exceptions && may_trap_p (equiv))
    {
      /* Drop "cannot throw" REG_EH_REGION markers so the call keeps its
	 potential EH edge.  */
      for (insn = insns; insn; insn = NEXT_INSN (insn))
	if (CALL_P (insn))
	  {
	    rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);

	    if (note != 0 && INTVAL (XEXP (note, 0)) <= 0)
	      remove_note (insn, note);
	  }
    }
  else
    /* Look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION
       reg note to indicate that this call cannot throw or execute a nonlocal
       goto (unless there is already a REG_EH_REGION note, in which case
       we update it).  */
    for (insn = insns; insn; insn = NEXT_INSN (insn))
      if (CALL_P (insn))
	{
	  rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);

	  if (note != 0)
	    XEXP (note, 0) = constm1_rtx;
	  else
	    REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EH_REGION, constm1_rtx,
						  REG_NOTES (insn));
	}

  /* First emit all insns that set pseudos.  Remove them from the list as
     we go.  Avoid insns that set pseudos which were referenced in previous
     insns.  These can be generated by move_by_pieces, for example,
     to update an address.  Similarly, avoid insns that reference things
     set in previous insns.  */

  for (insn = insns; insn; insn = next)
    {
      rtx set = single_set (insn);
      rtx note;

      /* Some ports (cris) create libcall regions of their own.  We must
	 avoid any potential nesting of LIBCALLs.  */
      if ((note = find_reg_note (insn, REG_LIBCALL, NULL)) != NULL)
	remove_note (insn, note);
      if ((note = find_reg_note (insn, REG_RETVAL, NULL)) != NULL)
	remove_note (insn, note);

      next = NEXT_INSN (insn);

      if (set != 0 && REG_P (SET_DEST (set))
	  && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
	{
	  struct no_conflict_data data;

	  data.target = const0_rtx;
	  data.first = insns;
	  data.insn = insn;
	  data.must_stay = 0;
	  note_stores (PATTERN (insn), no_conflict_move_test, &data);
	  if (! data.must_stay)
	    {
	      /* Unlink INSN from the sequence and emit it ahead of the
		 block.  */
	      if (PREV_INSN (insn))
		NEXT_INSN (PREV_INSN (insn)) = next;
	      else
		insns = next;

	      if (next)
		PREV_INSN (next) = PREV_INSN (insn);

	      add_insn (insn);
	    }
	}

      /* Some ports use a loop to copy large arguments onto the stack.
	 Don't move anything outside such a loop.  */
      if (LABEL_P (insn))
	break;
    }

  prev = get_last_insn ();

  /* Write the remaining insns followed by the final copy.  */

  for (insn = insns; insn; insn = next)
    {
      next = NEXT_INSN (insn);

      add_insn (insn);
    }

  last = emit_move_insn (target, result);
  if (mov_optab->handlers[(int) GET_MODE (target)].insn_code
      != CODE_FOR_nothing)
    set_unique_reg_note (last, REG_EQUAL, copy_rtx (equiv));
  else
    {
      /* Remove any existing REG_EQUAL note from "last", or else it will
	 be mistaken for a note referring to the full contents of the
	 libcall value when found together with the REG_RETVAL note added
	 below.  An existing note can come from an insn expansion at
	 "last".  */
      remove_note (last, find_reg_note (last, REG_EQUAL, NULL_RTX));
    }

  if (final_dest != target)
    emit_move_insn (final_dest, target);

  if (prev == 0)
    first = get_insns ();
  else
    first = NEXT_INSN (prev);

  maybe_encapsulate_block (first, last, equiv);
}

/* Nonzero if we can perform a comparison of mode MODE straightforwardly.
   PURPOSE describes how this comparison will be used.  CODE is the rtx
   comparison code we will be using.

   ??? Actually, CODE is slightly weaker than that.  A target is still
   required to implement all of the normal bcc operations, but not
   required to implement all (or any) of the unordered bcc operations.  */

int
can_compare_p (enum rtx_code code, enum machine_mode mode,
	       enum can_compare_purpose purpose)
{
  /* Walk up through wider modes until one supports the comparison.  */
  do
    {
      if (cmp_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
	{
	  if (purpose == ccp_jump)
	    return bcc_gen_fctn[(int) code] != NULL;
	  else if (purpose == ccp_store_flag)
	    return setcc_gen_code[(int) code] != CODE_FOR_nothing;
	  else
	    /* There's only one cmov entry point, and it's allowed to fail.  */
	    return 1;
	}
      /* Combined compare-and-branch / compare-and-store / compare-and-move
	 patterns also satisfy the corresponding purpose.  */
      if (purpose == ccp_jump
	  && cbranch_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
	return 1;
      if (purpose == ccp_cmov
	  && cmov_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
	return 1;
      if (purpose == ccp_store_flag
	  && cstore_optab->handlers[(int) mode].insn_code != CODE_FOR_nothing)
	return 1;
      mode = GET_MODE_WIDER_MODE (mode);
    }
  while (mode != VOIDmode);

  return 0;
}

/* This function is called when we are going to emit a compare instruction that
   compares the values found in *PX and *PY, using the rtl operator COMPARISON.

   *PMODE is the mode of the inputs (in case they are const_int).
   *PUNSIGNEDP nonzero says that the operands are unsigned;
   this matters if they need to be widened.

   If they have mode BLKmode, then SIZE specifies the size of both operands.

   This function performs all the setup necessary so that the caller only has
   to emit a single comparison insn.  This setup can involve doing a BLKmode
   comparison or emitting a library call to perform the comparison if no insn
   is available to handle it.
   The values which are passed in through pointers can be modified; the caller
   should perform the comparison on the modified values.  Constant
   comparisons must have already been folded.  */

static void
prepare_cmp_insn (rtx *px, rtx *py, enum rtx_code *pcomparison, rtx size,
		  enum machine_mode *pmode, int *punsignedp,
		  enum can_compare_purpose purpose)
{
  enum machine_mode mode = *pmode;
  rtx x = *px, y = *py;
  int unsignedp = *punsignedp;

  /* If we are inside an appropriately-short loop and we are optimizing,
     force expensive constants into a register.  */
  if (CONSTANT_P (x) && optimize
      && rtx_cost (x, COMPARE) > COSTS_N_INSNS (1))
    x = force_reg (mode, x);

  if (CONSTANT_P (y) && optimize
      && rtx_cost (y, COMPARE) > COSTS_N_INSNS (1))
    y = force_reg (mode, y);

#ifdef HAVE_cc0
  /* Make sure we have a canonical comparison.  The RTL
     documentation states that canonical comparisons are required only
     for targets which have cc0.  */
  gcc_assert (!CONSTANT_P (x) || CONSTANT_P (y));
#endif

  /* Don't let both operands fail to indicate the mode.  */
  if (GET_MODE (x) == VOIDmode && GET_MODE (y) == VOIDmode)
    x = force_reg (mode, x);

  /* Handle all BLKmode compares.  */

  if (mode == BLKmode)
    {
      enum machine_mode cmp_mode, result_mode;
      enum insn_code cmp_code;
      tree length_type;
      rtx libfunc;
      rtx result;
      rtx opalign
	= GEN_INT (MIN (MEM_ALIGN (x), MEM_ALIGN (y)) / BITS_PER_UNIT);

      gcc_assert (size);

      /* Try to use a memory block compare insn - either cmpstr
	 or cmpmem will do.  */
      for (cmp_mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
	   cmp_mode != VOIDmode;
	   cmp_mode = GET_MODE_WIDER_MODE (cmp_mode))
	{
	  cmp_code = cmpmem_optab[cmp_mode];
	  if (cmp_code == CODE_FOR_nothing)
	    cmp_code = cmpstr_optab[cmp_mode];
	  if (cmp_code == CODE_FOR_nothing)
	    cmp_code = cmpstrn_optab[cmp_mode];
	  if (cmp_code == CODE_FOR_nothing)
	    continue;

	  /* Must make sure the size fits the insn's mode.  */
	  if ((GET_CODE (size) == CONST_INT
	       && INTVAL (size) >= (1 << GET_MODE_BITSIZE (cmp_mode)))
	      || (GET_MODE_BITSIZE (GET_MODE (size))
		  > GET_MODE_BITSIZE (cmp_mode)))
	    continue;

	  result_mode = insn_data[cmp_code].operand[0].mode;
	  result = gen_reg_rtx (result_mode);
	  size = convert_to_mode (cmp_mode, size, 1);
	  emit_insn (GEN_FCN (cmp_code) (result, x, y, size, opalign));

	  /* Caller compares RESULT against zero in RESULT_MODE.  */
	  *px = result;
	  *py = const0_rtx;
	  *pmode = result_mode;
	  return;
	}

      /* Otherwise call a library function, memcmp.  */
      libfunc = memcmp_libfunc;
      length_type = sizetype;
      result_mode = TYPE_MODE (integer_type_node);
      cmp_mode = TYPE_MODE (length_type);
      size = convert_to_mode (TYPE_MODE (length_type), size,
			      TYPE_UNSIGNED (length_type));

      result = emit_library_call_value (libfunc, 0, LCT_PURE_MAKE_BLOCK,
					result_mode, 3,
					XEXP (x, 0), Pmode,
					XEXP (y, 0), Pmode,
					size, cmp_mode);
      *px = result;
      *py = const0_rtx;
      *pmode = result_mode;
      return;
    }

  /* Don't allow operands to the compare to trap, as that can put the
     compare and branch in different basic blocks.  */
  if (flag_non_call_exceptions)
    {
      if (may_trap_p (x))
	x = force_reg (mode, x);
      if (may_trap_p (y))
	y = force_reg (mode, y);
    }

  *px = x;
  *py = y;
  if (can_compare_p (*pcomparison, mode, purpose))
    return;

  /* Handle a lib call just for the mode we are using.  */

  if (cmp_optab->handlers[(int) mode].libfunc && !SCALAR_FLOAT_MODE_P (mode))
    {
      rtx libfunc = cmp_optab->handlers[(int) mode].libfunc;
      rtx result;

      /* If we want unsigned, and this mode has a distinct unsigned
	 comparison routine, use that.  */
      if (unsignedp && ucmp_optab->handlers[(int) mode].libfunc)
	libfunc = ucmp_optab->handlers[(int) mode].libfunc;

      result = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST_MAKE_BLOCK,
					word_mode, 2, x, mode, y, mode);

      /* There are two kinds of comparison routines.  Biased routines
	 return 0/1/2, and unbiased routines return -1/0/1.  Other parts
	 of gcc expect that the comparison operation is equivalent
	 to the modified comparison.  For signed comparisons compare the
	 result against 1 in the biased case, and zero in the unbiased
	 case.  For unsigned comparisons always compare against 1 after
	 biasing the unbiased result by adding 1.  This gives us a way to
	 represent LTU.  */
      *px = result;
      *pmode = word_mode;
      *py = const1_rtx;

      if (!TARGET_LIB_INT_CMP_BIASED)
	{
	  if (*punsignedp)
	    *px = plus_constant (result, 1);
	  else
	    *py = const0_rtx;
	}
      return;
    }

  /* Only scalar float modes are left; hand off to the FP library
     comparison helper.  */
  gcc_assert (SCALAR_FLOAT_MODE_P (mode));
  prepare_float_lib_cmp (px, py, pcomparison, pmode, punsignedp);
}

/* Before emitting an insn with code ICODE, make sure that X, which is going
   to be used for operand OPNUM of the insn, is converted from mode MODE to
   WIDER_MODE (UNSIGNEDP determines whether it is an unsigned conversion), and
   that it is accepted by the operand predicate.  Return the new value.  */

static rtx
prepare_operand (int icode, rtx x, int opnum, enum machine_mode mode,
		 enum machine_mode wider_mode, int unsignedp)
{
  if (mode != wider_mode)
    x = convert_modes (wider_mode, mode, x, unsignedp);

  if (!insn_data[icode].operand[opnum].predicate
      (x, insn_data[icode].operand[opnum].mode))
    {
      /* Cannot create new pseudos at this point; report failure.  */
      if (no_new_pseudos)
	return NULL_RTX;
      x = copy_to_mode_reg (insn_data[icode].operand[opnum].mode, x);
    }

  return x;
}

/* Subroutine of emit_cmp_and_jump_insns; this function is called when we know
   we can do the comparison.
   The arguments are the same as for emit_cmp_and_jump_insns; but LABEL may
   be NULL_RTX which indicates that only a comparison is to be generated.  */

static void
emit_cmp_and_jump_insn_1 (rtx x, rtx y, enum machine_mode mode,
			  enum rtx_code comparison, int unsignedp, rtx label)
{
  rtx test = gen_rtx_fmt_ee (comparison, mode, x, y);
  enum mode_class class = GET_MODE_CLASS (mode);
  enum machine_mode wider_mode = mode;

  /* Try combined insns first.  */
  do
    {
      enum insn_code icode;
      PUT_MODE (test, wider_mode);

      if (label)
	{
	  /* A combined compare-and-branch pattern.  */
	  icode = cbranch_optab->handlers[(int) wider_mode].insn_code;

	  if (icode != CODE_FOR_nothing
	      && insn_data[icode].operand[0].predicate (test, wider_mode))
	    {
	      x = prepare_operand (icode, x, 1, mode, wider_mode, unsignedp);
	      y = prepare_operand (icode, y, 2, mode, wider_mode, unsignedp);
	      emit_jump_insn (GEN_FCN (icode) (test, x, y, label));
	      return;
	    }
	}

      /* Handle some compares against zero.  */
      icode = (int) tst_optab->handlers[(int) wider_mode].insn_code;
      if (y == CONST0_RTX (mode) && icode != CODE_FOR_nothing)
	{
	  x = prepare_operand (icode, x, 0, mode, wider_mode, unsignedp);
	  emit_insn (GEN_FCN (icode) (x));
	  if (label)
	    emit_jump_insn (bcc_gen_fctn[(int) comparison] (label));
	  return;
	}

      /* Handle compares for which there is a directly suitable insn.  */

      icode = (int) cmp_optab->handlers[(int) wider_mode].insn_code;
      if (icode != CODE_FOR_nothing)
	{
	  x = prepare_operand (icode, x, 0, mode, wider_mode, unsignedp);
	  y = prepare_operand (icode, y, 1, mode, wider_mode, unsignedp);
	  emit_insn (GEN_FCN (icode) (x, y));
	  if (label)
	    emit_jump_insn (bcc_gen_fctn[(int) comparison] (label));
	  return;
	}

      if (!CLASS_HAS_WIDER_MODES_P (class))
	break;

      wider_mode = GET_MODE_WIDER_MODE (wider_mode);
    }
  while (wider_mode != VOIDmode);

  /* prepare_cmp_insn guaranteed some mode would work.  */
  gcc_unreachable ();
}

/* Generate code to compare X with Y so that the condition codes are
   set and to jump to LABEL if the condition is true.  If X is a
   constant and Y is not a constant, then the comparison is swapped to
   ensure that the comparison RTL has the canonical form.

   UNSIGNEDP nonzero says that X and Y are unsigned; this matters if they
   need to be widened by emit_cmp_insn.
UNSIGNEDP is also used to select 3843 the proper branch condition code. 3844 3845 If X and Y have mode BLKmode, then SIZE specifies the size of both X and Y. 3846 3847 MODE is the mode of the inputs (in case they are const_int). 3848 3849 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). It will 3850 be passed unchanged to emit_cmp_insn, then potentially converted into an 3851 unsigned variant based on UNSIGNEDP to select a proper jump instruction. */ 3852 3853void 3854emit_cmp_and_jump_insns (rtx x, rtx y, enum rtx_code comparison, rtx size, 3855 enum machine_mode mode, int unsignedp, rtx label) 3856{ 3857 rtx op0 = x, op1 = y; 3858 3859 /* Swap operands and condition to ensure canonical RTL. */ 3860 if (swap_commutative_operands_p (x, y)) 3861 { 3862 /* If we're not emitting a branch, this means some caller 3863 is out of sync. */ 3864 gcc_assert (label); 3865 3866 op0 = y, op1 = x; 3867 comparison = swap_condition (comparison); 3868 } 3869 3870#ifdef HAVE_cc0 3871 /* If OP0 is still a constant, then both X and Y must be constants. 3872 Force X into a register to create canonical RTL. */ 3873 if (CONSTANT_P (op0)) 3874 op0 = force_reg (mode, op0); 3875#endif 3876 3877 if (unsignedp) 3878 comparison = unsigned_condition (comparison); 3879 3880 prepare_cmp_insn (&op0, &op1, &comparison, size, &mode, &unsignedp, 3881 ccp_jump); 3882 emit_cmp_and_jump_insn_1 (op0, op1, mode, comparison, unsignedp, label); 3883} 3884 3885/* Like emit_cmp_and_jump_insns, but generate only the comparison. */ 3886 3887void 3888emit_cmp_insn (rtx x, rtx y, enum rtx_code comparison, rtx size, 3889 enum machine_mode mode, int unsignedp) 3890{ 3891 emit_cmp_and_jump_insns (x, y, comparison, size, mode, unsignedp, 0); 3892} 3893 3894/* Emit a library call comparison between floating point X and Y. 3895 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). 
*/

static void
prepare_float_lib_cmp (rtx *px, rtx *py, enum rtx_code *pcomparison,
		       enum machine_mode *pmode, int *punsignedp)
{
  enum rtx_code comparison = *pcomparison;
  enum rtx_code swapped = swap_condition (comparison);
  enum rtx_code reversed = reverse_condition_maybe_unordered (comparison);
  rtx x = *px;
  rtx y = *py;
  enum machine_mode orig_mode = GET_MODE (x);
  enum machine_mode mode;
  rtx value, target, insns, equiv;
  rtx libfunc = 0;
  bool reversed_p = false;

  /* Find a libfunc for the comparison, trying the requested code, the
     swapped code, and (when its result is boolean) the reversed code,
     widening the mode as necessary.  */
  for (mode = orig_mode;
       mode != VOIDmode;
       mode = GET_MODE_WIDER_MODE (mode))
    {
      if ((libfunc = code_to_optab[comparison]->handlers[mode].libfunc))
	break;

      if ((libfunc = code_to_optab[swapped]->handlers[mode].libfunc))
	{
	  rtx tmp;
	  tmp = x; x = y; y = tmp;
	  comparison = swapped;
	  break;
	}

      if ((libfunc = code_to_optab[reversed]->handlers[mode].libfunc)
	  && FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, reversed))
	{
	  comparison = reversed;
	  reversed_p = true;
	  break;
	}
    }

  gcc_assert (mode != VOIDmode);

  if (mode != orig_mode)
    {
      x = convert_to_mode (mode, x, 0);
      y = convert_to_mode (mode, y, 0);
    }

  /* Attach a REG_EQUAL note describing the semantics of the libcall to
     the RTL.  This allows the RTL optimizers to delete the libcall if the
     condition can be determined at compile-time.  */
  if (comparison == UNORDERED)
    {
      /* UNORDERED (x, y) is (x != x) || (y != y).  */
      rtx temp = simplify_gen_relational (NE, word_mode, mode, x, x);
      equiv = simplify_gen_relational (NE, word_mode, mode, y, y);
      equiv = simplify_gen_ternary (IF_THEN_ELSE, word_mode, word_mode,
				    temp, const_true_rtx, equiv);
    }
  else
    {
      equiv = simplify_gen_relational (comparison, word_mode, mode, x, y);
      if (! FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
	{
	  /* Map the boolean condition onto the library routine's
	     three-way return value convention.  */
	  rtx true_rtx, false_rtx;

	  switch (comparison)
	    {
	    case EQ:
	      true_rtx = const0_rtx;
	      false_rtx = const_true_rtx;
	      break;

	    case NE:
	      true_rtx = const_true_rtx;
	      false_rtx = const0_rtx;
	      break;

	    case GT:
	      true_rtx = const1_rtx;
	      false_rtx = const0_rtx;
	      break;

	    case GE:
	      true_rtx = const0_rtx;
	      false_rtx = constm1_rtx;
	      break;

	    case LT:
	      true_rtx = constm1_rtx;
	      false_rtx = const0_rtx;
	      break;

	    case LE:
	      true_rtx = const0_rtx;
	      false_rtx = const1_rtx;
	      break;

	    default:
	      gcc_unreachable ();
	    }
	  equiv = simplify_gen_ternary (IF_THEN_ELSE, word_mode, word_mode,
					equiv, true_rtx, false_rtx);
	}
    }

  start_sequence ();
  value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
				   word_mode, 2, x, mode, y, mode);
  insns = get_insns ();
  end_sequence ();

  target = gen_reg_rtx (word_mode);
  emit_libcall_block (insns, target, value, equiv);

  /* Boolean-returning routines are tested against zero with EQ/NE,
     depending on whether we had to reverse the comparison above.  */
  if (comparison == UNORDERED
      || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
    comparison = reversed_p ? EQ : NE;

  *px = target;
  *py = const0_rtx;
  *pmode = word_mode;
  *pcomparison = comparison;
  *punsignedp = 0;
}

/* Generate code to indirectly jump to a location given in the rtx LOC.  */

void
emit_indirect_jump (rtx loc)
{
  if (!insn_data[(int) CODE_FOR_indirect_jump].operand[0].predicate
      (loc, Pmode))
    loc = copy_to_mode_reg (Pmode, loc);

  emit_jump_insn (gen_indirect_jump (loc));
  emit_barrier ();
}

#ifdef HAVE_conditional_move

/* Emit a conditional move instruction if the machine supports one for that
   condition and machine mode.

   OP0 and OP1 are the operands that should be compared using CODE.
CMODE is
   the mode to use should they be constants.  If it is VOIDmode, they cannot
   both be constants.

   OP2 should be stored in TARGET if the comparison is true, otherwise OP3
   should be stored there.  MODE is the mode to use should they be constants.
   If it is VOIDmode, they cannot both be constants.

   The result is either TARGET (perhaps modified) or NULL_RTX if the operation
   is not supported.  */

rtx
emit_conditional_move (rtx target, enum rtx_code code, rtx op0, rtx op1,
                       enum machine_mode cmode, rtx op2, rtx op3,
                       enum machine_mode mode, int unsignedp)
{
  rtx tem, subtarget, comparison, insn;
  enum insn_code icode;
  enum rtx_code reversed;

  /* If one operand is constant, make it the second one.  Only do this
     if the other operand is not constant as well.  */

  if (swap_commutative_operands_p (op0, op1))
    {
      tem = op0;
      op0 = op1;
      op1 = tem;
      code = swap_condition (code);
    }

  /* get_condition will prefer to generate LT and GT even if the old
     comparison was against zero, so undo that canonicalization here since
     comparisons against zero are cheaper.  */
  if (code == LT && op1 == const1_rtx)
    code = LE, op1 = const0_rtx;
  else if (code == GT && op1 == constm1_rtx)
    code = GE, op1 = const0_rtx;

  if (cmode == VOIDmode)
    cmode = GET_MODE (op0);

  /* If OP2 and OP3 are in non-canonical order and the comparison can be
     reversed, exchange them and reverse the condition.  */
  if (swap_commutative_operands_p (op2, op3)
      && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
          != UNKNOWN))
    {
      tem = op2;
      op2 = op3;
      op3 = tem;
      code = reversed;
    }

  if (mode == VOIDmode)
    mode = GET_MODE (op2);

  icode = movcc_gen_code[mode];

  if (icode == CODE_FOR_nothing)
    return 0;

  if (!target)
    target = gen_reg_rtx (mode);

  subtarget = target;

  /* If the insn doesn't accept these operands, put them in pseudos.  */

  if (!insn_data[icode].operand[0].predicate
      (subtarget, insn_data[icode].operand[0].mode))
    subtarget = gen_reg_rtx (insn_data[icode].operand[0].mode);

  if (!insn_data[icode].operand[2].predicate
      (op2, insn_data[icode].operand[2].mode))
    op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);

  if (!insn_data[icode].operand[3].predicate
      (op3, insn_data[icode].operand[3].mode))
    op3 = copy_to_mode_reg (insn_data[icode].operand[3].mode, op3);

  /* Everything should now be in the suitable form, so emit the compare insn
     and then the conditional move.  */

  comparison
    = compare_from_rtx (op0, op1, code, unsignedp, cmode, NULL_RTX);

  /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)?  */
  /* We can get const0_rtx or const_true_rtx in some circumstances.  Just
     return NULL and let the caller figure out how best to deal with this
     situation.  */
  if (GET_CODE (comparison) != code)
    return NULL_RTX;

  insn = GEN_FCN (icode) (subtarget, comparison, op2, op3);

  /* If that failed, then give up.  */
  if (insn == 0)
    return 0;

  emit_insn (insn);

  /* If a scratch subtarget was used, copy the result into TARGET.  */
  if (subtarget != target)
    convert_move (target, subtarget, 0);

  return target;
}

/* Return nonzero if a conditional move of mode MODE is supported.

   This function is for combine so it can tell whether an insn that looks
   like a conditional move is actually supported by the hardware.  If we
   guess wrong we lose a bit on optimization, but that's it.  */
/* ??? sparc64 supports conditionally moving integers values based on fp
   comparisons, and vice versa.  How do we handle them?
*/

int
can_conditionally_move_p (enum machine_mode mode)
{
  /* Nonzero exactly when a movcc pattern exists for MODE.  */
  if (movcc_gen_code[mode] != CODE_FOR_nothing)
    return 1;

  return 0;
}

#endif /* HAVE_conditional_move */

/* Emit a conditional addition instruction if the machine supports one for that
   condition and machine mode.

   OP0 and OP1 are the operands that should be compared using CODE.  CMODE is
   the mode to use should they be constants.  If it is VOIDmode, they cannot
   both be constants.

   OP2 should be stored in TARGET if the comparison is true, otherwise OP2+OP3
   should be stored there.  MODE is the mode to use should they be constants.
   If it is VOIDmode, they cannot both be constants.

   The result is either TARGET (perhaps modified) or NULL_RTX if the operation
   is not supported.  */

rtx
emit_conditional_add (rtx target, enum rtx_code code, rtx op0, rtx op1,
                      enum machine_mode cmode, rtx op2, rtx op3,
                      enum machine_mode mode, int unsignedp)
{
  rtx tem, subtarget, comparison, insn;
  enum insn_code icode;
  enum rtx_code reversed;

  /* If one operand is constant, make it the second one.  Only do this
     if the other operand is not constant as well.  */

  if (swap_commutative_operands_p (op0, op1))
    {
      tem = op0;
      op0 = op1;
      op1 = tem;
      code = swap_condition (code);
    }

  /* get_condition will prefer to generate LT and GT even if the old
     comparison was against zero, so undo that canonicalization here since
     comparisons against zero are cheaper.  */
  if (code == LT && op1 == const1_rtx)
    code = LE, op1 = const0_rtx;
  else if (code == GT && op1 == constm1_rtx)
    code = GE, op1 = const0_rtx;

  if (cmode == VOIDmode)
    cmode = GET_MODE (op0);

  /* If OP2 and OP3 are in non-canonical order and the comparison can be
     reversed, exchange them and reverse the condition.  */
  if (swap_commutative_operands_p (op2, op3)
      && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
          != UNKNOWN))
    {
      tem = op2;
      op2 = op3;
      op3 = tem;
      code = reversed;
    }

  if (mode == VOIDmode)
    mode = GET_MODE (op2);

  icode = addcc_optab->handlers[(int) mode].insn_code;

  if (icode == CODE_FOR_nothing)
    return 0;

  if (!target)
    target = gen_reg_rtx (mode);

  /* If the insn doesn't accept these operands, put them in pseudos.  */

  if (!insn_data[icode].operand[0].predicate
      (target, insn_data[icode].operand[0].mode))
    subtarget = gen_reg_rtx (insn_data[icode].operand[0].mode);
  else
    subtarget = target;

  if (!insn_data[icode].operand[2].predicate
      (op2, insn_data[icode].operand[2].mode))
    op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);

  if (!insn_data[icode].operand[3].predicate
      (op3, insn_data[icode].operand[3].mode))
    op3 = copy_to_mode_reg (insn_data[icode].operand[3].mode, op3);

  /* Everything should now be in the suitable form, so emit the compare insn
     and then the conditional move.  */

  comparison
    = compare_from_rtx (op0, op1, code, unsignedp, cmode, NULL_RTX);

  /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)?  */
  /* We can get const0_rtx or const_true_rtx in some circumstances.  Just
     return NULL and let the caller figure out how best to deal with this
     situation.  */
  if (GET_CODE (comparison) != code)
    return NULL_RTX;

  insn = GEN_FCN (icode) (subtarget, comparison, op2, op3);

  /* If that failed, then give up.  */
  if (insn == 0)
    return 0;

  emit_insn (insn);

  /* If a scratch subtarget was used, copy the result into TARGET.  */
  if (subtarget != target)
    convert_move (target, subtarget, 0);

  return target;
}

/* These functions attempt to generate an insn body, rather than
   emitting the insn, but if the gen function already emits them, we
   make no attempt to turn them back into naked patterns.  */

/* Generate and return an insn body to add Y to X.  */

rtx
gen_add2_insn (rtx x, rtx y)
{
  int icode = (int) add_optab->handlers[(int) GET_MODE (x)].insn_code;

  /* The caller must supply operands already acceptable to the add
     pattern's predicates.  */
  gcc_assert (insn_data[icode].operand[0].predicate
              (x, insn_data[icode].operand[0].mode));
  gcc_assert (insn_data[icode].operand[1].predicate
              (x, insn_data[icode].operand[1].mode));
  gcc_assert (insn_data[icode].operand[2].predicate
              (y, insn_data[icode].operand[2].mode));

  return GEN_FCN (icode) (x, x, y);
}

/* Generate and return an insn body to add r1 and c,
   storing the result in r0.
*/
rtx
gen_add3_insn (rtx r0, rtx r1, rtx c)
{
  int icode = (int) add_optab->handlers[(int) GET_MODE (r0)].insn_code;

  /* Unlike gen_add2_insn, fail softly: return NULL_RTX if there is no
     add pattern for this mode or the operands fail its predicates.  */
  if (icode == CODE_FOR_nothing
      || !(insn_data[icode].operand[0].predicate
           (r0, insn_data[icode].operand[0].mode))
      || !(insn_data[icode].operand[1].predicate
           (r1, insn_data[icode].operand[1].mode))
      || !(insn_data[icode].operand[2].predicate
           (c, insn_data[icode].operand[2].mode)))
    return NULL_RTX;

  return GEN_FCN (icode) (r0, r1, c);
}

/* Return nonzero if the target has an add pattern for the mode of X
   that accepts X, X and Y as its operands.  */

int
have_add2_insn (rtx x, rtx y)
{
  int icode;

  gcc_assert (GET_MODE (x) != VOIDmode);

  icode = (int) add_optab->handlers[(int) GET_MODE (x)].insn_code;

  if (icode == CODE_FOR_nothing)
    return 0;

  if (!(insn_data[icode].operand[0].predicate
        (x, insn_data[icode].operand[0].mode))
      || !(insn_data[icode].operand[1].predicate
           (x, insn_data[icode].operand[1].mode))
      || !(insn_data[icode].operand[2].predicate
           (y, insn_data[icode].operand[2].mode)))
    return 0;

  return 1;
}

/* Generate and return an insn body to subtract Y from X.  */

rtx
gen_sub2_insn (rtx x, rtx y)
{
  int icode = (int) sub_optab->handlers[(int) GET_MODE (x)].insn_code;

  /* The caller must supply operands already acceptable to the subtract
     pattern's predicates.  */
  gcc_assert (insn_data[icode].operand[0].predicate
              (x, insn_data[icode].operand[0].mode));
  gcc_assert (insn_data[icode].operand[1].predicate
              (x, insn_data[icode].operand[1].mode));
  gcc_assert (insn_data[icode].operand[2].predicate
              (y, insn_data[icode].operand[2].mode));

  return GEN_FCN (icode) (x, x, y);
}

/* Generate and return an insn body to subtract r1 and c,
   storing the result in r0.
*/
rtx
gen_sub3_insn (rtx r0, rtx r1, rtx c)
{
  int icode = (int) sub_optab->handlers[(int) GET_MODE (r0)].insn_code;

  /* Fail softly: return NULL_RTX if there is no subtract pattern for
     this mode or the operands fail its predicates.  */
  if (icode == CODE_FOR_nothing
      || !(insn_data[icode].operand[0].predicate
           (r0, insn_data[icode].operand[0].mode))
      || !(insn_data[icode].operand[1].predicate
           (r1, insn_data[icode].operand[1].mode))
      || !(insn_data[icode].operand[2].predicate
           (c, insn_data[icode].operand[2].mode)))
    return NULL_RTX;

  return GEN_FCN (icode) (r0, r1, c);
}

/* Return nonzero if the target has a subtract pattern for the mode of X
   that accepts X, X and Y as its operands.  */

int
have_sub2_insn (rtx x, rtx y)
{
  int icode;

  gcc_assert (GET_MODE (x) != VOIDmode);

  icode = (int) sub_optab->handlers[(int) GET_MODE (x)].insn_code;

  if (icode == CODE_FOR_nothing)
    return 0;

  if (!(insn_data[icode].operand[0].predicate
        (x, insn_data[icode].operand[0].mode))
      || !(insn_data[icode].operand[1].predicate
           (x, insn_data[icode].operand[1].mode))
      || !(insn_data[icode].operand[2].predicate
           (y, insn_data[icode].operand[2].mode)))
    return 0;

  return 1;
}

/* Generate the body of an instruction to copy Y into X.
   It may be a list of insns, if one insn isn't enough.  */

rtx
gen_move_insn (rtx x, rtx y)
{
  rtx seq;

  start_sequence ();
  emit_move_insn_1 (x, y);
  seq = get_insns ();
  end_sequence ();
  return seq;
}

/* Return the insn code used to extend FROM_MODE to TO_MODE.
   UNSIGNEDP specifies zero-extension instead of sign-extension.  If
   no such operation exists, CODE_FOR_nothing will be returned.  */

enum insn_code
can_extend_p (enum machine_mode to_mode, enum machine_mode from_mode,
              int unsignedp)
{
  convert_optab tab;
#ifdef HAVE_ptr_extend
  /* A negative UNSIGNEDP requests a pointer extension, which has its
     own named pattern.  */
  if (unsignedp < 0)
    return CODE_FOR_ptr_extend;
#endif

  tab = unsignedp ? zext_optab : sext_optab;
  return tab->handlers[to_mode][from_mode].insn_code;
}

/* Generate the body of an insn to extend Y (with mode MFROM)
   into X (with mode MTO).  Do zero-extension if UNSIGNEDP is nonzero.  */

rtx
gen_extend_insn (rtx x, rtx y, enum machine_mode mto,
                 enum machine_mode mfrom, int unsignedp)
{
  enum insn_code icode = can_extend_p (mto, mfrom, unsignedp);
  return GEN_FCN (icode) (x, y);
}

/* can_fix_p and can_float_p say whether the target machine
   can directly convert a given fixed point type to
   a given floating point type, or vice versa.
   The returned value is the CODE_FOR_... value to use,
   or CODE_FOR_nothing if these modes cannot be directly converted.

   *TRUNCP_PTR is set to 1 if it is necessary to output
   an explicit FTRUNC insn before the fix insn; otherwise 0.  */

static enum insn_code
can_fix_p (enum machine_mode fixmode, enum machine_mode fltmode,
           int unsignedp, int *truncp_ptr)
{
  convert_optab tab;
  enum insn_code icode;

  /* First look for a fix-with-truncation pattern, which needs no
     separate FTRUNC insn.  */
  tab = unsignedp ? ufixtrunc_optab : sfixtrunc_optab;
  icode = tab->handlers[fixmode][fltmode].insn_code;
  if (icode != CODE_FOR_nothing)
    {
      *truncp_ptr = 0;
      return icode;
    }

  /* FIXME: This requires a port to define both FIX and FTRUNC pattern
     for this to work.  We need to rework the fix* and ftrunc* patterns
     and documentation.  */
  tab = unsignedp ? ufix_optab : sfix_optab;
  icode = tab->handlers[fixmode][fltmode].insn_code;
  if (icode != CODE_FOR_nothing
      && ftrunc_optab->handlers[fltmode].insn_code != CODE_FOR_nothing)
    {
      *truncp_ptr = 1;
      return icode;
    }

  *truncp_ptr = 0;
  return CODE_FOR_nothing;
}

/* Return the insn code to convert fixed-point mode FIXMODE to
   floating-point mode FLTMODE, or CODE_FOR_nothing if none exists.
   UNSIGNEDP selects the unsigned variant.  */

static enum insn_code
can_float_p (enum machine_mode fltmode, enum machine_mode fixmode,
             int unsignedp)
{
  convert_optab tab;

  tab = unsignedp ? ufloat_optab : sfloat_optab;
  return tab->handlers[fltmode][fixmode].insn_code;
}

/* Generate code to convert FROM to floating point
   and store in TO.  FROM must be fixed point and not VOIDmode.
   UNSIGNEDP nonzero means regard FROM as unsigned.
   Normally this is done by correcting the final value
   if it is negative.  */

void
expand_float (rtx to, rtx from, int unsignedp)
{
  enum insn_code icode;
  rtx target = to;
  enum machine_mode fmode, imode;
  bool can_do_signed = false;

  /* Crash now, because we won't be able to decide which mode to use.  */
  gcc_assert (GET_MODE (from) != VOIDmode);

  /* Look for an insn to do the conversion.  Do it in the specified
     modes if possible; otherwise convert either input, output or both to
     wider mode.  If the integer mode is wider than the mode of FROM,
     we can do the conversion signed even if the input is unsigned.  */

  for (fmode = GET_MODE (to); fmode != VOIDmode;
       fmode = GET_MODE_WIDER_MODE (fmode))
    for (imode = GET_MODE (from); imode != VOIDmode;
         imode = GET_MODE_WIDER_MODE (imode))
      {
        int doing_unsigned = unsignedp;

        /* Only consider a wider FMODE if its significand can represent
           every bit of the source integer.  */
        if (fmode != GET_MODE (to)
            && significand_size (fmode) < GET_MODE_BITSIZE (GET_MODE (from)))
          continue;

        icode = can_float_p (fmode, imode, unsignedp);
        if (icode == CODE_FOR_nothing && unsignedp)
          {
            enum insn_code scode = can_float_p (fmode, imode, 0);
            if (scode != CODE_FOR_nothing)
              can_do_signed = true;
            /* A signed conversion is usable for unsigned input only when
               IMODE is wider than the source mode.  */
            if (imode != GET_MODE (from))
              icode = scode, doing_unsigned = 0;
          }

        if (icode != CODE_FOR_nothing)
          {
            if (imode != GET_MODE (from))
              from = convert_to_mode (imode, from, unsignedp);

            if (fmode != GET_MODE (to))
              target = gen_reg_rtx (fmode);

            emit_unop_insn (icode, target, from,
                            doing_unsigned ? UNSIGNED_FLOAT : FLOAT);

            if (target != to)
              convert_move (to, target, 0);
            return;
          }
      }

  /* Unsigned integer, and no way to convert directly.  For binary
     floating point modes, convert as signed, then conditionally adjust
     the result.  */
  if (unsignedp && can_do_signed && !DECIMAL_FLOAT_MODE_P (GET_MODE (to)))
    {
      rtx label = gen_label_rtx ();
      rtx temp;
      REAL_VALUE_TYPE offset;

      /* Look for a usable floating mode FMODE wider than the source and at
         least as wide as the target.  Using FMODE will avoid rounding woes
         with unsigned values greater than the signed maximum value.  */

      for (fmode = GET_MODE (to); fmode != VOIDmode;
           fmode = GET_MODE_WIDER_MODE (fmode))
        if (GET_MODE_BITSIZE (GET_MODE (from)) < GET_MODE_BITSIZE (fmode)
            && can_float_p (fmode, GET_MODE (from), 0) != CODE_FOR_nothing)
          break;

      if (fmode == VOIDmode)
        {
          /* There is no such mode.  Pretend the target is wide enough.  */
          fmode = GET_MODE (to);

          /* Avoid double-rounding when TO is narrower than FROM.  */
          if ((significand_size (fmode) + 1)
              < GET_MODE_BITSIZE (GET_MODE (from)))
            {
              rtx temp1;
              rtx neglabel = gen_label_rtx ();

              /* Don't use TARGET if it isn't a register, is a hard register,
                 or is the wrong mode.  */
              if (!REG_P (target)
                  || REGNO (target) < FIRST_PSEUDO_REGISTER
                  || GET_MODE (target) != fmode)
                target = gen_reg_rtx (fmode);

              imode = GET_MODE (from);
              do_pending_stack_adjust ();

              /* Test whether the sign bit is set.  */
              emit_cmp_and_jump_insns (from, const0_rtx, LT, NULL_RTX, imode,
                                       0, neglabel);

              /* The sign bit is not set.  Convert as signed.  */
              expand_float (target, from, 0);
              emit_jump_insn (gen_jump (label));
              emit_barrier ();

              /* The sign bit is set.
                 Convert to a usable (positive signed) value by shifting right
                 one bit, while remembering if a nonzero bit was shifted
                 out; i.e., compute  (from & 1) | (from >> 1).  */

              emit_label (neglabel);
              temp = expand_binop (imode, and_optab, from, const1_rtx,
                                   NULL_RTX, 1, OPTAB_LIB_WIDEN);
              temp1 = expand_shift (RSHIFT_EXPR, imode, from, integer_one_node,
                                    NULL_RTX, 1);
              temp = expand_binop (imode, ior_optab, temp, temp1, temp, 1,
                                   OPTAB_LIB_WIDEN);
              expand_float (target, temp, 0);

              /* Multiply by 2 to undo the shift above.  */
              temp = expand_binop (fmode, add_optab, target, target,
                                   target, 0, OPTAB_LIB_WIDEN);
              if (temp != target)
                emit_move_insn (target, temp);

              do_pending_stack_adjust ();
              emit_label (label);
              goto done;
            }
        }

      /* If we are about to do some arithmetic to correct for an
         unsigned operand, do it in a pseudo-register.  */

      if (GET_MODE (to) != fmode
          || !REG_P (to) || REGNO (to) < FIRST_PSEUDO_REGISTER)
        target = gen_reg_rtx (fmode);

      /* Convert as signed integer to floating.  */
      expand_float (target, from, 0);

      /* If FROM is negative (and therefore TO is negative),
         correct its value by 2**bitwidth.  */

      do_pending_stack_adjust ();
      emit_cmp_and_jump_insns (from, const0_rtx, GE, NULL_RTX, GET_MODE (from),
                               0, label);

      real_2expN (&offset, GET_MODE_BITSIZE (GET_MODE (from)));
      temp = expand_binop (fmode, add_optab, target,
                           CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode),
                           target, 0, OPTAB_LIB_WIDEN);
      if (temp != target)
        emit_move_insn (target, temp);

      do_pending_stack_adjust ();
      emit_label (label);
      goto done;
    }

  /* No hardware instruction available; call a library routine.  */
  {
    rtx libfunc;
    rtx insns;
    rtx value;
    convert_optab tab = unsignedp ?
ufloat_optab : sfloat_optab;

    /* The library calls we know about handle SImode and wider only
       (see the matching check in expand_fix).  */
    if (GET_MODE_SIZE (GET_MODE (from)) < GET_MODE_SIZE (SImode))
      from = convert_to_mode (SImode, from, unsignedp);

    libfunc = tab->handlers[GET_MODE (to)][GET_MODE (from)].libfunc;
    gcc_assert (libfunc);

    start_sequence ();

    value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
                                     GET_MODE (to), 1, from,
                                     GET_MODE (from));
    insns = get_insns ();
    end_sequence ();

    emit_libcall_block (insns, target, value,
                        gen_rtx_FLOAT (GET_MODE (to), from));
  }

 done:

  /* Copy result to requested destination
     if we have been computing in a temp location.  */

  if (target != to)
    {
      if (GET_MODE (target) == GET_MODE (to))
        emit_move_insn (to, target);
      else
        convert_move (to, target, 0);
    }
}

/* Generate code to convert FROM to fixed point and store in TO.  FROM
   must be floating point.  */

void
expand_fix (rtx to, rtx from, int unsignedp)
{
  enum insn_code icode;
  rtx target = to;
  enum machine_mode fmode, imode;
  int must_trunc = 0;

  /* We first try to find a pair of modes, one real and one integer, at
     least as wide as FROM and TO, respectively, in which we can open-code
     this conversion.  If the integer mode is wider than the mode of TO,
     we can do the conversion either signed or unsigned.  */

  for (fmode = GET_MODE (from); fmode != VOIDmode;
       fmode = GET_MODE_WIDER_MODE (fmode))
    for (imode = GET_MODE (to); imode != VOIDmode;
         imode = GET_MODE_WIDER_MODE (imode))
      {
        int doing_unsigned = unsignedp;

        icode = can_fix_p (imode, fmode, unsignedp, &must_trunc);
        if (icode == CODE_FOR_nothing && imode != GET_MODE (to) && unsignedp)
          icode = can_fix_p (imode, fmode, 0, &must_trunc), doing_unsigned = 0;

        if (icode != CODE_FOR_nothing)
          {
            if (fmode != GET_MODE (from))
              from = convert_to_mode (fmode, from, 0);

            /* Emit an explicit FTRUNC first when the fix pattern
               requires an already-truncated operand.  */
            if (must_trunc)
              {
                rtx temp = gen_reg_rtx (GET_MODE (from));
                from = expand_unop (GET_MODE (from), ftrunc_optab, from,
                                    temp, 0);
              }

            if (imode != GET_MODE (to))
              target = gen_reg_rtx (imode);

            emit_unop_insn (icode, target, from,
                            doing_unsigned ? UNSIGNED_FIX : FIX);
            if (target != to)
              convert_move (to, target, unsignedp);
            return;
          }
      }

  /* For an unsigned conversion, there is one more way to do it.
     If we have a signed conversion, we generate code that compares
     the real value to the largest representable positive number.  If if
     is smaller, the conversion is done normally.  Otherwise, subtract
     one plus the highest signed number, convert, and add it back.

     We only need to check all real modes, since we know we didn't find
     anything with a wider integer mode.

     This code used to extend FP value into mode wider than the destination.
     This is not needed.  Consider, for instance conversion from SFmode
     into DImode.

     The hot path through the code is dealing with inputs smaller than 2^63
     and doing just the conversion, so there is no bits to lose.

     In the other path we know the value is positive in the range 2^63..2^64-1
     inclusive.  (as for other imput overflow happens and result is undefined)
     So we know that the most important bit set in mantissa corresponds to
     2^63.  The subtraction of 2^63 should not generate any rounding as it
     simply clears out that bit.  The rest is trivial.  */

  if (unsignedp && GET_MODE_BITSIZE (GET_MODE (to)) <= HOST_BITS_PER_WIDE_INT)
    for (fmode = GET_MODE (from); fmode != VOIDmode;
         fmode = GET_MODE_WIDER_MODE (fmode))
      if (CODE_FOR_nothing != can_fix_p (GET_MODE (to), fmode, 0,
                                         &must_trunc))
        {
          int bitsize;
          REAL_VALUE_TYPE offset;
          rtx limit, lab1, lab2, insn;

          /* LIMIT is 2**(N-1), the first value too large for the signed
             conversion.  */
          bitsize = GET_MODE_BITSIZE (GET_MODE (to));
          real_2expN (&offset, bitsize - 1);
          limit = CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode);
          lab1 = gen_label_rtx ();
          lab2 = gen_label_rtx ();

          if (fmode != GET_MODE (from))
            from = convert_to_mode (fmode, from, 0);

          /* See if we need to do the subtraction.  */
          do_pending_stack_adjust ();
          emit_cmp_and_jump_insns (from, limit, GE, NULL_RTX, GET_MODE (from),
                                   0, lab1);

          /* If not, do the signed "fix" and branch around fixup code.  */
          expand_fix (to, from, 0);
          emit_jump_insn (gen_jump (lab2));
          emit_barrier ();

          /* Otherwise, subtract 2**(N-1), convert to signed number,
             then add 2**(N-1).  Do the addition using XOR since this
             will often generate better code.  */
          emit_label (lab1);
          target = expand_binop (GET_MODE (from), sub_optab, from, limit,
                                 NULL_RTX, 0, OPTAB_LIB_WIDEN);
          expand_fix (to, target, 0);
          target = expand_binop (GET_MODE (to), xor_optab, to,
                                 gen_int_mode
                                 ((HOST_WIDE_INT) 1 << (bitsize - 1),
                                  GET_MODE (to)),
                                 to, 1, OPTAB_LIB_WIDEN);

          if (target != to)
            emit_move_insn (to, target);

          emit_label (lab2);

          if (mov_optab->handlers[(int) GET_MODE (to)].insn_code
              != CODE_FOR_nothing)
            {
              /* Make a place for a REG_NOTE and add it.  */
              insn = emit_move_insn (to, to);
              set_unique_reg_note (insn,
                                   REG_EQUAL,
                                   gen_rtx_fmt_e (UNSIGNED_FIX,
                                                  GET_MODE (to),
                                                  copy_rtx (from)));
            }

          return;
        }

  /* We can't do it with an insn, so use a library call.  But first ensure
     that the mode of TO is at least as wide as SImode, since those are the
     only library calls we know about.  */

  if (GET_MODE_SIZE (GET_MODE (to)) < GET_MODE_SIZE (SImode))
    {
      /* Convert into SImode first; the final copy below narrows it.  */
      target = gen_reg_rtx (SImode);

      expand_fix (target, from, unsignedp);
    }
  else
    {
      rtx insns;
      rtx value;
      rtx libfunc;

      convert_optab tab = unsignedp ? ufix_optab : sfix_optab;
      libfunc = tab->handlers[GET_MODE (to)][GET_MODE (from)].libfunc;
      gcc_assert (libfunc);

      start_sequence ();

      value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
                                       GET_MODE (to), 1, from,
                                       GET_MODE (from));
      insns = get_insns ();
      end_sequence ();

      emit_libcall_block (insns, target, value,
                          gen_rtx_fmt_e (unsignedp ? UNSIGNED_FIX : FIX,
                                         GET_MODE (to), from));
    }

  if (target != to)
    {
      if (GET_MODE (to) == GET_MODE (target))
        emit_move_insn (to, target);
      else
        convert_move (to, target, 0);
    }
}

/* Report whether we have an instruction to perform the operation
   specified by CODE on operands of mode MODE.  */
int
have_insn_for (enum rtx_code code, enum machine_mode mode)
{
  return (code_to_optab[(int) code] != 0
          && (code_to_optab[(int) code]->handlers[(int) mode].insn_code
              != CODE_FOR_nothing));
}

/* Create a blank optab.  */
static optab
new_optab (void)
{
  int i;
  optab op = ggc_alloc (sizeof (struct optab));

  /* Start with no insn pattern and no libfunc for any mode.  */
  for (i = 0; i < NUM_MACHINE_MODES; i++)
    {
      op->handlers[i].insn_code = CODE_FOR_nothing;
      op->handlers[i].libfunc = 0;
    }

  return op;
}

/* Create a blank convert_optab, with every mode pair empty.  */
static convert_optab
new_convert_optab (void)
{
  int i, j;
  convert_optab op = ggc_alloc (sizeof (struct convert_optab));
  for (i = 0; i < NUM_MACHINE_MODES; i++)
    for (j = 0; j < NUM_MACHINE_MODES; j++)
      {
        op->handlers[i][j].insn_code = CODE_FOR_nothing;
        op->handlers[i][j].libfunc = 0;
      }
  return op;
}

/* Same, but fill in its code as CODE, and write it into the
   code_to_optab table.  */
static inline optab
init_optab (enum rtx_code code)
{
  optab op = new_optab ();
  op->code = code;
  code_to_optab[(int) code] = op;
  return op;
}

/* Same, but fill in its code as CODE, and do _not_ write it into
   the code_to_optab table.  */
static inline optab
init_optabv (enum rtx_code code)
{
  optab op = new_optab ();
  op->code = code;
  return op;
}

/* Conversion optabs never go in the code_to_optab table.
*/
static inline convert_optab
init_convert_optab (enum rtx_code code)
{
  convert_optab op = new_convert_optab ();
  op->code = code;
  return op;
}

/* Initialize the libfunc fields of an entire group of entries in some
   optab.  Each entry is set equal to a string consisting of a leading
   pair of underscores followed by a generic operation name followed by
   a mode name (downshifted to lowercase) followed by a single character
   representing the number of operands for the given operation (which is
   usually one of the characters '2', '3', or '4').

   OPTABLE is the table in which libfunc fields are to be initialized.
   FIRST_MODE is the first machine mode index in the given optab to
     initialize.
   LAST_MODE is the last machine mode index in the given optab to
     initialize.
   OPNAME is the generic (string) name of the operation.
   SUFFIX is the character which specifies the number of operands for
     the given generic operation.
*/

static void
init_libfuncs (optab optable, int first_mode, int last_mode,
               const char *opname, int suffix)
{
  int mode;
  unsigned opname_len = strlen (opname);

  for (mode = first_mode; (int) mode <= (int) last_mode;
       mode = (enum machine_mode) ((int) mode + 1))
    {
      const char *mname = GET_MODE_NAME (mode);
      unsigned mname_len = strlen (mname);
      /* Buffer: "__" + OPNAME + lowercased MNAME + SUFFIX + NUL.  */
      char *libfunc_name = alloca (2 + opname_len + mname_len + 1 + 1);
      char *p;
      const char *q;

      p = libfunc_name;
      *p++ = '_';
      *p++ = '_';
      for (q = opname; *q; )
        *p++ = *q++;
      for (q = mname; *q; q++)
        *p++ = TOLOWER (*q);
      *p++ = suffix;
      *p = '\0';

      optable->handlers[(int) mode].libfunc
        = init_one_libfunc (ggc_alloc_string (libfunc_name, p - libfunc_name));
    }
}

/* Initialize the libfunc fields of an entire group of entries in some
   optab which correspond to all integer mode operations.  The parameters
   have the same meaning as similarly named ones for the `init_libfuncs'
   routine.  (See above).  */

static void
init_integral_libfuncs (optab optable, const char *opname, int suffix)
{
  /* Cover word_mode through double-word (or long long, if that is
     wider).  */
  int maxsize = 2*BITS_PER_WORD;
  if (maxsize < LONG_LONG_TYPE_SIZE)
    maxsize = LONG_LONG_TYPE_SIZE;
  init_libfuncs (optable, word_mode,
                 mode_for_size (maxsize, MODE_INT, 0),
                 opname, suffix);
}

/* Initialize the libfunc fields of an entire group of entries in some
   optab which correspond to all real mode operations.  The parameters
   have the same meaning as similarly named ones for the `init_libfuncs'
   routine.  (See above).
*/

static void
init_floating_libfuncs (optab optable, const char *opname, int suffix)
{
  /* Both the binary and the decimal floating-point modes get libfuncs.  */
  init_libfuncs (optable, MIN_MODE_FLOAT, MAX_MODE_FLOAT, opname, suffix);
  init_libfuncs (optable, MIN_MODE_DECIMAL_FLOAT, MAX_MODE_DECIMAL_FLOAT,
                 opname, suffix);
}

/* Initialize the libfunc fields of an entire group of entries of an
   inter-mode-class conversion optab.  The string formation rules are
   similar to the ones for init_libfuncs, above, but instead of having
   a mode name and an operand count these functions have two mode names
   and no operand count.  */
static void
init_interclass_conv_libfuncs (convert_optab tab, const char *opname,
                               enum mode_class from_class,
                               enum mode_class to_class)
{
  enum machine_mode first_from_mode = GET_CLASS_NARROWEST_MODE (from_class);
  enum machine_mode first_to_mode = GET_CLASS_NARROWEST_MODE (to_class);
  size_t opname_len = strlen (opname);
  size_t max_mname_len = 0;

  enum machine_mode fmode, tmode;
  const char *fname, *tname;
  const char *q;
  char *libfunc_name, *suffix;
  char *p;

  /* Size the name buffer for the longest mode names in either class.  */
  for (fmode = first_from_mode;
       fmode != VOIDmode;
       fmode = GET_MODE_WIDER_MODE (fmode))
    max_mname_len = MAX (max_mname_len, strlen (GET_MODE_NAME (fmode)));

  for (tmode = first_to_mode;
       tmode != VOIDmode;
       tmode = GET_MODE_WIDER_MODE (tmode))
    max_mname_len = MAX (max_mname_len, strlen (GET_MODE_NAME (tmode)));

  libfunc_name = alloca (2 + opname_len + 2*max_mname_len + 1 + 1);
  libfunc_name[0] = '_';
  libfunc_name[1] = '_';
  memcpy (&libfunc_name[2], opname, opname_len);
  suffix = libfunc_name + opname_len + 2;

  for (fmode = first_from_mode; fmode != VOIDmode;
       fmode = GET_MODE_WIDER_MODE (fmode))
    for (tmode = first_to_mode; tmode != VOIDmode;
         tmode = GET_MODE_WIDER_MODE (tmode))
      {
        fname = GET_MODE_NAME (fmode);
        tname = GET_MODE_NAME (tmode);

        /* Append the lowercased from-mode name, then the to-mode name.  */
        p = suffix;
        for (q = fname; *q; p++, q++)
          *p = TOLOWER (*q);
        for (q = tname; *q; p++, q++)
          *p = TOLOWER (*q);

        *p = '\0';

        tab->handlers[tmode][fmode].libfunc
          = init_one_libfunc (ggc_alloc_string (libfunc_name,
                                                p - libfunc_name));
      }
}

/* Initialize the libfunc fields of an entire group of entries of an
   intra-mode-class conversion optab.  The string formation rules are
   similar to the ones for init_libfunc, above.  WIDENING says whether
   the optab goes from narrow to wide modes or vice versa.  These functions
   have two mode names _and_ an operand count.  */
static void
init_intraclass_conv_libfuncs (convert_optab tab, const char *opname,
                               enum mode_class class, bool widening)
{
  enum machine_mode first_mode = GET_CLASS_NARROWEST_MODE (class);
  size_t opname_len = strlen (opname);
  size_t max_mname_len = 0;

  enum machine_mode nmode, wmode;
  const char *nname, *wname;
  const char *q;
  char *libfunc_name, *suffix;
  char *p;

  /* Size the name buffer for the longest mode name in the class.  */
  for (nmode = first_mode; nmode != VOIDmode;
       nmode = GET_MODE_WIDER_MODE (nmode))
    max_mname_len = MAX (max_mname_len, strlen (GET_MODE_NAME (nmode)));

  libfunc_name = alloca (2 + opname_len + 2*max_mname_len + 1 + 1);
  libfunc_name[0] = '_';
  libfunc_name[1] = '_';
  memcpy (&libfunc_name[2], opname, opname_len);
  suffix = libfunc_name + opname_len + 2;

  /* Iterate over every (narrower, wider) mode pair in the class.  */
  for (nmode = first_mode; nmode != VOIDmode;
       nmode = GET_MODE_WIDER_MODE (nmode))
    for (wmode = GET_MODE_WIDER_MODE (nmode); wmode != VOIDmode;
         wmode = GET_MODE_WIDER_MODE (wmode))
      {
        nname = GET_MODE_NAME (nmode);
        wname = GET_MODE_NAME (wmode);

        /* From-mode name first, to-mode name second.  */
        p = suffix;
        for (q = widening ? nname : wname; *q; p++, q++)
          *p = TOLOWER (*q);
        for (q = widening ? wname : nname; *q; p++, q++)
          *p = TOLOWER (*q);

        *p++ = '2';
        *p = '\0';

        tab->handlers[widening ? wmode : nmode]
                     [widening ? nmode : wmode].libfunc
          = init_one_libfunc (ggc_alloc_string (libfunc_name,
                                                p - libfunc_name));
      }
}


/* Return a SYMBOL_REF rtx for the library function called NAME, with
   target section info encoded via a dummy FUNCTION_DECL.  */
rtx
init_one_libfunc (const char *name)
{
  rtx symbol;

  /* Create a FUNCTION_DECL that can be passed to
     targetm.encode_section_info.  */
  /* ??? We don't have any type information except for this is
     a function.  Pretend this is "int foo()".  */
  tree decl = build_decl (FUNCTION_DECL, get_identifier (name),
                          build_function_type (integer_type_node, NULL_TREE));
  DECL_ARTIFICIAL (decl) = 1;
  DECL_EXTERNAL (decl) = 1;
  TREE_PUBLIC (decl) = 1;

  symbol = XEXP (DECL_RTL (decl), 0);

  /* Zap the nonsensical SYMBOL_REF_DECL for this.  What we're left with
     are the flags assigned by targetm.encode_section_info.  */
  SET_SYMBOL_REF_DECL (symbol, 0);

  return symbol;
}

/* Call this to reset the function entry for one optab (OPTABLE) in mode
   MODE to NAME, which should be either 0 or a string constant.  */
void
set_optab_libfunc (optab optable, enum machine_mode mode, const char *name)
{
  if (name)
    optable->handlers[mode].libfunc = init_one_libfunc (name);
  else
    optable->handlers[mode].libfunc = 0;
}

/* Call this to reset the function entry for one conversion optab
   (OPTABLE) from mode FMODE to mode TMODE to NAME, which should be
   either 0 or a string constant.
*/ 5167void 5168set_conv_libfunc (convert_optab optable, enum machine_mode tmode, 5169 enum machine_mode fmode, const char *name) 5170{ 5171 if (name) 5172 optable->handlers[tmode][fmode].libfunc = init_one_libfunc (name); 5173 else 5174 optable->handlers[tmode][fmode].libfunc = 0; 5175} 5176 5177/* Call this once to initialize the contents of the optabs 5178 appropriately for the current target machine. */ 5179 5180void 5181init_optabs (void) 5182{ 5183 unsigned int i; 5184 5185 /* Start by initializing all tables to contain CODE_FOR_nothing. */ 5186 5187 for (i = 0; i < NUM_RTX_CODE; i++) 5188 setcc_gen_code[i] = CODE_FOR_nothing; 5189 5190#ifdef HAVE_conditional_move 5191 for (i = 0; i < NUM_MACHINE_MODES; i++) 5192 movcc_gen_code[i] = CODE_FOR_nothing; 5193#endif 5194 5195 for (i = 0; i < NUM_MACHINE_MODES; i++) 5196 { 5197 vcond_gen_code[i] = CODE_FOR_nothing; 5198 vcondu_gen_code[i] = CODE_FOR_nothing; 5199 } 5200 5201 add_optab = init_optab (PLUS); 5202 addv_optab = init_optabv (PLUS); 5203 sub_optab = init_optab (MINUS); 5204 subv_optab = init_optabv (MINUS); 5205 smul_optab = init_optab (MULT); 5206 smulv_optab = init_optabv (MULT); 5207 smul_highpart_optab = init_optab (UNKNOWN); 5208 umul_highpart_optab = init_optab (UNKNOWN); 5209 smul_widen_optab = init_optab (UNKNOWN); 5210 umul_widen_optab = init_optab (UNKNOWN); 5211 usmul_widen_optab = init_optab (UNKNOWN); 5212 sdiv_optab = init_optab (DIV); 5213 sdivv_optab = init_optabv (DIV); 5214 sdivmod_optab = init_optab (UNKNOWN); 5215 udiv_optab = init_optab (UDIV); 5216 udivmod_optab = init_optab (UNKNOWN); 5217 smod_optab = init_optab (MOD); 5218 umod_optab = init_optab (UMOD); 5219 fmod_optab = init_optab (UNKNOWN); 5220 drem_optab = init_optab (UNKNOWN); 5221 ftrunc_optab = init_optab (UNKNOWN); 5222 and_optab = init_optab (AND); 5223 ior_optab = init_optab (IOR); 5224 xor_optab = init_optab (XOR); 5225 ashl_optab = init_optab (ASHIFT); 5226 ashr_optab = init_optab (ASHIFTRT); 5227 lshr_optab = 
init_optab (LSHIFTRT); 5228 rotl_optab = init_optab (ROTATE); 5229 rotr_optab = init_optab (ROTATERT); 5230 smin_optab = init_optab (SMIN); 5231 smax_optab = init_optab (SMAX); 5232 umin_optab = init_optab (UMIN); 5233 umax_optab = init_optab (UMAX); 5234 pow_optab = init_optab (UNKNOWN); 5235 atan2_optab = init_optab (UNKNOWN); 5236 5237 /* These three have codes assigned exclusively for the sake of 5238 have_insn_for. */ 5239 mov_optab = init_optab (SET); 5240 movstrict_optab = init_optab (STRICT_LOW_PART); 5241 cmp_optab = init_optab (COMPARE); 5242 5243 ucmp_optab = init_optab (UNKNOWN); 5244 tst_optab = init_optab (UNKNOWN); 5245 5246 eq_optab = init_optab (EQ); 5247 ne_optab = init_optab (NE); 5248 gt_optab = init_optab (GT); 5249 ge_optab = init_optab (GE); 5250 lt_optab = init_optab (LT); 5251 le_optab = init_optab (LE); 5252 unord_optab = init_optab (UNORDERED); 5253 5254 neg_optab = init_optab (NEG); 5255 negv_optab = init_optabv (NEG); 5256 abs_optab = init_optab (ABS); 5257 absv_optab = init_optabv (ABS); 5258 addcc_optab = init_optab (UNKNOWN); 5259 one_cmpl_optab = init_optab (NOT); 5260 bswap_optab = init_optab (BSWAP); 5261 ffs_optab = init_optab (FFS); 5262 clz_optab = init_optab (CLZ); 5263 ctz_optab = init_optab (CTZ); 5264 popcount_optab = init_optab (POPCOUNT); 5265 parity_optab = init_optab (PARITY); 5266 sqrt_optab = init_optab (SQRT); 5267 floor_optab = init_optab (UNKNOWN); 5268 lfloor_optab = init_optab (UNKNOWN); 5269 ceil_optab = init_optab (UNKNOWN); 5270 lceil_optab = init_optab (UNKNOWN); 5271 round_optab = init_optab (UNKNOWN); 5272 btrunc_optab = init_optab (UNKNOWN); 5273 nearbyint_optab = init_optab (UNKNOWN); 5274 rint_optab = init_optab (UNKNOWN); 5275 lrint_optab = init_optab (UNKNOWN); 5276 sincos_optab = init_optab (UNKNOWN); 5277 sin_optab = init_optab (UNKNOWN); 5278 asin_optab = init_optab (UNKNOWN); 5279 cos_optab = init_optab (UNKNOWN); 5280 acos_optab = init_optab (UNKNOWN); 5281 exp_optab = init_optab (UNKNOWN); 5282 
exp10_optab = init_optab (UNKNOWN); 5283 exp2_optab = init_optab (UNKNOWN); 5284 expm1_optab = init_optab (UNKNOWN); 5285 ldexp_optab = init_optab (UNKNOWN); 5286 logb_optab = init_optab (UNKNOWN); 5287 ilogb_optab = init_optab (UNKNOWN); 5288 log_optab = init_optab (UNKNOWN); 5289 log10_optab = init_optab (UNKNOWN); 5290 log2_optab = init_optab (UNKNOWN); 5291 log1p_optab = init_optab (UNKNOWN); 5292 tan_optab = init_optab (UNKNOWN); 5293 atan_optab = init_optab (UNKNOWN); 5294 copysign_optab = init_optab (UNKNOWN); 5295 5296 strlen_optab = init_optab (UNKNOWN); 5297 cbranch_optab = init_optab (UNKNOWN); 5298 cmov_optab = init_optab (UNKNOWN); 5299 cstore_optab = init_optab (UNKNOWN); 5300 push_optab = init_optab (UNKNOWN); 5301 5302 reduc_smax_optab = init_optab (UNKNOWN); 5303 reduc_umax_optab = init_optab (UNKNOWN); 5304 reduc_smin_optab = init_optab (UNKNOWN); 5305 reduc_umin_optab = init_optab (UNKNOWN); 5306 reduc_splus_optab = init_optab (UNKNOWN); 5307 reduc_uplus_optab = init_optab (UNKNOWN); 5308 5309 ssum_widen_optab = init_optab (UNKNOWN); 5310 usum_widen_optab = init_optab (UNKNOWN); 5311 sdot_prod_optab = init_optab (UNKNOWN); 5312 udot_prod_optab = init_optab (UNKNOWN); 5313 5314 vec_extract_optab = init_optab (UNKNOWN); 5315 vec_set_optab = init_optab (UNKNOWN); 5316 vec_init_optab = init_optab (UNKNOWN); 5317 vec_shl_optab = init_optab (UNKNOWN); 5318 vec_shr_optab = init_optab (UNKNOWN); 5319 vec_realign_load_optab = init_optab (UNKNOWN); 5320 movmisalign_optab = init_optab (UNKNOWN); 5321 5322 powi_optab = init_optab (UNKNOWN); 5323 5324 /* Conversions. 
*/ 5325 sext_optab = init_convert_optab (SIGN_EXTEND); 5326 zext_optab = init_convert_optab (ZERO_EXTEND); 5327 trunc_optab = init_convert_optab (TRUNCATE); 5328 sfix_optab = init_convert_optab (FIX); 5329 ufix_optab = init_convert_optab (UNSIGNED_FIX); 5330 sfixtrunc_optab = init_convert_optab (UNKNOWN); 5331 ufixtrunc_optab = init_convert_optab (UNKNOWN); 5332 sfloat_optab = init_convert_optab (FLOAT); 5333 ufloat_optab = init_convert_optab (UNSIGNED_FLOAT); 5334 5335 for (i = 0; i < NUM_MACHINE_MODES; i++) 5336 { 5337 movmem_optab[i] = CODE_FOR_nothing; 5338 cmpstr_optab[i] = CODE_FOR_nothing; 5339 cmpstrn_optab[i] = CODE_FOR_nothing; 5340 cmpmem_optab[i] = CODE_FOR_nothing; 5341 setmem_optab[i] = CODE_FOR_nothing; 5342 5343 sync_add_optab[i] = CODE_FOR_nothing; 5344 sync_sub_optab[i] = CODE_FOR_nothing; 5345 sync_ior_optab[i] = CODE_FOR_nothing; 5346 sync_and_optab[i] = CODE_FOR_nothing; 5347 sync_xor_optab[i] = CODE_FOR_nothing; 5348 sync_nand_optab[i] = CODE_FOR_nothing; 5349 sync_old_add_optab[i] = CODE_FOR_nothing; 5350 sync_old_sub_optab[i] = CODE_FOR_nothing; 5351 sync_old_ior_optab[i] = CODE_FOR_nothing; 5352 sync_old_and_optab[i] = CODE_FOR_nothing; 5353 sync_old_xor_optab[i] = CODE_FOR_nothing; 5354 sync_old_nand_optab[i] = CODE_FOR_nothing; 5355 sync_new_add_optab[i] = CODE_FOR_nothing; 5356 sync_new_sub_optab[i] = CODE_FOR_nothing; 5357 sync_new_ior_optab[i] = CODE_FOR_nothing; 5358 sync_new_and_optab[i] = CODE_FOR_nothing; 5359 sync_new_xor_optab[i] = CODE_FOR_nothing; 5360 sync_new_nand_optab[i] = CODE_FOR_nothing; 5361 sync_compare_and_swap[i] = CODE_FOR_nothing; 5362 sync_compare_and_swap_cc[i] = CODE_FOR_nothing; 5363 sync_lock_test_and_set[i] = CODE_FOR_nothing; 5364 sync_lock_release[i] = CODE_FOR_nothing; 5365 5366 reload_in_optab[i] = reload_out_optab[i] = CODE_FOR_nothing; 5367 } 5368 5369 /* Fill in the optabs with the insns we support. */ 5370 init_all_optabs (); 5371 5372 /* Initialize the optabs with the names of the library functions. 
*/ 5373 init_integral_libfuncs (add_optab, "add", '3'); 5374 init_floating_libfuncs (add_optab, "add", '3'); 5375 init_integral_libfuncs (addv_optab, "addv", '3'); 5376 init_floating_libfuncs (addv_optab, "add", '3'); 5377 init_integral_libfuncs (sub_optab, "sub", '3'); 5378 init_floating_libfuncs (sub_optab, "sub", '3'); 5379 init_integral_libfuncs (subv_optab, "subv", '3'); 5380 init_floating_libfuncs (subv_optab, "sub", '3'); 5381 init_integral_libfuncs (smul_optab, "mul", '3'); 5382 init_floating_libfuncs (smul_optab, "mul", '3'); 5383 init_integral_libfuncs (smulv_optab, "mulv", '3'); 5384 init_floating_libfuncs (smulv_optab, "mul", '3'); 5385 init_integral_libfuncs (sdiv_optab, "div", '3'); 5386 init_floating_libfuncs (sdiv_optab, "div", '3'); 5387 init_integral_libfuncs (sdivv_optab, "divv", '3'); 5388 init_integral_libfuncs (udiv_optab, "udiv", '3'); 5389 init_integral_libfuncs (sdivmod_optab, "divmod", '4'); 5390 init_integral_libfuncs (udivmod_optab, "udivmod", '4'); 5391 init_integral_libfuncs (smod_optab, "mod", '3'); 5392 init_integral_libfuncs (umod_optab, "umod", '3'); 5393 init_floating_libfuncs (ftrunc_optab, "ftrunc", '2'); 5394 init_integral_libfuncs (and_optab, "and", '3'); 5395 init_integral_libfuncs (ior_optab, "ior", '3'); 5396 init_integral_libfuncs (xor_optab, "xor", '3'); 5397 init_integral_libfuncs (ashl_optab, "ashl", '3'); 5398 init_integral_libfuncs (ashr_optab, "ashr", '3'); 5399 init_integral_libfuncs (lshr_optab, "lshr", '3'); 5400 init_integral_libfuncs (smin_optab, "min", '3'); 5401 init_floating_libfuncs (smin_optab, "min", '3'); 5402 init_integral_libfuncs (smax_optab, "max", '3'); 5403 init_floating_libfuncs (smax_optab, "max", '3'); 5404 init_integral_libfuncs (umin_optab, "umin", '3'); 5405 init_integral_libfuncs (umax_optab, "umax", '3'); 5406 init_integral_libfuncs (neg_optab, "neg", '2'); 5407 init_floating_libfuncs (neg_optab, "neg", '2'); 5408 init_integral_libfuncs (negv_optab, "negv", '2'); 5409 init_floating_libfuncs 
(negv_optab, "neg", '2'); 5410 init_integral_libfuncs (one_cmpl_optab, "one_cmpl", '2'); 5411 init_integral_libfuncs (ffs_optab, "ffs", '2'); 5412 init_integral_libfuncs (clz_optab, "clz", '2'); 5413 init_integral_libfuncs (ctz_optab, "ctz", '2'); 5414 init_integral_libfuncs (popcount_optab, "popcount", '2'); 5415 init_integral_libfuncs (parity_optab, "parity", '2'); 5416 5417 /* Comparison libcalls for integers MUST come in pairs, 5418 signed/unsigned. */ 5419 init_integral_libfuncs (cmp_optab, "cmp", '2'); 5420 init_integral_libfuncs (ucmp_optab, "ucmp", '2'); 5421 init_floating_libfuncs (cmp_optab, "cmp", '2'); 5422 5423 /* EQ etc are floating point only. */ 5424 init_floating_libfuncs (eq_optab, "eq", '2'); 5425 init_floating_libfuncs (ne_optab, "ne", '2'); 5426 init_floating_libfuncs (gt_optab, "gt", '2'); 5427 init_floating_libfuncs (ge_optab, "ge", '2'); 5428 init_floating_libfuncs (lt_optab, "lt", '2'); 5429 init_floating_libfuncs (le_optab, "le", '2'); 5430 init_floating_libfuncs (unord_optab, "unord", '2'); 5431 5432 init_floating_libfuncs (powi_optab, "powi", '2'); 5433 5434 /* Conversions. */ 5435 init_interclass_conv_libfuncs (sfloat_optab, "float", 5436 MODE_INT, MODE_FLOAT); 5437 init_interclass_conv_libfuncs (sfloat_optab, "float", 5438 MODE_INT, MODE_DECIMAL_FLOAT); 5439 init_interclass_conv_libfuncs (ufloat_optab, "floatun", 5440 MODE_INT, MODE_FLOAT); 5441 init_interclass_conv_libfuncs (ufloat_optab, "floatun", 5442 MODE_INT, MODE_DECIMAL_FLOAT); 5443 init_interclass_conv_libfuncs (sfix_optab, "fix", 5444 MODE_FLOAT, MODE_INT); 5445 init_interclass_conv_libfuncs (sfix_optab, "fix", 5446 MODE_DECIMAL_FLOAT, MODE_INT); 5447 init_interclass_conv_libfuncs (ufix_optab, "fixuns", 5448 MODE_FLOAT, MODE_INT); 5449 init_interclass_conv_libfuncs (ufix_optab, "fixuns", 5450 MODE_DECIMAL_FLOAT, MODE_INT); 5451 init_interclass_conv_libfuncs (ufloat_optab, "floatuns", 5452 MODE_INT, MODE_DECIMAL_FLOAT); 5453 5454 /* sext_optab is also used for FLOAT_EXTEND. 
*/ 5455 init_intraclass_conv_libfuncs (sext_optab, "extend", MODE_FLOAT, true); 5456 init_intraclass_conv_libfuncs (sext_optab, "extend", MODE_DECIMAL_FLOAT, true); 5457 init_interclass_conv_libfuncs (sext_optab, "extend", MODE_FLOAT, MODE_DECIMAL_FLOAT); 5458 init_interclass_conv_libfuncs (sext_optab, "extend", MODE_DECIMAL_FLOAT, MODE_FLOAT); 5459 init_intraclass_conv_libfuncs (trunc_optab, "trunc", MODE_FLOAT, false); 5460 init_intraclass_conv_libfuncs (trunc_optab, "trunc", MODE_DECIMAL_FLOAT, false); 5461 init_interclass_conv_libfuncs (trunc_optab, "trunc", MODE_FLOAT, MODE_DECIMAL_FLOAT); 5462 init_interclass_conv_libfuncs (trunc_optab, "trunc", MODE_DECIMAL_FLOAT, MODE_FLOAT); 5463 5464 /* Explicitly initialize the bswap libfuncs since we need them to be 5465 valid for things other than word_mode. */ 5466 set_optab_libfunc (bswap_optab, SImode, "__bswapsi2"); 5467 set_optab_libfunc (bswap_optab, DImode, "__bswapdi2"); 5468 5469 /* Use cabs for double complex abs, since systems generally have cabs. 5470 Don't define any libcall for float complex, so that cabs will be used. */ 5471 if (complex_double_type_node) 5472 abs_optab->handlers[TYPE_MODE (complex_double_type_node)].libfunc 5473 = init_one_libfunc ("cabs"); 5474 5475 /* The ffs function operates on `int'. 
*/ 5476 ffs_optab->handlers[(int) mode_for_size (INT_TYPE_SIZE, MODE_INT, 0)].libfunc 5477 = init_one_libfunc ("ffs"); 5478 5479 abort_libfunc = init_one_libfunc ("abort"); 5480 memcpy_libfunc = init_one_libfunc ("memcpy"); 5481 memmove_libfunc = init_one_libfunc ("memmove"); 5482 memcmp_libfunc = init_one_libfunc ("memcmp"); 5483 memset_libfunc = init_one_libfunc ("memset"); 5484 setbits_libfunc = init_one_libfunc ("__setbits"); 5485 5486#ifndef DONT_USE_BUILTIN_SETJMP 5487 setjmp_libfunc = init_one_libfunc ("__builtin_setjmp"); 5488 longjmp_libfunc = init_one_libfunc ("__builtin_longjmp"); 5489#else 5490 setjmp_libfunc = init_one_libfunc ("setjmp"); 5491 longjmp_libfunc = init_one_libfunc ("longjmp"); 5492#endif 5493 unwind_sjlj_register_libfunc = init_one_libfunc ("_Unwind_SjLj_Register"); 5494 unwind_sjlj_unregister_libfunc 5495 = init_one_libfunc ("_Unwind_SjLj_Unregister"); 5496 5497 /* For function entry/exit instrumentation. */ 5498 profile_function_entry_libfunc 5499 = init_one_libfunc ("__cyg_profile_func_enter"); 5500 profile_function_exit_libfunc 5501 = init_one_libfunc ("__cyg_profile_func_exit"); 5502 5503 gcov_flush_libfunc = init_one_libfunc ("__gcov_flush"); 5504 5505 if (HAVE_conditional_trap) 5506 trap_rtx = gen_rtx_fmt_ee (EQ, VOIDmode, NULL_RTX, NULL_RTX); 5507 5508 /* Allow the target to add more libcalls or rename some, etc. */ 5509 targetm.init_libfuncs (); 5510} 5511 5512#ifdef DEBUG 5513 5514/* Print information about the current contents of the optabs on 5515 STDERR. */ 5516 5517static void 5518debug_optab_libfuncs (void) 5519{ 5520 int i; 5521 int j; 5522 int k; 5523 5524 /* Dump the arithmetic optabs. 
*/ 5525 for (i = 0; i != (int) OTI_MAX; i++) 5526 for (j = 0; j < NUM_MACHINE_MODES; ++j) 5527 { 5528 optab o; 5529 struct optab_handlers *h; 5530 5531 o = optab_table[i]; 5532 h = &o->handlers[j]; 5533 if (h->libfunc) 5534 { 5535 gcc_assert (GET_CODE (h->libfunc) = SYMBOL_REF); 5536 fprintf (stderr, "%s\t%s:\t%s\n", 5537 GET_RTX_NAME (o->code), 5538 GET_MODE_NAME (j), 5539 XSTR (h->libfunc, 0)); 5540 } 5541 } 5542 5543 /* Dump the conversion optabs. */ 5544 for (i = 0; i < (int) COI_MAX; ++i) 5545 for (j = 0; j < NUM_MACHINE_MODES; ++j) 5546 for (k = 0; k < NUM_MACHINE_MODES; ++k) 5547 { 5548 convert_optab o; 5549 struct optab_handlers *h; 5550 5551 o = &convert_optab_table[i]; 5552 h = &o->handlers[j][k]; 5553 if (h->libfunc) 5554 { 5555 gcc_assert (GET_CODE (h->libfunc) = SYMBOL_REF); 5556 fprintf (stderr, "%s\t%s\t%s:\t%s\n", 5557 GET_RTX_NAME (o->code), 5558 GET_MODE_NAME (j), 5559 GET_MODE_NAME (k), 5560 XSTR (h->libfunc, 0)); 5561 } 5562 } 5563} 5564 5565#endif /* DEBUG */ 5566 5567 5568/* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition 5569 CODE. Return 0 on failure. 
*/ 5570 5571rtx 5572gen_cond_trap (enum rtx_code code ATTRIBUTE_UNUSED, rtx op1, 5573 rtx op2 ATTRIBUTE_UNUSED, rtx tcode ATTRIBUTE_UNUSED) 5574{ 5575 enum machine_mode mode = GET_MODE (op1); 5576 enum insn_code icode; 5577 rtx insn; 5578 5579 if (!HAVE_conditional_trap) 5580 return 0; 5581 5582 if (mode == VOIDmode) 5583 return 0; 5584 5585 icode = cmp_optab->handlers[(int) mode].insn_code; 5586 if (icode == CODE_FOR_nothing) 5587 return 0; 5588 5589 start_sequence (); 5590 op1 = prepare_operand (icode, op1, 0, mode, mode, 0); 5591 op2 = prepare_operand (icode, op2, 1, mode, mode, 0); 5592 if (!op1 || !op2) 5593 { 5594 end_sequence (); 5595 return 0; 5596 } 5597 emit_insn (GEN_FCN (icode) (op1, op2)); 5598 5599 PUT_CODE (trap_rtx, code); 5600 gcc_assert (HAVE_conditional_trap); 5601 insn = gen_conditional_trap (trap_rtx, tcode); 5602 if (insn) 5603 { 5604 emit_insn (insn); 5605 insn = get_insns (); 5606 } 5607 end_sequence (); 5608 5609 return insn; 5610} 5611 5612/* Return rtx code for TCODE. Use UNSIGNEDP to select signed 5613 or unsigned operation code. */ 5614 5615static enum rtx_code 5616get_rtx_code (enum tree_code tcode, bool unsignedp) 5617{ 5618 enum rtx_code code; 5619 switch (tcode) 5620 { 5621 case EQ_EXPR: 5622 code = EQ; 5623 break; 5624 case NE_EXPR: 5625 code = NE; 5626 break; 5627 case LT_EXPR: 5628 code = unsignedp ? LTU : LT; 5629 break; 5630 case LE_EXPR: 5631 code = unsignedp ? LEU : LE; 5632 break; 5633 case GT_EXPR: 5634 code = unsignedp ? GTU : GT; 5635 break; 5636 case GE_EXPR: 5637 code = unsignedp ? 
GEU : GE; 5638 break; 5639 5640 case UNORDERED_EXPR: 5641 code = UNORDERED; 5642 break; 5643 case ORDERED_EXPR: 5644 code = ORDERED; 5645 break; 5646 case UNLT_EXPR: 5647 code = UNLT; 5648 break; 5649 case UNLE_EXPR: 5650 code = UNLE; 5651 break; 5652 case UNGT_EXPR: 5653 code = UNGT; 5654 break; 5655 case UNGE_EXPR: 5656 code = UNGE; 5657 break; 5658 case UNEQ_EXPR: 5659 code = UNEQ; 5660 break; 5661 case LTGT_EXPR: 5662 code = LTGT; 5663 break; 5664 5665 default: 5666 gcc_unreachable (); 5667 } 5668 return code; 5669} 5670 5671/* Return comparison rtx for COND. Use UNSIGNEDP to select signed or 5672 unsigned operators. Do not generate compare instruction. */ 5673 5674static rtx 5675vector_compare_rtx (tree cond, bool unsignedp, enum insn_code icode) 5676{ 5677 enum rtx_code rcode; 5678 tree t_op0, t_op1; 5679 rtx rtx_op0, rtx_op1; 5680 5681 /* This is unlikely. While generating VEC_COND_EXPR, auto vectorizer 5682 ensures that condition is a relational operation. */ 5683 gcc_assert (COMPARISON_CLASS_P (cond)); 5684 5685 rcode = get_rtx_code (TREE_CODE (cond), unsignedp); 5686 t_op0 = TREE_OPERAND (cond, 0); 5687 t_op1 = TREE_OPERAND (cond, 1); 5688 5689 /* Expand operands. */ 5690 rtx_op0 = expand_expr (t_op0, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op0)), 1); 5691 rtx_op1 = expand_expr (t_op1, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op1)), 1); 5692 5693 if (!insn_data[icode].operand[4].predicate (rtx_op0, GET_MODE (rtx_op0)) 5694 && GET_MODE (rtx_op0) != VOIDmode) 5695 rtx_op0 = force_reg (GET_MODE (rtx_op0), rtx_op0); 5696 5697 if (!insn_data[icode].operand[5].predicate (rtx_op1, GET_MODE (rtx_op1)) 5698 && GET_MODE (rtx_op1) != VOIDmode) 5699 rtx_op1 = force_reg (GET_MODE (rtx_op1), rtx_op1); 5700 5701 return gen_rtx_fmt_ee (rcode, VOIDmode, rtx_op0, rtx_op1); 5702} 5703 5704/* Return insn code for VEC_COND_EXPR EXPR. 
*/ 5705 5706static inline enum insn_code 5707get_vcond_icode (tree expr, enum machine_mode mode) 5708{ 5709 enum insn_code icode = CODE_FOR_nothing; 5710 5711 if (TYPE_UNSIGNED (TREE_TYPE (expr))) 5712 icode = vcondu_gen_code[mode]; 5713 else 5714 icode = vcond_gen_code[mode]; 5715 return icode; 5716} 5717 5718/* Return TRUE iff, appropriate vector insns are available 5719 for vector cond expr expr in VMODE mode. */ 5720 5721bool 5722expand_vec_cond_expr_p (tree expr, enum machine_mode vmode) 5723{ 5724 if (get_vcond_icode (expr, vmode) == CODE_FOR_nothing) 5725 return false; 5726 return true; 5727} 5728 5729/* Generate insns for VEC_COND_EXPR. */ 5730 5731rtx 5732expand_vec_cond_expr (tree vec_cond_expr, rtx target) 5733{ 5734 enum insn_code icode; 5735 rtx comparison, rtx_op1, rtx_op2, cc_op0, cc_op1; 5736 enum machine_mode mode = TYPE_MODE (TREE_TYPE (vec_cond_expr)); 5737 bool unsignedp = TYPE_UNSIGNED (TREE_TYPE (vec_cond_expr)); 5738 5739 icode = get_vcond_icode (vec_cond_expr, mode); 5740 if (icode == CODE_FOR_nothing) 5741 return 0; 5742 5743 if (!target || !insn_data[icode].operand[0].predicate (target, mode)) 5744 target = gen_reg_rtx (mode); 5745 5746 /* Get comparison rtx. First expand both cond expr operands. */ 5747 comparison = vector_compare_rtx (TREE_OPERAND (vec_cond_expr, 0), 5748 unsignedp, icode); 5749 cc_op0 = XEXP (comparison, 0); 5750 cc_op1 = XEXP (comparison, 1); 5751 /* Expand both operands and force them in reg, if required. */ 5752 rtx_op1 = expand_expr (TREE_OPERAND (vec_cond_expr, 1), 5753 NULL_RTX, VOIDmode, EXPAND_NORMAL); 5754 if (!insn_data[icode].operand[1].predicate (rtx_op1, mode) 5755 && mode != VOIDmode) 5756 rtx_op1 = force_reg (mode, rtx_op1); 5757 5758 rtx_op2 = expand_expr (TREE_OPERAND (vec_cond_expr, 2), 5759 NULL_RTX, VOIDmode, EXPAND_NORMAL); 5760 if (!insn_data[icode].operand[2].predicate (rtx_op2, mode) 5761 && mode != VOIDmode) 5762 rtx_op2 = force_reg (mode, rtx_op2); 5763 5764 /* Emit instruction! 
*/ 5765 emit_insn (GEN_FCN (icode) (target, rtx_op1, rtx_op2, 5766 comparison, cc_op0, cc_op1)); 5767 5768 return target; 5769} 5770 5771 5772/* This is an internal subroutine of the other compare_and_swap expanders. 5773 MEM, OLD_VAL and NEW_VAL are as you'd expect for a compare-and-swap 5774 operation. TARGET is an optional place to store the value result of 5775 the operation. ICODE is the particular instruction to expand. Return 5776 the result of the operation. */ 5777 5778static rtx 5779expand_val_compare_and_swap_1 (rtx mem, rtx old_val, rtx new_val, 5780 rtx target, enum insn_code icode) 5781{ 5782 enum machine_mode mode = GET_MODE (mem); 5783 rtx insn; 5784 5785 if (!target || !insn_data[icode].operand[0].predicate (target, mode)) 5786 target = gen_reg_rtx (mode); 5787 5788 if (GET_MODE (old_val) != VOIDmode && GET_MODE (old_val) != mode) 5789 old_val = convert_modes (mode, GET_MODE (old_val), old_val, 1); 5790 if (!insn_data[icode].operand[2].predicate (old_val, mode)) 5791 old_val = force_reg (mode, old_val); 5792 5793 if (GET_MODE (new_val) != VOIDmode && GET_MODE (new_val) != mode) 5794 new_val = convert_modes (mode, GET_MODE (new_val), new_val, 1); 5795 if (!insn_data[icode].operand[3].predicate (new_val, mode)) 5796 new_val = force_reg (mode, new_val); 5797 5798 insn = GEN_FCN (icode) (target, mem, old_val, new_val); 5799 if (insn == NULL_RTX) 5800 return NULL_RTX; 5801 emit_insn (insn); 5802 5803 return target; 5804} 5805 5806/* Expand a compare-and-swap operation and return its value. 
*/

rtx
expand_val_compare_and_swap (rtx mem, rtx old_val, rtx new_val, rtx target)
{
  enum machine_mode mode = GET_MODE (mem);
  enum insn_code icode = sync_compare_and_swap[mode];

  if (icode == CODE_FOR_nothing)
    return NULL_RTX;

  return expand_val_compare_and_swap_1 (mem, old_val, new_val, target, icode);
}

/* Expand a compare-and-swap operation and store true into the result if
   the operation was successful and false otherwise.  Return the result.
   Unlike other routines, TARGET is not optional.  */

rtx
expand_bool_compare_and_swap (rtx mem, rtx old_val, rtx new_val, rtx target)
{
  enum machine_mode mode = GET_MODE (mem);
  enum insn_code icode;
  rtx subtarget, label0, label1;

  /* If the target supports a compare-and-swap pattern that simultaneously
     sets some flag for success, then use it.  Otherwise use the regular
     compare-and-swap and follow that immediately with a compare insn.  */
  icode = sync_compare_and_swap_cc[mode];
  switch (icode)
    {
    default:
      /* Any real insn code means the _cc pattern exists; try it first
	 and fall through to the plain pattern only if it fails.  */
      subtarget = expand_val_compare_and_swap_1 (mem, old_val, new_val,
						 NULL_RTX, icode);
      if (subtarget != NULL_RTX)
	break;

      /* FALLTHRU */
    case CODE_FOR_nothing:
      icode = sync_compare_and_swap[mode];
      if (icode == CODE_FOR_nothing)
	return NULL_RTX;

      /* Ensure that if old_val == mem, that we're not comparing
	 against an old value.  */
      if (MEM_P (old_val))
	old_val = force_reg (mode, old_val);

      subtarget = expand_val_compare_and_swap_1 (mem, old_val, new_val,
						 NULL_RTX, icode);
      if (subtarget == NULL_RTX)
	return NULL_RTX;

      /* The plain pattern sets no flag, so compare the returned value
	 against the expected one explicitly.  */
      emit_cmp_insn (subtarget, old_val, EQ, const0_rtx, mode, true);
    }

  /* If the target has a sane STORE_FLAG_VALUE, then go ahead and use a
     setcc instruction from the beginning.  We don't work too hard here,
     but it's nice to not be stupid about initial code gen either.  */
  if (STORE_FLAG_VALUE == 1)
    {
      icode = setcc_gen_code[EQ];
      if (icode != CODE_FOR_nothing)
	{
	  enum machine_mode cmode = insn_data[icode].operand[0].mode;
	  rtx insn;

	  subtarget = target;
	  if (!insn_data[icode].operand[0].predicate (target, cmode))
	    subtarget = gen_reg_rtx (cmode);

	  insn = GEN_FCN (icode) (subtarget);
	  if (insn)
	    {
	      emit_insn (insn);
	      /* The setcc result may be in a different mode than the
		 caller's TARGET; widen/narrow it as needed.  */
	      if (GET_MODE (target) != GET_MODE (subtarget))
		{
		  convert_move (target, subtarget, 1);
		  subtarget = target;
		}
	      return subtarget;
	    }
	}
    }

  /* Without an appropriate setcc instruction, use a set of branches to
     get 1 and 0 stored into target.  Presumably if the target has a
     STORE_FLAG_VALUE that isn't 1, then this will get cleaned up by ifcvt.  */

  label0 = gen_label_rtx ();
  label1 = gen_label_rtx ();

  emit_jump_insn (bcc_gen_fctn[EQ] (label0));
  emit_move_insn (target, const0_rtx);
  emit_jump_insn (gen_jump (label1));
  emit_barrier ();
  emit_label (label0);
  emit_move_insn (target, const1_rtx);
  emit_label (label1);

  return target;
}

/* This is a helper function for the other atomic operations.  This function
   emits a loop that contains SEQ that iterates until a compare-and-swap
   operation at the end succeeds.  MEM is the memory to be modified.  SEQ is
   a set of instructions that takes a value from OLD_REG as an input and
   produces a value in NEW_REG as an output.  Before SEQ, OLD_REG will be
   set to the current contents of MEM.  After SEQ, a compare-and-swap will
   attempt to update MEM with NEW_REG.  The function returns true when the
   loop was generated successfully.
*/

static bool
expand_compare_and_swap_loop (rtx mem, rtx old_reg, rtx new_reg, rtx seq)
{
  enum machine_mode mode = GET_MODE (mem);
  enum insn_code icode;
  rtx label, cmp_reg, subtarget;

  /* The loop we want to generate looks like

	cmp_reg = mem;
      label:
        old_reg = cmp_reg;
	seq;
	cmp_reg = compare-and-swap(mem, old_reg, new_reg)
	if (cmp_reg != old_reg)
	  goto label;

     Note that we only do the plain load from memory once.  Subsequent
     iterations use the value loaded by the compare-and-swap pattern.  */

  label = gen_label_rtx ();
  cmp_reg = gen_reg_rtx (mode);

  emit_move_insn (cmp_reg, mem);
  emit_label (label);
  emit_move_insn (old_reg, cmp_reg);
  if (seq)
    emit_insn (seq);

  /* If the target supports a compare-and-swap pattern that simultaneously
     sets some flag for success, then use it.  Otherwise use the regular
     compare-and-swap and follow that immediately with a compare insn.  */
  icode = sync_compare_and_swap_cc[mode];
  switch (icode)
    {
    default:
      /* A real insn code: try the flag-setting variant first, asking it
	 to leave the loaded value directly in CMP_REG.  */
      subtarget = expand_val_compare_and_swap_1 (mem, old_reg, new_reg,
						 cmp_reg, icode);
      if (subtarget != NULL_RTX)
	{
	  gcc_assert (subtarget == cmp_reg);
	  break;
	}

      /* FALLTHRU */
    case CODE_FOR_nothing:
      icode = sync_compare_and_swap[mode];
      if (icode == CODE_FOR_nothing)
	return false;

      subtarget = expand_val_compare_and_swap_1 (mem, old_reg, new_reg,
						 cmp_reg, icode);
      if (subtarget == NULL_RTX)
	return false;
      /* The expander was free to ignore the requested target; copy the
	 result back into CMP_REG for the next iteration.  */
      if (subtarget != cmp_reg)
	emit_move_insn (cmp_reg, subtarget);

      emit_cmp_insn (cmp_reg, old_reg, EQ, const0_rtx, mode, true);
    }

  /* ??? Mark this jump predicted not taken?  */
  emit_jump_insn (bcc_gen_fctn[NE] (label));

  return true;
}

/* This function generates the atomic operation MEM CODE= VAL.
   In this
   case, we do not care about any resulting value.  Returns NULL if we
   cannot generate the operation.  */

rtx
expand_sync_operation (rtx mem, rtx val, enum rtx_code code)
{
  enum machine_mode mode = GET_MODE (mem);
  enum insn_code icode;
  rtx insn;

  /* Look to see if the target supports the operation directly.  */
  switch (code)
    {
    case PLUS:
      icode = sync_add_optab[mode];
      break;
    case IOR:
      icode = sync_ior_optab[mode];
      break;
    case XOR:
      icode = sync_xor_optab[mode];
      break;
    case AND:
      icode = sync_and_optab[mode];
      break;
    case NOT:
      /* NOT stands for the NAND operation here.  */
      icode = sync_nand_optab[mode];
      break;

    case MINUS:
      icode = sync_sub_optab[mode];
      /* Without a direct sub, subtracting VAL is adding -VAL.  */
      if (icode == CODE_FOR_nothing)
	{
	  icode = sync_add_optab[mode];
	  if (icode != CODE_FOR_nothing)
	    {
	      val = expand_simple_unop (mode, NEG, val, NULL_RTX, 1);
	      code = PLUS;
	    }
	}
      break;

    default:
      gcc_unreachable ();
    }

  /* Generate the direct operation, if present.  */
  if (icode != CODE_FOR_nothing)
    {
      if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
	val = convert_modes (mode, GET_MODE (val), val, 1);
      if (!insn_data[icode].operand[1].predicate (val, mode))
	val = force_reg (mode, val);

      insn = GEN_FCN (icode) (mem, val);
      if (insn)
	{
	  emit_insn (insn);
	  return const0_rtx;
	}
    }

  /* Failing that, generate a compare-and-swap loop in which we perform the
     operation with normal arithmetic instructions.  */
  if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
    {
      rtx t0 = gen_reg_rtx (mode), t1;

      start_sequence ();

      t1 = t0;
      if (code == NOT)
	{
	  /* NOTE(review): this computes ~old & val, the historical GCC
	     NAND semantics of this era — not ~(old & val).  */
	  t1 = expand_simple_unop (mode, NOT, t1, NULL_RTX, true);
	  code = AND;
	}
      t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX,
				true, OPTAB_LIB_WIDEN);

      insn = get_insns ();
      end_sequence ();

      if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
	return const0_rtx;
    }

  return NULL_RTX;
}

/* This function generates the atomic operation MEM CODE= VAL.  In this
   case, we do care about the resulting value: if AFTER is true then
   return the value MEM holds after the operation, if AFTER is false
   then return the value MEM holds before the operation.  TARGET is an
   optional place for the result value to be stored.  */

rtx
expand_sync_fetch_operation (rtx mem, rtx val, enum rtx_code code,
			     bool after, rtx target)
{
  enum machine_mode mode = GET_MODE (mem);
  enum insn_code old_code, new_code, icode;
  bool compensate;
  rtx insn;

  /* Look to see if the target supports the operation directly.
*/ 6090 switch (code) 6091 { 6092 case PLUS: 6093 old_code = sync_old_add_optab[mode]; 6094 new_code = sync_new_add_optab[mode]; 6095 break; 6096 case IOR: 6097 old_code = sync_old_ior_optab[mode]; 6098 new_code = sync_new_ior_optab[mode]; 6099 break; 6100 case XOR: 6101 old_code = sync_old_xor_optab[mode]; 6102 new_code = sync_new_xor_optab[mode]; 6103 break; 6104 case AND: 6105 old_code = sync_old_and_optab[mode]; 6106 new_code = sync_new_and_optab[mode]; 6107 break; 6108 case NOT: 6109 old_code = sync_old_nand_optab[mode]; 6110 new_code = sync_new_nand_optab[mode]; 6111 break; 6112 6113 case MINUS: 6114 old_code = sync_old_sub_optab[mode]; 6115 new_code = sync_new_sub_optab[mode]; 6116 if (old_code == CODE_FOR_nothing && new_code == CODE_FOR_nothing) 6117 { 6118 old_code = sync_old_add_optab[mode]; 6119 new_code = sync_new_add_optab[mode]; 6120 if (old_code != CODE_FOR_nothing || new_code != CODE_FOR_nothing) 6121 { 6122 val = expand_simple_unop (mode, NEG, val, NULL_RTX, 1); 6123 code = PLUS; 6124 } 6125 } 6126 break; 6127 6128 default: 6129 gcc_unreachable (); 6130 } 6131 6132 /* If the target does supports the proper new/old operation, great. But 6133 if we only support the opposite old/new operation, check to see if we 6134 can compensate. In the case in which the old value is supported, then 6135 we can always perform the operation again with normal arithmetic. In 6136 the case in which the new value is supported, then we can only handle 6137 this in the case the operation is reversible. 
*/ 6138 compensate = false; 6139 if (after) 6140 { 6141 icode = new_code; 6142 if (icode == CODE_FOR_nothing) 6143 { 6144 icode = old_code; 6145 if (icode != CODE_FOR_nothing) 6146 compensate = true; 6147 } 6148 } 6149 else 6150 { 6151 icode = old_code; 6152 if (icode == CODE_FOR_nothing 6153 && (code == PLUS || code == MINUS || code == XOR)) 6154 { 6155 icode = new_code; 6156 if (icode != CODE_FOR_nothing) 6157 compensate = true; 6158 } 6159 } 6160 6161 /* If we found something supported, great. */ 6162 if (icode != CODE_FOR_nothing) 6163 { 6164 if (!target || !insn_data[icode].operand[0].predicate (target, mode)) 6165 target = gen_reg_rtx (mode); 6166 6167 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode) 6168 val = convert_modes (mode, GET_MODE (val), val, 1); 6169 if (!insn_data[icode].operand[2].predicate (val, mode)) 6170 val = force_reg (mode, val); 6171 6172 insn = GEN_FCN (icode) (target, mem, val); 6173 if (insn) 6174 { 6175 emit_insn (insn); 6176 6177 /* If we need to compensate for using an operation with the 6178 wrong return value, do so now. */ 6179 if (compensate) 6180 { 6181 if (!after) 6182 { 6183 if (code == PLUS) 6184 code = MINUS; 6185 else if (code == MINUS) 6186 code = PLUS; 6187 } 6188 6189 if (code == NOT) 6190 target = expand_simple_unop (mode, NOT, target, NULL_RTX, true); 6191 target = expand_simple_binop (mode, code, target, val, NULL_RTX, 6192 true, OPTAB_LIB_WIDEN); 6193 } 6194 6195 return target; 6196 } 6197 } 6198 6199 /* Failing that, generate a compare-and-swap loop in which we perform the 6200 operation with normal arithmetic instructions. 
*/ 6201 if (sync_compare_and_swap[mode] != CODE_FOR_nothing) 6202 { 6203 rtx t0 = gen_reg_rtx (mode), t1; 6204 6205 if (!target || !register_operand (target, mode)) 6206 target = gen_reg_rtx (mode); 6207 6208 start_sequence (); 6209 6210 if (!after) 6211 emit_move_insn (target, t0); 6212 t1 = t0; 6213 if (code == NOT) 6214 { 6215 t1 = expand_simple_unop (mode, NOT, t1, NULL_RTX, true); 6216 code = AND; 6217 } 6218 t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX, 6219 true, OPTAB_LIB_WIDEN); 6220 if (after) 6221 emit_move_insn (target, t1); 6222 6223 insn = get_insns (); 6224 end_sequence (); 6225 6226 if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn)) 6227 return target; 6228 } 6229 6230 return NULL_RTX; 6231} 6232 6233/* This function expands a test-and-set operation. Ideally we atomically 6234 store VAL in MEM and return the previous value in MEM. Some targets 6235 may not support this operation and only support VAL with the constant 1; 6236 in this case while the return value will be 0/1, but the exact value 6237 stored in MEM is target defined. TARGET is an option place to stick 6238 the return value. */ 6239 6240rtx 6241expand_sync_lock_test_and_set (rtx mem, rtx val, rtx target) 6242{ 6243 enum machine_mode mode = GET_MODE (mem); 6244 enum insn_code icode; 6245 rtx insn; 6246 6247 /* If the target supports the test-and-set directly, great. 
*/ 6248 icode = sync_lock_test_and_set[mode]; 6249 if (icode != CODE_FOR_nothing) 6250 { 6251 if (!target || !insn_data[icode].operand[0].predicate (target, mode)) 6252 target = gen_reg_rtx (mode); 6253 6254 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode) 6255 val = convert_modes (mode, GET_MODE (val), val, 1); 6256 if (!insn_data[icode].operand[2].predicate (val, mode)) 6257 val = force_reg (mode, val); 6258 6259 insn = GEN_FCN (icode) (target, mem, val); 6260 if (insn) 6261 { 6262 emit_insn (insn); 6263 return target; 6264 } 6265 } 6266 6267 /* Otherwise, use a compare-and-swap loop for the exchange. */ 6268 if (sync_compare_and_swap[mode] != CODE_FOR_nothing) 6269 { 6270 if (!target || !register_operand (target, mode)) 6271 target = gen_reg_rtx (mode); 6272 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode) 6273 val = convert_modes (mode, GET_MODE (val), val, 1); 6274 if (expand_compare_and_swap_loop (mem, target, val, NULL_RTX)) 6275 return target; 6276 } 6277 6278 return NULL_RTX; 6279} 6280 6281#include "gt-optabs.h" 6282