/* IRA hard register and memory cost calculation for allocnos or pseudos.
   Copyright (C) 2006-2022 Free Software Foundation, Inc.
   Contributed by Vladimir Makarov <vmakarov@redhat.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "predict.h"
#include "memmodel.h"
#include "tm_p.h"
#include "insn-config.h"
#include "regs.h"
#include "ira.h"
#include "ira-int.h"
#include "addresses.h"
#include "reload.h"
#include "print-rtl.h"

/* This flag is set up every time we calculate pseudo register classes
   through function ira_set_pseudo_classes.  */
static bool pseudo_classes_defined_p = false;

/* TRUE if we work with allocnos.  Otherwise we work with pseudos.  */
static bool allocno_p;

/* Number of elements in array `costs'.  */
static int cost_elements_num;

/* The `costs' struct records the cost of using hard registers of each
   class considered for the calculation and of using memory for each
   allocno or pseudo.  */
struct costs
{
  int mem_cost;
  /* Costs for register classes start here.  We process only some
     allocno classes.  The array is allocated oversized so that COST
     can hold one entry per considered cost class (see
     struct_costs_size below).  */
  int cost[1];
};

#define max_struct_costs_size \
  (this_target_ira_int->x_max_struct_costs_size)
#define init_cost \
  (this_target_ira_int->x_init_cost)
#define temp_costs \
  (this_target_ira_int->x_temp_costs)
#define op_costs \
  (this_target_ira_int->x_op_costs)
#define this_op_costs \
  (this_target_ira_int->x_this_op_costs)

/* Costs of each class for each allocno or pseudo.  */
static struct costs *costs;

/* Accumulated costs of each class for each allocno.  */
static struct costs *total_allocno_costs;

/* It is the current size of struct costs.  */
static size_t struct_costs_size;

/* Return pointer to structure containing costs of allocno or pseudo
   with given NUM in array ARR.  */
#define COSTS(arr, num) \
  ((struct costs *) ((char *) (arr) + (num) * struct_costs_size))

/* Return index in COSTS when processing reg with REGNO.  */
#define COST_INDEX(regno) (allocno_p					     \
			   ? ALLOCNO_NUM (ira_curr_regno_allocno_map[regno]) \
			   : (int) regno)

/* Record register class preferences of each allocno or pseudo.  Null
   value means no preferences.  It happens on the 1st iteration of the
   cost calculation.  */
static enum reg_class *pref;

/* Allocated buffers for pref.  */
static enum reg_class *pref_buffer;

/* Record allocno class of each allocno with the same regno.  */
static enum reg_class *regno_aclass;

/* Record cost gains for not allocating a register with an invariant
   equivalence.  */
static int *regno_equiv_gains;

/* Execution frequency of the current insn.  */
static int frequency;



/* Info about reg classes whose costs are calculated for a pseudo.  */
struct cost_classes
{
  /* Number of the cost classes in the subsequent array.  */
  int num;
  /* Container of the cost classes.  */
  enum reg_class classes[N_REG_CLASSES];
  /* Map reg class -> index of the reg class in the previous array.
     -1 if it is not a cost class.  */
  int index[N_REG_CLASSES];
  /* Map hard regno -> index of first class in array CLASSES containing
     the hard regno, -1 otherwise.  */
  int hard_regno_index[FIRST_PSEUDO_REGISTER];
};

/* Types of pointers to the structure above.  */
typedef struct cost_classes *cost_classes_t;
typedef const struct cost_classes *const_cost_classes_t;

/* Info about cost classes for each pseudo.  */
static cost_classes_t *regno_cost_classes;

/* Helper for cost_classes hashing.  */
struct cost_classes_hasher : pointer_hash <cost_classes>
{
  static inline hashval_t hash (const cost_classes *);
  static inline bool equal (const cost_classes *, const cost_classes *);
  static inline void remove (cost_classes *);
};

/* Returns hash value for cost classes info HV.  Only the first NUM
   entries of CLASSES participate in the hash, matching `equal' below.  */
inline hashval_t
cost_classes_hasher::hash (const cost_classes *hv)
{
  return iterative_hash (&hv->classes, sizeof (enum reg_class) * hv->num, 0);
}

/* Compares cost classes info HV1 and HV2.  Two entries are equal iff
   they list the same classes in the same order.  */
inline bool
cost_classes_hasher::equal (const cost_classes *hv1, const cost_classes *hv2)
{
  return (hv1->num == hv2->num
	  && memcmp (hv1->classes, hv2->classes,
		     sizeof (enum reg_class) * hv1->num) == 0);
}

/* Delete cost classes info V from the hash table.  */
inline void
cost_classes_hasher::remove (cost_classes *v)
{
  ira_free (v);
}

/* Hash table of unique cost classes.  */
static hash_table<cost_classes_hasher> *cost_classes_htab;

/* Map allocno class -> cost classes for pseudo of given allocno
   class.  */
static cost_classes_t cost_classes_aclass_cache[N_REG_CLASSES];

/* Map mode -> cost classes for pseudo of given mode.  */
static cost_classes_t cost_classes_mode_cache[MAX_MACHINE_MODE];

/* Cost classes that include all classes in ira_important_classes.  */
static cost_classes all_cost_classes;

/* Use the array of classes in CLASSES_PTR to fill out the rest of
   the structure (the INDEX and HARD_REGNO_INDEX reverse maps).  */
static void
complete_cost_classes (cost_classes_t classes_ptr)
{
  for (int i = 0; i < N_REG_CLASSES; i++)
    classes_ptr->index[i] = -1;
  for (int i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    classes_ptr->hard_regno_index[i] = -1;
  for (int i = 0; i < classes_ptr->num; i++)
    {
      enum reg_class cl = classes_ptr->classes[i];
      classes_ptr->index[cl] = i;
      for (int j = ira_class_hard_regs_num[cl] - 1; j >= 0; j--)
	{
	  unsigned int hard_regno = ira_class_hard_regs[cl][j];
	  /* Record only the first (lowest-index) class containing the
	     hard register.  */
	  if (classes_ptr->hard_regno_index[hard_regno] < 0)
	    classes_ptr->hard_regno_index[hard_regno] = i;
	}
    }
}

/* Initialize info about the cost classes for each pseudo.  */
static void
initiate_regno_cost_classes (void)
{
  int size = sizeof (cost_classes_t) * max_reg_num ();

  regno_cost_classes = (cost_classes_t *) ira_allocate (size);
  memset (regno_cost_classes, 0, size);
  memset (cost_classes_aclass_cache, 0,
	  sizeof (cost_classes_t) * N_REG_CLASSES);
  memset (cost_classes_mode_cache, 0,
	  sizeof (cost_classes_t) * MAX_MACHINE_MODE);
  cost_classes_htab = new hash_table<cost_classes_hasher> (200);
  all_cost_classes.num = ira_important_classes_num;
  for (int i = 0; i < ira_important_classes_num; i++)
    all_cost_classes.classes[i] = ira_important_classes[i];
  complete_cost_classes (&all_cost_classes);
}

/* Create new cost classes from cost classes FROM and set up members
   index and hard_regno_index.  Return the new classes.  The function
   implements some common code of two functions
   setup_regno_cost_classes_by_aclass and
   setup_regno_cost_classes_by_mode.
*/ 223static cost_classes_t 224setup_cost_classes (cost_classes_t from) 225{ 226 cost_classes_t classes_ptr; 227 228 classes_ptr = (cost_classes_t) ira_allocate (sizeof (struct cost_classes)); 229 classes_ptr->num = from->num; 230 for (int i = 0; i < from->num; i++) 231 classes_ptr->classes[i] = from->classes[i]; 232 complete_cost_classes (classes_ptr); 233 return classes_ptr; 234} 235 236/* Return a version of FULL that only considers registers in REGS that are 237 valid for mode MODE. Both FULL and the returned class are globally 238 allocated. */ 239static cost_classes_t 240restrict_cost_classes (cost_classes_t full, machine_mode mode, 241 const_hard_reg_set regs) 242{ 243 static struct cost_classes narrow; 244 int map[N_REG_CLASSES]; 245 narrow.num = 0; 246 for (int i = 0; i < full->num; i++) 247 { 248 /* Assume that we'll drop the class. */ 249 map[i] = -1; 250 251 /* Ignore classes that are too small for the mode. */ 252 enum reg_class cl = full->classes[i]; 253 if (!contains_reg_of_mode[cl][mode]) 254 continue; 255 256 /* Calculate the set of registers in CL that belong to REGS and 257 are valid for MODE. */ 258 HARD_REG_SET valid_for_cl = reg_class_contents[cl] & regs; 259 valid_for_cl &= ~(ira_prohibited_class_mode_regs[cl][mode] 260 | ira_no_alloc_regs); 261 if (hard_reg_set_empty_p (valid_for_cl)) 262 continue; 263 264 /* Don't use this class if the set of valid registers is a subset 265 of an existing class. For example, suppose we have two classes 266 GR_REGS and FR_REGS and a union class GR_AND_FR_REGS. Suppose 267 that the mode changes allowed by FR_REGS are not as general as 268 the mode changes allowed by GR_REGS. 269 270 In this situation, the mode changes for GR_AND_FR_REGS could 271 either be seen as the union or the intersection of the mode 272 changes allowed by the two subclasses. 
The justification for 273 the union-based definition would be that, if you want a mode 274 change that's only allowed by GR_REGS, you can pick a register 275 from the GR_REGS subclass. The justification for the 276 intersection-based definition would be that every register 277 from the class would allow the mode change. 278 279 However, if we have a register that needs to be in GR_REGS, 280 using GR_AND_FR_REGS with the intersection-based definition 281 would be too pessimistic, since it would bring in restrictions 282 that only apply to FR_REGS. Conversely, if we have a register 283 that needs to be in FR_REGS, using GR_AND_FR_REGS with the 284 union-based definition would lose the extra restrictions 285 placed on FR_REGS. GR_AND_FR_REGS is therefore only useful 286 for cases where GR_REGS and FP_REGS are both valid. */ 287 int pos; 288 for (pos = 0; pos < narrow.num; ++pos) 289 { 290 enum reg_class cl2 = narrow.classes[pos]; 291 if (hard_reg_set_subset_p (valid_for_cl, reg_class_contents[cl2])) 292 break; 293 } 294 map[i] = pos; 295 if (pos == narrow.num) 296 { 297 /* If several classes are equivalent, prefer to use the one 298 that was chosen as the allocno class. */ 299 enum reg_class cl2 = ira_allocno_class_translate[cl]; 300 if (ira_class_hard_regs_num[cl] == ira_class_hard_regs_num[cl2]) 301 cl = cl2; 302 narrow.classes[narrow.num++] = cl; 303 } 304 } 305 if (narrow.num == full->num) 306 return full; 307 308 cost_classes **slot = cost_classes_htab->find_slot (&narrow, INSERT); 309 if (*slot == NULL) 310 { 311 cost_classes_t classes = setup_cost_classes (&narrow); 312 /* Map equivalent classes to the representative that we chose above. 
*/ 313 for (int i = 0; i < ira_important_classes_num; i++) 314 { 315 enum reg_class cl = ira_important_classes[i]; 316 int index = full->index[cl]; 317 if (index >= 0) 318 classes->index[cl] = map[index]; 319 } 320 *slot = classes; 321 } 322 return *slot; 323} 324 325/* Setup cost classes for pseudo REGNO whose allocno class is ACLASS. 326 This function is used when we know an initial approximation of 327 allocno class of the pseudo already, e.g. on the second iteration 328 of class cost calculation or after class cost calculation in 329 register-pressure sensitive insn scheduling or register-pressure 330 sensitive loop-invariant motion. */ 331static void 332setup_regno_cost_classes_by_aclass (int regno, enum reg_class aclass) 333{ 334 static struct cost_classes classes; 335 cost_classes_t classes_ptr; 336 enum reg_class cl; 337 int i; 338 cost_classes **slot; 339 HARD_REG_SET temp, temp2; 340 bool exclude_p; 341 342 if ((classes_ptr = cost_classes_aclass_cache[aclass]) == NULL) 343 { 344 temp = reg_class_contents[aclass] & ~ira_no_alloc_regs; 345 /* We exclude classes from consideration which are subsets of 346 ACLASS only if ACLASS is an uniform class. */ 347 exclude_p = ira_uniform_class_p[aclass]; 348 classes.num = 0; 349 for (i = 0; i < ira_important_classes_num; i++) 350 { 351 cl = ira_important_classes[i]; 352 if (exclude_p) 353 { 354 /* Exclude non-uniform classes which are subsets of 355 ACLASS. 
*/ 356 temp2 = reg_class_contents[cl] & ~ira_no_alloc_regs; 357 if (hard_reg_set_subset_p (temp2, temp) && cl != aclass) 358 continue; 359 } 360 classes.classes[classes.num++] = cl; 361 } 362 slot = cost_classes_htab->find_slot (&classes, INSERT); 363 if (*slot == NULL) 364 { 365 classes_ptr = setup_cost_classes (&classes); 366 *slot = classes_ptr; 367 } 368 classes_ptr = cost_classes_aclass_cache[aclass] = (cost_classes_t) *slot; 369 } 370 if (regno_reg_rtx[regno] != NULL_RTX) 371 { 372 /* Restrict the classes to those that are valid for REGNO's mode 373 (which might for example exclude singleton classes if the mode 374 requires two registers). Also restrict the classes to those that 375 are valid for subregs of REGNO. */ 376 const HARD_REG_SET *valid_regs = valid_mode_changes_for_regno (regno); 377 if (!valid_regs) 378 valid_regs = ®_class_contents[ALL_REGS]; 379 classes_ptr = restrict_cost_classes (classes_ptr, 380 PSEUDO_REGNO_MODE (regno), 381 *valid_regs); 382 } 383 regno_cost_classes[regno] = classes_ptr; 384} 385 386/* Setup cost classes for pseudo REGNO with MODE. Usage of MODE can 387 decrease number of cost classes for the pseudo, if hard registers 388 of some important classes cannot hold a value of MODE. So the 389 pseudo cannot get hard register of some important classes and cost 390 calculation for such important classes is only wasting CPU 391 time. */ 392static void 393setup_regno_cost_classes_by_mode (int regno, machine_mode mode) 394{ 395 if (const HARD_REG_SET *valid_regs = valid_mode_changes_for_regno (regno)) 396 regno_cost_classes[regno] = restrict_cost_classes (&all_cost_classes, 397 mode, *valid_regs); 398 else 399 { 400 if (cost_classes_mode_cache[mode] == NULL) 401 cost_classes_mode_cache[mode] 402 = restrict_cost_classes (&all_cost_classes, mode, 403 reg_class_contents[ALL_REGS]); 404 regno_cost_classes[regno] = cost_classes_mode_cache[mode]; 405 } 406} 407 408/* Finalize info about the cost classes for each pseudo. 
*/
static void
finish_regno_cost_classes (void)
{
  ira_free (regno_cost_classes);
  delete cost_classes_htab;
  cost_classes_htab = NULL;
}



/* Compute the cost of loading X into (if TO_P is TRUE) or from (if
   TO_P is FALSE) a register of class RCLASS in mode MODE.  X must not
   be a pseudo register.  PREV_SRI chains the secondary reload info of
   the enclosing copy_cost invocation (NULL at the top level); the
   function recurses when the target requires an intermediate
   (secondary) reload register.  */
static int
copy_cost (rtx x, machine_mode mode, reg_class_t rclass, bool to_p,
	   secondary_reload_info *prev_sri)
{
  secondary_reload_info sri;
  reg_class_t secondary_class = NO_REGS;

  /* If X is a SCRATCH, there is actually nothing to move since we are
     assuming optimal allocation.  */
  if (GET_CODE (x) == SCRATCH)
    return 0;

  /* Get the class we will actually use for a reload.  */
  rclass = targetm.preferred_reload_class (x, rclass);

  /* If we need a secondary reload for an intermediate, the cost is
     that to load the input into the intermediate register, then to
     copy it.  */
  sri.prev_sri = prev_sri;
  sri.extra_cost = 0;
  /* PR 68770: Secondary reload might examine the t_icode field.  */
  sri.t_icode = CODE_FOR_nothing;

  secondary_class = targetm.secondary_reload (to_p, x, rclass, mode, &sri);

  if (secondary_class != NO_REGS)
    {
      ira_init_register_move_cost_if_necessary (mode);
      return (ira_register_move_cost[mode][(int) secondary_class][(int) rclass]
	      + sri.extra_cost
	      + copy_cost (x, mode, secondary_class, to_p, &sri));
    }

  /* For memory, use the memory move cost, for (hard) registers, use
     the cost to move between the register classes, and use
     COSTS_N_INSNS (1) for everything else (constants).  */
  if (MEM_P (x) || rclass == NO_REGS)
    return sri.extra_cost
	   + ira_memory_move_cost[mode][(int) rclass][to_p != 0];
  else if (REG_P (x))
    {
      reg_class_t x_class = REGNO_REG_CLASS (REGNO (x));

      ira_init_register_move_cost_if_necessary (mode);
      return (sri.extra_cost
	      + ira_register_move_cost[mode][(int) x_class][(int) rclass]);
    }
  else
    /* If this is a constant, we may eventually want to call rtx_cost
       here.  */
    return sri.extra_cost + COSTS_N_INSNS (1);
}



/* Record the cost of using memory or hard registers of various
   classes for the operands in INSN.

   N_ALTS is the number of alternatives.
   N_OPS is the number of operands.
   OPS is an array of the operands.
   MODES are the modes of the operands, in case any are VOIDmode.
   CONSTRAINTS are the constraints to use for the operands.  This array
   is modified by this procedure.

   This procedure works alternative by alternative.  For each
   alternative we assume that we will be able to allocate all allocnos
   to their ideal register class and calculate the cost of using that
   alternative.  Then we compute, for each operand that is a
   pseudo-register, the cost of having the allocno allocated to each
   register class and using it in that alternative.  To this cost is
   added the cost of the alternative.

   The cost of each class for this insn is its lowest cost among all
   the alternatives.
*/ 497static void 498record_reg_classes (int n_alts, int n_ops, rtx *ops, 499 machine_mode *modes, const char **constraints, 500 rtx_insn *insn, enum reg_class *pref) 501{ 502 int alt; 503 int i, j, k; 504 int insn_allows_mem[MAX_RECOG_OPERANDS]; 505 move_table *move_in_cost, *move_out_cost; 506 short (*mem_cost)[2]; 507 const char *p; 508 509 if (ira_dump_file != NULL && internal_flag_ira_verbose > 5) 510 { 511 fprintf (ira_dump_file, " Processing insn %u", INSN_UID (insn)); 512 if (INSN_CODE (insn) >= 0 513 && (p = get_insn_name (INSN_CODE (insn))) != NULL) 514 fprintf (ira_dump_file, " {%s}", p); 515 fprintf (ira_dump_file, " (freq=%d)\n", 516 REG_FREQ_FROM_BB (BLOCK_FOR_INSN (insn))); 517 dump_insn_slim (ira_dump_file, insn); 518 } 519 520 for (i = 0; i < n_ops; i++) 521 insn_allows_mem[i] = 0; 522 523 /* Process each alternative, each time minimizing an operand's cost 524 with the cost for each operand in that alternative. */ 525 alternative_mask preferred = get_preferred_alternatives (insn); 526 for (alt = 0; alt < n_alts; alt++) 527 { 528 enum reg_class classes[MAX_RECOG_OPERANDS]; 529 int allows_mem[MAX_RECOG_OPERANDS]; 530 enum reg_class rclass; 531 int alt_fail = 0; 532 int alt_cost = 0, op_cost_add; 533 534 if (!TEST_BIT (preferred, alt)) 535 { 536 for (i = 0; i < recog_data.n_operands; i++) 537 constraints[i] = skip_alternative (constraints[i]); 538 539 continue; 540 } 541 542 if (ira_dump_file != NULL && internal_flag_ira_verbose > 5) 543 { 544 fprintf (ira_dump_file, " Alt %d:", alt); 545 for (i = 0; i < n_ops; i++) 546 { 547 p = constraints[i]; 548 if (*p == '\0') 549 continue; 550 fprintf (ira_dump_file, " (%d) ", i); 551 for (; *p != '\0' && *p != ',' && *p != '#'; p++) 552 fputc (*p, ira_dump_file); 553 } 554 fprintf (ira_dump_file, "\n"); 555 } 556 557 for (i = 0; i < n_ops; i++) 558 { 559 unsigned char c; 560 const char *p = constraints[i]; 561 rtx op = ops[i]; 562 machine_mode mode = modes[i]; 563 int allows_addr = 0; 564 int win = 0; 565 566 
/* Initially show we know nothing about the register class. */ 567 classes[i] = NO_REGS; 568 allows_mem[i] = 0; 569 570 /* If this operand has no constraints at all, we can 571 conclude nothing about it since anything is valid. */ 572 if (*p == 0) 573 { 574 if (REG_P (op) && REGNO (op) >= FIRST_PSEUDO_REGISTER) 575 memset (this_op_costs[i], 0, struct_costs_size); 576 continue; 577 } 578 579 /* If this alternative is only relevant when this operand 580 matches a previous operand, we do different things 581 depending on whether this operand is a allocno-reg or not. 582 We must process any modifiers for the operand before we 583 can make this test. */ 584 while (*p == '%' || *p == '=' || *p == '+' || *p == '&') 585 p++; 586 587 if (p[0] >= '0' && p[0] <= '0' + i) 588 { 589 /* Copy class and whether memory is allowed from the 590 matching alternative. Then perform any needed cost 591 computations and/or adjustments. */ 592 j = p[0] - '0'; 593 classes[i] = classes[j]; 594 allows_mem[i] = allows_mem[j]; 595 if (allows_mem[i]) 596 insn_allows_mem[i] = 1; 597 598 if (! REG_P (op) || REGNO (op) < FIRST_PSEUDO_REGISTER) 599 { 600 /* If this matches the other operand, we have no 601 added cost and we win. */ 602 if (rtx_equal_p (ops[j], op)) 603 win = 1; 604 /* If we can put the other operand into a register, 605 add to the cost of this alternative the cost to 606 copy this operand to the register used for the 607 other operand. */ 608 else if (classes[j] != NO_REGS) 609 { 610 alt_cost += copy_cost (op, mode, classes[j], 1, NULL); 611 win = 1; 612 } 613 } 614 else if (! REG_P (ops[j]) 615 || REGNO (ops[j]) < FIRST_PSEUDO_REGISTER) 616 { 617 /* This op is an allocno but the one it matches is 618 not. */ 619 620 /* If we can't put the other operand into a 621 register, this alternative can't be used. 
*/ 622 623 if (classes[j] == NO_REGS) 624 { 625 alt_fail = 1; 626 } 627 else 628 /* Otherwise, add to the cost of this alternative the cost 629 to copy the other operand to the hard register used for 630 this operand. */ 631 { 632 alt_cost += copy_cost (ops[j], mode, classes[j], 1, NULL); 633 } 634 } 635 else 636 { 637 /* The costs of this operand are not the same as the 638 other operand since move costs are not symmetric. 639 Moreover, if we cannot tie them, this alternative 640 needs to do a copy, which is one insn. */ 641 struct costs *pp = this_op_costs[i]; 642 int *pp_costs = pp->cost; 643 cost_classes_t cost_classes_ptr 644 = regno_cost_classes[REGNO (op)]; 645 enum reg_class *cost_classes = cost_classes_ptr->classes; 646 bool in_p = recog_data.operand_type[i] != OP_OUT; 647 bool out_p = recog_data.operand_type[i] != OP_IN; 648 enum reg_class op_class = classes[i]; 649 650 ira_init_register_move_cost_if_necessary (mode); 651 if (! in_p) 652 { 653 ira_assert (out_p); 654 if (op_class == NO_REGS) 655 { 656 mem_cost = ira_memory_move_cost[mode]; 657 for (k = cost_classes_ptr->num - 1; k >= 0; k--) 658 { 659 rclass = cost_classes[k]; 660 pp_costs[k] = mem_cost[rclass][0] * frequency; 661 } 662 } 663 else 664 { 665 move_out_cost = ira_may_move_out_cost[mode]; 666 for (k = cost_classes_ptr->num - 1; k >= 0; k--) 667 { 668 rclass = cost_classes[k]; 669 pp_costs[k] 670 = move_out_cost[op_class][rclass] * frequency; 671 } 672 } 673 } 674 else if (! 
out_p) 675 { 676 ira_assert (in_p); 677 if (op_class == NO_REGS) 678 { 679 mem_cost = ira_memory_move_cost[mode]; 680 for (k = cost_classes_ptr->num - 1; k >= 0; k--) 681 { 682 rclass = cost_classes[k]; 683 pp_costs[k] = mem_cost[rclass][1] * frequency; 684 } 685 } 686 else 687 { 688 move_in_cost = ira_may_move_in_cost[mode]; 689 for (k = cost_classes_ptr->num - 1; k >= 0; k--) 690 { 691 rclass = cost_classes[k]; 692 pp_costs[k] 693 = move_in_cost[rclass][op_class] * frequency; 694 } 695 } 696 } 697 else 698 { 699 if (op_class == NO_REGS) 700 { 701 mem_cost = ira_memory_move_cost[mode]; 702 for (k = cost_classes_ptr->num - 1; k >= 0; k--) 703 { 704 rclass = cost_classes[k]; 705 pp_costs[k] = ((mem_cost[rclass][0] 706 + mem_cost[rclass][1]) 707 * frequency); 708 } 709 } 710 else 711 { 712 move_in_cost = ira_may_move_in_cost[mode]; 713 move_out_cost = ira_may_move_out_cost[mode]; 714 for (k = cost_classes_ptr->num - 1; k >= 0; k--) 715 { 716 rclass = cost_classes[k]; 717 pp_costs[k] = ((move_in_cost[rclass][op_class] 718 + move_out_cost[op_class][rclass]) 719 * frequency); 720 } 721 } 722 } 723 724 /* If the alternative actually allows memory, make 725 things a bit cheaper since we won't need an extra 726 insn to load it. */ 727 pp->mem_cost 728 = ((out_p ? ira_memory_move_cost[mode][op_class][0] : 0) 729 + (in_p ? ira_memory_move_cost[mode][op_class][1] : 0) 730 - allows_mem[i]) * frequency; 731 732 /* If we have assigned a class to this allocno in 733 our first pass, add a cost to this alternative 734 corresponding to what we would add if this 735 allocno were not in the appropriate class. */ 736 if (pref) 737 { 738 enum reg_class pref_class = pref[COST_INDEX (REGNO (op))]; 739 740 if (pref_class == NO_REGS) 741 alt_cost 742 += ((out_p 743 ? ira_memory_move_cost[mode][op_class][0] : 0) 744 + (in_p 745 ? 
ira_memory_move_cost[mode][op_class][1] 746 : 0)); 747 else if (ira_reg_class_intersect 748 [pref_class][op_class] == NO_REGS) 749 alt_cost 750 += ira_register_move_cost[mode][pref_class][op_class]; 751 } 752 if (REGNO (ops[i]) != REGNO (ops[j]) 753 && ! find_reg_note (insn, REG_DEAD, op)) 754 alt_cost += 2; 755 756 p++; 757 } 758 } 759 760 /* Scan all the constraint letters. See if the operand 761 matches any of the constraints. Collect the valid 762 register classes and see if this operand accepts 763 memory. */ 764 while ((c = *p)) 765 { 766 switch (c) 767 { 768 case '*': 769 /* Ignore the next letter for this pass. */ 770 c = *++p; 771 break; 772 773 case '^': 774 alt_cost += 2; 775 break; 776 777 case '?': 778 alt_cost += 2; 779 break; 780 781 case 'g': 782 if (MEM_P (op) 783 || (CONSTANT_P (op) 784 && (! flag_pic || LEGITIMATE_PIC_OPERAND_P (op)))) 785 win = 1; 786 insn_allows_mem[i] = allows_mem[i] = 1; 787 classes[i] = ira_reg_class_subunion[classes[i]][GENERAL_REGS]; 788 break; 789 790 default: 791 enum constraint_num cn = lookup_constraint (p); 792 enum reg_class cl; 793 switch (get_constraint_type (cn)) 794 { 795 case CT_REGISTER: 796 cl = reg_class_for_constraint (cn); 797 if (cl != NO_REGS) 798 classes[i] = ira_reg_class_subunion[classes[i]][cl]; 799 break; 800 801 case CT_CONST_INT: 802 if (CONST_INT_P (op) 803 && insn_const_int_ok_for_constraint (INTVAL (op), cn)) 804 win = 1; 805 break; 806 807 case CT_MEMORY: 808 case CT_RELAXED_MEMORY: 809 /* Every MEM can be reloaded to fit. */ 810 insn_allows_mem[i] = allows_mem[i] = 1; 811 if (MEM_P (op)) 812 win = 1; 813 break; 814 815 case CT_SPECIAL_MEMORY: 816 insn_allows_mem[i] = allows_mem[i] = 1; 817 if (MEM_P (extract_mem_from_operand (op)) 818 && constraint_satisfied_p (op, cn)) 819 win = 1; 820 break; 821 822 case CT_ADDRESS: 823 /* Every address can be reloaded to fit. 
*/ 824 allows_addr = 1; 825 if (address_operand (op, GET_MODE (op)) 826 || constraint_satisfied_p (op, cn)) 827 win = 1; 828 /* We know this operand is an address, so we 829 want it to be allocated to a hard register 830 that can be the base of an address, 831 i.e. BASE_REG_CLASS. */ 832 classes[i] 833 = ira_reg_class_subunion[classes[i]] 834 [base_reg_class (VOIDmode, ADDR_SPACE_GENERIC, 835 ADDRESS, SCRATCH)]; 836 break; 837 838 case CT_FIXED_FORM: 839 if (constraint_satisfied_p (op, cn)) 840 win = 1; 841 break; 842 } 843 break; 844 } 845 p += CONSTRAINT_LEN (c, p); 846 if (c == ',') 847 break; 848 } 849 850 constraints[i] = p; 851 852 if (alt_fail) 853 break; 854 855 /* How we account for this operand now depends on whether it 856 is a pseudo register or not. If it is, we first check if 857 any register classes are valid. If not, we ignore this 858 alternative, since we want to assume that all allocnos get 859 allocated for register preferencing. If some register 860 class is valid, compute the costs of moving the allocno 861 into that class. */ 862 if (REG_P (op) && REGNO (op) >= FIRST_PSEUDO_REGISTER) 863 { 864 if (classes[i] == NO_REGS && ! allows_mem[i]) 865 { 866 /* We must always fail if the operand is a REG, but 867 we did not find a suitable class and memory is 868 not allowed. 869 870 Otherwise we may perform an uninitialized read 871 from this_op_costs after the `continue' statement 872 below. */ 873 alt_fail = 1; 874 } 875 else 876 { 877 unsigned int regno = REGNO (op); 878 struct costs *pp = this_op_costs[i]; 879 int *pp_costs = pp->cost; 880 cost_classes_t cost_classes_ptr = regno_cost_classes[regno]; 881 enum reg_class *cost_classes = cost_classes_ptr->classes; 882 bool in_p = recog_data.operand_type[i] != OP_OUT; 883 bool out_p = recog_data.operand_type[i] != OP_IN; 884 enum reg_class op_class = classes[i]; 885 886 ira_init_register_move_cost_if_necessary (mode); 887 if (! 
in_p) 888 { 889 ira_assert (out_p); 890 if (op_class == NO_REGS) 891 { 892 mem_cost = ira_memory_move_cost[mode]; 893 for (k = cost_classes_ptr->num - 1; k >= 0; k--) 894 { 895 rclass = cost_classes[k]; 896 pp_costs[k] = mem_cost[rclass][0] * frequency; 897 } 898 } 899 else 900 { 901 move_out_cost = ira_may_move_out_cost[mode]; 902 for (k = cost_classes_ptr->num - 1; k >= 0; k--) 903 { 904 rclass = cost_classes[k]; 905 pp_costs[k] 906 = move_out_cost[op_class][rclass] * frequency; 907 } 908 } 909 } 910 else if (! out_p) 911 { 912 ira_assert (in_p); 913 if (op_class == NO_REGS) 914 { 915 mem_cost = ira_memory_move_cost[mode]; 916 for (k = cost_classes_ptr->num - 1; k >= 0; k--) 917 { 918 rclass = cost_classes[k]; 919 pp_costs[k] = mem_cost[rclass][1] * frequency; 920 } 921 } 922 else 923 { 924 move_in_cost = ira_may_move_in_cost[mode]; 925 for (k = cost_classes_ptr->num - 1; k >= 0; k--) 926 { 927 rclass = cost_classes[k]; 928 pp_costs[k] 929 = move_in_cost[rclass][op_class] * frequency; 930 } 931 } 932 } 933 else 934 { 935 if (op_class == NO_REGS) 936 { 937 mem_cost = ira_memory_move_cost[mode]; 938 for (k = cost_classes_ptr->num - 1; k >= 0; k--) 939 { 940 rclass = cost_classes[k]; 941 pp_costs[k] = ((mem_cost[rclass][0] 942 + mem_cost[rclass][1]) 943 * frequency); 944 } 945 } 946 else 947 { 948 move_in_cost = ira_may_move_in_cost[mode]; 949 move_out_cost = ira_may_move_out_cost[mode]; 950 for (k = cost_classes_ptr->num - 1; k >= 0; k--) 951 { 952 rclass = cost_classes[k]; 953 pp_costs[k] = ((move_in_cost[rclass][op_class] 954 + move_out_cost[op_class][rclass]) 955 * frequency); 956 } 957 } 958 } 959 960 if (op_class == NO_REGS) 961 /* Although we don't need insn to reload from 962 memory, still accessing memory is usually more 963 expensive than a register. */ 964 pp->mem_cost = frequency; 965 else 966 /* If the alternative actually allows memory, make 967 things a bit cheaper since we won't need an 968 extra insn to load it. */ 969 pp->mem_cost 970 = ((out_p ? 
ira_memory_move_cost[mode][op_class][0] : 0) 971 + (in_p ? ira_memory_move_cost[mode][op_class][1] : 0) 972 - allows_mem[i]) * frequency; 973 /* If we have assigned a class to this allocno in 974 our first pass, add a cost to this alternative 975 corresponding to what we would add if this 976 allocno were not in the appropriate class. */ 977 if (pref) 978 { 979 enum reg_class pref_class = pref[COST_INDEX (REGNO (op))]; 980 981 if (pref_class == NO_REGS) 982 { 983 if (op_class != NO_REGS) 984 alt_cost 985 += ((out_p 986 ? ira_memory_move_cost[mode][op_class][0] 987 : 0) 988 + (in_p 989 ? ira_memory_move_cost[mode][op_class][1] 990 : 0)); 991 } 992 else if (op_class == NO_REGS) 993 alt_cost 994 += ((out_p 995 ? ira_memory_move_cost[mode][pref_class][1] 996 : 0) 997 + (in_p 998 ? ira_memory_move_cost[mode][pref_class][0] 999 : 0)); 1000 else if (ira_reg_class_intersect[pref_class][op_class] 1001 == NO_REGS) 1002 alt_cost += (ira_register_move_cost 1003 [mode][pref_class][op_class]); 1004 } 1005 } 1006 } 1007 1008 /* Otherwise, if this alternative wins, either because we 1009 have already determined that or if we have a hard 1010 register of the proper class, there is no cost for this 1011 alternative. */ 1012 else if (win || (REG_P (op) 1013 && reg_fits_class_p (op, classes[i], 1014 0, GET_MODE (op)))) 1015 ; 1016 1017 /* If registers are valid, the cost of this alternative 1018 includes copying the object to and/or from a 1019 register. */ 1020 else if (classes[i] != NO_REGS) 1021 { 1022 if (recog_data.operand_type[i] != OP_OUT) 1023 alt_cost += copy_cost (op, mode, classes[i], 1, NULL); 1024 1025 if (recog_data.operand_type[i] != OP_IN) 1026 alt_cost += copy_cost (op, mode, classes[i], 0, NULL); 1027 } 1028 /* The only other way this alternative can be used is if 1029 this is a constant that could be placed into memory. 
*/ 1030 else if (CONSTANT_P (op) && (allows_addr || allows_mem[i])) 1031 alt_cost += ira_memory_move_cost[mode][classes[i]][1]; 1032 else 1033 alt_fail = 1; 1034 1035 if (alt_fail) 1036 break; 1037 } 1038 1039 if (alt_fail) 1040 { 1041 /* The loop above might have exited early once the failure 1042 was seen. Skip over the constraints for the remaining 1043 operands. */ 1044 i += 1; 1045 for (; i < n_ops; ++i) 1046 constraints[i] = skip_alternative (constraints[i]); 1047 continue; 1048 } 1049 1050 op_cost_add = alt_cost * frequency; 1051 /* Finally, update the costs with the information we've 1052 calculated about this alternative. */ 1053 for (i = 0; i < n_ops; i++) 1054 if (REG_P (ops[i]) && REGNO (ops[i]) >= FIRST_PSEUDO_REGISTER) 1055 { 1056 int old_cost; 1057 bool cost_change_p = false; 1058 struct costs *pp = op_costs[i], *qq = this_op_costs[i]; 1059 int *pp_costs = pp->cost, *qq_costs = qq->cost; 1060 int scale = 1 + (recog_data.operand_type[i] == OP_INOUT); 1061 cost_classes_t cost_classes_ptr 1062 = regno_cost_classes[REGNO (ops[i])]; 1063 1064 old_cost = pp->mem_cost; 1065 pp->mem_cost = MIN (old_cost, 1066 (qq->mem_cost + op_cost_add) * scale); 1067 1068 if (ira_dump_file != NULL && internal_flag_ira_verbose > 5 1069 && pp->mem_cost < old_cost) 1070 { 1071 cost_change_p = true; 1072 fprintf (ira_dump_file, " op %d(r=%u) new costs MEM:%d", 1073 i, REGNO(ops[i]), pp->mem_cost); 1074 } 1075 for (k = cost_classes_ptr->num - 1; k >= 0; k--) 1076 { 1077 old_cost = pp_costs[k]; 1078 pp_costs[k] 1079 = MIN (old_cost, (qq_costs[k] + op_cost_add) * scale); 1080 if (ira_dump_file != NULL && internal_flag_ira_verbose > 5 1081 && pp_costs[k] < old_cost) 1082 { 1083 if (!cost_change_p) 1084 fprintf (ira_dump_file, " op %d(r=%u) new costs", 1085 i, REGNO(ops[i])); 1086 cost_change_p = true; 1087 fprintf (ira_dump_file, " %s:%d", 1088 reg_class_names[cost_classes_ptr->classes[k]], 1089 pp_costs[k]); 1090 } 1091 } 1092 if (ira_dump_file != NULL && 
	      internal_flag_ira_verbose > 5
	      && cost_change_p)
	    fprintf (ira_dump_file, "\n");
	}
    }

  if (allocno_p)
    /* Mark allocnos for operands which no alternative of this insn
       allowed to be in memory: spilling such a pseudo would always
       need a reload, so it is a bad spill candidate.  */
    for (i = 0; i < n_ops; i++)
      {
	ira_allocno_t a;
	rtx op = ops[i];

	if (! REG_P (op) || REGNO (op) < FIRST_PSEUDO_REGISTER)
	  continue;
	a = ira_curr_regno_allocno_map [REGNO (op)];
	if (! ALLOCNO_BAD_SPILL_P (a) && insn_allows_mem[i] == 0)
	  ALLOCNO_BAD_SPILL_P (a) = true;
      }

}



/* Wrapper around REGNO_OK_FOR_INDEX_P, to allow pseudo registers.  */
static inline bool
ok_for_index_p_nonstrict (rtx reg)
{
  unsigned regno = REGNO (reg);

  return regno >= FIRST_PSEUDO_REGISTER || REGNO_OK_FOR_INDEX_P (regno);
}

/* A version of regno_ok_for_base_p for use here, when all
   pseudo-registers should count as OK.  Arguments as for
   regno_ok_for_base_p.  */
static inline bool
ok_for_base_p_nonstrict (rtx reg, machine_mode mode, addr_space_t as,
			 enum rtx_code outer_code, enum rtx_code index_code)
{
  unsigned regno = REGNO (reg);

  if (regno >= FIRST_PSEUDO_REGISTER)
    return true;
  return ok_for_base_p_1 (regno, mode, as, outer_code, index_code);
}

/* Record the pseudo registers we must reload into hard registers in a
   subexpression of a memory address, X.

   If CONTEXT is 0, we are looking at the base part of an address,
   otherwise we are looking at the index part.

   MODE and AS are the mode and address space of the memory reference;
   OUTER_CODE and INDEX_CODE give the context that the rtx appears in.
   These four arguments are passed down to base_reg_class.

   SCALE is twice the amount to multiply the cost by (it is twice so
   we can represent half-cost adjustments).
  */
static void
record_address_regs (machine_mode mode, addr_space_t as, rtx x,
		     int context, enum rtx_code outer_code,
		     enum rtx_code index_code, int scale)
{
  enum rtx_code code = GET_CODE (x);
  enum reg_class rclass;

  /* The class a register appearing here must belong to: index class
     when we are in the index part, otherwise whatever base class the
     target requires for this context.  */
  if (context == 1)
    rclass = INDEX_REG_CLASS;
  else
    rclass = base_reg_class (mode, as, outer_code, index_code);

  switch (code)
    {
    case CONST_INT:
    case CONST:
    case PC:
    case SYMBOL_REF:
    case LABEL_REF:
      return;

    case PLUS:
      /* When we have an address that is a sum, we must determine
	 whether registers are "base" or "index" regs.  If there is a
	 sum of two registers, we must choose one to be the "base".
	 Luckily, we can use the REG_POINTER to make a good choice
	 most of the time.  We only need to do this on machines that
	 can have two registers in an address and where the base and
	 index register classes are different.

	 ??? This code used to set REGNO_POINTER_FLAG in some cases,
	 but that seems bogus since it should only be set when we are
	 sure the register is being used as a pointer.  */
      {
	rtx arg0 = XEXP (x, 0);
	rtx arg1 = XEXP (x, 1);
	enum rtx_code code0 = GET_CODE (arg0);
	enum rtx_code code1 = GET_CODE (arg1);

	/* Look inside subregs.  */
	if (code0 == SUBREG)
	  arg0 = SUBREG_REG (arg0), code0 = GET_CODE (arg0);
	if (code1 == SUBREG)
	  arg1 = SUBREG_REG (arg1), code1 = GET_CODE (arg1);

	/* If index registers do not appear, or coincide with base registers,
	   just record registers in any non-constant operands.  We
	   assume here, as well as in the tests below, that all
	   addresses are in canonical form.  */
	if (MAX_REGS_PER_ADDRESS == 1
	    || INDEX_REG_CLASS == base_reg_class (VOIDmode, as, PLUS, SCRATCH))
	  {
	    record_address_regs (mode, as, arg0, context, PLUS, code1, scale);
	    if (! CONSTANT_P (arg1))
	      record_address_regs (mode, as, arg1, context, PLUS, code0, scale);
	  }

	/* If the second operand is a constant integer, it doesn't
	   change what class the first operand must be.  */
	else if (CONST_SCALAR_INT_P (arg1))
	  record_address_regs (mode, as, arg0, context, PLUS, code1, scale);
	/* If the second operand is a symbolic constant, the first
	   operand must be an index register.  */
	else if (code1 == SYMBOL_REF || code1 == CONST || code1 == LABEL_REF)
	  record_address_regs (mode, as, arg0, 1, PLUS, code1, scale);
	/* If both operands are registers but one is already a hard
	   register of index or reg-base class, give the other the
	   class that the hard register is not.  */
	else if (code0 == REG && code1 == REG
		 && REGNO (arg0) < FIRST_PSEUDO_REGISTER
		 && (ok_for_base_p_nonstrict (arg0, mode, as, PLUS, REG)
		     || ok_for_index_p_nonstrict (arg0)))
	  record_address_regs (mode, as, arg1,
			       ok_for_base_p_nonstrict (arg0, mode, as,
							PLUS, REG) ? 1 : 0,
			       PLUS, REG, scale);
	else if (code0 == REG && code1 == REG
		 && REGNO (arg1) < FIRST_PSEUDO_REGISTER
		 && (ok_for_base_p_nonstrict (arg1, mode, as, PLUS, REG)
		     || ok_for_index_p_nonstrict (arg1)))
	  record_address_regs (mode, as, arg0,
			       ok_for_base_p_nonstrict (arg1, mode, as,
							PLUS, REG) ? 1 : 0,
			       PLUS, REG, scale);
	/* If one operand is known to be a pointer, it must be the
	   base with the other operand the index.  Likewise if the
	   other operand is a MULT.  */
	else if ((code0 == REG && REG_POINTER (arg0)) || code1 == MULT)
	  {
	    record_address_regs (mode, as, arg0, 0, PLUS, code1, scale);
	    record_address_regs (mode, as, arg1, 1, PLUS, code0, scale);
	  }
	else if ((code1 == REG && REG_POINTER (arg1)) || code0 == MULT)
	  {
	    record_address_regs (mode, as, arg0, 1, PLUS, code1, scale);
	    record_address_regs (mode, as, arg1, 0, PLUS, code0, scale);
	  }
	/* Otherwise, count equal chances that each might be a base or
	   index register.  This case should be rare.  Each operand is
	   recorded in both roles at half the scale.  */
	else
	  {
	    record_address_regs (mode, as, arg0, 0, PLUS, code1, scale / 2);
	    record_address_regs (mode, as, arg0, 1, PLUS, code1, scale / 2);
	    record_address_regs (mode, as, arg1, 0, PLUS, code0, scale / 2);
	    record_address_regs (mode, as, arg1, 1, PLUS, code0, scale / 2);
	  }
      }
      break;

    /* Double the importance of an allocno that is incremented or
       decremented, since it would take two extra insns if it ends
       up in the wrong place.  */
    case POST_MODIFY:
    case PRE_MODIFY:
      record_address_regs (mode, as, XEXP (x, 0), 0, code,
			   GET_CODE (XEXP (XEXP (x, 1), 1)), 2 * scale);
      if (REG_P (XEXP (XEXP (x, 1), 1)))
	record_address_regs (mode, as, XEXP (XEXP (x, 1), 1), 1, code, REG,
			     2 * scale);
      break;

    case POST_INC:
    case PRE_INC:
    case POST_DEC:
    case PRE_DEC:
      /* Double the importance of an allocno that is incremented or
	 decremented, since it would take two extra insns if it ends
	 up in the wrong place.  */
      record_address_regs (mode, as, XEXP (x, 0), 0, code, SCRATCH, 2 * scale);
      break;

    case REG:
      {
	struct costs *pp;
	int *pp_costs;
	enum reg_class i;
	int k, regno, add_cost;
	cost_classes_t cost_classes_ptr;
	enum reg_class *cost_classes;
	move_table *move_in_cost;

	if (REGNO (x) < FIRST_PSEUDO_REGISTER)
	  break;

	regno = REGNO (x);
	if (allocno_p)
	  ALLOCNO_BAD_SPILL_P (ira_curr_regno_allocno_map[regno]) = true;
	pp = COSTS (costs, COST_INDEX (regno));
	/* SCALE is twice the real factor (see function comment), hence
	   the division by 2.  All additions below saturate at INT_MAX
	   to avoid signed overflow.  */
	add_cost = (ira_memory_move_cost[Pmode][rclass][1] * scale) / 2;
	if (INT_MAX - add_cost < pp->mem_cost)
	  pp->mem_cost = INT_MAX;
	else
	  pp->mem_cost += add_cost;
	cost_classes_ptr = regno_cost_classes[regno];
	cost_classes = cost_classes_ptr->classes;
	pp_costs = pp->cost;
	ira_init_register_move_cost_if_necessary (Pmode);
	move_in_cost = ira_may_move_in_cost[Pmode];
	/* Charge each cost class the cost of moving the pseudo into a
	   register of the required address class RCLASS.  */
	for (k = cost_classes_ptr->num - 1; k >= 0; k--)
	  {
	    i = cost_classes[k];
	    add_cost = (move_in_cost[i][rclass] * scale) / 2;
	    if (INT_MAX - add_cost < pp_costs[k])
	      pp_costs[k] = INT_MAX;
	    else
	      pp_costs[k] += add_cost;
	  }
      }
      break;

    default:
      {
	const char *fmt = GET_RTX_FORMAT (code);
	int i;
	for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
	  if (fmt[i] == 'e')
	    record_address_regs (mode, as, XEXP (x, i), context, code, SCRATCH,
				 scale);
      }
    }
}



/* Calculate the costs of insn operands.  */
static void
record_operand_costs (rtx_insn *insn, enum reg_class *pref)
{
  const char *constraints[MAX_RECOG_OPERANDS];
  machine_mode modes[MAX_RECOG_OPERANDS];
  rtx set;
  int i;

  if ((set = single_set (insn)) != NULL_RTX
      /* In rare cases the single set insn might have less than 2 operands
	 as the source can be a fixed special reg.
  */
      && recog_data.n_operands > 1
      && recog_data.operand[0] == SET_DEST (set)
      && recog_data.operand[1] == SET_SRC (set))
    {
      int regno, other_regno;
      rtx dest = SET_DEST (set);
      rtx src = SET_SRC (set);

      /* Strip same-size subregs so we see the underlying regs.  */
      if (GET_CODE (dest) == SUBREG
	  && known_eq (GET_MODE_SIZE (GET_MODE (dest)),
		       GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest)))))
	dest = SUBREG_REG (dest);
      if (GET_CODE (src) == SUBREG
	  && known_eq (GET_MODE_SIZE (GET_MODE (src)),
		       GET_MODE_SIZE (GET_MODE (SUBREG_REG (src)))))
	src = SUBREG_REG (src);
      /* Handle specially a pseudo <-> hard-reg copy: REGNO becomes the
	 pseudo, OTHER_REGNO the hard register.  */
      if (REG_P (src) && REG_P (dest)
	  && (((regno = REGNO (src)) >= FIRST_PSEUDO_REGISTER
	       && (other_regno = REGNO (dest)) < FIRST_PSEUDO_REGISTER)
	      || ((regno = REGNO (dest)) >= FIRST_PSEUDO_REGISTER
		  && (other_regno = REGNO (src)) < FIRST_PSEUDO_REGISTER)))
	{
	  machine_mode mode = GET_MODE (SET_SRC (set)), cost_mode = mode;
	  machine_mode hard_reg_mode = GET_MODE(regno_reg_rtx[other_regno]);
	  poly_int64 pmode_size = GET_MODE_SIZE (mode);
	  poly_int64 phard_reg_mode_size = GET_MODE_SIZE (hard_reg_mode);
	  HOST_WIDE_INT mode_size, hard_reg_mode_size;
	  cost_classes_t cost_classes_ptr = regno_cost_classes[regno];
	  enum reg_class *cost_classes = cost_classes_ptr->classes;
	  reg_class_t rclass, hard_reg_class, bigger_hard_reg_class;
	  int cost_factor = 1, cost, k;
	  move_table *move_costs;
	  bool dead_p = find_regno_note (insn, REG_DEAD, REGNO (src));

	  hard_reg_class = REGNO_REG_CLASS (other_regno);
	  bigger_hard_reg_class = ira_pressure_class_translate[hard_reg_class];
	  /* Target code may return any cost for mode which does not fit the
	     hard reg class (e.g. DImode for AREG on i386).  Check this and use
	     a bigger class to get the right cost.  */
	  if (bigger_hard_reg_class != NO_REGS
	      && ! ira_hard_reg_in_set_p (other_regno, mode,
					  reg_class_contents[hard_reg_class]))
	    hard_reg_class = bigger_hard_reg_class;
	  ira_init_register_move_cost_if_necessary (mode);
	  ira_init_register_move_cost_if_necessary (hard_reg_mode);
	  /* Use smaller movement cost for natural hard reg mode or its mode as
	     operand.  */
	  if (pmode_size.is_constant (&mode_size)
	      && phard_reg_mode_size.is_constant (&hard_reg_mode_size))
	    {
	      /* Assume we are moving in the natural modes: */
	      cost_factor = mode_size / hard_reg_mode_size;
	      if (mode_size % hard_reg_mode_size != 0)
		cost_factor++;
	      if (cost_factor
		  * (ira_register_move_cost
		     [hard_reg_mode][hard_reg_class][hard_reg_class])
		  < (ira_register_move_cost
		     [mode][hard_reg_class][hard_reg_class]))
		cost_mode = hard_reg_mode;
	      else
		cost_factor = 1;
	    }
	  move_costs = ira_register_move_cost[cost_mode];
	  /* I is the operand number of the pseudo: 1 if it is the
	     source of the copy, 0 if it is the destination.  */
	  i = regno == (int) REGNO (src) ? 1 : 0;
	  for (k = cost_classes_ptr->num - 1; k >= 0; k--)
	    {
	      rclass = cost_classes[k];
	      cost = (i == 0
		      ? move_costs[hard_reg_class][rclass]
		      : move_costs[rclass][hard_reg_class]);
	      cost *= cost_factor;
	      op_costs[i]->cost[k] = cost * frequency;
	      /* If this insn is a single set copying operand 1 to
		 operand 0 and one operand is an allocno with the
		 other a hard reg or an allocno that prefers a hard
		 register that is in its own register class then we
		 may want to adjust the cost of that register class to
		 -1.

		 Avoid the adjustment if the source does not die to
		 avoid stressing of register allocator by preferencing
		 two colliding registers into single class.  */
	      if (dead_p
		  && TEST_HARD_REG_BIT (reg_class_contents[rclass], other_regno)
		  && (reg_class_size[(int) rclass]
		      == (ira_reg_class_max_nregs
			  [(int) rclass][(int) GET_MODE(src)])))
		{
		  if (reg_class_size[rclass] == 1)
		    op_costs[i]->cost[k] = -frequency;
		  else if (in_hard_reg_set_p (reg_class_contents[rclass],
					      GET_MODE(src), other_regno))
		    op_costs[i]->cost[k] = -frequency;
		}
	    }
	  op_costs[i]->mem_cost
	    = ira_memory_move_cost[mode][hard_reg_class][i] * frequency;
	  return;
	}
    }

  for (i = 0; i < recog_data.n_operands; i++)
    {
      constraints[i] = recog_data.constraints[i];
      modes[i] = recog_data.operand_mode[i];
    }

  /* If we get here, we are set up to record the costs of all the
     operands for this insn.  Start by initializing the costs.  Then
     handle any address registers.  Finally record the desired classes
     for any allocnos, doing it twice if some pair of operands are
     commutative.  */
  for (i = 0; i < recog_data.n_operands; i++)
    {
      rtx op_mem = extract_mem_from_operand (recog_data.operand[i]);
      memcpy (op_costs[i], init_cost, struct_costs_size);

      if (GET_CODE (recog_data.operand[i]) == SUBREG)
	recog_data.operand[i] = SUBREG_REG (recog_data.operand[i]);

      if (MEM_P (op_mem))
	record_address_regs (GET_MODE (op_mem),
			     MEM_ADDR_SPACE (op_mem),
			     XEXP (op_mem, 0),
			     0, MEM, SCRATCH, frequency * 2);
      else if (constraints[i][0] == 'p'
	       || (insn_extra_address_constraint
		   (lookup_constraint (constraints[i]))))
	record_address_regs (VOIDmode, ADDR_SPACE_GENERIC,
			     recog_data.operand[i], 0, ADDRESS, SCRATCH,
			     frequency * 2);
    }

  /* Check for commutative in a separate loop so everything will have
     been initialized.  We must do this even if one operand is a
     constant--see addsi3 in m68k.md.  */
  for (i = 0; i < (int) recog_data.n_operands - 1; i++)
    if (constraints[i][0] == '%')
      {
	const char *xconstraints[MAX_RECOG_OPERANDS];
	int j;

	/* Handle commutative operands by swapping the
	   constraints.  We assume the modes are the same.  */
	for (j = 0; j < recog_data.n_operands; j++)
	  xconstraints[j] = constraints[j];

	xconstraints[i] = constraints[i+1];
	xconstraints[i+1] = constraints[i];
	record_reg_classes (recog_data.n_alternatives, recog_data.n_operands,
			    recog_data.operand, modes,
			    xconstraints, insn, pref);
      }
  record_reg_classes (recog_data.n_alternatives, recog_data.n_operands,
		      recog_data.operand, modes,
		      constraints, insn, pref);
}



/* Process one insn INSN.  Scan it and record each time it would save
   code to put a certain allocnos in a certain class.  Return the last
   insn processed, so that the scan can be continued from there.  */
static rtx_insn *
scan_one_insn (rtx_insn *insn)
{
  enum rtx_code pat_code;
  rtx set, note;
  int i, k;
  bool counted_mem;

  if (!NONDEBUG_INSN_P (insn))
    return insn;

  pat_code = GET_CODE (PATTERN (insn));
  if (pat_code == ASM_INPUT)
    return insn;

  /* If INSN is a USE/CLOBBER of a pseudo in a mode M then go ahead
     and initialize the register move costs of mode M.

     The pseudo may be related to another pseudo via a copy (implicit or
     explicit) and if there are no mode M uses/sets of the original
     pseudo, then we may leave the register move costs uninitialized for
     mode M.
  */
  if (pat_code == USE || pat_code == CLOBBER)
    {
      rtx x = XEXP (PATTERN (insn), 0);
      if (GET_CODE (x) == REG
	  && REGNO (x) >= FIRST_PSEUDO_REGISTER
	  && have_regs_of_mode[GET_MODE (x)])
	ira_init_register_move_cost_if_necessary (GET_MODE (x));
      return insn;
    }

  counted_mem = false;
  set = single_set (insn);
  extract_insn (insn);

  /* If this insn loads a parameter from its stack slot, then it
     represents a savings, rather than a cost, if the parameter is
     stored in memory.  Record this fact.

     Similarly if we're loading other constants from memory (constant
     pool, TOC references, small data areas, etc) and this is the only
     assignment to the destination pseudo.

     Don't do this if SET_SRC (set) isn't a general operand, if it is
     a memory requiring special instructions to load it, decreasing
     mem_cost might result in it being loaded using the specialized
     instruction into a register, then stored into stack and loaded
     again from the stack.  See PR52208.

     Don't do this if SET_SRC (set) has side effect.  See PR56124.  */
  if (set != 0 && REG_P (SET_DEST (set)) && MEM_P (SET_SRC (set))
      && (note = find_reg_note (insn, REG_EQUIV, NULL_RTX)) != NULL_RTX
      && ((MEM_P (XEXP (note, 0))
	   && !side_effects_p (SET_SRC (set)))
	  || (CONSTANT_P (XEXP (note, 0))
	      && targetm.legitimate_constant_p (GET_MODE (SET_DEST (set)),
						XEXP (note, 0))
	      && REG_N_SETS (REGNO (SET_DEST (set))) == 1))
      && general_operand (SET_SRC (set), GET_MODE (SET_SRC (set)))
      /* LRA does not use equiv with a symbol for PIC code.  */
      && (! ira_use_lra_p || ! pic_offset_table_rtx
	  || ! contains_symbol_ref_p (XEXP (note, 0))))
    {
      enum reg_class cl = GENERAL_REGS;
      rtx reg = SET_DEST (set);
      int num = COST_INDEX (REGNO (reg));

      /* Credit (subtract) the memory load cost for this pseudo.  */
      COSTS (costs, num)->mem_cost
	-= ira_memory_move_cost[GET_MODE (reg)][cl][1] * frequency;
      record_address_regs (GET_MODE (SET_SRC (set)),
			   MEM_ADDR_SPACE (SET_SRC (set)),
			   XEXP (SET_SRC (set), 0), 0, MEM, SCRATCH,
			   frequency * 2);
      counted_mem = true;
    }

  record_operand_costs (insn, pref);

  if (ira_dump_file != NULL && internal_flag_ira_verbose > 5)
    {
      const char *p;
      fprintf (ira_dump_file, "    Final costs after insn %u", INSN_UID (insn));
      if (INSN_CODE (insn) >= 0
	  && (p = get_insn_name (INSN_CODE (insn))) != NULL)
	fprintf (ira_dump_file, " {%s}", p);
      fprintf (ira_dump_file, " (freq=%d)\n",
	       REG_FREQ_FROM_BB (BLOCK_FOR_INSN (insn)));
      dump_insn_slim (ira_dump_file, insn);
    }

  /* Now add the cost for each operand to the total costs for its
     allocno.  */
  for (i = 0; i < recog_data.n_operands; i++)
    {
      rtx op = recog_data.operand[i];

      if (GET_CODE (op) == SUBREG)
	op = SUBREG_REG (op);
      if (REG_P (op) && REGNO (op) >= FIRST_PSEUDO_REGISTER)
	{
	  int regno = REGNO (op);
	  struct costs *p = COSTS (costs, COST_INDEX (regno));
	  struct costs *q = op_costs[i];
	  int *p_costs = p->cost, *q_costs = q->cost;
	  cost_classes_t cost_classes_ptr = regno_cost_classes[regno];
	  int add_cost = 0;

	  /* If we already accounted for the memory "cost" above, don't
	     do so again.  */
	  if (!counted_mem)
	    {
	      add_cost = q->mem_cost;
	      /* Saturate at INT_MAX to avoid signed overflow.  */
	      if (add_cost > 0 && INT_MAX - add_cost < p->mem_cost)
		p->mem_cost = INT_MAX;
	      else
		p->mem_cost += add_cost;
	    }
	  if (ira_dump_file != NULL && internal_flag_ira_verbose > 5)
	    {
	      fprintf (ira_dump_file, "      op %d(r=%u) MEM:%d(+%d)",
		       i, REGNO(op), p->mem_cost, add_cost);
	    }
	  for (k = cost_classes_ptr->num - 1; k >= 0; k--)
	    {
	      add_cost = q_costs[k];
	      if (add_cost > 0 && INT_MAX - add_cost < p_costs[k])
		p_costs[k] = INT_MAX;
	      else
		p_costs[k] += add_cost;
	      if (ira_dump_file != NULL && internal_flag_ira_verbose > 5)
		{
		  fprintf (ira_dump_file, " %s:%d(+%d)",
			   reg_class_names[cost_classes_ptr->classes[k]],
			   p_costs[k], add_cost);
		}
	    }
	  if (ira_dump_file != NULL && internal_flag_ira_verbose > 5)
	    fprintf (ira_dump_file, "\n");
	}
    }
  return insn;
}



/* Print allocnos costs to file F.  */
static void
print_allocno_costs (FILE *f)
{
  int k;
  ira_allocno_t a;
  ira_allocno_iterator ai;

  ira_assert (allocno_p);
  fprintf (f, "\n");
  FOR_EACH_ALLOCNO (a, ai)
    {
      int i, rclass;
      basic_block bb;
      int regno = ALLOCNO_REGNO (a);
      cost_classes_t cost_classes_ptr = regno_cost_classes[regno];
      enum reg_class *cost_classes = cost_classes_ptr->classes;

      i = ALLOCNO_NUM (a);
      fprintf (f, "  a%d(r%d,", i, regno);
      if ((bb = ALLOCNO_LOOP_TREE_NODE (a)->bb) != NULL)
	fprintf (f, "b%d", bb->index);
      else
	fprintf (f, "l%d", ALLOCNO_LOOP_TREE_NODE (a)->loop_num);
      fprintf (f, ") costs:");
      for (k = 0; k < cost_classes_ptr->num; k++)
	{
	  rclass = cost_classes[k];
	  fprintf (f, " %s:%d", reg_class_names[rclass],
		   COSTS (costs, i)->cost[k]);
	  /* With regions, also print the accumulated (propagated)
	     cost alongside the local one.  */
	  if (flag_ira_region == IRA_REGION_ALL
	      || flag_ira_region == IRA_REGION_MIXED)
	    fprintf (f, ",%d", COSTS (total_allocno_costs, i)->cost[k]);
	}
      fprintf (f, " MEM:%i", COSTS (costs, i)->mem_cost);
      if (flag_ira_region == IRA_REGION_ALL
	  || flag_ira_region == IRA_REGION_MIXED)
	fprintf (f, ",%d", COSTS (total_allocno_costs, i)->mem_cost);
      fprintf (f, "\n");
    }
}

/* Print pseudo costs to file F.  */
static void
print_pseudo_costs (FILE *f)
{
  int regno, k;
  int rclass;
  cost_classes_t cost_classes_ptr;
  enum reg_class *cost_classes;

  ira_assert (! allocno_p);
  fprintf (f, "\n");
  for (regno = max_reg_num () - 1; regno >= FIRST_PSEUDO_REGISTER; regno--)
    {
      if (REG_N_REFS (regno) <= 0)
	continue;
      cost_classes_ptr = regno_cost_classes[regno];
      cost_classes = cost_classes_ptr->classes;
      fprintf (f, "  r%d costs:", regno);
      for (k = 0; k < cost_classes_ptr->num; k++)
	{
	  rclass = cost_classes[k];
	  fprintf (f, " %s:%d", reg_class_names[rclass],
		   COSTS (costs, regno)->cost[k]);
	}
      fprintf (f, " MEM:%i\n", COSTS (costs, regno)->mem_cost);
    }
}

/* Traverse insns of basic block BB to update the allocno or pseudo
   costs.  */
static void
process_bb_for_costs (basic_block bb)
{
  rtx_insn *insn;

  frequency = REG_FREQ_FROM_BB (bb);
  /* Use a minimum frequency of 1 so costs in never-executed blocks
     still register.  */
  if (frequency == 0)
    frequency = 1;
  FOR_BB_INSNS (bb, insn)
    insn = scan_one_insn (insn);
}

/* Traverse the BB represented by LOOP_TREE_NODE to update the allocno
   costs.  */
static void
process_bb_node_for_costs (ira_loop_tree_node_t loop_tree_node)
{
  basic_block bb;

  bb = loop_tree_node->bb;
  if (bb != NULL)
    process_bb_for_costs (bb);
}

/* Find costs of register classes and memory for allocnos or pseudos
   and their best costs.  Set up preferred, alternative and allocno
   classes for pseudos.
*/ 1756static void 1757find_costs_and_classes (FILE *dump_file) 1758{ 1759 int i, k, start, max_cost_classes_num; 1760 int pass; 1761 basic_block bb; 1762 enum reg_class *regno_best_class, new_class; 1763 1764 init_recog (); 1765 regno_best_class 1766 = (enum reg_class *) ira_allocate (max_reg_num () 1767 * sizeof (enum reg_class)); 1768 for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--) 1769 regno_best_class[i] = NO_REGS; 1770 if (!resize_reg_info () && allocno_p 1771 && pseudo_classes_defined_p && flag_expensive_optimizations) 1772 { 1773 ira_allocno_t a; 1774 ira_allocno_iterator ai; 1775 1776 pref = pref_buffer; 1777 max_cost_classes_num = 1; 1778 FOR_EACH_ALLOCNO (a, ai) 1779 { 1780 pref[ALLOCNO_NUM (a)] = reg_preferred_class (ALLOCNO_REGNO (a)); 1781 setup_regno_cost_classes_by_aclass 1782 (ALLOCNO_REGNO (a), pref[ALLOCNO_NUM (a)]); 1783 max_cost_classes_num 1784 = MAX (max_cost_classes_num, 1785 regno_cost_classes[ALLOCNO_REGNO (a)]->num); 1786 } 1787 start = 1; 1788 } 1789 else 1790 { 1791 pref = NULL; 1792 max_cost_classes_num = ira_important_classes_num; 1793 for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--) 1794 if (regno_reg_rtx[i] != NULL_RTX) 1795 setup_regno_cost_classes_by_mode (i, PSEUDO_REGNO_MODE (i)); 1796 else 1797 setup_regno_cost_classes_by_aclass (i, ALL_REGS); 1798 start = 0; 1799 } 1800 if (allocno_p) 1801 /* Clear the flag for the next compiled function. */ 1802 pseudo_classes_defined_p = false; 1803 /* Normally we scan the insns once and determine the best class to 1804 use for each allocno. However, if -fexpensive-optimizations are 1805 on, we do so twice, the second time using the tentative best 1806 classes to guide the selection. 
*/ 1807 for (pass = start; pass <= flag_expensive_optimizations; pass++) 1808 { 1809 if ((!allocno_p || internal_flag_ira_verbose > 0) && dump_file) 1810 fprintf (dump_file, 1811 "\nPass %i for finding pseudo/allocno costs\n\n", pass); 1812 1813 if (pass != start) 1814 { 1815 max_cost_classes_num = 1; 1816 for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--) 1817 { 1818 setup_regno_cost_classes_by_aclass (i, regno_best_class[i]); 1819 max_cost_classes_num 1820 = MAX (max_cost_classes_num, regno_cost_classes[i]->num); 1821 } 1822 } 1823 1824 struct_costs_size 1825 = sizeof (struct costs) + sizeof (int) * (max_cost_classes_num - 1); 1826 /* Zero out our accumulation of the cost of each class for each 1827 allocno. */ 1828 memset (costs, 0, cost_elements_num * struct_costs_size); 1829 1830 if (allocno_p) 1831 { 1832 /* Scan the instructions and record each time it would save code 1833 to put a certain allocno in a certain class. */ 1834 ira_traverse_loop_tree (true, ira_loop_tree_root, 1835 process_bb_node_for_costs, NULL); 1836 1837 memcpy (total_allocno_costs, costs, 1838 max_struct_costs_size * ira_allocnos_num); 1839 } 1840 else 1841 { 1842 basic_block bb; 1843 1844 FOR_EACH_BB_FN (bb, cfun) 1845 process_bb_for_costs (bb); 1846 } 1847 1848 if (pass == 0) 1849 pref = pref_buffer; 1850 1851 /* Now for each allocno look at how desirable each class is and 1852 find which class is preferred. */ 1853 for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--) 1854 { 1855 ira_allocno_t a, parent_a; 1856 int rclass, a_num, parent_a_num, add_cost; 1857 ira_loop_tree_node_t parent; 1858 int best_cost, allocno_cost; 1859 enum reg_class best, alt_class; 1860 cost_classes_t cost_classes_ptr = regno_cost_classes[i]; 1861 enum reg_class *cost_classes; 1862 int *i_costs = temp_costs->cost; 1863 int i_mem_cost; 1864 int equiv_savings = regno_equiv_gains[i]; 1865 1866 if (! 
allocno_p) 1867 { 1868 if (regno_reg_rtx[i] == NULL_RTX) 1869 continue; 1870 memcpy (temp_costs, COSTS (costs, i), struct_costs_size); 1871 i_mem_cost = temp_costs->mem_cost; 1872 cost_classes = cost_classes_ptr->classes; 1873 } 1874 else 1875 { 1876 if (ira_regno_allocno_map[i] == NULL) 1877 continue; 1878 memset (temp_costs, 0, struct_costs_size); 1879 i_mem_cost = 0; 1880 cost_classes = cost_classes_ptr->classes; 1881 /* Find cost of all allocnos with the same regno. */ 1882 for (a = ira_regno_allocno_map[i]; 1883 a != NULL; 1884 a = ALLOCNO_NEXT_REGNO_ALLOCNO (a)) 1885 { 1886 int *a_costs, *p_costs; 1887 1888 a_num = ALLOCNO_NUM (a); 1889 if ((flag_ira_region == IRA_REGION_ALL 1890 || flag_ira_region == IRA_REGION_MIXED) 1891 && (parent = ALLOCNO_LOOP_TREE_NODE (a)->parent) != NULL 1892 && (parent_a = parent->regno_allocno_map[i]) != NULL 1893 /* There are no caps yet. */ 1894 && bitmap_bit_p (ALLOCNO_LOOP_TREE_NODE 1895 (a)->border_allocnos, 1896 ALLOCNO_NUM (a))) 1897 { 1898 /* Propagate costs to upper levels in the region 1899 tree. 
*/ 1900 parent_a_num = ALLOCNO_NUM (parent_a); 1901 a_costs = COSTS (total_allocno_costs, a_num)->cost; 1902 p_costs = COSTS (total_allocno_costs, parent_a_num)->cost; 1903 for (k = cost_classes_ptr->num - 1; k >= 0; k--) 1904 { 1905 add_cost = a_costs[k]; 1906 if (add_cost > 0 && INT_MAX - add_cost < p_costs[k]) 1907 p_costs[k] = INT_MAX; 1908 else 1909 p_costs[k] += add_cost; 1910 } 1911 add_cost = COSTS (total_allocno_costs, a_num)->mem_cost; 1912 if (add_cost > 0 1913 && (INT_MAX - add_cost 1914 < COSTS (total_allocno_costs, 1915 parent_a_num)->mem_cost)) 1916 COSTS (total_allocno_costs, parent_a_num)->mem_cost 1917 = INT_MAX; 1918 else 1919 COSTS (total_allocno_costs, parent_a_num)->mem_cost 1920 += add_cost; 1921 1922 if (i >= first_moveable_pseudo && i < last_moveable_pseudo) 1923 COSTS (total_allocno_costs, parent_a_num)->mem_cost = 0; 1924 } 1925 a_costs = COSTS (costs, a_num)->cost; 1926 for (k = cost_classes_ptr->num - 1; k >= 0; k--) 1927 { 1928 add_cost = a_costs[k]; 1929 if (add_cost > 0 && INT_MAX - add_cost < i_costs[k]) 1930 i_costs[k] = INT_MAX; 1931 else 1932 i_costs[k] += add_cost; 1933 } 1934 add_cost = COSTS (costs, a_num)->mem_cost; 1935 if (add_cost > 0 && INT_MAX - add_cost < i_mem_cost) 1936 i_mem_cost = INT_MAX; 1937 else 1938 i_mem_cost += add_cost; 1939 } 1940 } 1941 if (i >= first_moveable_pseudo && i < last_moveable_pseudo) 1942 i_mem_cost = 0; 1943 else if (equiv_savings < 0) 1944 i_mem_cost = -equiv_savings; 1945 else if (equiv_savings > 0) 1946 { 1947 i_mem_cost = 0; 1948 for (k = cost_classes_ptr->num - 1; k >= 0; k--) 1949 i_costs[k] += equiv_savings; 1950 } 1951 1952 best_cost = (1 << (HOST_BITS_PER_INT - 2)) - 1; 1953 best = ALL_REGS; 1954 alt_class = NO_REGS; 1955 /* Find best common class for all allocnos with the same 1956 regno. 
*/ 1957 for (k = 0; k < cost_classes_ptr->num; k++) 1958 { 1959 rclass = cost_classes[k]; 1960 if (i_costs[k] < best_cost) 1961 { 1962 best_cost = i_costs[k]; 1963 best = (enum reg_class) rclass; 1964 } 1965 else if (i_costs[k] == best_cost) 1966 best = ira_reg_class_subunion[best][rclass]; 1967 if (pass == flag_expensive_optimizations 1968 /* We still prefer registers to memory even at this 1969 stage if their costs are the same. We will make 1970 a final decision during assigning hard registers 1971 when we have all info including more accurate 1972 costs which might be affected by assigning hard 1973 registers to other pseudos because the pseudos 1974 involved in moves can be coalesced. */ 1975 && i_costs[k] <= i_mem_cost 1976 && (reg_class_size[reg_class_subunion[alt_class][rclass]] 1977 > reg_class_size[alt_class])) 1978 alt_class = reg_class_subunion[alt_class][rclass]; 1979 } 1980 alt_class = ira_allocno_class_translate[alt_class]; 1981 if (best_cost > i_mem_cost 1982 && ! non_spilled_static_chain_regno_p (i)) 1983 regno_aclass[i] = NO_REGS; 1984 else if (!optimize && !targetm.class_likely_spilled_p (best)) 1985 /* Registers in the alternative class are likely to need 1986 longer or slower sequences than registers in the best class. 1987 When optimizing we make some effort to use the best class 1988 over the alternative class where possible, but at -O0 we 1989 effectively give the alternative class equal weight. 1990 We then run the risk of using slower alternative registers 1991 when plenty of registers from the best class are still free. 1992 This is especially true because live ranges tend to be very 1993 short in -O0 code and so register pressure tends to be low. 1994 1995 Avoid that by ignoring the alternative class if the best 1996 class has plenty of registers. 1997 1998 The union class arrays give important classes and only 1999 part of it are allocno classes. So translate them into 2000 allocno classes. 
*/ 2001 regno_aclass[i] = ira_allocno_class_translate[best]; 2002 else 2003 { 2004 /* Make the common class the biggest class of best and 2005 alt_class. Translate the common class into an 2006 allocno class too. */ 2007 regno_aclass[i] = (ira_allocno_class_translate 2008 [ira_reg_class_superunion[best][alt_class]]); 2009 ira_assert (regno_aclass[i] != NO_REGS 2010 && ira_reg_allocno_class_p[regno_aclass[i]]); 2011 } 2012 if ((new_class 2013 = (reg_class) (targetm.ira_change_pseudo_allocno_class 2014 (i, regno_aclass[i], best))) != regno_aclass[i]) 2015 { 2016 regno_aclass[i] = new_class; 2017 if (hard_reg_set_subset_p (reg_class_contents[new_class], 2018 reg_class_contents[best])) 2019 best = new_class; 2020 if (hard_reg_set_subset_p (reg_class_contents[new_class], 2021 reg_class_contents[alt_class])) 2022 alt_class = new_class; 2023 } 2024 if (pass == flag_expensive_optimizations) 2025 { 2026 if (best_cost > i_mem_cost 2027 /* Do not assign NO_REGS to static chain pointer 2028 pseudo when non-local goto is used. */ 2029 && ! non_spilled_static_chain_regno_p (i)) 2030 best = alt_class = NO_REGS; 2031 else if (best == alt_class) 2032 alt_class = NO_REGS; 2033 setup_reg_classes (i, best, alt_class, regno_aclass[i]); 2034 if ((!allocno_p || internal_flag_ira_verbose > 2) 2035 && dump_file != NULL) 2036 fprintf (dump_file, 2037 " r%d: preferred %s, alternative %s, allocno %s\n", 2038 i, reg_class_names[best], reg_class_names[alt_class], 2039 reg_class_names[regno_aclass[i]]); 2040 } 2041 regno_best_class[i] = best; 2042 if (! allocno_p) 2043 { 2044 pref[i] = (best_cost > i_mem_cost 2045 && ! non_spilled_static_chain_regno_p (i) 2046 ? 
NO_REGS : best); 2047 continue; 2048 } 2049 for (a = ira_regno_allocno_map[i]; 2050 a != NULL; 2051 a = ALLOCNO_NEXT_REGNO_ALLOCNO (a)) 2052 { 2053 enum reg_class aclass = regno_aclass[i]; 2054 int a_num = ALLOCNO_NUM (a); 2055 int *total_a_costs = COSTS (total_allocno_costs, a_num)->cost; 2056 int *a_costs = COSTS (costs, a_num)->cost; 2057 2058 if (aclass == NO_REGS) 2059 best = NO_REGS; 2060 else 2061 { 2062 /* Finding best class which is subset of the common 2063 class. */ 2064 best_cost = (1 << (HOST_BITS_PER_INT - 2)) - 1; 2065 allocno_cost = best_cost; 2066 best = ALL_REGS; 2067 for (k = 0; k < cost_classes_ptr->num; k++) 2068 { 2069 rclass = cost_classes[k]; 2070 if (! ira_class_subset_p[rclass][aclass]) 2071 continue; 2072 if (total_a_costs[k] < best_cost) 2073 { 2074 best_cost = total_a_costs[k]; 2075 allocno_cost = a_costs[k]; 2076 best = (enum reg_class) rclass; 2077 } 2078 else if (total_a_costs[k] == best_cost) 2079 { 2080 best = ira_reg_class_subunion[best][rclass]; 2081 allocno_cost = MAX (allocno_cost, a_costs[k]); 2082 } 2083 } 2084 ALLOCNO_CLASS_COST (a) = allocno_cost; 2085 } 2086 if (internal_flag_ira_verbose > 2 && dump_file != NULL 2087 && (pass == 0 || pref[a_num] != best)) 2088 { 2089 fprintf (dump_file, " a%d (r%d,", a_num, i); 2090 if ((bb = ALLOCNO_LOOP_TREE_NODE (a)->bb) != NULL) 2091 fprintf (dump_file, "b%d", bb->index); 2092 else 2093 fprintf (dump_file, "l%d", 2094 ALLOCNO_LOOP_TREE_NODE (a)->loop_num); 2095 fprintf (dump_file, ") best %s, allocno %s\n", 2096 reg_class_names[best], 2097 reg_class_names[aclass]); 2098 } 2099 pref[a_num] = best; 2100 if (pass == flag_expensive_optimizations && best != aclass 2101 && ira_class_hard_regs_num[best] > 0 2102 && (ira_reg_class_max_nregs[best][ALLOCNO_MODE (a)] 2103 >= ira_class_hard_regs_num[best])) 2104 { 2105 int ind = cost_classes_ptr->index[aclass]; 2106 2107 ira_assert (ind >= 0); 2108 ira_init_register_move_cost_if_necessary (ALLOCNO_MODE (a)); 2109 ira_add_allocno_pref (a, 
ira_class_hard_regs[best][0],
				   (a_costs[ind] - ALLOCNO_CLASS_COST (a))
				   / (ira_register_move_cost
				      [ALLOCNO_MODE (a)][best][aclass]));
	      /* Pretend the whole of BEST is as cheap as ACLASS: copy
		 ACLASS's accumulated cost to every cost class contained
		 in BEST.  */
	      for (k = 0; k < cost_classes_ptr->num; k++)
		if (ira_class_subset_p[cost_classes[k]][best])
		  a_costs[k] = a_costs[ind];
	    }
	}
    }

  if (internal_flag_ira_verbose > 4 && dump_file)
    {
      if (allocno_p)
	print_allocno_costs (dump_file);
      else
	print_pseudo_costs (dump_file);
      fprintf (dump_file,"\n");
    }
}
  ira_free (regno_best_class);
}



/* Process moves involving hard regs to modify allocno hard register
   costs.  We can do this only after determining allocno class.  If a
   hard register forms a register class, then moves with the hard
   register are already taken into account in class costs for the
   allocno.  */
static void
process_bb_node_for_hard_reg_moves (ira_loop_tree_node_t loop_tree_node)
{
  int i, freq, src_regno, dst_regno, hard_regno, a_regno;
  bool to_p;
  ira_allocno_t a, curr_a;
  ira_loop_tree_node_t curr_loop_tree_node;
  enum reg_class rclass;
  basic_block bb;
  rtx_insn *insn;
  rtx set, src, dst;

  /* Only basic-block nodes of the loop tree are interesting here.  */
  bb = loop_tree_node->bb;
  if (bb == NULL)
    return;
  /* Weight the cost adjustments by the block's execution frequency;
     never use zero so a move in a cold block still counts.  */
  freq = REG_FREQ_FROM_BB (bb);
  if (freq == 0)
    freq = 1;
  FOR_BB_INSNS (bb, insn)
    {
      if (!NONDEBUG_INSN_P (insn))
	continue;
      /* Only simple single-set register-to-register copies between a
	 pseudo and a hard register are of interest.  */
      set = single_set (insn);
      if (set == NULL_RTX)
	continue;
      dst = SET_DEST (set);
      src = SET_SRC (set);
      if (! REG_P (dst) || ! REG_P (src))
	continue;
      dst_regno = REGNO (dst);
      src_regno = REGNO (src);
      /* TO_P is true when the hard register is copied into the
	 pseudo, false when the pseudo is copied into the hard reg.  */
      if (dst_regno >= FIRST_PSEUDO_REGISTER
	  && src_regno < FIRST_PSEUDO_REGISTER)
	{
	  hard_regno = src_regno;
	  a = ira_curr_regno_allocno_map[dst_regno];
	  to_p = true;
	}
      else if (src_regno >= FIRST_PSEUDO_REGISTER
	       && dst_regno < FIRST_PSEUDO_REGISTER)
	{
	  hard_regno = dst_regno;
	  a = ira_curr_regno_allocno_map[src_regno];
	  to_p = false;
	}
      else
	continue;
      if (reg_class_size[(int) REGNO_REG_CLASS (hard_regno)]
	  == (ira_reg_class_max_nregs
	      [REGNO_REG_CLASS (hard_regno)][(int) ALLOCNO_MODE(a)]))
	/* If the class can provide only one hard reg to the allocno,
	   we processed the insn record_operand_costs already and we
	   actually updated the hard reg cost there.  */
	continue;
      rclass = ALLOCNO_CLASS (a);
      /* The hard register must belong to the allocno's class and have
	 a valid index in it; otherwise there is nothing to adjust.  */
      if (! TEST_HARD_REG_BIT (reg_class_contents[rclass], hard_regno))
	continue;
      i = ira_class_hard_reg_index[rclass][hard_regno];
      if (i < 0)
	continue;
      a_regno = ALLOCNO_REGNO (a);
      /* Record the preference for HARD_REGNO on this allocno and on
	 the allocnos of the same pseudo in all enclosing loop tree
	 nodes.  */
      for (curr_loop_tree_node = ALLOCNO_LOOP_TREE_NODE (a);
	   curr_loop_tree_node != NULL;
	   curr_loop_tree_node = curr_loop_tree_node->parent)
	if ((curr_a = curr_loop_tree_node->regno_allocno_map[a_regno]) != NULL)
	  ira_add_allocno_pref (curr_a, hard_regno, freq);
      {
	int cost;
	enum reg_class hard_reg_class;
	machine_mode mode;

	mode = ALLOCNO_MODE (a);
	hard_reg_class = REGNO_REG_CLASS (hard_regno);
	ira_init_register_move_cost_if_necessary (mode);
	/* The register move cost table is direction-sensitive.  */
	cost = (to_p ?
ira_register_move_cost[mode][hard_reg_class][rclass]
		: ira_register_move_cost[mode][rclass][hard_reg_class]) * freq;
	/* Make sure the allocno has explicit cost vectors, then make
	   HARD_REGNO cheaper by the frequency-weighted copy cost.  */
	ira_allocate_and_set_costs (&ALLOCNO_HARD_REG_COSTS (a), rclass,
				    ALLOCNO_CLASS_COST (a));
	ira_allocate_and_set_costs (&ALLOCNO_CONFLICT_HARD_REG_COSTS (a),
				    rclass, 0);
	ALLOCNO_HARD_REG_COSTS (a)[i] -= cost;
	ALLOCNO_CONFLICT_HARD_REG_COSTS (a)[i] -= cost;
	/* Keep the class cost no bigger than the cheapest hard
	   register cost seen so far.  */
	ALLOCNO_CLASS_COST (a) = MIN (ALLOCNO_CLASS_COST (a),
				      ALLOCNO_HARD_REG_COSTS (a)[i]);
      }
    }
}

/* After we find hard register and memory costs for allocnos, define
   its class and modify hard register cost because insns moving
   allocno to/from hard registers.  */
static void
setup_allocno_class_and_costs (void)
{
  int i, j, n, regno, hard_regno, num;
  int *reg_costs;
  enum reg_class aclass, rclass;
  ira_allocno_t a;
  ira_allocno_iterator ai;
  cost_classes_t cost_classes_ptr;

  /* This phase only runs in allocno mode (ira_costs), never in the
     pseudo-class mode of ira_set_pseudo_classes.  */
  ira_assert (allocno_p);
  FOR_EACH_ALLOCNO (a, ai)
    {
      i = ALLOCNO_NUM (a);
      regno = ALLOCNO_REGNO (a);
      aclass = regno_aclass[regno];
      cost_classes_ptr = regno_cost_classes[regno];
      ira_assert (pref[i] == NO_REGS || aclass != NO_REGS);
      ALLOCNO_MEMORY_COST (a) = COSTS (costs, i)->mem_cost;
      ira_set_allocno_class (a, aclass);
      if (aclass == NO_REGS)
	continue;
      if (optimize && ALLOCNO_CLASS (a) != pref[i])
	{
	  /* The allocno class differs from the preferred class, so
	     build a per-hard-register cost vector: preferred regs get
	     the cheap class cost, the rest get their own class's
	     recorded cost.  */
	  n = ira_class_hard_regs_num[aclass];
	  ALLOCNO_HARD_REG_COSTS (a)
	    = reg_costs = ira_allocate_cost_vector (aclass);
	  for (j = n - 1; j >= 0; j--)
	    {
	      hard_regno = ira_class_hard_regs[aclass][j];
	      if (TEST_HARD_REG_BIT (reg_class_contents[pref[i]], hard_regno))
		reg_costs[j] = ALLOCNO_CLASS_COST (a);
	      else
		{
		  rclass = REGNO_REG_CLASS (hard_regno);
		  num = cost_classes_ptr->index[rclass];
		  if (num < 0)
		    {
		      /* RCLASS itself is not a cost class; fall back
			 to the cost class recorded for this hard
			 register.  */
		      num = cost_classes_ptr->hard_regno_index[hard_regno];
		      ira_assert (num >= 0);
		    }
		  reg_costs[j] = COSTS (costs, i)->cost[num];
		}
	    }
	}
    }
  if (optimize)
    ira_traverse_loop_tree (true, ira_loop_tree_root,
			    process_bb_node_for_hard_reg_moves, NULL);
}



/* Function called once during compiler work.  Clear the cost vector
   pointers so that ira_init_costs can (re)allocate them.  */
void
ira_init_costs_once (void)
{
  int i;

  init_cost = NULL;
  for (i = 0; i < MAX_RECOG_OPERANDS; i++)
    {
      op_costs[i] = NULL;
      this_op_costs[i] = NULL;
    }
  temp_costs = NULL;
}

/* Free allocated temporary cost vectors.  Safe to call when the
   vectors were never allocated: free (NULL) is a no-op and the
   pointers are reset to NULL afterwards.  */
void
target_ira_int::free_ira_costs ()
{
  int i;

  free (x_init_cost);
  x_init_cost = NULL;
  for (i = 0; i < MAX_RECOG_OPERANDS; i++)
    {
      free (x_op_costs[i]);
      free (x_this_op_costs[i]);
      x_op_costs[i] = x_this_op_costs[i] = NULL;
    }
  free (x_temp_costs);
  x_temp_costs = NULL;
}

/* This is called each time register related information is
   changed.  */
void
ira_init_costs (void)
{
  int i;

  this_target_ira_int->free_ira_costs ();
  /* One int of the trailing cost array is already inside struct
     costs (the cost[1] member), hence the "- 1" below.  */
  max_struct_costs_size
    = sizeof (struct costs) + sizeof (int) * (ira_important_classes_num - 1);
  /* Don't use ira_allocate because vectors live through several IRA
     calls.  */
  init_cost = (struct costs *) xmalloc (max_struct_costs_size);
  init_cost->mem_cost = 1000000;
  for (i = 0; i < ira_important_classes_num; i++)
    init_cost->cost[i] = 1000000;
  for (i = 0; i < MAX_RECOG_OPERANDS; i++)
    {
      op_costs[i] = (struct costs *) xmalloc (max_struct_costs_size);
      this_op_costs[i] = (struct costs *) xmalloc (max_struct_costs_size);
    }
  temp_costs = (struct costs *) xmalloc (max_struct_costs_size);
}



/* Common initialization function for ira_costs and
   ira_set_pseudo_classes.
*/
static void
init_costs (void)
{
  init_subregs_of_mode ();
  /* COSTS and PREF_BUFFER are sized by the number of cost elements
     (allocnos or pseudos, depending on the caller); the per-regno
     arrays are sized by max_reg_num ().  All are released by
     finish_costs.  */
  costs = (struct costs *) ira_allocate (max_struct_costs_size
					 * cost_elements_num);
  pref_buffer = (enum reg_class *) ira_allocate (sizeof (enum reg_class)
						 * cost_elements_num);
  regno_aclass = (enum reg_class *) ira_allocate (sizeof (enum reg_class)
						  * max_reg_num ());
  regno_equiv_gains = (int *) ira_allocate (sizeof (int) * max_reg_num ());
  memset (regno_equiv_gains, 0, sizeof (int) * max_reg_num ());
}

/* Common finalization function for ira_costs and
   ira_set_pseudo_classes.  Frees the buffers in the reverse order of
   their allocation in init_costs.  */
static void
finish_costs (void)
{
  finish_subregs_of_mode ();
  ira_free (regno_equiv_gains);
  ira_free (regno_aclass);
  ira_free (pref_buffer);
  ira_free (costs);
}

/* Entry function which defines register class, memory and hard
   register costs for each allocno.  */
void
ira_costs (void)
{
  allocno_p = true;
  cost_elements_num = ira_allocnos_num;
  init_costs ();
  /* TOTAL_ALLOCNO_COSTS holds costs accumulated over the loop tree;
     it only exists for the duration of this call.  */
  total_allocno_costs = (struct costs *) ira_allocate (max_struct_costs_size
						       * ira_allocnos_num);
  initiate_regno_cost_classes ();
  calculate_elim_costs_all_insns ();
  find_costs_and_classes (ira_dump_file);
  setup_allocno_class_and_costs ();
  finish_regno_cost_classes ();
  finish_costs ();
  ira_free (total_allocno_costs);
}

/* Entry function which defines classes for pseudos.
   Set pseudo_classes_defined_p only if DEFINE_PSEUDO_CLASSES is true.
   */
void
ira_set_pseudo_classes (bool define_pseudo_classes, FILE *dump_file)
{
  /* Work on pseudos rather than allocnos; cost elements are then
     indexed directly by register number.  */
  allocno_p = false;
  internal_flag_ira_verbose = flag_ira_verbose;
  cost_elements_num = max_reg_num ();
  init_costs ();
  initiate_regno_cost_classes ();
  find_costs_and_classes (dump_file);
  finish_regno_cost_classes ();
  if (define_pseudo_classes)
    pseudo_classes_defined_p = true;

  finish_costs ();
}



/* Change hard register costs for allocnos which lives through
   function calls.  This is called only when we found all intersected
   calls during building allocno live ranges.  */
void
ira_tune_allocno_costs (void)
{
  int j, n, regno;
  int cost, min_cost, *reg_costs;
  enum reg_class aclass;
  machine_mode mode;
  ira_allocno_t a;
  ira_allocno_iterator ai;
  ira_allocno_object_iterator oi;
  ira_object_t obj;
  bool skip_p;

  FOR_EACH_ALLOCNO (a, ai)
    {
      aclass = ALLOCNO_CLASS (a);
      if (aclass == NO_REGS)
	continue;
      mode = ALLOCNO_MODE (a);
      n = ira_class_hard_regs_num[aclass];
      min_cost = INT_MAX;
      /* Penalize hard registers that would need saving around the
	 calls this allocno lives through.  */
      if (ALLOCNO_CALLS_CROSSED_NUM (a)
	  != ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a))
	{
	  ira_allocate_and_set_costs
	    (&ALLOCNO_HARD_REG_COSTS (a), aclass,
	     ALLOCNO_CLASS_COST (a));
	  reg_costs = ALLOCNO_HARD_REG_COSTS (a);
	  for (j = n - 1; j >= 0; j--)
	    {
	      regno = ira_class_hard_regs[aclass][j];
	      /* Skip hard registers that conflict with any of the
		 allocno's objects; they cannot be assigned anyway.  */
	      skip_p = false;
	      FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
		{
		  if (ira_hard_reg_set_intersection_p (regno, mode,
						       OBJECT_CONFLICT_HARD_REGS
						       (obj)))
		    {
		      skip_p = true;
		      break;
		    }
		}
	      if (skip_p)
		continue;
	      cost = 0;
	      if (ira_need_caller_save_p (a, regno))
		cost += ira_caller_save_cost (a);
#ifdef IRA_HARD_REGNO_ADD_COST_MULTIPLIER
	      /* Target-specific extra cost for using this hard reg,
		 scaled by memory move cost and allocno frequency.  */
	      {
		auto rclass = REGNO_REG_CLASS (regno);
		cost += ((ira_memory_move_cost[mode][rclass][0]
			  +
ira_memory_move_cost[mode][rclass][1])
			 * ALLOCNO_FREQ (a)
			 * IRA_HARD_REGNO_ADD_COST_MULTIPLIER (regno) / 2);
	      }
#endif
	      /* Saturate instead of overflowing when adding COST.  */
	      if (INT_MAX - cost < reg_costs[j])
		reg_costs[j] = INT_MAX;
	      else
		reg_costs[j] += cost;
	      if (min_cost > reg_costs[j])
		min_cost = reg_costs[j];
	    }
	}
      /* If every hard register was skipped, MIN_COST stays INT_MAX
	 and the class cost is left unchanged.  */
      if (min_cost != INT_MAX)
	ALLOCNO_CLASS_COST (a) = min_cost;

      /* Some targets allow pseudos to be allocated to unaligned sequences
	 of hard registers.  However, selecting an unaligned sequence can
	 unnecessarily restrict later allocations.  So increase the cost of
	 unaligned hard regs to encourage the use of aligned hard regs.  */
      {
	const int nregs = ira_reg_class_max_nregs[aclass][ALLOCNO_MODE (a)];

	if (nregs > 1)
	  {
	    ira_allocate_and_set_costs
	      (&ALLOCNO_HARD_REG_COSTS (a), aclass, ALLOCNO_CLASS_COST (a));
	    reg_costs = ALLOCNO_HARD_REG_COSTS (a);
	    for (j = n - 1; j >= 0; j--)
	      {
		regno = ira_non_ordered_class_hard_regs[aclass][j];
		if ((regno % nregs) != 0)
		  {
		    int index = ira_class_hard_reg_index[aclass][regno];
		    ira_assert (index != -1);
		    reg_costs[index] += ALLOCNO_FREQ (a);
		  }
	      }
	  }
      }
    }
}

/* Add COST to the estimated gain for eliminating REGNO with its
   equivalence.  If COST is zero, record that no such elimination is
   possible.  */

void
ira_adjust_equiv_reg_cost (unsigned regno, int cost)
{
  if (cost == 0)
    regno_equiv_gains[regno] = 0;
  else
    regno_equiv_gains[regno] += cost;
}

/* Release the per-target temporary cost vectors (see
   target_ira_int::free_ira_costs).  */
void
ira_costs_cc_finalize (void)
{
  this_target_ira_int->free_ira_costs ();
}