/*
 * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "memory/allocation.inline.hpp"
#include "opto/block.hpp"
#include "opto/cfgnode.hpp"
#include "opto/chaitin.hpp"
#include "opto/coalesce.hpp"
#include "opto/connode.hpp"
#include "opto/indexSet.hpp"
#include "opto/machnode.hpp"
#include "opto/matcher.hpp"
#include "opto/regmask.hpp"

#ifndef PRODUCT
void PhaseCoalesce::dump(Node *n) const {
  // Being a const function means I cannot use 'Find'
  uint r = _phc._lrg_map.find(n);
  tty->print("L%d/N%d ", r, n->_idx);
}

void PhaseCoalesce::dump() const {
  // I know I have a block layout now, so I can print blocks in a loop
  for( uint i=0; i<_phc._cfg.number_of_blocks(); i++ ) {
    uint j;
    Block* b = _phc._cfg.get_block(i);
    // Print a nice block header
    tty->print("B%d: ", b->_pre_order);
    for( j=1; j<b->num_preds(); j++ )
      tty->print("B%d ", _phc._cfg.get_block_for_node(b->pred(j))->_pre_order);
    tty->print("-> ");
    for( j=0; j<b->_num_succs; j++ )
      tty->print("B%d ", b->_succs[j]->_pre_order);
    tty->print(" IDom: B%d/#%d\n", b->_idom ? b->_idom->_pre_order : 0, b->_dom_depth);
    uint cnt = b->number_of_nodes();
    for( j=0; j<cnt; j++ ) {
      Node *n = b->get_node(j);
      dump( n );
      tty->print("\t%s\t", n->Name());

      // Dump the inputs
      uint k;                     // Exit value of loop
      for( k=0; k<n->req(); k++ ) // For all required inputs
        if( n->in(k) ) dump( n->in(k) );
        else tty->print("_ ");
      int any_prec = 0;
      for( ; k<n->len(); k++ )    // For all precedence inputs
        if( n->in(k) ) {
          if( !any_prec++ ) tty->print(" |");
          dump( n->in(k) );
        }

      // Dump node-specific info
      n->dump_spec(tty);
      tty->print("\n");
    }
    tty->print("\n");
  }
}
#endif

// Combine the live ranges def'd by these 2 Nodes.  N2 is an input to N1.
void PhaseCoalesce::combine_these_two(Node *n1, Node *n2) {
  uint lr1 = _phc._lrg_map.find(n1);
  uint lr2 = _phc._lrg_map.find(n2);
  if( lr1 != lr2 &&                            // Different live ranges already AND
      !_phc._ifg->test_edge_sq( lr1, lr2 ) ) { // Do not interfere
    LRG *lrg1 = &_phc.lrgs(lr1);
    LRG *lrg2 = &_phc.lrgs(lr2);
    // Not an oop->int cast; oop->oop, int->int, AND int->oop are OK.

    // Now, why is int->oop OK?  We end up declaring a raw-pointer as an oop
    // and in general that's a bad thing.
    // However, int->oop conversions only
    // happen at GC points, so the lifetime of the misclassified raw-pointer
    // is from the CheckCastPP (that converts it to an oop) backwards up
    // through a merge point and into the slow-path call, and around the
    // diamond up to the heap-top check and back down into the slow-path call.
    // The misclassified raw pointer is NOT live across the slow-path call,
    // and so does not appear in any GC info, so the fact that it is
    // misclassified is OK.

    if( (lrg1->_is_oop || !lrg2->_is_oop) && // not an oop->int cast AND
        // Compatible final mask
        lrg1->mask().overlap( lrg2->mask() ) ) {
      // Merge larger into smaller.
      if( lr1 > lr2 ) {
        uint tmp = lr1; lr1 = lr2; lr2 = tmp;
        Node *n = n1; n1 = n2; n2 = n;
        LRG *ltmp = lrg1; lrg1 = lrg2; lrg2 = ltmp;
      }
      // Union lr2 into lr1
      _phc.Union( n1, n2 );
      if (lrg1->_maxfreq < lrg2->_maxfreq)
        lrg1->_maxfreq = lrg2->_maxfreq;
      // Merge in the IFG
      _phc._ifg->Union( lr1, lr2 );
      // Combine register restrictions
      lrg1->AND(lrg2->mask());
    }
  }
}

// Copy coalescing
void PhaseCoalesce::coalesce_driver() {
  verify();
  // Coalesce from high frequency to low
  for (uint i = 0; i < _phc._cfg.number_of_blocks(); i++) {
    coalesce(_phc._blks[i]);
  }
}

// I am inserting copies to come out of SSA form.  In the general case, I am
// doing a parallel renaming.  I'm in the Named world now, so I can't do a
// general parallel renaming.  All the copies now use "names" (live-ranges)
// to carry values instead of the explicit use-def chains.  Suppose I need to
// insert 2 copies into the same block.  They copy L161->L128 and L128->L132.
// If I insert them in the wrong order, then L128 gets clobbered before it
// can be used by the second copy.  This cannot happen in the SSA model;
// direct use-def chains get me the right value.  It DOES happen in the named
// model, so I have to handle the reordering of copies.
//
// In general, I need to topo-sort the placed copies to avoid conflicts.
// It's possible to have a closed cycle of copies (e.g., recirculating the same
// values around a loop).  In this case I need a temp to break the cycle.
void PhaseAggressiveCoalesce::insert_copy_with_overlap( Block *b, Node *copy, uint dst_name, uint src_name ) {

  // Scan backwards for the location of the last use of the dst_name.
  // I am about to clobber the dst_name, so the copy must be inserted
  // after the last use.  The last use is really the first use on a
  // backwards scan.
  uint i = b->end_idx()-1;
  while(1) {
    Node *n = b->get_node(i);
    // Check for end of virtual copies; this is also the end of the
    // parallel renaming effort.
    if (n->_idx < _unique) {
      break;
    }
    uint idx = n->is_Copy();
    assert( idx || n->is_Con() || n->is_MachProj(), "Only copies during parallel renaming" );
    if (idx && _phc._lrg_map.find(n->in(idx)) == dst_name) {
      break;
    }
    i--;
  }
  uint last_use_idx = i;

  // Also search for any kill of src_name that exits the block.
  // Since the copy uses src_name, I have to come before any kill.
  uint kill_src_idx = b->end_idx();
  // There can be only one kill that exits any block, and that is
  // the last kill.  Thus it is the first kill on a backwards scan.
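  // A concrete cycle needing a temp: copies L128->L132 and L132->L128 in
  // the same block.  Whichever copy is placed first, the last use of its
  // dst_name is the same Node as the kill of its src_name, so the overlap
  // test below fires and a temp T breaks the cycle:
  // L128->T, L132->L128, T->L132.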
  i = b->end_idx()-1;
  while (1) {
    Node *n = b->get_node(i);
    // Check for end of virtual copies; this is also the end of the
    // parallel renaming effort.
    if (n->_idx < _unique) {
      break;
    }
    assert( n->is_Copy() || n->is_Con() || n->is_MachProj(), "Only copies during parallel renaming" );
    if (_phc._lrg_map.find(n) == src_name) {
      kill_src_idx = i;
      break;
    }
    i--;
  }
  // Need a temp?  Last use of dst comes after the kill of src?
  if (last_use_idx >= kill_src_idx) {
    // Need to break a cycle with a temp
    uint idx = copy->is_Copy();
    Node *tmp = copy->clone();
    uint max_lrg_id = _phc._lrg_map.max_lrg_id();
    _phc.new_lrg(tmp, max_lrg_id);
    _phc._lrg_map.set_max_lrg_id(max_lrg_id + 1);

    // Insert new temp between copy and source
    tmp->set_req(idx, copy->in(idx));
    copy->set_req(idx, tmp);
    // Save source in temp early, before source is killed
    b->insert_node(tmp, kill_src_idx);
    _phc._cfg.map_node_to_block(tmp, b);
    last_use_idx++;
  }

  // Insert just after last use
  b->insert_node(copy, last_use_idx + 1);
}

void PhaseAggressiveCoalesce::insert_copies( Matcher &matcher ) {
  // We do LRG compression and fix the liveout data only here, since the
  // other place, in Split(), is guarded by an assert that we never hit.
  _phc._lrg_map.compress_uf_map_for_nodes();
  // Fix block's liveout data for compressed live ranges.
  for (uint lrg = 1; lrg < _phc._lrg_map.max_lrg_id(); lrg++) {
    uint compressed_lrg = _phc._lrg_map.find(lrg);
    if (lrg != compressed_lrg) {
      for (uint bidx = 0; bidx < _phc._cfg.number_of_blocks(); bidx++) {
        IndexSet *liveout = _phc._live->live(_phc._cfg.get_block(bidx));
        if (liveout->member(lrg)) {
          liveout->remove(lrg);
          liveout->insert(compressed_lrg);
        }
      }
    }
  }

  // All new nodes added are actual copies to replace virtual copies.
  // Nodes with index less than '_unique' are original, non-virtual Nodes.
  _unique = C->unique();

  for (uint i = 0; i < _phc._cfg.number_of_blocks(); i++) {
    C->check_node_count(NodeLimitFudgeFactor, "out of nodes in coalesce");
    if (C->failing()) return;
    Block *b = _phc._cfg.get_block(i);
    uint cnt = b->num_preds();  // Number of inputs to the Phi

    for( uint l = 1; l<b->number_of_nodes(); l++ ) {
      Node *n = b->get_node(l);

      // Do not use removed-copies, use copied value instead
      uint ncnt = n->req();
      for( uint k = 1; k<ncnt; k++ ) {
        Node *copy = n->in(k);
        uint cidx = copy->is_Copy();
        if( cidx ) {
          Node *def = copy->in(cidx);
          if (_phc._lrg_map.find(copy) == _phc._lrg_map.find(def)) {
            n->set_req(k, def);
          }
        }
      }

      // Remove any explicit copies that get coalesced.
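      // A copy whose input is in the same live range as the copy itself
      // moves a value onto itself; rewire its uses straight to the def and
      // delete the copy.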
      uint cidx = n->is_Copy();
      if( cidx ) {
        Node *def = n->in(cidx);
        if (_phc._lrg_map.find(n) == _phc._lrg_map.find(def)) {
          n->replace_by(def);
          n->set_req(cidx, NULL);
          b->remove_node(l);
          l--;
          continue;
        }
      }

      if (n->is_Phi()) {
        // Get the chosen name for the Phi
        uint phi_name = _phc._lrg_map.find(n);
        // Ignore the pre-allocated specials
        if (!phi_name) {
          continue;
        }
        // Check for mismatched inputs to the Phi
        for (uint j = 1; j < cnt; j++) {
          Node *m = n->in(j);
          uint src_name = _phc._lrg_map.find(m);
          if (src_name != phi_name) {
            Block *pred = _phc._cfg.get_block_for_node(b->pred(j));
            Node *copy;
            assert(!m->is_Con() || m->is_Mach(), "all Con must be Mach");
            // Rematerialize constants instead of copying them.
            // We do this only for immediate constants; we avoid constant table
            // loads because that would unsafely extend the live range of the
            // constant table base.
            if (m->is_Mach() && m->as_Mach()->is_Con() && !m->as_Mach()->is_MachConstant() &&
                m->as_Mach()->rematerialize()) {
              copy = m->clone();
              // Insert the copy in the predecessor basic block
              pred->add_inst(copy);
              // Copy any flags as well
              _phc.clone_projs(pred, pred->end_idx(), m, copy, _phc._lrg_map);
            } else {
              uint ireg = m->ideal_reg();
              if (ireg == 0 || ireg == Op_RegFlags) {
                assert(false, "attempted to spill a non-spillable item: %d: %s, ireg = %u, spill_type: %s",
                       m->_idx, m->Name(), ireg, MachSpillCopyNode::spill_type(MachSpillCopyNode::PhiInput));
                C->record_method_not_compilable("attempted to spill a non-spillable item");
                return;
              }
              const RegMask *rm = C->matcher()->idealreg2spillmask[ireg];
              copy = new MachSpillCopyNode(MachSpillCopyNode::PhiInput, m, *rm, *rm);
              // Find a good place to insert.  Kinda tricky; use a subroutine.
              insert_copy_with_overlap(pred, copy, phi_name, src_name);
            }
            // Insert the copy in the use-def chain
            n->set_req(j, copy);
            _phc._cfg.map_node_to_block(copy, pred);
            // Extend ("register allocate") the names array for the copy.
            _phc._lrg_map.extend(copy->_idx, phi_name);
          } // End of if Phi names do not match
        } // End of for all inputs to Phi
      } else { // End of if Phi

        // Now check for 2-address instructions
        uint idx;
        if( n->is_Mach() && (idx = n->as_Mach()->two_adr()) ) {
          // Get the chosen name for the Node
          uint name = _phc._lrg_map.find(n);
          assert (name, "no 2-address specials");
          // Check for name mis-match on the 2-address input
          Node *m = n->in(idx);
          if (_phc._lrg_map.find(m) != name) {
            Node *copy;
            assert(!m->is_Con() || m->is_Mach(), "all Con must be Mach");
            // At this point it is unsafe to extend live ranges (6550579).
            // Rematerialize only constants, as we do for Phi above.
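            // Cloning the constant gives this use its own def right next to
            // it, so no existing live range is stretched across the block.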
            if (m->is_Mach() && m->as_Mach()->is_Con() && !m->as_Mach()->is_MachConstant() &&
                m->as_Mach()->rematerialize()) {
              copy = m->clone();
              // Insert the copy in the basic block, just before us
              b->insert_node(copy, l++);
              l += _phc.clone_projs(b, l, m, copy, _phc._lrg_map);
            } else {
              uint ireg = m->ideal_reg();
              if (ireg == 0 || ireg == Op_RegFlags) {
                assert(false, "attempted to spill a non-spillable item: %d: %s, ireg = %u, spill_type: %s",
                       m->_idx, m->Name(), ireg, MachSpillCopyNode::spill_type(MachSpillCopyNode::TwoAddress));
                C->record_method_not_compilable("attempted to spill a non-spillable item");
                return;
              }
              const RegMask *rm = C->matcher()->idealreg2spillmask[ireg];
              copy = new MachSpillCopyNode(MachSpillCopyNode::TwoAddress, m, *rm, *rm);
              // Insert the copy in the basic block, just before us
              b->insert_node(copy, l++);
            }
            // Insert the copy in the use-def chain
            n->set_req(idx, copy);
            // Extend ("register allocate") the names array for the copy.
            _phc._lrg_map.extend(copy->_idx, name);
            _phc._cfg.map_node_to_block(copy, b);
          }

        } // End of is two-adr

        // Insert a copy at a debug use for a lrg which has high frequency
        if (b->_freq < OPTO_DEBUG_SPLIT_FREQ || _phc._cfg.is_uncommon(b)) {
          // Walk the debug inputs to the node and check for lrg freq
          JVMState* jvms = n->jvms();
          uint debug_start = jvms ? jvms->debug_start() : 999999;
          uint debug_end   = jvms ? jvms->debug_end()   : 999999;
          for (uint inpidx = debug_start; inpidx < debug_end; inpidx++) {
            // Do not split monitors; they are only needed for debug table
            // entries and need no code.
            if (jvms->is_monitor_use(inpidx)) {
              continue;
            }
            Node *inp = n->in(inpidx);
            uint nidx = _phc._lrg_map.live_range_id(inp);
            LRG &lrg = lrgs(nidx);

            // If this lrg has a high frequency use/def
            if( lrg._maxfreq >= _phc.high_frequency_lrg() ) {
              // If the live range is also live out of this block (like it
              // would be for a fast/slow idiom), the normal spill mechanism
              // does an excellent job.  If it is not live out of this block
              // (like it would be for debug info to uncommon trap) splitting
              // the live range now allows a better allocation in the high
              // frequency blocks.
              // Build_IFG_virtual has converted the live sets to
              // live-IN info, not live-OUT info.
              uint k;
              for( k=0; k < b->_num_succs; k++ )
                if( _phc._live->live(b->_succs[k])->member( nidx ) )
                  break;          // Live in to some successor block?
              if( k < b->_num_succs )
                continue;         // Live out; do not pre-split
              // Split the lrg at this use
              uint ireg = inp->ideal_reg();
              if (ireg == 0 || ireg == Op_RegFlags) {
                assert(false, "attempted to spill a non-spillable item: %d: %s, ireg = %u, spill_type: %s",
                       inp->_idx, inp->Name(), ireg, MachSpillCopyNode::spill_type(MachSpillCopyNode::DebugUse));
                C->record_method_not_compilable("attempted to spill a non-spillable item");
                return;
              }
              const RegMask *rm = C->matcher()->idealreg2spillmask[ireg];
              Node* copy = new MachSpillCopyNode(MachSpillCopyNode::DebugUse, inp, *rm, *rm);
              // Insert the copy in the use-def chain
              n->set_req(inpidx, copy);
              // Insert the copy in the basic block, just before us
              b->insert_node(copy, l++);
              // Extend ("register allocate") the names array for the copy.
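              // Unlike the Phi and 2-address cases above, the debug-use copy
              // gets a brand-new live range, so the high-frequency value can
              // color independently of this low-frequency safepoint use.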
              uint max_lrg_id = _phc._lrg_map.max_lrg_id();
              _phc.new_lrg(copy, max_lrg_id);
              _phc._lrg_map.set_max_lrg_id(max_lrg_id + 1);
              _phc._cfg.map_node_to_block(copy, b);
              //tty->print_cr("Split a debug use in Aggressive Coalesce");
            } // End of if high frequency use/def
          } // End of for all debug inputs
        } // End of if low frequency safepoint

      } // End of if Phi

    } // End of for all instructions
  } // End of for all blocks
}


// Aggressive (but pessimistic) copy coalescing of a single block

// The following coalesce pass represents a single round of aggressive
// pessimistic coalescing.  "Aggressive" means no attempt to preserve
// colorability when coalescing.  This occasionally means more spills, but
// it also means fewer rounds of coalescing for better code - and that means
// faster compiles.

// "Pessimistic" means we do not hit the fixed point in one pass (and we are
// reaching for the least fixed point, to boot).  This is typically solved
// with a few more rounds of coalescing, but the compiler must run fast.  We
// could optimistically coalesce everything touching PhiNodes together
// into one big live range, then check for self-interference.  Everywhere
// the live range interferes with itself it would have to be split.  Finding
// the right split points can be done with some heuristics (based on
// expected frequency of edges in the live range).  In short, it's a real
// research problem and the timeline is too short to allow such research.
// Further thoughts: (1) build the LR in a pass, (2) find self-interference
// in another pass, (3) per each self-conflict, split, (4) split by finding
// the low-cost cut (min-cut) of the LR, (5) edges in the LR are weighted
// according to the GCM algorithm (or just exec freq on CFG edges).

void PhaseAggressiveCoalesce::coalesce( Block *b ) {
  // Copies are still "virtual" - meaning we have not made them explicit
  // copies.  Instead, Phi functions of successor blocks have mis-matched
  // live-ranges.  If I fail to coalesce, I'll have to insert a copy to line
  // up the live-ranges.  Check for Phis in successor blocks.
  uint i;
  for( i=0; i<b->_num_succs; i++ ) {
    Block *bs = b->_succs[i];
    // Find index of 'b' in 'bs' predecessors
    uint j=1;
    while (_phc._cfg.get_block_for_node(bs->pred(j)) != b) {
      j++;
    }

    // Visit all the Phis in successor block
    for( uint k = 1; k<bs->number_of_nodes(); k++ ) {
      Node *n = bs->get_node(k);
      if( !n->is_Phi() ) break;
      combine_these_two( n, n->in(j) );
    }
  } // End of for all successor blocks


  // Check _this_ block for 2-address instructions and copies.
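  // A 2-address instruction carries a virtual copy tying its input to its
  // output; merging the two live ranges here means insert_copies() will
  // find matching names later and need not insert a real copy.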
  uint cnt = b->end_idx();
  for( i = 1; i<cnt; i++ ) {
    Node *n = b->get_node(i);
    uint idx;
    // 2-address instructions have a virtual Copy matching their input
    // to their output
    if (n->is_Mach() && (idx = n->as_Mach()->two_adr())) {
      MachNode *mach = n->as_Mach();
      combine_these_two(mach, mach->in(idx));
    }
  } // End of for all instructions in block
}

PhaseConservativeCoalesce::PhaseConservativeCoalesce(PhaseChaitin &chaitin) : PhaseCoalesce(chaitin) {
  _ulr.initialize(_phc._lrg_map.max_lrg_id());
}

void PhaseConservativeCoalesce::verify() {
#ifdef ASSERT
  _phc.set_was_low();
#endif
}

void PhaseConservativeCoalesce::union_helper( Node *lr1_node, Node *lr2_node, uint lr1, uint lr2, Node *src_def, Node *dst_copy, Node *src_copy, Block *b, uint bindex ) {
  // Join live ranges.  Merge larger into smaller.  Union lr2 into lr1 in the
  // union-find tree.
  _phc.Union( lr1_node, lr2_node );

  // Single-def live range ONLY if both live ranges are single-def.
  // If both are single-def, then src_def powers one live range
  // and dst_copy powers the other.  After merging, src_def powers
  // the combined live range.
  lrgs(lr1)._def = (lrgs(lr1).is_multidef() ||
                    lrgs(lr2).is_multidef() )
    ? NodeSentinel : src_def;
  lrgs(lr2)._def = NULL;    // No def for lrg 2
  lrgs(lr2).Clear();        // Force empty mask for LRG 2
  //lrgs(lr2)._size = 0;    // Live-range 2 goes dead
  lrgs(lr1)._is_oop |= lrgs(lr2)._is_oop;
  lrgs(lr2)._is_oop = 0;    // In particular, not an oop for GC info

  if (lrgs(lr1)._maxfreq < lrgs(lr2)._maxfreq)
    lrgs(lr1)._maxfreq = lrgs(lr2)._maxfreq;

  // Copy original value instead.  Intermediate copies go dead, and
  // the dst_copy becomes useless.
  int didx = dst_copy->is_Copy();
  dst_copy->set_req( didx, src_def );
  // Add copy to free list
  // _phc.free_spillcopy(b->_nodes[bindex]);
  assert( b->get_node(bindex) == dst_copy, "" );
  dst_copy->replace_by( dst_copy->in(didx) );
  dst_copy->set_req( didx, NULL);
  b->remove_node(bindex);
  if( bindex < b->_ihrp_index ) b->_ihrp_index--;
  if( bindex < b->_fhrp_index ) b->_fhrp_index--;

  // Stretched lr1; add it to liveness of intermediate blocks
  Block *b2 = _phc._cfg.get_block_for_node(src_copy);
  while( b != b2 ) {
    b = _phc._cfg.get_block_for_node(b->pred(1));
    _phc._live->live(b)->insert(lr1);
  }
}

// Factored code from copy_copy that computes extra interferences from
// lengthening a live range by double-coalescing.
uint PhaseConservativeCoalesce::compute_separating_interferences(Node *dst_copy, Node *src_copy, Block *b, uint bindex, RegMask &rm, uint reg_degree, uint rm_size, uint lr1, uint lr2 ) {

  assert(!lrgs(lr1)._fat_proj, "cannot coalesce fat_proj");
  assert(!lrgs(lr2)._fat_proj, "cannot coalesce fat_proj");
  Node *prev_copy = dst_copy->in(dst_copy->is_Copy());
  Block *b2 = b;
  uint bindex2 = bindex;
  while( 1 ) {
    // Find previous instruction
    bindex2--;                    // Chain backwards 1 instruction
    while( bindex2 == 0 ) {       // At block start, find prior block
      assert( b2->num_preds() == 2, "cannot double coalesce across c-flow" );
      b2 = _phc._cfg.get_block_for_node(b2->pred(1));
      bindex2 = b2->end_idx()-1;
    }
    // Get prior instruction
    assert(bindex2 < b2->number_of_nodes(), "index out of bounds");
    Node *x = b2->get_node(bindex2);
    if( x == prev_copy ) {        // Previous copy in copy chain?
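      // x is just the next copy in the chain being coalesced away, so it
      // adds no new interference; finish at src_copy or step back one link.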
      if( prev_copy == src_copy )  // Found end of chain and all interferences
        break;                     // So break out of loop
      // Else work back one in copy chain
      prev_copy = prev_copy->in(prev_copy->is_Copy());
    } else {                       // Else collect interferences
      uint lidx = _phc._lrg_map.find(x);
      // Found another def of live-range being stretched?
      if(lidx == lr1) {
        return max_juint;
      }
      if(lidx == lr2) {
        return max_juint;
      }

      // If we attempt to coalesce across a bound def
      if( lrgs(lidx).is_bound() ) {
        // Do not let the coalesced LRG expect to get the bound color
        rm.SUBTRACT( lrgs(lidx).mask() );
        // Recompute rm_size
        rm_size = rm.Size();
        //if( rm._flags ) rm_size += 1000000;
        if( reg_degree >= rm_size ) return max_juint;
      }
      if( rm.overlap(lrgs(lidx).mask()) ) {
        // Insert lidx into union LRG; returns TRUE if actually inserted
        if( _ulr.insert(lidx) ) {
          // Infinite-stack neighbors do not alter colorability, as they
          // can always color to some other color.
          if( !lrgs(lidx).mask().is_AllStack() ) {
            // If this coalesce will make any new neighbor uncolorable,
            // do not coalesce.
            if( lrgs(lidx).just_lo_degree() )
              return max_juint;
            // Bump our degree
            if( ++reg_degree >= rm_size )
              return max_juint;
          } // End of if not infinite-stack neighbor
        } // End of if actually inserted
      } // End of if live range overlaps
    } // End of else collect interferences for 1 node
  } // End of while forever, scan back for interferences
  return reg_degree;
}

void PhaseConservativeCoalesce::update_ifg(uint lr1, uint lr2, IndexSet *n_lr1, IndexSet *n_lr2) {
  // Some original neighbors of lr1 might have gone away
  // because the constrained register mask prevented them.
  // Remove lr1 from such neighbors.
  IndexSetIterator one(n_lr1);
  uint neighbor;
  LRG &lrg1 = lrgs(lr1);
  while ((neighbor = one.next()) != 0)
    if( !_ulr.member(neighbor) )
      if( _phc._ifg->neighbors(neighbor)->remove(lr1) )
        lrgs(neighbor).inc_degree( -lrg1.compute_degree(lrgs(neighbor)) );


  // lr2 is now called (coalesced into) lr1.
  // Remove lr2 from the IFG.
  IndexSetIterator two(n_lr2);
  LRG &lrg2 = lrgs(lr2);
  while ((neighbor = two.next()) != 0)
    if( _phc._ifg->neighbors(neighbor)->remove(lr2) )
      lrgs(neighbor).inc_degree( -lrg2.compute_degree(lrgs(neighbor)) );

  // Some neighbors of intermediate copies now interfere with the
  // combined live range.
  IndexSetIterator three(&_ulr);
  while ((neighbor = three.next()) != 0)
    if( _phc._ifg->neighbors(neighbor)->insert(lr1) )
      lrgs(neighbor).inc_degree( lrg1.compute_degree(lrgs(neighbor)) );
}

static void record_bias( const PhaseIFG *ifg, int lr1, int lr2 ) {
  // Tag copy bias here
  if( !ifg->lrgs(lr1)._copy_bias )
    ifg->lrgs(lr1)._copy_bias = lr2;
  if( !ifg->lrgs(lr2)._copy_bias )
    ifg->lrgs(lr2)._copy_bias = lr1;
}

// See if I can coalesce a series of multiple copies together.  I need the
// final dest copy and the original src copy.  They can be the same Node.
// Compute the compatible register masks.
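// When dst_copy and src_copy differ (double-coalescing), the intermediate
// copies in the chain go dead once dst_copy reads directly from the
// original def; see union_helper above.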
bool PhaseConservativeCoalesce::copy_copy(Node *dst_copy, Node *src_copy, Block *b, uint bindex) {

  if (!dst_copy->is_SpillCopy()) {
    return false;
  }
  if (!src_copy->is_SpillCopy()) {
    return false;
  }
  Node *src_def = src_copy->in(src_copy->is_Copy());
  uint lr1 = _phc._lrg_map.find(dst_copy);
  uint lr2 = _phc._lrg_map.find(src_def);

  // Same live ranges already?
  if (lr1 == lr2) {
    return false;
  }

  // Interfere?
  if (_phc._ifg->test_edge_sq(lr1, lr2)) {
    return false;
  }

  // Not an oop->int cast; oop->oop, int->int, AND int->oop are OK.
  if (!lrgs(lr1)._is_oop && lrgs(lr2)._is_oop) { // not an oop->int cast
    return false;
  }

  // Coalescing between an aligned live range and a mis-aligned live range?
  // No, no!  Alignment changes how we count degree.
  if (lrgs(lr1)._fat_proj != lrgs(lr2)._fat_proj) {
    return false;
  }

  // Sort; use smaller live-range number
  Node *lr1_node = dst_copy;
  Node *lr2_node = src_def;
  if (lr1 > lr2) {
    uint tmp = lr1; lr1 = lr2; lr2 = tmp;
    lr1_node = src_def; lr2_node = dst_copy;
  }

  // Check for compatibility of the 2 live ranges by
  // intersecting their allowed register sets.
  RegMask rm = lrgs(lr1).mask();
  rm.AND(lrgs(lr2).mask());
  // Number of bits free
  uint rm_size = rm.Size();

  if (UseFPUForSpilling && rm.is_AllStack() ) {
    // Don't coalesce when frequency difference is large
    Block *dst_b = _phc._cfg.get_block_for_node(dst_copy);
    Block *src_def_b = _phc._cfg.get_block_for_node(src_def);
    if (src_def_b->_freq > 10*dst_b->_freq )
      return false;
  }

  // If we can use any stack slot, then effective size is infinite
  if( rm.is_AllStack() ) rm_size += 1000000;
  // Incompatible masks, no way to coalesce
  if( rm_size == 0 ) return false;

  // Another early bail-out test is when we are double-coalescing and the
  // 2 copies are separated by some control flow.
  if( dst_copy != src_copy ) {
    Block *src_b = _phc._cfg.get_block_for_node(src_copy);
    Block *b2 = b;
    while( b2 != src_b ) {
      if( b2->num_preds() > 2 ) { // Found merge-point
        _phc._lost_opp_cflow_coalesce++;
        // extra record_bias commented out because Chris believes it is not
        // productive.  Since we can record only 1 bias, we want to choose one
        // that stands a chance of working and this one probably does not.
        //record_bias( _phc._lrgs, lr1, lr2 );
        return false;           // Too hard to find all interferences
      }
      b2 = _phc._cfg.get_block_for_node(b2->pred(1));
    }
  }

  // Union the two interference sets together into '_ulr'
  uint reg_degree = _ulr.lrg_union( lr1, lr2, rm_size, _phc._ifg, rm );

  if( reg_degree >= rm_size ) {
    record_bias( _phc._ifg, lr1, lr2 );
    return false;
  }

  // Now I need to compute all the interferences between dst_copy and
  // src_copy.  I'm not willing to visit the entire interference graph, so
  // I limit my search to things in dst_copy's block or in a straight
  // line of previous blocks.  I give up at merge points or when I get
  // more interferences than my degree.  I can stop when I find src_copy.
  if( dst_copy != src_copy ) {
    reg_degree = compute_separating_interferences(dst_copy, src_copy, b, bindex, rm, reg_degree, rm_size, lr1, lr2 );
    if( reg_degree == max_juint ) {
      record_bias( _phc._ifg, lr1, lr2 );
      return false;
    }
  } // End of if dst_copy & src_copy are different


  // ---- THE COMBINED LRG IS COLORABLE ----

  // YEAH - Now coalesce this copy away
  assert( lrgs(lr1).num_regs() == lrgs(lr2).num_regs(), "" );

  IndexSet *n_lr1 = _phc._ifg->neighbors(lr1);
  IndexSet *n_lr2 = _phc._ifg->neighbors(lr2);

  // Update the interference graph
  update_ifg(lr1, lr2, n_lr1, n_lr2);

  _ulr.remove(lr1);

  // Uncomment the following code to trace Coalescing in great detail.
  //
  //if (false) {
  //  tty->cr();
  //  tty->print_cr("#######################################");
  //  tty->print_cr("union %d and %d", lr1, lr2);
  //  n_lr1->dump();
  //  n_lr2->dump();
  //  tty->print_cr("resulting set is");
  //  _ulr.dump();
  //}

  // Replace n_lr1 with the new combined live range.  _ulr will use
  // n_lr1's old memory on the next iteration.  n_lr2 is cleared to
  // send its internal memory to the free list.
  _ulr.swap(n_lr1);
  _ulr.clear();
  n_lr2->clear();

  lrgs(lr1).set_degree( _phc._ifg->effective_degree(lr1) );
  lrgs(lr2).set_degree( 0 );

  // Join live ranges.  Merge larger into smaller.  Union lr2 into lr1 in the
  // union-find tree.
  union_helper( lr1_node, lr2_node, lr1, lr2, src_def, dst_copy, src_copy, b, bindex );
  // Combine register restrictions
  lrgs(lr1).set_mask(rm);
  lrgs(lr1).compute_set_mask_size();
  lrgs(lr1)._cost += lrgs(lr2)._cost;
  lrgs(lr1)._area += lrgs(lr2)._area;

  // While it's uncommon to successfully coalesce live ranges that started out
  // being not-lo-degree, it can happen.  In any case the combined coalesced
  // live range had better Simplify nicely.
  lrgs(lr1)._was_lo = 1;

  // kinda expensive to do all the time
  //tty->print_cr("warning: slow verify happening");
  //_phc._ifg->verify( &_phc );
  return true;
}

// Conservative (but pessimistic) copy coalescing of a single block
void PhaseConservativeCoalesce::coalesce( Block *b ) {
  // Bail out on infrequent blocks
  if (_phc._cfg.is_uncommon(b)) {
    return;
  }
  // Check this block for copies.
  for( uint i = 1; i<b->end_idx(); i++ ) {
    // Check for actual copies on inputs.  Coalesce a copy into its
    // input if the use and the copy's input are compatible.
    Node *copy1 = b->get_node(i);
    uint idx1 = copy1->is_Copy();
    if( !idx1 ) continue;         // Not a copy

    if( copy_copy(copy1, copy1, b, i) ) {
      i--;                        // Retry, same location in block
      PhaseChaitin::_conserv_coalesce++; // Collect stats on success
      continue;
    }
  }
}