lcm.cpp revision 5945:d2907f74462e
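Local code motion for C2 basic blocks: implicit null-check hoisting, local (within-block) instruction scheduling, and cleanup of instructions inserted between a call and its Catch.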
/*
 * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "memory/allocation.inline.hpp"
#include "opto/block.hpp"
#include "opto/c2compiler.hpp"
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/machnode.hpp"
#include "opto/runtime.hpp"
#ifdef TARGET_ARCH_MODEL_x86_32
# include "adfiles/ad_x86_32.hpp"
#endif
#ifdef TARGET_ARCH_MODEL_x86_64
# include "adfiles/ad_x86_64.hpp"
#endif
#ifdef TARGET_ARCH_MODEL_sparc
# include "adfiles/ad_sparc.hpp"
#endif
#ifdef TARGET_ARCH_MODEL_zero
# include "adfiles/ad_zero.hpp"
#endif
#ifdef TARGET_ARCH_MODEL_arm
# include "adfiles/ad_arm.hpp"
#endif
#ifdef TARGET_ARCH_MODEL_ppc_32
# include "adfiles/ad_ppc_32.hpp"
#endif
#ifdef TARGET_ARCH_MODEL_ppc_64
# include "adfiles/ad_ppc_64.hpp"
#endif

// Optimization - Graph Style

//------------------------------implicit_null_check----------------------------
// Detect implicit-null-check opportunities. Basically, find NULL checks
// with suitable memory ops nearby. Use the memory op to do the NULL check.
// I can generate a memory op if there is not one nearby.
// The proj is the control projection for the not-null case.
// The val is the pointer being checked for nullness or
// decodeHeapOop_not_null node if it did not fold into address.
void Block::implicit_null_check(PhaseCFG *cfg, Node *proj, Node *val, int allowed_reasons) {
  // Assume if null check need for 0 offset then always needed
  // Intel solaris doesn't support any null checks yet and no
  // mechanism exists (yet) to set the switches at an os_cpu level
  if( !ImplicitNullChecks || MacroAssembler::needs_explicit_null_check(0)) return;

  // Make sure the ptr-is-null path appears to be uncommon!
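  // (Taking the implicit check means faulting and running the signal handler,
  // so this only pays off when the null path is almost never taken.)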
  float f = end()->as_MachIf()->_prob;
  if( proj->Opcode() == Op_IfTrue ) f = 1.0f - f;
  if( f > PROB_UNLIKELY_MAG(4) ) return;

  uint bidx = 0;      // Capture index of value into memop
  bool was_store;     // Memory op is a store op

  // Get the successor block for if the test ptr is non-null
  Block* not_null_block;  // this one goes with the proj
  Block* null_block;
  if (_nodes[_nodes.size()-1] == proj) {
    null_block     = _succs[0];
    not_null_block = _succs[1];
  } else {
    assert(_nodes[_nodes.size()-2] == proj, "proj is one or the other");
    not_null_block = _succs[0];
    null_block     = _succs[1];
  }
  while (null_block->is_Empty() == Block::empty_with_goto) {
    null_block = null_block->_succs[0];
  }

  // Search the exception block for an uncommon trap.
  // (See Parse::do_if and Parse::do_ifnull for the reason
  // we need an uncommon trap. Briefly, we need a way to
  // detect failure of this optimization, as in 6366351.)
  {
    bool found_trap = false;
    for (uint i1 = 0; i1 < null_block->_nodes.size(); i1++) {
      Node* nn = null_block->_nodes[i1];
      if (nn->is_MachCall() &&
          nn->as_MachCall()->entry_point() == SharedRuntime::uncommon_trap_blob()->entry_point()) {
        const Type* trtype = nn->in(TypeFunc::Parms)->bottom_type();
        if (trtype->isa_int() && trtype->is_int()->is_con()) {
          jint tr_con = trtype->is_int()->get_con();
          Deoptimization::DeoptReason reason = Deoptimization::trap_request_reason(tr_con);
          Deoptimization::DeoptAction action = Deoptimization::trap_request_action(tr_con);
          assert((int)reason < (int)BitsPerInt, "recode bit map");
          if (is_set_nth_bit(allowed_reasons, (int) reason)
              && action != Deoptimization::Action_none) {
            // This uncommon trap is sure to recompile, eventually.
            // When that happens, C->too_many_traps will prevent
            // this transformation from happening again.
            found_trap = true;
          }
        }
        break;
      }
    }
    if (!found_trap) {
      // We did not find an uncommon trap.
      return;
    }
  }

  // Check for decodeHeapOop_not_null node which did not fold into address
  bool is_decoden = ((intptr_t)val) & 1;
  val = (Node*)(((intptr_t)val) & ~1);

  assert(!is_decoden || (val->in(0) == NULL) && val->is_Mach() &&
         (val->as_Mach()->ideal_Opcode() == Op_DecodeN), "sanity");

  // Search the successor block for a load or store whose base value is also
  // the tested value. There may be several.
  Node_List *out = new Node_List(Thread::current()->resource_area());
  MachNode *best = NULL;    // Best found so far
  for (DUIterator i = val->outs(); val->has_out(i); i++) {
    Node *m = val->out(i);
    if( !m->is_Mach() ) continue;
    MachNode *mach = m->as_Mach();
    was_store = false;
    int iop = mach->ideal_Opcode();
    switch( iop ) {
    case Op_LoadB:
    case Op_LoadUB:
    case Op_LoadUS:
    case Op_LoadD:
    case Op_LoadF:
    case Op_LoadI:
    case Op_LoadL:
    case Op_LoadP:
    case Op_LoadN:
    case Op_LoadS:
    case Op_LoadKlass:
    case Op_LoadNKlass:
    case Op_LoadRange:
    case Op_LoadD_unaligned:
    case Op_LoadL_unaligned:
      assert(mach->in(2) == val, "should be address");
      break;
    case Op_StoreB:
    case Op_StoreC:
    case Op_StoreCM:
    case Op_StoreD:
    case Op_StoreF:
    case Op_StoreI:
    case Op_StoreL:
    case Op_StoreP:
    case Op_StoreN:
    case Op_StoreNKlass:
      was_store = true;     // Memory op is a store op
      // Stores will have their address in slot 2 (memory in slot 1).
      // If the value being null-checked is in another slot, it means we
      // are storing the checked value, which does NOT check the value!
      if( mach->in(2) != val ) continue;
      break;                // Found a memory op?
    case Op_StrComp:
    case Op_StrEquals:
    case Op_StrIndexOf:
    case Op_AryEq:
    case Op_EncodeISOArray:
      // Not a legit memory op for implicit null check regardless of
      // embedded loads
      continue;
    default:                // Also check for embedded loads
      if( !mach->needs_anti_dependence_check() )
        continue;           // Not a memory op; skip it
      if( must_clone[iop] ) {
        // Do not move nodes which produce flags because
        // RA will try to clone it to place near branch and
        // it will cause recompilation, see clone_node().
        continue;
      }
      {
        // Check that value is used in memory address in
        // instructions with embedded load (CmpP val1,(val2+off)).
        Node* base;
        Node* index;
        const MachOper* oper = mach->memory_inputs(base, index);
        if (oper == NULL || oper == (MachOper*)-1) {
          continue;         // Not a memory op; skip it
        }
        if (val == base ||
            val == index && val->bottom_type()->isa_narrowoop()) {
          break;            // Found it
        } else {
          continue;         // Skip it
        }
      }
      break;
    }
    // check if the offset is not too high for implicit exception
    {
      intptr_t offset = 0;
      const TypePtr *adr_type = NULL;  // Do not need this return value here
      const Node* base = mach->get_base_and_disp(offset, adr_type);
      if (base == NULL || base == NodeSentinel) {
        // Narrow oop address doesn't have base, only index
        if( val->bottom_type()->isa_narrowoop() &&
            MacroAssembler::needs_explicit_null_check(offset) )
          continue;         // Give up if offset is beyond page size
        // cannot reason about it; is probably not implicit null exception
      } else {
        const TypePtr* tptr;
        if (UseCompressedOops && (Universe::narrow_oop_shift() == 0 ||
                                  Universe::narrow_klass_shift() == 0)) {
          // 32-bits narrow oop can be the base of address expressions
          tptr = base->get_ptr_type();
        } else {
          // only regular oops are expected here
          tptr = base->bottom_type()->is_ptr();
        }
        // Give up if offset is not a compile-time constant
        if( offset == Type::OffsetBot || tptr->_offset == Type::OffsetBot )
          continue;
        offset += tptr->_offset; // correct if base is offseted
        if( MacroAssembler::needs_explicit_null_check(offset) )
          continue;         // Give up if reference is beyond 4K page size
      }
    }

    // Check ctrl input to see if the null-check dominates the memory op
    Block *cb = cfg->_bbs[mach->_idx];
    cb = cb->_idom;         // Always hoist at least 1 block
    if( !was_store ) {      // Stores can be hoisted only one block
      while( cb->_dom_depth > (_dom_depth + 1))
        cb = cb->_idom;     // Hoist loads as far as we want
      // The non-null-block should dominate the memory op, too. Live
      // range spilling will insert a spill in the non-null-block if it
      // needs to spill the memory op for an implicit null check.
      if (cb->_dom_depth == (_dom_depth + 1)) {
        if (cb != not_null_block) continue;
        cb = cb->_idom;
      }
    }
    if( cb != this ) continue;

    // Found a memory user; see if it can be hoisted to check-block
    uint vidx = 0;          // Capture index of value into memop
    uint j;
    for( j = mach->req()-1; j > 0; j-- ) {
      if( mach->in(j) == val ) {
        vidx = j;
        // Ignore DecodeN val which could be hoisted to where needed.
        if( is_decoden ) continue;
      }
      // Block of memory-op input
      Block *inb = cfg->_bbs[mach->in(j)->_idx];
      Block *b = this;      // Start from null check
      while( b != inb && b->_dom_depth > inb->_dom_depth )
        b = b->_idom;       // search upwards for input
      // See if input dominates null check
      if( b != inb )
        break;
    }
    if( j > 0 )
      continue;
    Block *mb = cfg->_bbs[mach->_idx];
    // Hoisting stores requires more checks for the anti-dependence case.
    // Give up hoisting if we have to move the store past any load.
    if( was_store ) {
      Block *b = mb;        // Start searching here for a local load
                            // mach use (faulting) trying to hoist
                            // n might be blocker to hoisting
      while( b != this ) {
        uint k;
        for( k = 1; k < b->_nodes.size(); k++ ) {
          Node *n = b->_nodes[k];
          if( n->needs_anti_dependence_check() &&
              n->in(LoadNode::Memory) == mach->in(StoreNode::Memory) )
            break;          // Found anti-dependent load
        }
        if( k < b->_nodes.size() )
          break;            // Found anti-dependent load
        // Make sure control does not do a merge (would have to check allpaths)
        if( b->num_preds() != 2 ) break;
        b = cfg->_bbs[b->pred(1)->_idx]; // Move up to predecessor block
      }
      if( b != this ) continue;
    }

    // Make sure this memory op is not already being used for a NullCheck
    Node *e = mb->end();
    if( e->is_MachNullCheck() && e->in(1) == mach )
      continue;             // Already being used as a NULL check

    // Found a candidate! Pick one with least dom depth - the highest
    // in the dom tree should be closest to the null check.
    if( !best ||
        cfg->_bbs[mach->_idx]->_dom_depth < cfg->_bbs[best->_idx]->_dom_depth ) {
      best = mach;
      bidx = vidx;
    }
  }
  // No candidate!
  if( !best ) return;

  // ---- Found an implicit null check
  extern int implicit_null_checks;
  implicit_null_checks++;

  if( is_decoden ) {
    // Check if we need to hoist decodeHeapOop_not_null first.
    Block *valb = cfg->_bbs[val->_idx];
    if( this != valb && this->_dom_depth < valb->_dom_depth ) {
      // Hoist it up to the end of the test block.
      valb->find_remove(val);
      this->add_inst(val);
      cfg->_bbs.map(val->_idx,this);
      // DecodeN on x86 may kill flags. Check for flag-killing projections
      // that also need to be hoisted.
      for (DUIterator_Fast jmax, j = val->fast_outs(jmax); j < jmax; j++) {
        Node* n = val->fast_out(j);
        if( n->is_MachProj() ) {
          cfg->_bbs[n->_idx]->find_remove(n);
          this->add_inst(n);
          cfg->_bbs.map(n->_idx,this);
        }
      }
    }
  }
  // Hoist the memory candidate up to the end of the test block.
  Block *old_block = cfg->_bbs[best->_idx];
  old_block->find_remove(best);
  add_inst(best);
  cfg->_bbs.map(best->_idx,this);

  // Move the control dependence
  if (best->in(0) && best->in(0) == old_block->_nodes[0])
    best->set_req(0, _nodes[0]);

  // Check for flag-killing projections that also need to be hoisted
  // Should be DU safe because no edge updates.
  for (DUIterator_Fast jmax, j = best->fast_outs(jmax); j < jmax; j++) {
    Node* n = best->fast_out(j);
    if( n->is_MachProj() ) {
      cfg->_bbs[n->_idx]->find_remove(n);
      add_inst(n);
      cfg->_bbs.map(n->_idx,this);
    }
  }

  Compile *C = cfg->C;
  // proj==Op_True --> ne test; proj==Op_False --> eq test.
  // One of two graph shapes got matched:
  //   (IfTrue  (If (Bool NE (CmpP ptr NULL))))
  //   (IfFalse (If (Bool EQ (CmpP ptr NULL))))
  // NULL checks are always branch-if-eq. If we see an IfTrue projection
  // then we are replacing a 'ne' test with an 'eq' NULL check test.
  // We need to flip the projections to keep the same semantics.
  if( proj->Opcode() == Op_IfTrue ) {
    // Swap order of projections in basic block to swap branch targets
    Node *tmp1 = _nodes[end_idx()+1];
    Node *tmp2 = _nodes[end_idx()+2];
    _nodes.map(end_idx()+1, tmp2);
    _nodes.map(end_idx()+2, tmp1);
    Node *tmp = new (C) Node(C->top()); // Use not NULL input
    tmp1->replace_by(tmp);
    tmp2->replace_by(tmp1);
    tmp->replace_by(tmp2);
    tmp->destruct();
  }

  // Remove the existing null check; use a new implicit null check instead.
  // Since schedule-local needs precise def-use info, we need to correct
  // it as well.
  Node *old_tst = proj->in(0);
  MachNode *nul_chk = new (C) MachNullCheckNode(old_tst->in(0),best,bidx);
  _nodes.map(end_idx(),nul_chk);
  cfg->_bbs.map(nul_chk->_idx,this);
  // Redirect users of old_test to nul_chk
  for (DUIterator_Last i2min, i2 = old_tst->last_outs(i2min); i2 >= i2min; --i2)
    old_tst->last_out(i2)->set_req(0, nul_chk);
  // Clean-up any dead code
  for (uint i3 = 0; i3 < old_tst->req(); i3++)
    old_tst->set_req(i3, NULL);

  cfg->latency_from_uses(nul_chk);
  cfg->latency_from_uses(best);
}


//------------------------------select-----------------------------------------
// Select a nice fellow from the worklist to schedule next. If there is only
// one choice, then use it. Projections take top priority for correctness
// reasons - if I see a projection, then it is next. There are a number of
// other special cases, for instructions that consume condition codes, et al.
// These are chosen immediately. Some instructions are required to immediately
// precede the last instruction in the block, and these are taken last. Of the
// remaining cases (most), choose the instruction with the greatest latency
// (that is, the greatest number of pseudo-cycles required to the end of the
// routine). If there is a tie, choose the instruction with the most inputs.
Node *Block::select(PhaseCFG *cfg, Node_List &worklist, GrowableArray<int> &ready_cnt, VectorSet &next_call, uint sched_slot) {

  // If only a single entry on the stack, use it
  uint cnt = worklist.size();
  if (cnt == 1) {
    Node *n = worklist[0];
    worklist.map(0,worklist.pop());
    return n;
  }

  uint choice  = 0; // Bigger is most important
  uint latency = 0; // Bigger is scheduled first
  uint score   = 0; // Bigger is better
  int idx = -1;     // Index in worklist
  int cand_cnt = 0; // Candidate count

  for( uint i=0; i<cnt; i++ ) { // Inspect entire worklist
    // Order in worklist is used to break ties.
    // See caller for how this is used to delay scheduling
    // of induction variable increments to after the other
    // uses of the phi are scheduled.
    Node *n = worklist[i];      // Get Node on worklist

    int iop = n->is_Mach() ?
      n->as_Mach()->ideal_Opcode() : 0;
    if( n->is_Proj() ||         // Projections always win
        n->Opcode()== Op_Con || // So does constant 'Top'
        iop == Op_CreateEx ||   // Create-exception must start block
        iop == Op_CheckCastPP
        ) {
      worklist.map(i,worklist.pop());
      return n;
    }

    // Final call in a block must be adjacent to 'catch'
    Node *e = end();
    if( e->is_Catch() && e->in(0)->in(0) == n )
      continue;

    // Memory op for an implicit null check has to be at the end of the block
    if( e->is_MachNullCheck() && e->in(1) == n )
      continue;

    // Schedule IV increment last.
    if (e->is_Mach() && e->as_Mach()->ideal_Opcode() == Op_CountedLoopEnd &&
        e->in(1)->in(1) == n && n->is_iteratively_computed())
      continue;

    uint n_choice = 2;

    // See if this instruction is consumed by a branch. If so, then (as the
    // branch is the last instruction in the basic block) force it to the
    // end of the basic block
    if ( must_clone[iop] ) {
      // See if any use is a branch
      bool found_machif = false;

      for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
        Node* use = n->fast_out(j);

        // The use is a conditional branch, make them adjacent
        if (use->is_MachIf() && cfg->_bbs[use->_idx]==this ) {
          found_machif = true;
          break;
        }

        // More than this instruction pending for successor to be ready,
        // don't choose this if other opportunities are ready
        if (ready_cnt.at(use->_idx) > 1)
          n_choice = 1;
      }

      // loop terminated, prefer not to use this instruction
      if (found_machif)
        continue;
    }

    // See if this has a predecessor that is "must_clone", i.e. sets the
    // condition code. If so, choose this first
    for (uint j = 0; j < n->req() ; j++) {
      Node *inn = n->in(j);
      if (inn) {
        if (inn->is_Mach() && must_clone[inn->as_Mach()->ideal_Opcode()] ) {
          n_choice = 3;
          break;
        }
      }
    }

    // MachTemps should be scheduled last so they are near their uses
    if (n->is_MachTemp()) {
      n_choice = 1;
    }

    uint n_latency = cfg->_node_latency->at_grow(n->_idx);
    uint n_score   = n->req();   // Many inputs get high score to break ties

    // Keep best latency found
    cand_cnt++;
    if (choice < n_choice ||
        (choice == n_choice &&
         ((StressLCM && Compile::randomized_select(cand_cnt)) ||
          (!StressLCM &&
           (latency < n_latency ||
            (latency == n_latency &&
             (score < n_score))))))) {
      choice  = n_choice;
      latency = n_latency;
      score   = n_score;
      idx     = i;              // Also keep index in worklist
    }
  } // End of for all ready nodes in worklist

  assert(idx >= 0, "index should be set");
  Node *n = worklist[(uint)idx];      // Get the winner

  worklist.map((uint)idx, worklist.pop());     // Compress worklist
  return n;
}


//------------------------------set_next_call----------------------------------
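// Mark 'n' and, transitively, every input of 'n' that lives in this block;
// these are the nodes that have to be scheduled before the upcoming call.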
void Block::set_next_call( Node *n, VectorSet &next_call, Block_Array &bbs ) {
  if( next_call.test_set(n->_idx) ) return;
  for( uint i=0; i<n->len(); i++ ) {
    Node *m = n->in(i);
    if( !m ) continue;  // must see all nodes in block that precede call
    if( bbs[m->_idx] == this )
      set_next_call( m, next_call, bbs );
  }
}

//------------------------------needed_for_next_call---------------------------
// Set the flag 'next_call' for each Node that is needed for the next call to
// be scheduled. This flag lets me bias scheduling so Nodes needed for the
// next subroutine call get priority - basically it moves things NOT needed
// for the next call till after the call. This prevents me from trying to
// carry lots of stuff live across a call.
void Block::needed_for_next_call(Node *this_call, VectorSet &next_call, Block_Array &bbs) {
  // Find the next control-defining Node in this block
  Node* call = NULL;
  for (DUIterator_Fast imax, i = this_call->fast_outs(imax); i < imax; i++) {
    Node* m = this_call->fast_out(i);
    if( bbs[m->_idx] == this && // Local-block user
        m != this_call &&       // Not self-start node
        m->is_MachCall() )
      call = m;
      break;
  }
  if (call == NULL) return; // No next call (e.g., block end is near)
  // Set next-call for all inputs to this call
  set_next_call(call, next_call, bbs);
}

//------------------------------add_call_kills-------------------------------------
void Block::add_call_kills(MachProjNode *proj, RegMask& regs, const char* save_policy, bool exclude_soe) {
  // Fill in the kill mask for the call
  for( OptoReg::Name r = OptoReg::Name(0); r < _last_Mach_Reg; r=OptoReg::add(r,1) ) {
    if( !regs.Member(r) ) { // Not already defined by the call
      // Save-on-call register?
      if ((save_policy[r] == 'C') ||
          (save_policy[r] == 'A') ||
          ((save_policy[r] == 'E') && exclude_soe)) {
        proj->_rout.Insert(r);
      }
    }
  }
}


//------------------------------sched_call-------------------------------------
uint Block::sched_call( Matcher &matcher, Block_Array &bbs, uint node_cnt, Node_List &worklist, GrowableArray<int> &ready_cnt, MachCallNode *mcall, VectorSet &next_call ) {
  RegMask regs;

  // Schedule all the users of the call right now. All the users are
  // projection Nodes, so they must be scheduled next to the call.
  // Collect all the defined registers.
  for (DUIterator_Fast imax, i = mcall->fast_outs(imax); i < imax; i++) {
    Node* n = mcall->fast_out(i);
    assert( n->is_MachProj(), "" );
    int n_cnt = ready_cnt.at(n->_idx)-1;
    ready_cnt.at_put(n->_idx, n_cnt);
    assert( n_cnt == 0, "" );
    // Schedule next to call
    _nodes.map(node_cnt++, n);
    // Collect defined registers
    regs.OR(n->out_RegMask());
    // Check for scheduling the next control-definer
    if( n->bottom_type() == Type::CONTROL )
      // Warm up next pile of heuristic bits
      needed_for_next_call(n, next_call, bbs);

    // Children of projections are now all ready
    for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
      Node* m = n->fast_out(j); // Get user
      if( bbs[m->_idx] != this ) continue;
      if( m->is_Phi() ) continue;
      int m_cnt = ready_cnt.at(m->_idx)-1;
      ready_cnt.at_put(m->_idx, m_cnt);
      if( m_cnt == 0 )
        worklist.push(m);
    }

  }

  // Act as if the call defines the Frame Pointer.
  // Certainly the FP is alive and well after the call.
  regs.Insert(matcher.c_frame_pointer());

  // Set all registers killed and not already defined by the call.
  uint r_cnt = mcall->tf()->range()->cnt();
  int op = mcall->ideal_Opcode();
  MachProjNode *proj = new (matcher.C) MachProjNode( mcall, r_cnt+1, RegMask::Empty, MachProjNode::fat_proj );
  bbs.map(proj->_idx,this);
  _nodes.insert(node_cnt++, proj);

  // Select the right register save policy.
  const char * save_policy;
  switch (op) {
    case Op_CallRuntime:
    case Op_CallLeaf:
    case Op_CallLeafNoFP:
      // Calling C code so use C calling convention
      save_policy = matcher._c_reg_save_policy;
      break;

    case Op_CallStaticJava:
    case Op_CallDynamicJava:
      // Calling Java code so use Java calling convention
      save_policy = matcher._register_save_policy;
      break;

    default:
      ShouldNotReachHere();
  }

  // When using CallRuntime mark SOE registers as killed by the call
  // so values that could show up in the RegisterMap aren't live in a
  // callee saved register since the register wouldn't know where to
  // find them. CallLeaf and CallLeafNoFP are ok because they can't
  // have debug info on them. Strictly speaking this only needs to be
  // done for oops since idealreg2debugmask takes care of debug info
  // references but there is no way to handle oops differently than other
  // pointers as far as the kill mask goes.
  bool exclude_soe = op == Op_CallRuntime;

  // If the call is a MethodHandle invoke, we need to exclude the
  // register which is used to save the SP value over MH invokes from
  // the mask. Otherwise this register could be used for
  // deoptimization information.
  if (op == Op_CallStaticJava) {
    MachCallStaticJavaNode* mcallstaticjava = (MachCallStaticJavaNode*) mcall;
    if (mcallstaticjava->_method_handle_invoke)
      proj->_rout.OR(Matcher::method_handle_invoke_SP_save_mask());
  }

  add_call_kills(proj, regs, save_policy, exclude_soe);

  return node_cnt;
}


//------------------------------schedule_local---------------------------------
// Topological sort within a block. Someday become a real scheduler.
bool Block::schedule_local(PhaseCFG *cfg, Matcher &matcher, GrowableArray<int> &ready_cnt, VectorSet &next_call) {
  // Already "sorted" are the block start Node (as the first entry), and
  // the block-ending Node and any trailing control projections. We leave
  // these alone. PhiNodes and ParmNodes are made to follow the block start
  // Node. Everything else gets topo-sorted.

#ifndef PRODUCT
  if (cfg->trace_opto_pipelining()) {
    tty->print_cr("# --- schedule_local B%d, before: ---", _pre_order);
    for (uint i = 0;i < _nodes.size();i++) {
      tty->print("# ");
      _nodes[i]->fast_dump();
    }
    tty->print_cr("#");
  }
#endif

  // RootNode is already sorted
  if( _nodes.size() == 1 ) return true;

  // Move PhiNodes and ParmNodes from 1 to cnt up to the start
  uint node_cnt = end_idx();
  uint phi_cnt = 1;
  uint i;
  for( i = 1; i<node_cnt; i++ ) { // Scan for Phi
    Node *n = _nodes[i];
    if( n->is_Phi() ||          // Found a PhiNode or ParmNode
        (n->is_Proj() && n->in(0) == head()) ) {
      // Move guy at 'phi_cnt' to the end; makes a hole at phi_cnt
      _nodes.map(i,_nodes[phi_cnt]);
      _nodes.map(phi_cnt++,n);  // swap Phi/Parm up front
    } else {                    // All others
      // Count block-local inputs to 'n'
      uint cnt = n->len();      // Input count
      uint local = 0;
      for( uint j=0; j<cnt; j++ ) {
        Node *m = n->in(j);
        if( m && cfg->_bbs[m->_idx] == this && !m->is_top() )
          local++;              // One more block-local input
      }
      ready_cnt.at_put(n->_idx, local); // Count em up

#ifdef ASSERT
      if( UseConcMarkSweepGC || UseG1GC ) {
        if( n->is_Mach() && n->as_Mach()->ideal_Opcode() == Op_StoreCM ) {
          // Check the precedence edges
          for (uint prec = n->req(); prec < n->len(); prec++) {
            Node* oop_store = n->in(prec);
            if (oop_store != NULL) {
              assert(cfg->_bbs[oop_store->_idx]->_dom_depth <= this->_dom_depth, "oop_store must dominate card-mark");
            }
          }
        }
      }
#endif

      // A few node types require changing a required edge to a precedence edge
      // before allocation.
      if( n->is_Mach() && n->req() > TypeFunc::Parms &&
          (n->as_Mach()->ideal_Opcode() == Op_MemBarAcquire ||
           n->as_Mach()->ideal_Opcode() == Op_MemBarVolatile) ) {
        // MemBarAcquire could be created without Precedent edge.
        // del_req() replaces the specified edge with the last input edge
        // and then removes the last edge. If the specified edge > number of
        // edges the last edge will be moved outside of the input edges array
        // and the edge will be lost. This is why this code should be
        // executed only when Precedent (== TypeFunc::Parms) edge is present.
        Node *x = n->in(TypeFunc::Parms);
        n->del_req(TypeFunc::Parms);
        n->add_prec(x);
      }
    }
  }
  for(uint i2=i; i2<_nodes.size(); i2++ ) // Trailing guys get zapped count
    ready_cnt.at_put(_nodes[i2]->_idx, 0);

  // All the prescheduled guys do not hold back internal nodes
  uint i3;
  for(i3 = 0; i3<phi_cnt; i3++ ) {  // For all pre-scheduled
    Node *n = _nodes[i3];       // Get pre-scheduled
    for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
      Node* m = n->fast_out(j);
      if( cfg->_bbs[m->_idx] ==this ) { // Local-block user
        int m_cnt = ready_cnt.at(m->_idx)-1;
        ready_cnt.at_put(m->_idx, m_cnt);   // Fix ready count
      }
    }
  }

  Node_List delay;
  // Make a worklist
  Node_List worklist;
  for(uint i4=i3; i4<node_cnt; i4++ ) {    // Put ready guys on worklist
    Node *m = _nodes[i4];
    if( !ready_cnt.at(m->_idx) ) {   // Zero ready count?
      if (m->is_iteratively_computed()) {
        // Push induction variable increments last to allow other uses
        // of the phi to be scheduled first. The select() method breaks
        // ties in scheduling by worklist order.
        delay.push(m);
      } else if (m->is_Mach() && m->as_Mach()->ideal_Opcode() == Op_CreateEx) {
        // Force the CreateEx to the top of the list so it's processed
        // first and ends up at the start of the block.
        worklist.insert(0, m);
      } else {
        worklist.push(m);         // Then on to worklist!
      }
    }
  }
  while (delay.size()) {
    Node* d = delay.pop();
    worklist.push(d);
  }

  // Warm up the 'next_call' heuristic bits
  needed_for_next_call(_nodes[0], next_call, cfg->_bbs);

#ifndef PRODUCT
  if (cfg->trace_opto_pipelining()) {
    for (uint j=0; j<_nodes.size(); j++) {
      Node     *n = _nodes[j];
      int     idx = n->_idx;
      tty->print("#   ready cnt:%3d  ", ready_cnt.at(idx));
      tty->print("latency:%3d  ", cfg->_node_latency->at_grow(idx));
      tty->print("%4d: %s\n", idx, n->Name());
    }
  }
#endif

  uint max_idx = (uint)ready_cnt.length();
  // Pull from worklist and schedule
  while( worklist.size() ) {    // Worklist is not ready

#ifndef PRODUCT
    if (cfg->trace_opto_pipelining()) {
      tty->print("#   ready list:");
      for( uint i=0; i<worklist.size(); i++ ) { // Inspect entire worklist
        Node *n = worklist[i];      // Get Node on worklist
        tty->print(" %d", n->_idx);
      }
      tty->cr();
    }
#endif

    // Select and pop a ready guy from worklist
    Node* n = select(cfg, worklist, ready_cnt, next_call, phi_cnt);
    _nodes.map(phi_cnt++,n);    // Schedule him next

#ifndef PRODUCT
    if (cfg->trace_opto_pipelining()) {
      tty->print("#    select %d: %s", n->_idx, n->Name());
      tty->print(", latency:%d", cfg->_node_latency->at_grow(n->_idx));
      n->dump();
      if (Verbose) {
        tty->print("#   ready list:");
        for( uint i=0; i<worklist.size(); i++ ) { // Inspect entire worklist
          Node *n = worklist[i];      // Get Node on worklist
          tty->print(" %d", n->_idx);
        }
        tty->cr();
      }
    }

#endif
    if( n->is_MachCall() ) {
      MachCallNode *mcall = n->as_MachCall();
      phi_cnt = sched_call(matcher, cfg->_bbs, phi_cnt, worklist, ready_cnt, mcall, next_call);
      continue;
    }

    if (n->is_Mach() && n->as_Mach()->has_call()) {
      RegMask regs;
      regs.Insert(matcher.c_frame_pointer());
      regs.OR(n->out_RegMask());

      MachProjNode *proj = new (matcher.C) MachProjNode( n, 1, RegMask::Empty, MachProjNode::fat_proj );
      cfg->_bbs.map(proj->_idx,this);
      _nodes.insert(phi_cnt++, proj);

      add_call_kills(proj, regs, matcher._c_reg_save_policy, false);
    }

    // Children are now all ready
    for (DUIterator_Fast i5max, i5 = n->fast_outs(i5max); i5 < i5max; i5++) {
      Node* m = n->fast_out(i5); // Get user
      if( cfg->_bbs[m->_idx] != this ) continue;
      if( m->is_Phi() ) continue;
      if (m->_idx >= max_idx) { // new node, skip it
        assert(m->is_MachProj() && n->is_Mach() && n->as_Mach()->has_call(), "unexpected node types");
        continue;
      }
      int m_cnt = ready_cnt.at(m->_idx)-1;
      ready_cnt.at_put(m->_idx, m_cnt);
      if( m_cnt == 0 )
        worklist.push(m);
    }
  }

  if( phi_cnt != end_idx() ) {
    // did not schedule all. Retry, Bailout, or Die
    Compile* C = matcher.C;
    if (C->subsume_loads() == true && !C->failing()) {
      // Retry with subsume_loads == false
      // If this is the first failure, the sentinel string will "stick"
      // to the Compile object, and the C2Compiler will see it and retry.
      C->record_failure(C2Compiler::retry_no_subsuming_loads());
    }
    // assert( phi_cnt == end_idx(), "did not schedule all" );
    return false;
  }

#ifndef PRODUCT
  if (cfg->trace_opto_pipelining()) {
    tty->print_cr("#");
    tty->print_cr("# after schedule_local");
    for (uint i = 0;i < _nodes.size();i++) {
      tty->print("# ");
      _nodes[i]->fast_dump();
    }
    tty->cr();
  }
#endif


  return true;
}

//--------------------------catch_cleanup_fix_all_inputs-----------------------
static void catch_cleanup_fix_all_inputs(Node *use, Node *old_def, Node *new_def) {
  for (uint l = 0; l < use->len(); l++) {
    if (use->in(l) == old_def) {
      if (l < use->req()) {
        use->set_req(l, new_def);
      } else {
        use->rm_prec(l);
        use->add_prec(new_def);
        l--;
      }
    }
  }
}

//------------------------------catch_cleanup_find_cloned_def------------------
static Node *catch_cleanup_find_cloned_def(Block *use_blk, Node *def, Block *def_blk, Block_Array &bbs, int n_clone_idx) {
  assert( use_blk != def_blk, "Inter-block cleanup only");

  // The use is some block below the Catch. Find and return the clone of the def
  // that dominates the use. If there is no clone in a dominating block, then
  // create a phi for the def in a dominating block.

  // Find which successor block dominates this use. The successor
  // blocks must all be single-entry (from the Catch only; I will have
  // split blocks to make this so), hence they all dominate.
  while( use_blk->_dom_depth > def_blk->_dom_depth+1 )
    use_blk = use_blk->_idom;

  // Find the successor
  Node *fixup = NULL;

  uint j;
  for( j = 0; j < def_blk->_num_succs; j++ )
    if( use_blk == def_blk->_succs[j] )
      break;

  if( j == def_blk->_num_succs ) {
    // Block at same level in dom-tree is not a successor. It needs a
    // PhiNode, the PhiNode uses from the def and its uses need fixup.
    Node_Array inputs = new Node_List(Thread::current()->resource_area());
    for(uint k = 1; k < use_blk->num_preds(); k++) {
      inputs.map(k, catch_cleanup_find_cloned_def(bbs[use_blk->pred(k)->_idx], def, def_blk, bbs, n_clone_idx));
    }

    // Check to see if the use_blk already has an identical phi inserted.
    // If it exists, it will be at the first position since all uses of a
    // def are processed together.
    Node *phi = use_blk->_nodes[1];
    if( phi->is_Phi() ) {
      fixup = phi;
      for (uint k = 1; k < use_blk->num_preds(); k++) {
        if (phi->in(k) != inputs[k]) {
          // Not a match
          fixup = NULL;
          break;
        }
      }
    }

    // If an existing PhiNode was not found, make a new one.
    if (fixup == NULL) {
      Node *new_phi = PhiNode::make(use_blk->head(), def);
      use_blk->_nodes.insert(1, new_phi);
      bbs.map(new_phi->_idx, use_blk);
      for (uint k = 1; k < use_blk->num_preds(); k++) {
        new_phi->set_req(k, inputs[k]);
      }
      fixup = new_phi;
    }

  } else {
    // Found the use just below the Catch. Make it use the clone.
    fixup = use_blk->_nodes[n_clone_idx];
  }

  return fixup;
}

//--------------------------catch_cleanup_intra_block--------------------------
// Fix all input edges in use that reference "def". The use is in the same
// block as the def and both have been cloned in each successor block.
static void catch_cleanup_intra_block(Node *use, Node *def, Block *blk, int beg, int n_clone_idx) {

  // Both the use and def have been cloned. For each successor block,
  // get the clone of the use, and make its input the clone of the def
  // found in that block.

  uint use_idx = blk->find_node(use);
  uint offset_idx = use_idx - beg;
  for( uint k = 0; k < blk->_num_succs; k++ ) {
    // Get clone in each successor block
    Block *sb = blk->_succs[k];
    Node *clone = sb->_nodes[offset_idx+1];
    assert( clone->Opcode() == use->Opcode(), "" );

    // Make use-clone reference the def-clone
    catch_cleanup_fix_all_inputs(clone, def, sb->_nodes[n_clone_idx]);
  }
}

//------------------------------catch_cleanup_inter_block---------------------
// Fix all input edges in use that reference "def". The use is in a different
// block than the def.
static void catch_cleanup_inter_block(Node *use, Block *use_blk, Node *def, Block *def_blk, Block_Array &bbs, int n_clone_idx) {
  if( !use_blk ) return;        // Can happen if the use is a precedence edge

  Node *new_def = catch_cleanup_find_cloned_def(use_blk, def, def_blk, bbs, n_clone_idx);
  catch_cleanup_fix_all_inputs(use, def, new_def);
}

//------------------------------call_catch_cleanup-----------------------------
// If we inserted any instructions between a Call and his CatchNode,
// clone the instructions on all paths below the Catch.
void Block::call_catch_cleanup(Block_Array &bbs, Compile* C) {

  // End of region to clone
  uint end = end_idx();
  if( !_nodes[end]->is_Catch() ) return;
  // Start of region to clone
  uint beg = end;
  while(!_nodes[beg-1]->is_MachProj() ||
        !_nodes[beg-1]->in(0)->is_MachCall() ) {
    beg--;
    assert(beg > 0,"Catch cleanup walking beyond block boundary");
  }
  // Range of inserted instructions is [beg, end)
  if( beg == end ) return;

  // Clone along all Catch output paths. Clone area between the 'beg' and
  // 'end' indices.
  for( uint i = 0; i < _num_succs; i++ ) {
    Block *sb = _succs[i];
    // Clone the entire area; ignoring the edge fixup for now.
    for( uint j = end; j > beg; j-- ) {
      // It is safe here to clone a node with anti_dependence
      // since clones dominate on each path.
      Node *clone = _nodes[j-1]->clone();
      sb->_nodes.insert( 1, clone );
      bbs.map(clone->_idx,sb);
    }
  }


  // Fixup edges. Check the def-use info per cloned Node
  for(uint i2 = beg; i2 < end; i2++ ) {
    uint n_clone_idx = i2-beg+1; // Index of clone of n in each successor block
    Node *n = _nodes[i2];        // Node that got cloned
    // Need DU safe iterator because of edge manipulation in calls.
    Unique_Node_List *out = new Unique_Node_List(Thread::current()->resource_area());
    for (DUIterator_Fast j1max, j1 = n->fast_outs(j1max); j1 < j1max; j1++) {
      out->push(n->fast_out(j1));
    }
    uint max = out->size();
    for (uint j = 0; j < max; j++) { // For all users
      Node *use = out->pop();
      Block *buse = bbs[use->_idx];
      if( use->is_Phi() ) {
        for( uint k = 1; k < use->req(); k++ )
          if( use->in(k) == n ) {
            Node *fixup = catch_cleanup_find_cloned_def(bbs[buse->pred(k)->_idx], n, this, bbs, n_clone_idx);
            use->set_req(k, fixup);
          }
      } else {
        if (this == buse) {
          catch_cleanup_intra_block(use, n, this, beg, n_clone_idx);
        } else {
          catch_cleanup_inter_block(use, buse, n, this, bbs, n_clone_idx);
        }
      }
    } // End for all users

  } // End of for all Nodes in cloned area

  // Remove the now-dead cloned ops
  for(uint i3 = beg; i3 < end; i3++ ) {
    _nodes[beg]->disconnect_inputs(NULL, C);
    _nodes.remove(beg);
  }

  // If the successor blocks have a CreateEx node, move it back to the top
  for(uint i4 = 0; i4 < _num_succs; i4++ ) {
    Block *sb = _succs[i4];
    uint new_cnt = end - beg;
    // Remove any newly created, but dead, nodes.
    for( uint j = new_cnt; j > 0; j-- ) {
      Node *n = sb->_nodes[j];
      if (n->outcnt() == 0 &&
          (!n->is_Proj() || n->as_Proj()->in(0)->outcnt() == 1) ){
        n->disconnect_inputs(NULL, C);
        sb->_nodes.remove(j);
        new_cnt--;
      }
    }
    // If any newly created nodes remain, move the CreateEx node to the top
    if (new_cnt > 0) {
      Node *cex = sb->_nodes[1+new_cnt];
      if( cex->is_Mach() && cex->as_Mach()->ideal_Opcode() == Op_CreateEx ) {
        sb->_nodes.remove(1+new_cnt);
        sb->_nodes.insert(1,cex);
      }
    }
  }
}