c1_LIRGenerator.cpp revision 337:9ee9cf798b59
/*
 * Copyright 2005-2008 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_c1_LIRGenerator.cpp.incl"

#ifdef ASSERT
#define __ gen()->lir(__FILE__, __LINE__)->
#else
#define __ gen()->lir()->
#endif


void PhiResolverState::reset(int max_vregs) {
  // Initialize array sizes
  _virtual_operands.at_put_grow(max_vregs - 1, NULL, NULL);
  _virtual_operands.trunc_to(0);
  _other_operands.at_put_grow(max_vregs - 1, NULL, NULL);
  _other_operands.trunc_to(0);
  _vreg_table.at_put_grow(max_vregs - 1, NULL, NULL);
  _vreg_table.trunc_to(0);
}



//--------------------------------------------------------------
// PhiResolver

// Resolves cycles:
//
//  r1 := r2  becomes  temp := r1
//  r2 := r1           r1 := r2
//                     r2 := temp
// and orders moves:
//
//  r2 := r3  becomes  r1 := r2
//  r1 := r2           r2 := r3

PhiResolver::PhiResolver(LIRGenerator* gen, int max_vregs)
 : _gen(gen)
 , _state(gen->resolver_state())
 , _temp(LIR_OprFact::illegalOpr)
{
  // reinitialize the shared state arrays
  _state.reset(max_vregs);
}


void PhiResolver::emit_move(LIR_Opr src, LIR_Opr dest) {
  assert(src->is_valid(), "");
  assert(dest->is_valid(), "");
  __ move(src, dest);
}


void PhiResolver::move_temp_to(LIR_Opr dest) {
  assert(_temp->is_valid(), "");
  emit_move(_temp, dest);
  NOT_PRODUCT(_temp = LIR_OprFact::illegalOpr);
}


void PhiResolver::move_to_temp(LIR_Opr src) {
  assert(_temp->is_illegal(), "");
  _temp = _gen->new_register(src->type());
  emit_move(src, _temp);
}
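// A sketch of how the resolver is driven (see move_to_phi() below;
// the operand names here are illustrative, not real vreg numbers):
// each call to PhiResolver::move(src, dest) only records an edge in
// the assignment graph, and the actual moves -- reordered and, when a
// cycle is found, routed through a single temporary -- are emitted
// when the resolver goes out of scope:
//
//   PhiResolver resolver(this, max_vregs);
//   resolver.move(cur_opr, phi_opr);   // one call per phi function
//   ...
//   // ~PhiResolver() emits the moves in a safe order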
// Traverse assignment graph in depth first order and generate moves in post order
// i.e. two assignments: b := c, a := b start with node c:
// Call graph: move(NULL, c) -> move(c, b) -> move(b, a)
// Generates moves in this order: move b to a and move c to b
// i.e. cycle a := b, b := a start with node a
// Call graph: move(NULL, a) -> move(a, b) -> move(b, a)
// Generates moves in this order: move b to temp, move a to b, move temp to a
void PhiResolver::move(ResolveNode* src, ResolveNode* dest) {
  if (!dest->visited()) {
    dest->set_visited();
    for (int i = dest->no_of_destinations()-1; i >= 0; i --) {
      move(dest, dest->destination_at(i));
    }
  } else if (!dest->start_node()) {
    // cycle in graph detected
    assert(_loop == NULL, "only one loop valid!");
    _loop = dest;
    move_to_temp(src->operand());
    return;
  } // else dest is a start node

  if (!dest->assigned()) {
    if (_loop == dest) {
      move_temp_to(dest->operand());
      dest->set_assigned();
    } else if (src != NULL) {
      emit_move(src->operand(), dest->operand());
      dest->set_assigned();
    }
  }
}


PhiResolver::~PhiResolver() {
  int i;
  // resolve any cycles in moves from and to virtual registers
  for (i = virtual_operands().length() - 1; i >= 0; i --) {
    ResolveNode* node = virtual_operands()[i];
    if (!node->visited()) {
      _loop = NULL;
      move(NULL, node);
      node->set_start_node();
      assert(_temp->is_illegal(), "move_temp_to() call missing");
    }
  }

  // generate move for move from non virtual register to arbitrary destination
  for (i = other_operands().length() - 1; i >= 0; i --) {
    ResolveNode* node = other_operands()[i];
    for (int j = node->no_of_destinations() - 1; j >= 0; j --) {
      emit_move(node->operand(), node->destination_at(j)->operand());
    }
  }
}
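// Node lookup/creation policy, summarized from create_node() below:
// virtual registers are interned through vreg_table() so each vreg
// maps to exactly one node, while physical registers, stack slots and
// constants may only ever be move *sources* and get a fresh node in
// other_operands() each time.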
ResolveNode* PhiResolver::create_node(LIR_Opr opr, bool source) {
  ResolveNode* node;
  if (opr->is_virtual()) {
    int vreg_num = opr->vreg_number();
    node = vreg_table().at_grow(vreg_num, NULL);
    assert(node == NULL || node->operand() == opr, "");
    if (node == NULL) {
      node = new ResolveNode(opr);
      vreg_table()[vreg_num] = node;
    }
    // Make sure that all virtual operands show up in the list when
    // they are used as the source of a move.
    if (source && !virtual_operands().contains(node)) {
      virtual_operands().append(node);
    }
  } else {
    assert(source, "");
    node = new ResolveNode(opr);
    other_operands().append(node);
  }
  return node;
}


void PhiResolver::move(LIR_Opr src, LIR_Opr dest) {
  assert(dest->is_virtual(), "");
  // tty->print("move "); src->print(); tty->print(" to "); dest->print(); tty->cr();
  assert(src->is_valid(), "");
  assert(dest->is_valid(), "");
  ResolveNode* source = source_node(src);
  source->append(destination_node(dest));
}


//--------------------------------------------------------------
// LIRItem

void LIRItem::set_result(LIR_Opr opr) {
  assert(value()->operand()->is_illegal() || value()->operand()->is_constant(), "operand should never change");
  value()->set_operand(opr);

  if (opr->is_virtual()) {
    _gen->_instruction_for_operand.at_put_grow(opr->vreg_number(), value(), NULL);
  }

  _result = opr;
}

void LIRItem::load_item() {
  if (result()->is_illegal()) {
    // update the item's result
    _result = value()->operand();
  }
  if (!result()->is_register()) {
    LIR_Opr reg = _gen->new_register(value()->type());
    __ move(result(), reg);
    if (result()->is_constant()) {
      _result = reg;
    } else {
      set_result(reg);
    }
  }
}


void LIRItem::load_for_store(BasicType type) {
  if (_gen->can_store_as_constant(value(), type)) {
    _result = value()->operand();
    if (!_result->is_constant()) {
      _result = LIR_OprFact::value_type(value()->type());
    }
  } else if (type == T_BYTE || type == T_BOOLEAN) {
    load_byte_item();
  } else {
    load_item();
  }
}

void LIRItem::load_item_force(LIR_Opr reg) {
  LIR_Opr r = result();
  if (r != reg) {
    if (r->type() != reg->type()) {
      // moves between different types need an intervening spill slot
      LIR_Opr tmp = _gen->force_to_spill(r, reg->type());
      __ move(tmp, reg);
    } else {
      __ move(r, reg);
    }
    _result = reg;
  }
}

ciObject* LIRItem::get_jobject_constant() const {
  ObjectType* oc = type()->as_ObjectType();
  if (oc) {
    return oc->constant_value();
  }
  return NULL;
}


jint LIRItem::get_jint_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_IntConstant() != NULL, "type check");
  return type()->as_IntConstant()->value();
}


jint LIRItem::get_address_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_AddressConstant() != NULL, "type check");
  return type()->as_AddressConstant()->value();
}


jfloat LIRItem::get_jfloat_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_FloatConstant() != NULL, "type check");
  return type()->as_FloatConstant()->value();
}


jdouble LIRItem::get_jdouble_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_DoubleConstant() != NULL, "type check");
  return type()->as_DoubleConstant()->value();
}


jlong LIRItem::get_jlong_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_LongConstant() != NULL, "type check");
  return type()->as_LongConstant()->value();
}
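// A note on the LIRItem protocol used by the visitor functions below
// (receiver/variable names here are illustrative): wrap an HIR value,
// pick how strongly it must be materialized, then read result():
//
//   LIRItem obj(x->obj(), this);
//   obj.load_item();        // force into a register
//   // obj.load_byte_item() -- byte-addressable register, e.g. on x86
//   // obj.dont_load_item() -- leave as-is, e.g. an inlinable constant
//   __ null_check(obj.result(), info);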
//--------------------------------------------------------------


void LIRGenerator::init() {
  BarrierSet* bs = Universe::heap()->barrier_set();
  assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
  CardTableModRefBS* ct = (CardTableModRefBS*)bs;
  assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");

#ifdef _LP64
  _card_table_base = new LIR_Const((jlong)ct->byte_map_base);
#else
  _card_table_base = new LIR_Const((jint)ct->byte_map_base);
#endif
}


void LIRGenerator::block_do_prolog(BlockBegin* block) {
#ifndef PRODUCT
  if (PrintIRWithLIR) {
    block->print();
  }
#endif

  // set up the list of LIR instructions
  assert(block->lir() == NULL, "LIR list already computed for this block");
  _lir = new LIR_List(compilation(), block);
  block->set_lir(_lir);

  __ branch_destination(block->label());

  if (LIRTraceExecution &&
      Compilation::current_compilation()->hir()->start()->block_id() != block->block_id() &&
      !block->is_set(BlockBegin::exception_entry_flag)) {
    assert(block->lir()->instructions_list()->length() == 1, "should come right after br_dst");
    trace_block_entry(block);
  }
}


void LIRGenerator::block_do_epilog(BlockBegin* block) {
#ifndef PRODUCT
  if (PrintIRWithLIR) {
    tty->cr();
  }
#endif

  // LIR_Opr for unpinned constants shouldn't be referenced by other
  // blocks so clear them out after processing the block.
  for (int i = 0; i < _unpinned_constants.length(); i++) {
    _unpinned_constants.at(i)->clear_operand();
  }
  _unpinned_constants.trunc_to(0);

  // clear out any registers for other local constants
  _constants.trunc_to(0);
  _reg_for_constants.trunc_to(0);
}


void LIRGenerator::block_do(BlockBegin* block) {
  CHECK_BAILOUT();

  block_do_prolog(block);
  set_block(block);

  for (Instruction* instr = block; instr != NULL; instr = instr->next()) {
    if (instr->is_pinned()) do_root(instr);
  }

  set_block(NULL);
  block_do_epilog(block);
}


//-------------------------LIRGenerator-----------------------------

// This is where the tree-walk starts; instr must be a root
void LIRGenerator::do_root(Value instr) {
  CHECK_BAILOUT();

  InstructionMark im(compilation(), instr);

  assert(instr->is_pinned(), "use only with roots");
  assert(instr->subst() == instr, "shouldn't have missed substitution");

  instr->visit(this);

  assert(!instr->has_uses() || instr->operand()->is_valid() ||
         instr->as_Constant() != NULL || bailed_out(), "invalid item set");
}


// This is called for each node in the tree; the walk stops if a root is reached
void LIRGenerator::walk(Value instr) {
  InstructionMark im(compilation(), instr);
  // stop the walk when we encounter a root
  if ((instr->is_pinned() && instr->as_Phi() == NULL) || instr->operand()->is_valid()) {
    assert(instr->operand() != LIR_OprFact::illegalOpr || instr->as_Constant() != NULL, "this root has not yet been visited");
  } else {
    assert(instr->subst() == instr, "shouldn't have missed substitution");
    instr->visit(this);
    // assert(instr->use_count() > 0 || instr->as_Phi() != NULL, "leaf instruction must have a use");
  }
}
CodeEmitInfo* LIRGenerator::state_for(Instruction* x, ValueStack* state, bool ignore_xhandler) {
  int index;
  Value value;
  for_each_stack_value(state, index, value) {
    assert(value->subst() == value, "missed substitution");
    if (!value->is_pinned() && value->as_Constant() == NULL && value->as_Local() == NULL) {
      walk(value);
      assert(value->operand()->is_valid(), "must be evaluated now");
    }
  }
  ValueStack* s = state;
  int bci = x->bci();
  for_each_state(s) {
    IRScope* scope = s->scope();
    ciMethod* method = scope->method();

    MethodLivenessResult liveness = method->liveness_at_bci(bci);
    if (bci == SynchronizationEntryBCI) {
      if (x->as_ExceptionObject() || x->as_Throw()) {
        // all locals are dead on exit from the synthetic unlocker
        liveness.clear();
      } else {
        assert(x->as_MonitorEnter(), "only other case is MonitorEnter");
      }
    }
    if (!liveness.is_valid()) {
      // Degenerate or breakpointed method.
      bailout("Degenerate or breakpointed method");
    } else {
      assert((int)liveness.size() == s->locals_size(), "error in use of liveness");
      for_each_local_value(s, index, value) {
        assert(value->subst() == value, "missed substitution");
        if (liveness.at(index) && !value->type()->is_illegal()) {
          if (!value->is_pinned() && value->as_Constant() == NULL && value->as_Local() == NULL) {
            walk(value);
            assert(value->operand()->is_valid(), "must be evaluated now");
          }
        } else {
          // NULL out this local so that linear scan can assume that all non-NULL values are live.
          s->invalidate_local(index);
        }
      }
    }
    bci = scope->caller_bci();
  }

  return new CodeEmitInfo(x->bci(), state, ignore_xhandler ? NULL : x->exception_handlers());
}


CodeEmitInfo* LIRGenerator::state_for(Instruction* x) {
  return state_for(x, x->lock_stack());
}


void LIRGenerator::jobject2reg_with_patching(LIR_Opr r, ciObject* obj, CodeEmitInfo* info) {
  if (!obj->is_loaded() || PatchALot) {
    assert(info != NULL, "info must be set if class is not loaded");
    __ oop2reg_patch(NULL, r, info);
  } else {
    // no patching needed
    __ oop2reg(obj->encoding(), r);
  }
}


void LIRGenerator::array_range_check(LIR_Opr array, LIR_Opr index,
                                     CodeEmitInfo* null_check_info, CodeEmitInfo* range_check_info) {
  CodeStub* stub = new RangeCheckStub(range_check_info, index);
  if (index->is_constant()) {
    cmp_mem_int(lir_cond_belowEqual, array, arrayOopDesc::length_offset_in_bytes(),
                index->as_jint(), null_check_info);
    __ branch(lir_cond_belowEqual, T_INT, stub);  // forward branch
  } else {
    cmp_reg_mem(lir_cond_aboveEqual, index, array,
                arrayOopDesc::length_offset_in_bytes(), T_INT, null_check_info);
    __ branch(lir_cond_aboveEqual, T_INT, stub);  // forward branch
  }
}


void LIRGenerator::nio_range_check(LIR_Opr buffer, LIR_Opr index, LIR_Opr result, CodeEmitInfo* info) {
  CodeStub* stub = new RangeCheckStub(info, index, true);
  if (index->is_constant()) {
    cmp_mem_int(lir_cond_belowEqual, buffer, java_nio_Buffer::limit_offset(), index->as_jint(), info);
    __ branch(lir_cond_belowEqual, T_INT, stub);  // forward branch
  } else {
    cmp_reg_mem(lir_cond_aboveEqual, index, buffer,
                java_nio_Buffer::limit_offset(), T_INT, info);
    __ branch(lir_cond_aboveEqual, T_INT, stub);  // forward branch
  }
  __ move(index, result);
}


// increment a counter returning the incremented value
LIR_Opr LIRGenerator::increment_and_return_counter(LIR_Opr base, int offset, int increment) {
  LIR_Address* counter = new LIR_Address(base, offset, T_INT);
  LIR_Opr result = new_register(T_INT);
  __ load(counter, result);
  __ add(result, LIR_OprFact::intConst(increment), result);
  __ store(result, counter);
  return result;
}
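// A note on TwoOperandLIRForm, which recurs in the emitters below:
// on two-address architectures (x86) the result operand of an
// arithmetic LIR op must be the same as the left input, so a move is
// emitted first when they differ.  For example (illustrative operands
// only), "c = a * b" with a != c lowers roughly to:
//
//   move a -> c
//   mul  c, b -> c
//
// and an imul by a power-of-two constant is strength-reduced to a
// shift instead (x * 8 becomes x << 3 via exact_log2).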
void LIRGenerator::arithmetic_op(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, bool is_strictfp, LIR_Opr tmp_op, CodeEmitInfo* info) {
  LIR_Opr result_op = result;
  LIR_Opr left_op   = left;
  LIR_Opr right_op  = right;

  if (TwoOperandLIRForm && left_op != result_op) {
    assert(right_op != result_op, "malformed");
    __ move(left_op, result_op);
    left_op = result_op;
  }

  switch(code) {
    case Bytecodes::_dadd:
    case Bytecodes::_fadd:
    case Bytecodes::_ladd:
    case Bytecodes::_iadd:  __ add(left_op, right_op, result_op); break;
    case Bytecodes::_fmul:
    case Bytecodes::_lmul:  __ mul(left_op, right_op, result_op); break;

    case Bytecodes::_dmul:
      {
        if (is_strictfp) {
          __ mul_strictfp(left_op, right_op, result_op, tmp_op); break;
        } else {
          __ mul(left_op, right_op, result_op); break;
        }
      }
      break;

    case Bytecodes::_imul:
      {
        bool did_strength_reduce = false;

        if (right->is_constant()) {
          int c = right->as_jint();
          if (is_power_of_2(c)) {
            // do not need tmp here
            __ shift_left(left_op, exact_log2(c), result_op);
            did_strength_reduce = true;
          } else {
            did_strength_reduce = strength_reduce_multiply(left_op, c, result_op, tmp_op);
          }
        }
        // we couldn't strength reduce so just emit the multiply
        if (!did_strength_reduce) {
          __ mul(left_op, right_op, result_op);
        }
      }
      break;

    case Bytecodes::_dsub:
    case Bytecodes::_fsub:
    case Bytecodes::_lsub:
    case Bytecodes::_isub: __ sub(left_op, right_op, result_op); break;

    case Bytecodes::_fdiv: __ div (left_op, right_op, result_op); break;
    // ldiv and lrem are implemented with a direct runtime call

    case Bytecodes::_ddiv:
      {
        if (is_strictfp) {
          __ div_strictfp (left_op, right_op, result_op, tmp_op); break;
        } else {
          __ div (left_op, right_op, result_op); break;
        }
      }
      break;

    case Bytecodes::_drem:
    case Bytecodes::_frem: __ rem (left_op, right_op, result_op); break;

    default: ShouldNotReachHere();
  }
}


void LIRGenerator::arithmetic_op_int(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, LIR_Opr tmp) {
  arithmetic_op(code, result, left, right, false, tmp);
}


void LIRGenerator::arithmetic_op_long(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, CodeEmitInfo* info) {
  arithmetic_op(code, result, left, right, false, LIR_OprFact::illegalOpr, info);
}


void LIRGenerator::arithmetic_op_fpu(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, bool is_strictfp, LIR_Opr tmp) {
  arithmetic_op(code, result, left, right, is_strictfp, tmp);
}


void LIRGenerator::shift_op(Bytecodes::Code code, LIR_Opr result_op, LIR_Opr value, LIR_Opr count, LIR_Opr tmp) {
  if (TwoOperandLIRForm && value != result_op) {
    assert(count != result_op, "malformed");
    __ move(value, result_op);
    value = result_op;
  }

  assert(count->is_constant() || count->is_register(), "must be");
  switch(code) {
  case Bytecodes::_ishl:
  case Bytecodes::_lshl:  __ shift_left(value, count, result_op, tmp); break;
  case Bytecodes::_ishr:
  case Bytecodes::_lshr:  __ shift_right(value, count, result_op, tmp); break;
  case Bytecodes::_iushr:
  case Bytecodes::_lushr: __ unsigned_shift_right(value, count, result_op, tmp); break;
  default: ShouldNotReachHere();
  }
}
void LIRGenerator::logic_op(Bytecodes::Code code, LIR_Opr result_op, LIR_Opr left_op, LIR_Opr right_op) {
  if (TwoOperandLIRForm && left_op != result_op) {
    assert(right_op != result_op, "malformed");
    __ move(left_op, result_op);
    left_op = result_op;
  }

  switch(code) {
    case Bytecodes::_iand:
    case Bytecodes::_land:  __ logical_and(left_op, right_op, result_op); break;

    case Bytecodes::_ior:
    case Bytecodes::_lor:   __ logical_or(left_op, right_op, result_op);  break;

    case Bytecodes::_ixor:
    case Bytecodes::_lxor:  __ logical_xor(left_op, right_op, result_op); break;

    default: ShouldNotReachHere();
  }
}


void LIRGenerator::monitor_enter(LIR_Opr object, LIR_Opr lock, LIR_Opr hdr, LIR_Opr scratch, int monitor_no, CodeEmitInfo* info_for_exception, CodeEmitInfo* info) {
  if (!GenerateSynchronizationCode) return;
  // for slow path, use debug info for state after successful locking
  CodeStub* slow_path = new MonitorEnterStub(object, lock, info);
  __ load_stack_address_monitor(monitor_no, lock);
  // for handling NullPointerException, use debug info representing just the lock stack before this monitorenter
  __ lock_object(hdr, object, lock, scratch, slow_path, info_for_exception);
}


void LIRGenerator::monitor_exit(LIR_Opr object, LIR_Opr lock, LIR_Opr new_hdr, int monitor_no) {
  if (!GenerateSynchronizationCode) return;
  // setup registers
  LIR_Opr hdr = lock;
  lock = new_hdr;
  CodeStub* slow_path = new MonitorExitStub(lock, UseFastLocking, monitor_no);
  __ load_stack_address_monitor(monitor_no, lock);
  __ unlock_object(hdr, object, lock, slow_path);
}
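// A sketch of the allocation strategy implemented by new_instance()
// below: when the klass is loaded, its layout needs no slow path
// (e.g. no finalizer), and UseFastNewInstance is on, an inline
// allocation (allocate_object) is emitted with a NewInstanceStub as
// the slow-path continuation; otherwise the code branches
// unconditionally to the stub and the runtime does all the work.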
void LIRGenerator::new_instance(LIR_Opr dst, ciInstanceKlass* klass, LIR_Opr scratch1, LIR_Opr scratch2, LIR_Opr scratch3, LIR_Opr scratch4, LIR_Opr klass_reg, CodeEmitInfo* info) {
  jobject2reg_with_patching(klass_reg, klass, info);
  // If klass is not loaded we do not know if the klass has finalizers:
  if (UseFastNewInstance && klass->is_loaded()
      && !Klass::layout_helper_needs_slow_path(klass->layout_helper())) {

    Runtime1::StubID stub_id = klass->is_initialized() ? Runtime1::fast_new_instance_id : Runtime1::fast_new_instance_init_check_id;

    CodeStub* slow_path = new NewInstanceStub(klass_reg, dst, klass, info, stub_id);

    assert(klass->is_loaded(), "must be loaded");
    // allocate space for instance
    assert(klass->size_helper() >= 0, "illegal instance size");
    const int instance_size = align_object_size(klass->size_helper());
    __ allocate_object(dst, scratch1, scratch2, scratch3, scratch4,
                       oopDesc::header_size(), instance_size, klass_reg, !klass->is_initialized(), slow_path);
  } else {
    CodeStub* slow_path = new NewInstanceStub(klass_reg, dst, klass, info, Runtime1::new_instance_id);
    __ branch(lir_cond_always, T_ILLEGAL, slow_path);
    __ branch_destination(slow_path->continuation());
  }
}


static bool is_constant_zero(Instruction* inst) {
  IntConstant* c = inst->type()->as_IntConstant();
  if (c) {
    return (c->value() == 0);
  }
  return false;
}


static bool positive_constant(Instruction* inst) {
  IntConstant* c = inst->type()->as_IntConstant();
  if (c) {
    return (c->value() >= 0);
  }
  return false;
}


static ciArrayKlass* as_array_klass(ciType* type) {
  if (type != NULL && type->is_array_klass() && type->is_loaded()) {
    return (ciArrayKlass*)type;
  } else {
    return NULL;
  }
}

void LIRGenerator::arraycopy_helper(Intrinsic* x, int* flagsp, ciArrayKlass** expected_typep) {
  Instruction* src     = x->argument_at(0);
  Instruction* src_pos = x->argument_at(1);
  Instruction* dst     = x->argument_at(2);
  Instruction* dst_pos = x->argument_at(3);
  Instruction* length  = x->argument_at(4);

  // first try to identify the likely type of the arrays involved
  ciArrayKlass* expected_type = NULL;
  bool is_exact = false;
  {
    ciArrayKlass* src_exact_type    = as_array_klass(src->exact_type());
    ciArrayKlass* src_declared_type = as_array_klass(src->declared_type());
    ciArrayKlass* dst_exact_type    = as_array_klass(dst->exact_type());
    ciArrayKlass* dst_declared_type = as_array_klass(dst->declared_type());
    if (src_exact_type != NULL && src_exact_type == dst_exact_type) {
      // the types exactly match so the type is fully known
      is_exact = true;
      expected_type = src_exact_type;
    } else if (dst_exact_type != NULL && dst_exact_type->is_obj_array_klass()) {
      ciArrayKlass* dst_type = (ciArrayKlass*) dst_exact_type;
      ciArrayKlass* src_type = NULL;
      if (src_exact_type != NULL && src_exact_type->is_obj_array_klass()) {
        src_type = (ciArrayKlass*) src_exact_type;
      } else if (src_declared_type != NULL && src_declared_type->is_obj_array_klass()) {
        src_type = (ciArrayKlass*) src_declared_type;
      }
      if (src_type != NULL) {
        if (src_type->element_type()->is_subtype_of(dst_type->element_type())) {
          is_exact = true;
          expected_type = dst_type;
        }
      }
    }
    // at least pass along a good guess
    if (expected_type == NULL) expected_type = dst_exact_type;
    if (expected_type == NULL) expected_type = src_declared_type;
    if (expected_type == NULL) expected_type = dst_declared_type;
  }

  // if a probable array type has been identified, figure out if any
  // of the required checks for a fast case can be elided.
  int flags = LIR_OpArrayCopy::all_flags;
  if (expected_type != NULL) {
    // try to skip null checks
    if (src->as_NewArray() != NULL)
      flags &= ~LIR_OpArrayCopy::src_null_check;
    if (dst->as_NewArray() != NULL)
      flags &= ~LIR_OpArrayCopy::dst_null_check;

    // check from incoming constant values
    if (positive_constant(src_pos))
      flags &= ~LIR_OpArrayCopy::src_pos_positive_check;
    if (positive_constant(dst_pos))
      flags &= ~LIR_OpArrayCopy::dst_pos_positive_check;
    if (positive_constant(length))
      flags &= ~LIR_OpArrayCopy::length_positive_check;

    // see if the range check can be elided, which might also imply
    // that src or dst is non-null.
    ArrayLength* al = length->as_ArrayLength();
    if (al != NULL) {
      if (al->array() == src) {
        // it's the length of the source array
        flags &= ~LIR_OpArrayCopy::length_positive_check;
        flags &= ~LIR_OpArrayCopy::src_null_check;
        if (is_constant_zero(src_pos))
          flags &= ~LIR_OpArrayCopy::src_range_check;
      }
      if (al->array() == dst) {
        // it's the length of the destination array
        flags &= ~LIR_OpArrayCopy::length_positive_check;
        flags &= ~LIR_OpArrayCopy::dst_null_check;
        if (is_constant_zero(dst_pos))
          flags &= ~LIR_OpArrayCopy::dst_range_check;
      }
    }
    if (is_exact) {
      flags &= ~LIR_OpArrayCopy::type_check;
    }
  }

  if (src == dst) {
    // moving within a single array so no type checks are needed
    if (flags & LIR_OpArrayCopy::type_check) {
      flags &= ~LIR_OpArrayCopy::type_check;
    }
  }
  *flagsp = flags;
  *expected_typep = (ciArrayKlass*)expected_type;
}


LIR_Opr LIRGenerator::round_item(LIR_Opr opr) {
  assert(opr->is_register(), "why spill if item is not register?");

  if (RoundFPResults && UseSSE < 1 && opr->is_single_fpu()) {
    LIR_Opr result = new_register(T_FLOAT);
    set_vreg_flag(result, must_start_in_memory);
    assert(opr->is_register(), "only a register can be spilled");
    assert(opr->value_type()->is_float(), "rounding only for floats available");
    __ roundfp(opr, LIR_OprFact::illegalOpr, result);
    return result;
  }
  return opr;
}


LIR_Opr LIRGenerator::force_to_spill(LIR_Opr value, BasicType t) {
  assert(type2size[t] == type2size[value->type()], "size mismatch");
  if (!value->is_register()) {
    // force into a register
    LIR_Opr r = new_register(value->type());
    __ move(value, r);
    value = r;
  }

  // create a spill location
  LIR_Opr tmp = new_register(t);
  set_vreg_flag(tmp, LIRGenerator::must_start_in_memory);

  // move from register to spill
  __ move(value, tmp);
  return tmp;
}
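// A sketch of the branchless counter update emitted by
// profile_branch() below: rather than branching to pick which
// MethodData cell to bump, a cmove selects the byte offset of the
// taken or not-taken counter, the counter is loaded through that
// offset, and leal (rather than add) performs the increment so the
// condition codes feeding the subsequent conditional branch on x86
// are left intact.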
void LIRGenerator::profile_branch(If* if_instr, If::Condition cond) {
  if (if_instr->should_profile()) {
    ciMethod* method = if_instr->profiled_method();
    assert(method != NULL, "method should be set if branch is profiled");
    ciMethodData* md = method->method_data();
    if (md == NULL) {
      bailout("out of memory building methodDataOop");
      return;
    }
    ciProfileData* data = md->bci_to_data(if_instr->profiled_bci());
    assert(data != NULL, "must have profiling data");
    assert(data->is_BranchData(), "need BranchData for two-way branches");
    int taken_count_offset     = md->byte_offset_of_slot(data, BranchData::taken_offset());
    int not_taken_count_offset = md->byte_offset_of_slot(data, BranchData::not_taken_offset());
    LIR_Opr md_reg = new_register(T_OBJECT);
    __ move(LIR_OprFact::oopConst(md->encoding()), md_reg);
    LIR_Opr data_offset_reg = new_register(T_INT);
    __ cmove(lir_cond(cond),
             LIR_OprFact::intConst(taken_count_offset),
             LIR_OprFact::intConst(not_taken_count_offset),
             data_offset_reg);
    LIR_Opr data_reg = new_register(T_INT);
    LIR_Address* data_addr = new LIR_Address(md_reg, data_offset_reg, T_INT);
    __ move(LIR_OprFact::address(data_addr), data_reg);
    LIR_Address* fake_incr_value = new LIR_Address(data_reg, DataLayout::counter_increment, T_INT);
    // Use leal instead of add to avoid destroying condition codes on x86
    __ leal(LIR_OprFact::address(fake_incr_value), data_reg);
    __ move(data_reg, LIR_OprFact::address(data_addr));
  }
}


// Phi technique:
// This is about passing live values from one basic block to the other.
// In code generated from Java it is rather rare that more than one
// value is on the stack from one basic block to the other.
// We optimize our technique for efficient passing of one value
// (of type long, int, double..) but it can be extended.
// When entering or leaving a basic block, all registers and all spill
// slots are released and empty. We use the released registers
// and spill slots to pass the live values from one block
// to the other. The topmost value, i.e., the value on TOS of the
// expression stack, is passed in registers. All other values are
// stored in the spilling area. Every Phi has an index which designates
// its spill slot.
// At exit of a basic block, we fill the register(s) and spill slots.
// At entry of a basic block, the block_prolog sets up the content of phi nodes
// and locks necessary registers and spilling slots.


// move current value to referenced phi function
void LIRGenerator::move_to_phi(PhiResolver* resolver, Value cur_val, Value sux_val) {
  Phi* phi = sux_val->as_Phi();
  // cur_val can be null without phi being null in conjunction with inlining
  if (phi != NULL && cur_val != NULL && cur_val != phi && !phi->is_illegal()) {
    LIR_Opr operand = cur_val->operand();
    if (cur_val->operand()->is_illegal()) {
      assert(cur_val->as_Constant() != NULL || cur_val->as_Local() != NULL,
             "these can be produced lazily");
      operand = operand_for_instruction(cur_val);
    }
    resolver->move(operand, operand_for_instruction(phi));
  }
}
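// For illustration (block names hypothetical): if a successor block
// with two predecessors carries a phi for local 0, then on the edge
// from each predecessor the current value of local 0 is handed to the
// PhiResolver as a move into the phi's operand, e.g.
//
//   move_to_phi(&resolver, cur_state->local_at(0), sux_state->local_at(0));
//
// and the resolver orders all such moves (breaking any swap cycles)
// when it goes out of scope.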
// Moves all stack values into their PHI position
void LIRGenerator::move_to_phi(ValueStack* cur_state) {
  BlockBegin* bb = block();
  if (bb->number_of_sux() == 1) {
    BlockBegin* sux = bb->sux_at(0);
    assert(sux->number_of_preds() > 0, "invalid CFG");

    // a block with only one predecessor never has phi functions
    if (sux->number_of_preds() > 1) {
      int max_phis = cur_state->stack_size() + cur_state->locals_size();
      PhiResolver resolver(this, _virtual_register_number + max_phis * 2);

      ValueStack* sux_state = sux->state();
      Value sux_value;
      int index;

      for_each_stack_value(sux_state, index, sux_value) {
        move_to_phi(&resolver, cur_state->stack_at(index), sux_value);
      }

      // Inlining may cause the local state not to match up, so walk up
      // the caller state until we get to the same scope as the
      // successor and then start processing from there.
      while (cur_state->scope() != sux_state->scope()) {
        cur_state = cur_state->caller_state();
        assert(cur_state != NULL, "scopes don't match up");
      }

      for_each_local_value(sux_state, index, sux_value) {
        move_to_phi(&resolver, cur_state->local_at(index), sux_value);
      }

      assert(cur_state->caller_state() == sux_state->caller_state(), "caller states must be equal");
    }
  }
}


LIR_Opr LIRGenerator::new_register(BasicType type) {
  int vreg = _virtual_register_number;
  // add a little fudge factor for the bailout, since the bailout is
  // only checked periodically.  This gives a few extra registers to
  // hand out before we really run out, which helps us keep from
  // tripping over assertions.
  if (vreg + 20 >= LIR_OprDesc::vreg_max) {
    bailout("out of virtual registers");
    if (vreg + 2 >= LIR_OprDesc::vreg_max) {
      // wrap it around
      _virtual_register_number = LIR_OprDesc::vreg_base;
    }
  }
  _virtual_register_number += 1;
  if (type == T_ADDRESS) type = T_INT;
  return LIR_OprFact::virtual_register(vreg, type);
}


// Try to lock using register in hint
LIR_Opr LIRGenerator::rlock(Value instr) {
  return new_register(instr->type());
}


// does an rlock and sets result
LIR_Opr LIRGenerator::rlock_result(Value x) {
  LIR_Opr reg = rlock(x);
  set_result(x, reg);
  return reg;
}


// does an rlock and sets result
LIR_Opr LIRGenerator::rlock_result(Value x, BasicType type) {
  LIR_Opr reg;
  switch (type) {
  case T_BYTE:
  case T_BOOLEAN:
    reg = rlock_byte(type);
    break;
  default:
    reg = rlock(x);
    break;
  }

  set_result(x, reg);
  return reg;
}


//---------------------------------------------------------------------
ciObject* LIRGenerator::get_jobject_constant(Value value) {
  ObjectType* oc = value->type()->as_ObjectType();
  if (oc) {
    return oc->constant_value();
  }
  return NULL;
}


void LIRGenerator::do_ExceptionObject(ExceptionObject* x) {
  assert(block()->is_set(BlockBegin::exception_entry_flag), "ExceptionObject only allowed in exception handler block");
  assert(block()->next() == x, "ExceptionObject must be first instruction of block");

  // no moves are created for phi functions at the beginning of exception
  // handlers, so assign operands manually here
  for_each_phi_fun(block(), phi,
                   operand_for_instruction(phi));

  LIR_Opr thread_reg = getThreadPointer();
  __ move(new LIR_Address(thread_reg, in_bytes(JavaThread::exception_oop_offset()), T_OBJECT),
          exceptionOopOpr());
  __ move(LIR_OprFact::oopConst(NULL),
          new LIR_Address(thread_reg, in_bytes(JavaThread::exception_oop_offset()), T_OBJECT));
  __ move(LIR_OprFact::oopConst(NULL),
          new LIR_Address(thread_reg, in_bytes(JavaThread::exception_pc_offset()), T_OBJECT));

  LIR_Opr result = new_register(T_OBJECT);
  __ move(exceptionOopOpr(), result);
  set_result(x, result);
}
//----------------------------------------------------------------------
//----------------------------------------------------------------------
//----------------------------------------------------------------------
//----------------------------------------------------------------------
//                        visitor functions
//----------------------------------------------------------------------
//----------------------------------------------------------------------
//----------------------------------------------------------------------
//----------------------------------------------------------------------

void LIRGenerator::do_Phi(Phi* x) {
  // phi functions are never visited directly
  ShouldNotReachHere();
}


// Code for a constant is generated lazily unless the constant is frequently used and can't be inlined.
void LIRGenerator::do_Constant(Constant* x) {
  if (x->state() != NULL) {
    // Any constant with a ValueStack requires patching so emit the patch here
    LIR_Opr reg = rlock_result(x);
    CodeEmitInfo* info = state_for(x, x->state());
    __ oop2reg_patch(NULL, reg, info);
  } else if (x->use_count() > 1 && !can_inline_as_constant(x)) {
    if (!x->is_pinned()) {
      // unpinned constants are handled specially so that they can be
      // put into registers when they are used multiple times within a
      // block.  After the block completes their operand will be
      // cleared so that other blocks can't refer to that register.
      set_result(x, load_constant(x));
    } else {
      LIR_Opr res = x->operand();
      if (!res->is_valid()) {
        res = LIR_OprFact::value_type(x->type());
      }
      if (res->is_constant()) {
        LIR_Opr reg = rlock_result(x);
        __ move(res, reg);
      } else {
        set_result(x, res);
      }
    }
  } else {
    set_result(x, LIR_OprFact::value_type(x->type()));
  }
}


void LIRGenerator::do_Local(Local* x) {
  // operand_for_instruction has the side effect of setting the result
  // so there's no need to do it here.
  operand_for_instruction(x);
}


void LIRGenerator::do_IfInstanceOf(IfInstanceOf* x) {
  Unimplemented();
}


void LIRGenerator::do_Return(Return* x) {
  if (DTraceMethodProbes) {
    BasicTypeList signature;
    signature.append(T_INT);    // thread
    signature.append(T_OBJECT); // methodOop
    LIR_OprList* args = new LIR_OprList();
    args->append(getThreadPointer());
    LIR_Opr meth = new_register(T_OBJECT);
    __ oop2reg(method()->encoding(), meth);
    args->append(meth);
    call_runtime(&signature, args, CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), voidType, NULL);
  }

  if (x->type()->is_void()) {
    __ return_op(LIR_OprFact::illegalOpr);
  } else {
    LIR_Opr reg = result_register_for(x->type(), /*callee=*/true);
    LIRItem result(x->result(), this);

    result.load_item_force(reg);
    __ return_op(result.result());
  }
  set_no_result(x);
}


// Example: object.getClass ()
void LIRGenerator::do_getClass(Intrinsic* x) {
  assert(x->number_of_arguments() == 1, "wrong type");

  LIRItem rcvr(x->argument_at(0), this);
  rcvr.load_item();
  LIR_Opr result = rlock_result(x);

  // need to perform the null check on the rcvr
  CodeEmitInfo* info = NULL;
  if (x->needs_null_check()) {
    info = state_for(x, x->state()->copy_locks());
  }
  __ move(new LIR_Address(rcvr.result(), oopDesc::klass_offset_in_bytes(), T_OBJECT), result, info);
  __ move(new LIR_Address(result, Klass::java_mirror_offset_in_bytes() +
                          klassOopDesc::klass_part_offset_in_bytes(), T_OBJECT), result);
}
// Example: Thread.currentThread()
void LIRGenerator::do_currentThread(Intrinsic* x) {
  assert(x->number_of_arguments() == 0, "wrong type");
  LIR_Opr reg = rlock_result(x);
  __ load(new LIR_Address(getThreadPointer(), in_bytes(JavaThread::threadObj_offset()), T_OBJECT), reg);
}


void LIRGenerator::do_RegisterFinalizer(Intrinsic* x) {
  assert(x->number_of_arguments() == 1, "wrong type");
  LIRItem receiver(x->argument_at(0), this);

  receiver.load_item();
  BasicTypeList signature;
  signature.append(T_OBJECT); // receiver
  LIR_OprList* args = new LIR_OprList();
  args->append(receiver.result());
  CodeEmitInfo* info = state_for(x, x->state());
  call_runtime(&signature, args,
               CAST_FROM_FN_PTR(address, Runtime1::entry_for(Runtime1::register_finalizer_id)),
               voidType, info);

  set_no_result(x);
}


//------------------------local access--------------------------------------

LIR_Opr LIRGenerator::operand_for_instruction(Instruction* x) {
  if (x->operand()->is_illegal()) {
    Constant* c = x->as_Constant();
    if (c != NULL) {
      x->set_operand(LIR_OprFact::value_type(c->type()));
    } else {
      assert(x->as_Phi() != NULL || x->as_Local() != NULL, "only for Phi and Local");
      // allocate a virtual register for this local or phi
      x->set_operand(rlock(x));
      _instruction_for_operand.at_put_grow(x->operand()->vreg_number(), x, NULL);
    }
  }
  return x->operand();
}


Instruction* LIRGenerator::instruction_for_opr(LIR_Opr opr) {
  if (opr->is_virtual()) {
    return instruction_for_vreg(opr->vreg_number());
  }
  return NULL;
}


Instruction* LIRGenerator::instruction_for_vreg(int reg_num) {
  if (reg_num < _instruction_for_operand.length()) {
    return _instruction_for_operand.at(reg_num);
  }
  return NULL;
}


void LIRGenerator::set_vreg_flag(int vreg_num, VregFlag f) {
  if (_vreg_flags.size_in_bits() == 0) {
    BitMap2D temp(100, num_vreg_flags);
    temp.clear();
    _vreg_flags = temp;
  }
  _vreg_flags.at_put_grow(vreg_num, f, true);
}

bool LIRGenerator::is_vreg_flag_set(int vreg_num, VregFlag f) {
  if (!_vreg_flags.is_valid_index(vreg_num, f)) {
    return false;
  }
  return _vreg_flags.at(vreg_num, f);
}
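// Usage note (summarizing the callers in this file): set_vreg_flag()
// records per-vreg hints for the register allocator; for example,
// force_to_spill() and round_item() tag their result with
// must_start_in_memory so the value begins life in a stack slot.
// is_vreg_flag_set() is the query side of the same table.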
// Block local constant handling.  This code is useful for keeping
// unpinned constants and constants which aren't exposed in the IR in
// registers.  Unpinned Constant instructions have their operands
// cleared when the block is finished so that other blocks can't end
// up referring to their registers.

LIR_Opr LIRGenerator::load_constant(Constant* x) {
  assert(!x->is_pinned(), "only for unpinned constants");
  _unpinned_constants.append(x);
  return load_constant(LIR_OprFact::value_type(x->type())->as_constant_ptr());
}


LIR_Opr LIRGenerator::load_constant(LIR_Const* c) {
  BasicType t = c->type();
  for (int i = 0; i < _constants.length(); i++) {
    LIR_Const* other = _constants.at(i);
    if (t == other->type()) {
      switch (t) {
      case T_INT:
      case T_FLOAT:
        if (c->as_jint_bits() != other->as_jint_bits()) continue;
        break;
      case T_LONG:
      case T_DOUBLE:
        // compare high words to high words and low words to low words
        if (c->as_jint_hi_bits() != other->as_jint_hi_bits()) continue;
        if (c->as_jint_lo_bits() != other->as_jint_lo_bits()) continue;
        break;
      case T_OBJECT:
        if (c->as_jobject() != other->as_jobject()) continue;
        break;
      }
      return _reg_for_constants.at(i);
    }
  }

  LIR_Opr result = new_register(t);
  __ move((LIR_Opr)c, result);
  _constants.append(c);
  _reg_for_constants.append(result);
  return result;
}

// Various barriers

void LIRGenerator::post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
  switch (Universe::heap()->barrier_set()->kind()) {
    case BarrierSet::CardTableModRef:
    case BarrierSet::CardTableExtension:
      CardTableModRef_post_barrier(addr, new_val);
      break;
    case BarrierSet::ModRef:
    case BarrierSet::Other:
      // No post barriers
      break;
    default:
      ShouldNotReachHere();
  }
}

void LIRGenerator::CardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {

  BarrierSet* bs = Universe::heap()->barrier_set();
  assert(sizeof(*((CardTableModRefBS*)bs)->byte_map_base) == sizeof(jbyte), "adjust this code");
  LIR_Const* card_table_base = new LIR_Const(((CardTableModRefBS*)bs)->byte_map_base);
  if (addr->is_address()) {
    LIR_Address* address = addr->as_address_ptr();
    LIR_Opr ptr = new_register(T_OBJECT);
    if (!address->index()->is_valid() && address->disp() == 0) {
      __ move(address->base(), ptr);
    } else {
      assert(address->disp() != max_jint, "lea doesn't support patched addresses!");
      __ leal(addr, ptr);
    }
    addr = ptr;
  }
  assert(addr->is_register(), "must be a register at this point");

  LIR_Opr tmp = new_pointer_register();
  if (TwoOperandLIRForm) {
    __ move(addr, tmp);
    __ unsigned_shift_right(tmp, CardTableModRefBS::card_shift, tmp);
  } else {
    __ unsigned_shift_right(addr, CardTableModRefBS::card_shift, tmp);
  }
  if (can_inline_as_constant(card_table_base)) {
    __ move(LIR_OprFact::intConst(0),
            new LIR_Address(tmp, card_table_base->as_jint(), T_BYTE));
  } else {
    __ move(LIR_OprFact::intConst(0),
            new LIR_Address(tmp, load_constant(card_table_base),
                            T_BYTE));
  }
}
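// For illustration (the constants are the usual HotSpot defaults, not
// something this file asserts): with 512-byte cards,
// CardTableModRefBS::card_shift is 9, so the code above dirties the
// card covering an updated oop field roughly as
//
//   card_index = (uintptr_t)addr >> 9;
//   byte_map_base[card_index] = 0;   // 0 == dirty
//
// byte_map_base is biased so that indexing it with the raw shifted
// address works without first subtracting the heap base.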
//------------------------field access--------------------------------------

// Comment copied from templateTable_i486.cpp
// ----------------------------------------------------------------------------
// Volatile variables demand their effects be made known to all CPU's in
// order.  Store buffers on most chips allow reads & writes to reorder; the
// JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of
// memory barrier (i.e., it's not sufficient that the interpreter does not
// reorder volatile references, the hardware also must not reorder them).
//
// According to the new Java Memory Model (JMM):
// (1) All volatiles are serialized wrt to each other.
// ALSO reads & writes act as acquire & release, so:
// (2) A read cannot let unrelated NON-volatile memory refs that happen after
// the read float up to before the read.  It's OK for non-volatile memory refs
// that happen before the volatile read to float down below it.
// (3) Similarly, a volatile write cannot let unrelated NON-volatile memory refs
// that happen BEFORE the write float down to after the write.  It's OK for
// non-volatile memory refs that happen after the volatile write to float up
// before it.
//
// We only put in barriers around volatile refs (they are expensive), not
// _between_ memory refs (that would require us to track the flavor of the
// previous memory refs).  Requirements (2) and (3) require some barriers
// before volatile stores and after volatile loads.  These nearly cover
// requirement (1) but miss the volatile-store-volatile-load case.  This final
// case is placed after volatile-stores although it could just as well go
// before volatile-loads.
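// In LIR terms (as emitted by do_StoreField()/do_LoadField() below on
// MP systems), the rules above come out as:
//
//   volatile store:  membar_release; store; membar
//   volatile load:   load; membar_acquire
//
// where the trailing full membar after the store covers the
// volatile-store / volatile-load case described in the last paragraph.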
void LIRGenerator::do_StoreField(StoreField* x) {
  bool needs_patching = x->needs_patching();
  bool is_volatile = x->field()->is_volatile();
  BasicType field_type = x->field_type();
  bool is_oop = (field_type == T_ARRAY || field_type == T_OBJECT);

  CodeEmitInfo* info = NULL;
  if (needs_patching) {
    assert(x->explicit_null_check() == NULL, "can't fold null check into patching field access");
    info = state_for(x, x->state_before());
  } else if (x->needs_null_check()) {
    NullCheck* nc = x->explicit_null_check();
    if (nc == NULL) {
      info = state_for(x, x->lock_stack());
    } else {
      info = state_for(nc);
    }
  }


  LIRItem object(x->obj(), this);
  LIRItem value(x->value(), this);

  object.load_item();

  if (is_volatile || needs_patching) {
    // load item if field is volatile (fewer special cases for volatiles)
    // load item if field not initialized
    // load item if field not constant
    // because of code patching we cannot inline constants
    if (field_type == T_BYTE || field_type == T_BOOLEAN) {
      value.load_byte_item();
    } else {
      value.load_item();
    }
  } else {
    value.load_for_store(field_type);
  }

  set_no_result(x);

  if (PrintNotLoaded && needs_patching) {
    tty->print_cr("   ###class not loaded at store_%s bci %d",
                  x->is_static() ? "static" : "field", x->bci());
  }

  if (x->needs_null_check() &&
      (needs_patching ||
       MacroAssembler::needs_explicit_null_check(x->offset()))) {
    // emit an explicit null check because the offset is too large
    __ null_check(object.result(), new CodeEmitInfo(info));
  }

  LIR_Address* address;
  if (needs_patching) {
    // we need to patch the offset in the instruction so don't allow
    // generate_address to try to be smart about emitting the -1.
    // Otherwise the patching code won't know how to find the
    // instruction to patch.
    address = new LIR_Address(object.result(), max_jint, field_type);
  } else {
    address = generate_address(object.result(), x->offset(), field_type);
  }

  if (is_volatile && os::is_MP()) {
    __ membar_release();
  }

  if (is_volatile) {
    assert(!needs_patching && x->is_loaded(),
           "how do we know it's volatile if it's not loaded");
    volatile_field_store(value.result(), address, info);
  } else {
    LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
    __ store(value.result(), address, info, patch_code);
  }

  if (is_oop) {
    post_barrier(object.result(), value.result());
  }

  if (is_volatile && os::is_MP()) {
    __ membar();
  }
}


void LIRGenerator::do_LoadField(LoadField* x) {
  bool needs_patching = x->needs_patching();
  bool is_volatile = x->field()->is_volatile();
  BasicType field_type = x->field_type();

  CodeEmitInfo* info = NULL;
  if (needs_patching) {
    assert(x->explicit_null_check() == NULL, "can't fold null check into patching field access");
    info = state_for(x, x->state_before());
  } else if (x->needs_null_check()) {
    NullCheck* nc = x->explicit_null_check();
    if (nc == NULL) {
      info = state_for(x, x->lock_stack());
    } else {
      info = state_for(nc);
    }
  }

  LIRItem object(x->obj(), this);

  object.load_item();

  if (PrintNotLoaded && needs_patching) {
    tty->print_cr("   ###class not loaded at load_%s bci %d",
                  x->is_static() ? "static" : "field", x->bci());
  }

  if (x->needs_null_check() &&
      (needs_patching ||
       MacroAssembler::needs_explicit_null_check(x->offset()))) {
    // emit an explicit null check because the offset is too large
    __ null_check(object.result(), new CodeEmitInfo(info));
  }

  LIR_Opr reg = rlock_result(x, field_type);
  LIR_Address* address;
  if (needs_patching) {
    // we need to patch the offset in the instruction so don't allow
    // generate_address to try to be smart about emitting the -1.
    // Otherwise the patching code won't know how to find the
    // instruction to patch.
    address = new LIR_Address(object.result(), max_jint, field_type);
  } else {
    address = generate_address(object.result(), x->offset(), field_type);
  }

  if (is_volatile) {
    assert(!needs_patching && x->is_loaded(),
           "how do we know it's volatile if it's not loaded");
    volatile_field_load(address, reg, info);
  } else {
    LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
    __ load(address, reg, info, patch_code);
  }

  if (is_volatile && os::is_MP()) {
    __ membar_acquire();
  }
}
//------------------------java.nio.Buffer.checkIndex------------------------

// int java.nio.Buffer.checkIndex(int)
void LIRGenerator::do_NIOCheckIndex(Intrinsic* x) {
  // NOTE: by the time we are in checkIndex() we are guaranteed that
  // the buffer is non-null (because checkIndex is package-private and
  // only called from within other methods in the buffer).
  assert(x->number_of_arguments() == 2, "wrong type");
  LIRItem buf  (x->argument_at(0), this);
  LIRItem index(x->argument_at(1), this);
  buf.load_item();
  index.load_item();

  LIR_Opr result = rlock_result(x);
  if (GenerateRangeChecks) {
    CodeEmitInfo* info = state_for(x);
    CodeStub* stub = new RangeCheckStub(info, index.result(), true);
    if (index.result()->is_constant()) {
      cmp_mem_int(lir_cond_belowEqual, buf.result(), java_nio_Buffer::limit_offset(), index.result()->as_jint(), info);
      __ branch(lir_cond_belowEqual, T_INT, stub);
    } else {
      cmp_reg_mem(lir_cond_aboveEqual, index.result(), buf.result(),
                  java_nio_Buffer::limit_offset(), T_INT, info);
      __ branch(lir_cond_aboveEqual, T_INT, stub);
    }
    __ move(index.result(), result);
  } else {
    // Just load the index into the result register
    __ move(index.result(), result);
  }
}


//------------------------array access--------------------------------------


void LIRGenerator::do_ArrayLength(ArrayLength* x) {
  LIRItem array(x->array(), this);
  array.load_item();
  LIR_Opr reg = rlock_result(x);

  CodeEmitInfo* info = NULL;
  if (x->needs_null_check()) {
    NullCheck* nc = x->explicit_null_check();
    if (nc == NULL) {
      info = state_for(x);
    } else {
      info = state_for(nc);
    }
  }
  __ load(new LIR_Address(array.result(), arrayOopDesc::length_offset_in_bytes(), T_INT), reg, info, lir_patch_none);
}


void LIRGenerator::do_LoadIndexed(LoadIndexed* x) {
  bool use_length = x->length() != NULL;
  LIRItem array(x->array(), this);
  LIRItem index(x->index(), this);
  LIRItem length(this);
  bool needs_range_check = true;

  if (use_length) {
    needs_range_check = x->compute_needs_range_check();
    if (needs_range_check) {
      length.set_instruction(x->length());
      length.load_item();
    }
  }

  array.load_item();
  if (index.is_constant() && can_inline_as_constant(x->index())) {
    // let it be a constant
    index.dont_load_item();
  } else {
    index.load_item();
  }

  CodeEmitInfo* range_check_info = state_for(x);
  CodeEmitInfo* null_check_info = NULL;
  if (x->needs_null_check()) {
    NullCheck* nc = x->explicit_null_check();
    if (nc != NULL) {
      null_check_info = state_for(nc);
    } else {
      null_check_info = range_check_info;
    }
  }

  // emit array address setup early so it schedules better
  LIR_Address* array_addr = emit_array_address(array.result(), index.result(), x->elt_type(), false);

  if (GenerateRangeChecks && needs_range_check) {
    if (use_length) {
      // TODO: use a (modified) version of array_range_check that does not require a
      //       constant length to be loaded to a register
      __ cmp(lir_cond_belowEqual, length.result(), index.result());
      __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result()));
    } else {
      array_range_check(array.result(), index.result(), null_check_info, range_check_info);
      // The range check performs the null check, so clear it out for the load
      null_check_info = NULL;
    }
  }

  __ move(array_addr, rlock_result(x, x->elt_type()), null_check_info);
}
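// A note on the range checks above: lir_cond_aboveEqual and
// lir_cond_belowEqual are *unsigned* conditions, so the single
// comparison "index >= length" also traps negative indices -- a
// negative jint reinterpreted as unsigned is larger than any legal
// array length -- which is why no separate "index < 0" test is
// emitted.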
void LIRGenerator::do_NullCheck(NullCheck* x) {
  if (x->can_trap()) {
    LIRItem value(x->obj(), this);
    value.load_item();
    CodeEmitInfo* info = state_for(x);
    __ null_check(value.result(), info);
  }
}


void LIRGenerator::do_Throw(Throw* x) {
  LIRItem exception(x->exception(), this);
  exception.load_item();
  set_no_result(x);
  LIR_Opr exception_opr = exception.result();
  CodeEmitInfo* info = state_for(x, x->state());

#ifndef PRODUCT
  if (PrintC1Statistics) {
    increment_counter(Runtime1::throw_count_address());
  }
#endif

  // check if the instruction has an xhandler in any of the nested scopes
  bool unwind = false;
  if (info->exception_handlers()->length() == 0) {
    // this throw is not inside an xhandler
    unwind = true;
  } else {
    // get some idea of the throw type
    bool type_is_exact = true;
    ciType* throw_type = x->exception()->exact_type();
    if (throw_type == NULL) {
      type_is_exact = false;
      throw_type = x->exception()->declared_type();
    }
    if (throw_type != NULL && throw_type->is_instance_klass()) {
      ciInstanceKlass* throw_klass = (ciInstanceKlass*)throw_type;
      unwind = !x->exception_handlers()->could_catch(throw_klass, type_is_exact);
    }
  }

  // do null check before moving exception oop into fixed register
  // to avoid a fixed interval with an oop during the null check.
  // Use a copy of the CodeEmitInfo because debug information is
  // different for null_check and throw.
  if (GenerateCompilerNullChecks &&
      (x->exception()->as_NewInstance() == NULL && x->exception()->as_ExceptionObject() == NULL)) {
    // if the exception object wasn't created using new then it might be null.
    __ null_check(exception_opr, new CodeEmitInfo(info, true));
  }

  if (JvmtiExport::can_post_exceptions() &&
      !block()->is_set(BlockBegin::default_exception_handler_flag)) {
    // we need to go through the exception lookup path to get JVMTI
    // notification done
    unwind = false;
  }

  assert(!block()->is_set(BlockBegin::default_exception_handler_flag) || unwind,
         "should be no more handlers to dispatch to");

  if (DTraceMethodProbes &&
      block()->is_set(BlockBegin::default_exception_handler_flag)) {
    // notify that this frame is unwinding
    BasicTypeList signature;
    signature.append(T_INT);    // thread
    signature.append(T_OBJECT); // methodOop
    LIR_OprList* args = new LIR_OprList();
    args->append(getThreadPointer());
    LIR_Opr meth = new_register(T_OBJECT);
    __ oop2reg(method()->encoding(), meth);
    args->append(meth);
    call_runtime(&signature, args, CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), voidType, NULL);
  }

  // move exception oop into fixed register
  __ move(exception_opr, exceptionOopOpr());

  if (unwind) {
    __ unwind_exception(LIR_OprFact::illegalOpr, exceptionOopOpr(), info);
  } else {
    __ throw_exception(exceptionPcOpr(), exceptionOopOpr(), info);
  }
}
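// Summarizing the unwind decision above: a throw compiles to the
// cheap unwind_exception path only when no handler in any enclosing
// scope could possibly catch the (exact or declared) exception type
// and JVMTI exception posting doesn't force the full lookup;
// otherwise throw_exception dispatches through the runtime's handler
// search.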
void LIRGenerator::do_RoundFP(RoundFP* x) {
  LIRItem input(x->input(), this);
  input.load_item();
  LIR_Opr input_opr = input.result();
  assert(input_opr->is_register(), "why round if value is not in a register?");
  assert(input_opr->is_single_fpu() || input_opr->is_double_fpu(), "input should be floating-point value");
  if (input_opr->is_single_fpu()) {
    set_result(x, round_item(input_opr)); // This code path not currently taken
  } else {
    LIR_Opr result = new_register(T_DOUBLE);
    set_vreg_flag(result, must_start_in_memory);
    __ roundfp(input_opr, LIR_OprFact::illegalOpr, result);
    set_result(x, result);
  }
}


void LIRGenerator::do_UnsafeGetRaw(UnsafeGetRaw* x) {
  LIRItem base(x->base(), this);
  LIRItem idx(this);

  base.load_item();
  if (x->has_index()) {
    idx.set_instruction(x->index());
    idx.load_nonconstant();
  }

  LIR_Opr reg = rlock_result(x, x->basic_type());

  int log2_scale = 0;
  if (x->has_index()) {
    assert(x->index()->type()->tag() == intTag, "should not find non-int index");
    log2_scale = x->log2_scale();
  }

  assert(!x->has_index() || idx.value() == x->index(), "should match");

  LIR_Opr base_op = base.result();
#ifndef _LP64
  if (x->base()->type()->tag() == longTag) {
    base_op = new_register(T_INT);
    __ convert(Bytecodes::_l2i, base.result(), base_op);
  } else {
    assert(x->base()->type()->tag() == intTag, "must be");
  }
#endif

  BasicType dst_type = x->basic_type();
  LIR_Opr index_op = idx.result();

  LIR_Address* addr;
  if (index_op->is_constant()) {
    assert(log2_scale == 0, "must not have a scale");
    addr = new LIR_Address(base_op, index_op->as_jint(), dst_type);
  } else {
#ifdef X86
    addr = new LIR_Address(base_op, index_op, LIR_Address::Scale(log2_scale), 0, dst_type);
#else
    if (index_op->is_illegal() || log2_scale == 0) {
      addr = new LIR_Address(base_op, index_op, dst_type);
    } else {
      LIR_Opr tmp = new_register(T_INT);
      __ shift_left(index_op, log2_scale, tmp);
      addr = new LIR_Address(base_op, tmp, dst_type);
    }
#endif
  }

  if (x->may_be_unaligned() && (dst_type == T_LONG || dst_type == T_DOUBLE)) {
    __ unaligned_move(addr, reg);
  } else {
    __ move(addr, reg);
  }
}

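// For illustration (not from the original source): a raw access such as
//   unsafe.getInt(base + (long) i * 4)   // hypothetical Java source
// arrives here with log2_scale == 2. On x86 the scale is folded into
// the addressing mode ([base + i*4]); other platforms shift the index
// into a temporary first, as in the code above.
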
void LIRGenerator::do_UnsafePutRaw(UnsafePutRaw* x) {
  int log2_scale = 0;
  BasicType type = x->basic_type();

  if (x->has_index()) {
    assert(x->index()->type()->tag() == intTag, "should not find non-int index");
    log2_scale = x->log2_scale();
  }

  LIRItem base(x->base(), this);
  LIRItem value(x->value(), this);
  LIRItem idx(this);

  base.load_item();
  if (x->has_index()) {
    idx.set_instruction(x->index());
    idx.load_item();
  }

  if (type == T_BYTE || type == T_BOOLEAN) {
    value.load_byte_item();
  } else {
    value.load_item();
  }

  set_no_result(x);

  LIR_Opr base_op = base.result();
#ifndef _LP64
  if (x->base()->type()->tag() == longTag) {
    base_op = new_register(T_INT);
    __ convert(Bytecodes::_l2i, base.result(), base_op);
  } else {
    assert(x->base()->type()->tag() == intTag, "must be");
  }
#endif

  LIR_Opr index_op = idx.result();
  if (log2_scale != 0) {
    // temporary fix (platform dependent code without shift on Intel would be better)
    index_op = new_register(T_INT);
    __ move(idx.result(), index_op);
    __ shift_left(index_op, log2_scale, index_op);
  }

  LIR_Address* addr = new LIR_Address(base_op, index_op, x->basic_type());
  __ move(value.result(), addr);
}


void LIRGenerator::do_UnsafeGetObject(UnsafeGetObject* x) {
  BasicType type = x->basic_type();
  LIRItem src(x->object(), this);
  LIRItem off(x->offset(), this);

  off.load_item();
  src.load_item();

  LIR_Opr reg = rlock_result(x, x->basic_type());

  if (x->is_volatile() && os::is_MP()) __ membar_acquire();
  get_Object_unsafe(reg, src.result(), off.result(), type, x->is_volatile());
  if (x->is_volatile() && os::is_MP()) __ membar();
}


void LIRGenerator::do_UnsafePutObject(UnsafePutObject* x) {
  BasicType type = x->basic_type();
  LIRItem src(x->object(), this);
  LIRItem off(x->offset(), this);
  LIRItem data(x->value(), this);

  src.load_item();
  if (type == T_BOOLEAN || type == T_BYTE) {
    data.load_byte_item();
  } else {
    data.load_item();
  }
  off.load_item();

  set_no_result(x);

  if (x->is_volatile() && os::is_MP()) __ membar_release();
  put_Object_unsafe(src.result(), off.result(), data.result(), type, x->is_volatile());
}


void LIRGenerator::do_UnsafePrefetch(UnsafePrefetch* x, bool is_store) {
  LIRItem src(x->object(), this);
  LIRItem off(x->offset(), this);

  src.load_item();
  if (off.is_constant() && can_inline_as_constant(x->offset())) {
    // let it be a constant
    off.dont_load_item();
  } else {
    off.load_item();
  }

  set_no_result(x);

  LIR_Address* addr = generate_address(src.result(), off.result(), 0, 0, T_BYTE);
  __ prefetch(addr, is_store);
}


void LIRGenerator::do_UnsafePrefetchRead(UnsafePrefetchRead* x) {
  do_UnsafePrefetch(x, false);
}


void LIRGenerator::do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) {
  do_UnsafePrefetch(x, true);
}


void LIRGenerator::do_SwitchRanges(SwitchRangeArray* x, LIR_Opr value, BlockBegin* default_sux) {
  int lng = x->length();

  for (int i = 0; i < lng; i++) {
    SwitchRange* one_range = x->at(i);
    int low_key = one_range->low_key();
    int high_key = one_range->high_key();
    BlockBegin* dest = one_range->sux();
    if (low_key == high_key) {
      __ cmp(lir_cond_equal, value, low_key);
      __ branch(lir_cond_equal, T_INT, dest);
    } else if (high_key - low_key == 1) {
      __ cmp(lir_cond_equal, value, low_key);
      __ branch(lir_cond_equal, T_INT, dest);
      __ cmp(lir_cond_equal, value, high_key);
      __ branch(lir_cond_equal, T_INT, dest);
    } else {
      LabelObj* L = new LabelObj();
      __ cmp(lir_cond_less, value, low_key);
      __ branch(lir_cond_less, L->label());
      __ cmp(lir_cond_lessEqual, value, high_key);
      __ branch(lir_cond_lessEqual, T_INT, dest);
      __ branch_destination(L->label());
    }
  }
  __ jump(default_sux);
}


SwitchRangeArray* LIRGenerator::create_lookup_ranges(TableSwitch* x) {
  SwitchRangeList* res = new SwitchRangeList();
  int len = x->length();
  if (len > 0) {
    BlockBegin* sux = x->sux_at(0);
    int key = x->lo_key();
    BlockBegin* default_sux = x->default_sux();
    SwitchRange* range = new SwitchRange(key, sux);
    for (int i = 0; i < len; i++, key++) {
      BlockBegin* new_sux = x->sux_at(i);
      if (sux == new_sux) {
        // still in same range
        range->set_high_key(key);
      } else {
        // skip tests which explicitly dispatch to the default
        if (sux != default_sux) {
          res->append(range);
        }
        range = new SwitchRange(key, new_sux);
      }
      sux = new_sux;
    }
    if (res->length() == 0 || res->last() != range) res->append(range);
  }
  return res;
}

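// For illustration (not from the original source): a tableswitch with
// lo_key == 0 and successors [B1, B1, B2, B3, B3] collapses into
//   [0..1] -> B1,  [2..2] -> B2,  [3..4] -> B3
// so do_SwitchRanges emits at most two compares per range instead of
// one compare per key; ranges that dispatch to the default successor
// are dropped entirely.
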
// we expect the keys to be sorted by increasing value
SwitchRangeArray* LIRGenerator::create_lookup_ranges(LookupSwitch* x) {
  SwitchRangeList* res = new SwitchRangeList();
  int len = x->length();
  if (len > 0) {
    BlockBegin* default_sux = x->default_sux();
    int key = x->key_at(0);
    BlockBegin* sux = x->sux_at(0);
    SwitchRange* range = new SwitchRange(key, sux);
    for (int i = 1; i < len; i++) {
      int new_key = x->key_at(i);
      BlockBegin* new_sux = x->sux_at(i);
      if (key+1 == new_key && sux == new_sux) {
        // still in same range
        range->set_high_key(new_key);
      } else {
        // skip tests which explicitly dispatch to the default
        if (range->sux() != default_sux) {
          res->append(range);
        }
        range = new SwitchRange(new_key, new_sux);
      }
      key = new_key;
      sux = new_sux;
    }
    if (res->length() == 0 || res->last() != range) res->append(range);
  }
  return res;
}

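// For illustration (not from the original source): a lookupswitch with
// sorted keys {1, 2, 3, 10} whose first three entries share a successor
// becomes [1..3] -> B1 plus [10..10] -> B2; the key+1 == new_key test
// above only merges keys that are both adjacent and headed for the
// same block.
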
void LIRGenerator::do_TableSwitch(TableSwitch* x) {
  LIRItem tag(x->tag(), this);
  tag.load_item();
  set_no_result(x);

  if (x->is_safepoint()) {
    __ safepoint(safepoint_poll_register(), state_for(x, x->state_before()));
  }

  // move values into phi locations
  move_to_phi(x->state());

  int lo_key = x->lo_key();
  int hi_key = x->hi_key();
  int len = x->length();
  CodeEmitInfo* info = state_for(x, x->state());
  LIR_Opr value = tag.result();
  if (UseTableRanges) {
    do_SwitchRanges(create_lookup_ranges(x), value, x->default_sux());
  } else {
    for (int i = 0; i < len; i++) {
      __ cmp(lir_cond_equal, value, i + lo_key);
      __ branch(lir_cond_equal, T_INT, x->sux_at(i));
    }
    __ jump(x->default_sux());
  }
}


void LIRGenerator::do_LookupSwitch(LookupSwitch* x) {
  LIRItem tag(x->tag(), this);
  tag.load_item();
  set_no_result(x);

  if (x->is_safepoint()) {
    __ safepoint(safepoint_poll_register(), state_for(x, x->state_before()));
  }

  // move values into phi locations
  move_to_phi(x->state());

  LIR_Opr value = tag.result();
  if (UseTableRanges) {
    do_SwitchRanges(create_lookup_ranges(x), value, x->default_sux());
  } else {
    int len = x->length();
    for (int i = 0; i < len; i++) {
      __ cmp(lir_cond_equal, value, x->key_at(i));
      __ branch(lir_cond_equal, T_INT, x->sux_at(i));
    }
    __ jump(x->default_sux());
  }
}


void LIRGenerator::do_Goto(Goto* x) {
  set_no_result(x);

  if (block()->next()->as_OsrEntry()) {
    // need to free up storage used for OSR entry point
    LIR_Opr osrBuffer = block()->next()->operand();
    BasicTypeList signature;
    signature.append(T_INT);
    CallingConvention* cc = frame_map()->c_calling_convention(&signature);
    __ move(osrBuffer, cc->args()->at(0));
    __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_end),
                         getThreadTemp(), LIR_OprFact::illegalOpr, cc->args());
  }

  if (x->is_safepoint()) {
    ValueStack* state = x->state_before() ? x->state_before() : x->state();

    // increment backedge counter if needed
    increment_backedge_counter(state_for(x, state));

    CodeEmitInfo* safepoint_info = state_for(x, state);
    __ safepoint(safepoint_poll_register(), safepoint_info);
  }

  // emit phi-instruction moves after the safepoint since this simplifies
  // describing the state at the safepoint.
  move_to_phi(x->state());

  __ jump(x->default_sux());
}


void LIRGenerator::do_Base(Base* x) {
  __ std_entry(LIR_OprFact::illegalOpr);
  // Emit moves from physical registers / stack slots to virtual registers
  CallingConvention* args = compilation()->frame_map()->incoming_arguments();
  IRScope* irScope = compilation()->hir()->top_scope();
  int java_index = 0;
  for (int i = 0; i < args->length(); i++) {
    LIR_Opr src = args->at(i);
    assert(!src->is_illegal(), "check");
    BasicType t = src->type();

    // Types which are smaller than int are passed as int, so
    // correct the type accordingly.
    switch (t) {
      case T_BYTE:
      case T_BOOLEAN:
      case T_SHORT:
      case T_CHAR:
        t = T_INT;
        break;
    }

    LIR_Opr dest = new_register(t);
    __ move(src, dest);

    // Assign new location to Local instruction for this local
    Local* local = x->state()->local_at(java_index)->as_Local();
    assert(local != NULL, "Locals for incoming arguments must have been created");
    assert(as_ValueType(t)->tag() == local->type()->tag(), "check");
    local->set_operand(dest);
    _instruction_for_operand.at_put_grow(dest->vreg_number(), local, NULL);
    java_index += type2size[t];
  }

  if (DTraceMethodProbes) {
    BasicTypeList signature;
    signature.append(T_INT);    // thread
    signature.append(T_OBJECT); // methodOop
    LIR_OprList* args = new LIR_OprList();
    args->append(getThreadPointer());
    LIR_Opr meth = new_register(T_OBJECT);
    __ oop2reg(method()->encoding(), meth);
    args->append(meth);
    call_runtime(&signature, args, CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry), voidType, NULL);
  }

  if (method()->is_synchronized()) {
    LIR_Opr obj;
    if (method()->is_static()) {
      obj = new_register(T_OBJECT);
      __ oop2reg(method()->holder()->java_mirror()->encoding(), obj);
    } else {
      Local* receiver = x->state()->local_at(0)->as_Local();
      assert(receiver != NULL, "must already exist");
      obj = receiver->operand();
    }
    assert(obj->is_valid(), "must be valid");

    if (method()->is_synchronized() && GenerateSynchronizationCode) {
      LIR_Opr lock = new_register(T_INT);
      __ load_stack_address_monitor(0, lock);

      CodeEmitInfo* info = new CodeEmitInfo(SynchronizationEntryBCI, scope()->start()->state(), NULL);
      CodeStub* slow_path = new MonitorEnterStub(obj, lock, info);

      // receiver is guaranteed non-NULL so don't need CodeEmitInfo
      __ lock_object(syncTempOpr(), obj, lock, new_register(T_OBJECT), slow_path, NULL);
    }
  }

  // increment invocation counters if needed
  increment_invocation_counter(new CodeEmitInfo(0, scope()->start()->state(), NULL));

  // all blocks with a successor must end with an unconditional jump
  // to the successor even if they are consecutive
  __ jump(x->default_sux());
}

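// For illustration (not from the original source; the register names are
// hypothetical): for an instance method m(int a), do_Base above emits
//   std_entry
//   move <incoming receiver location> -> R40   // bound to local 0
//   move <incoming location of a>     -> R41   // bound to local 1
// and java_index advances by type2size[t], so a long or double
// argument consumes two local slots.
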
void LIRGenerator::do_OsrEntry(OsrEntry* x) {
  // construct our frame and model the production of incoming pointer
  // to the OSR buffer.
  __ osr_entry(LIR_Assembler::osrBufferPointer());
  LIR_Opr result = rlock_result(x);
  __ move(LIR_Assembler::osrBufferPointer(), result);
}


void LIRGenerator::invoke_load_arguments(Invoke* x, LIRItemList* args, const LIR_OprList* arg_list) {
  int i = x->has_receiver() ? 1 : 0;
  for (; i < args->length(); i++) {
    LIRItem* param = args->at(i);
    LIR_Opr loc = arg_list->at(i);
    if (loc->is_register()) {
      param->load_item_force(loc);
    } else {
      LIR_Address* addr = loc->as_address_ptr();
      param->load_for_store(addr->type());
      if (addr->type() == T_LONG || addr->type() == T_DOUBLE) {
        __ unaligned_move(param->result(), addr);
      } else {
        __ move(param->result(), addr);
      }
    }
  }

  if (x->has_receiver()) {
    LIRItem* receiver = args->at(0);
    LIR_Opr loc = arg_list->at(0);
    if (loc->is_register()) {
      receiver->load_item_force(loc);
    } else {
      assert(loc->is_address(), "just checking");
      receiver->load_for_store(T_OBJECT);
      __ move(receiver->result(), loc);
    }
  }
}


// Visits all arguments, returns appropriate items without loading them
LIRItemList* LIRGenerator::invoke_visit_arguments(Invoke* x) {
  LIRItemList* argument_items = new LIRItemList();
  if (x->has_receiver()) {
    LIRItem* receiver = new LIRItem(x->receiver(), this);
    argument_items->append(receiver);
  }
  int idx = x->has_receiver() ? 1 : 0;
  for (int i = 0; i < x->number_of_arguments(); i++) {
    LIRItem* param = new LIRItem(x->argument_at(i), this);
    argument_items->append(param);
    idx += (param->type()->is_double_word() ? 2 : 1);
  }
  return argument_items;
}


// An invoke with a receiver has the following phases:
//   a) traverse and load/lock receiver;
//   b) traverse all arguments -> item-array (invoke_visit_arguments)
//   c) push receiver on stack
//   d) load each of the items and push on stack
//   e) unlock receiver
//   f) move receiver into receiver-register %o0
//   g) lock result registers and emit call operation
//
// Before issuing a call, we must spill-save all values on stack
// that are in caller-save registers. "spill-save" moves those registers
// either into a free callee-save register or spills them if no free
// callee-save register is available.
//
// The problem is where to invoke spill-save.
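// For illustration (not from the original source): a call to a loaded
// final method takes the call_opt_virtual path below even for
// invokevirtual; a target with vtable_index < 0 (not resolved at
// compile time) goes through an inline cache via call_icvirtual; and a
// resolved, non-final virtual target dispatches through its vtable
// slot via call_virtual.
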
// - if invoked between e) and f), we may lock a callee-save
//   register in "spill-save" that destroys the receiver register
//   before f) is executed
// - if we rearrange f) to happen earlier, by loading %o0, it
//   may destroy a value on the stack that is currently in %o0
//   and is waiting to be spilled
// - if we keep the receiver locked while doing spill-save,
//   we cannot spill it as it is spill-locked
//
void LIRGenerator::do_Invoke(Invoke* x) {
  CallingConvention* cc = frame_map()->java_calling_convention(x->signature(), true);

  LIR_OprList* arg_list = cc->args();
  LIRItemList* args = invoke_visit_arguments(x);
  LIR_Opr receiver = LIR_OprFact::illegalOpr;

  // setup result register
  LIR_Opr result_register = LIR_OprFact::illegalOpr;
  if (x->type() != voidType) {
    result_register = result_register_for(x->type());
  }

  CodeEmitInfo* info = state_for(x, x->state());

  invoke_load_arguments(x, args, arg_list);

  if (x->has_receiver()) {
    args->at(0)->load_item_force(LIR_Assembler::receiverOpr());
    receiver = args->at(0)->result();
  }

  // emit invoke code
  bool optimized = x->target_is_loaded() && x->target_is_final();
  assert(receiver->is_illegal() || receiver->is_equal(LIR_Assembler::receiverOpr()), "must match");

  switch (x->code()) {
    case Bytecodes::_invokestatic:
      __ call_static(x->target(), result_register,
                     SharedRuntime::get_resolve_static_call_stub(),
                     arg_list, info);
      break;
    case Bytecodes::_invokespecial:
    case Bytecodes::_invokevirtual:
    case Bytecodes::_invokeinterface:
      // for a final target we still produce an inline cache, in order
      // to be able to call mixed mode
      if (x->code() == Bytecodes::_invokespecial || optimized) {
        __ call_opt_virtual(x->target(), receiver, result_register,
                            SharedRuntime::get_resolve_opt_virtual_call_stub(),
                            arg_list, info);
      } else if (x->vtable_index() < 0) {
        __ call_icvirtual(x->target(), receiver, result_register,
                          SharedRuntime::get_resolve_virtual_call_stub(),
                          arg_list, info);
      } else {
        int entry_offset = instanceKlass::vtable_start_offset() + x->vtable_index() * vtableEntry::size();
        int vtable_offset = entry_offset * wordSize + vtableEntry::method_offset_in_bytes();
        __ call_virtual(x->target(), receiver, result_register, vtable_offset, arg_list, info);
      }
      break;
    default:
      ShouldNotReachHere();
      break;
  }

  if (x->type()->is_float() || x->type()->is_double()) {
    // Force rounding of results from non-strictfp when in strictfp
    // scope (or when we don't know the strictness of the callee, to
    // be safe.)
    if (method()->is_strict()) {
      if (!x->target_is_loaded() || !x->target_is_strictfp()) {
        result_register = round_item(result_register);
      }
    }
  }

  if (result_register->is_valid()) {
    LIR_Opr result = rlock_result(x);
    __ move(result_register, result);
  }
}


void LIRGenerator::do_FPIntrinsics(Intrinsic* x) {
  assert(x->number_of_arguments() == 1, "wrong type");
  LIRItem value(x->argument_at(0), this);
  LIR_Opr reg = rlock_result(x);
  value.load_item();
  LIR_Opr tmp = force_to_spill(value.result(), as_BasicType(x->type()));
  __ move(tmp, reg);
}

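// For illustration (not from the original source):
//   Float.floatToRawIntBits(f)
// lands in do_FPIntrinsics above: the value is forced out to a stack
// slot and reloaded with the result type, so the bit pattern is
// reinterpreted without any conversion instruction.
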
// Code for: x->x() {x->cond()} x->y() ? x->tval() : x->fval()
void LIRGenerator::do_IfOp(IfOp* x) {
#ifdef ASSERT
  {
    ValueTag xtag = x->x()->type()->tag();
    ValueTag ttag = x->tval()->type()->tag();
    assert(xtag == intTag || xtag == objectTag, "cannot handle others");
    assert(ttag == addressTag || ttag == intTag || ttag == objectTag || ttag == longTag, "cannot handle others");
    assert(ttag == x->fval()->type()->tag(), "cannot handle others");
  }
#endif

  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);
  left.load_item();
  if (can_inline_as_constant(right.value())) {
    right.dont_load_item();
  } else {
    right.load_item();
  }

  LIRItem t_val(x->tval(), this);
  LIRItem f_val(x->fval(), this);
  t_val.dont_load_item();
  f_val.dont_load_item();
  LIR_Opr reg = rlock_result(x);

  __ cmp(lir_cond(x->cond()), left.result(), right.result());
  __ cmove(lir_cond(x->cond()), t_val.result(), f_val.result(), reg);
}

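// For illustration (not from the original source):
//   z = (a < b) ? p : q   // hypothetical Java source
// compiles branch-free to
//   cmp   less, a, b
//   cmove less, p, q -> z
// which is why t_val and f_val above are never force-loaded: suitable
// constants can feed the conditional move directly.
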
void LIRGenerator::do_Intrinsic(Intrinsic* x) {
  switch (x->id()) {
    case vmIntrinsics::_intBitsToFloat:
    case vmIntrinsics::_doubleToRawLongBits:
    case vmIntrinsics::_longBitsToDouble:
    case vmIntrinsics::_floatToRawIntBits: {
      do_FPIntrinsics(x);
      break;
    }

    case vmIntrinsics::_currentTimeMillis: {
      assert(x->number_of_arguments() == 0, "wrong type");
      LIR_Opr reg = result_register_for(x->type());
      __ call_runtime_leaf(CAST_FROM_FN_PTR(address, os::javaTimeMillis), getThreadTemp(),
                           reg, new LIR_OprList());
      LIR_Opr result = rlock_result(x);
      __ move(reg, result);
      break;
    }

    case vmIntrinsics::_nanoTime: {
      assert(x->number_of_arguments() == 0, "wrong type");
      LIR_Opr reg = result_register_for(x->type());
      __ call_runtime_leaf(CAST_FROM_FN_PTR(address, os::javaTimeNanos), getThreadTemp(),
                           reg, new LIR_OprList());
      LIR_Opr result = rlock_result(x);
      __ move(reg, result);
      break;
    }

    case vmIntrinsics::_Object_init:   do_RegisterFinalizer(x); break;
    case vmIntrinsics::_getClass:      do_getClass(x);          break;
    case vmIntrinsics::_currentThread: do_currentThread(x);     break;

    case vmIntrinsics::_dlog:   // fall through
    case vmIntrinsics::_dlog10: // fall through
    case vmIntrinsics::_dabs:   // fall through
    case vmIntrinsics::_dsqrt:  // fall through
    case vmIntrinsics::_dtan:   // fall through
    case vmIntrinsics::_dsin:   // fall through
    case vmIntrinsics::_dcos:   do_MathIntrinsic(x); break;
    case vmIntrinsics::_arraycopy: do_ArrayCopy(x); break;

    // java.nio.Buffer.checkIndex
    case vmIntrinsics::_checkIndex: do_NIOCheckIndex(x); break;

    case vmIntrinsics::_compareAndSwapObject:
      do_CompareAndSwap(x, objectType);
      break;
    case vmIntrinsics::_compareAndSwapInt:
      do_CompareAndSwap(x, intType);
      break;
    case vmIntrinsics::_compareAndSwapLong:
      do_CompareAndSwap(x, longType);
      break;

    // sun.misc.AtomicLongCSImpl.attemptUpdate
    case vmIntrinsics::_attemptUpdate:
      do_AttemptUpdate(x);
      break;

    default: ShouldNotReachHere(); break;
  }
}


void LIRGenerator::do_ProfileCall(ProfileCall* x) {
  // Need recv in a temporary register so it interferes with the other temporaries
  LIR_Opr recv = LIR_OprFact::illegalOpr;
  LIR_Opr mdo = new_register(T_OBJECT);
  LIR_Opr tmp = new_register(T_INT);
  if (x->recv() != NULL) {
    LIRItem value(x->recv(), this);
    value.load_item();
    recv = new_register(T_OBJECT);
    __ move(value.result(), recv);
  }
  __ profile_call(x->method(), x->bci_of_invoke(), mdo, recv, tmp, x->known_holder());
}


void LIRGenerator::do_ProfileCounter(ProfileCounter* x) {
  LIRItem mdo(x->mdo(), this);
  mdo.load_item();

  increment_counter(new LIR_Address(mdo.result(), x->offset(), T_INT), x->increment());
}


LIR_Opr LIRGenerator::call_runtime(Value arg1, address entry, ValueType* result_type, CodeEmitInfo* info) {
  LIRItemList args(1);
  LIRItem value(arg1, this);
  args.append(&value);
  BasicTypeList signature;
  signature.append(as_BasicType(arg1->type()));

  return call_runtime(&signature, &args, entry, result_type, info);
}


LIR_Opr LIRGenerator::call_runtime(Value arg1, Value arg2, address entry, ValueType* result_type, CodeEmitInfo* info) {
  LIRItemList args(2);
  LIRItem value1(arg1, this);
  LIRItem value2(arg2, this);
  args.append(&value1);
  args.append(&value2);
  BasicTypeList signature;
  signature.append(as_BasicType(arg1->type()));
  signature.append(as_BasicType(arg2->type()));

  return call_runtime(&signature, &args, entry, result_type, info);
}

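// For illustration (not from the original source; the entry point is
// hypothetical): a one-argument helper call such as
//   call_runtime(val, CAST_FROM_FN_PTR(address, some_entry), voidType, info);
// derives the C signature from the argument's ValueType and funnels
// into the LIRItemList overload below, which marshals each argument
// into the location chosen by the C calling convention.
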
LIR_Opr LIRGenerator::call_runtime(BasicTypeArray* signature, LIR_OprList* args,
                                   address entry, ValueType* result_type, CodeEmitInfo* info) {
  // get a result register
  LIR_Opr phys_reg = LIR_OprFact::illegalOpr;
  LIR_Opr result = LIR_OprFact::illegalOpr;
  if (result_type->tag() != voidTag) {
    result = new_register(result_type);
    phys_reg = result_register_for(result_type);
  }

  // move the arguments into the correct location
  CallingConvention* cc = frame_map()->c_calling_convention(signature);
  assert(cc->length() == args->length(), "argument mismatch");
  for (int i = 0; i < args->length(); i++) {
    LIR_Opr arg = args->at(i);
    LIR_Opr loc = cc->at(i);
    if (loc->is_register()) {
      __ move(arg, loc);
    } else {
      LIR_Address* addr = loc->as_address_ptr();
//    if (!can_store_as_constant(arg)) {
//      LIR_Opr tmp = new_register(arg->type());
//      __ move(arg, tmp);
//      arg = tmp;
//    }
      if (addr->type() == T_LONG || addr->type() == T_DOUBLE) {
        __ unaligned_move(arg, addr);
      } else {
        __ move(arg, addr);
      }
    }
  }

  if (info) {
    __ call_runtime(entry, getThreadTemp(), phys_reg, cc->args(), info);
  } else {
    __ call_runtime_leaf(entry, getThreadTemp(), phys_reg, cc->args());
  }
  if (result->is_valid()) {
    __ move(phys_reg, result);
  }
  return result;
}


LIR_Opr LIRGenerator::call_runtime(BasicTypeArray* signature, LIRItemList* args,
                                   address entry, ValueType* result_type, CodeEmitInfo* info) {
  // get a result register
  LIR_Opr phys_reg = LIR_OprFact::illegalOpr;
  LIR_Opr result = LIR_OprFact::illegalOpr;
  if (result_type->tag() != voidTag) {
    result = new_register(result_type);
    phys_reg = result_register_for(result_type);
  }

  // move the arguments into the correct location
  CallingConvention* cc = frame_map()->c_calling_convention(signature);

  assert(cc->length() == args->length(), "argument mismatch");
  for (int i = 0; i < args->length(); i++) {
    LIRItem* arg = args->at(i);
    LIR_Opr loc = cc->at(i);
    if (loc->is_register()) {
      arg->load_item_force(loc);
    } else {
      LIR_Address* addr = loc->as_address_ptr();
      arg->load_for_store(addr->type());
      if (addr->type() == T_LONG || addr->type() == T_DOUBLE) {
        __ unaligned_move(arg->result(), addr);
      } else {
        __ move(arg->result(), addr);
      }
    }
  }

  if (info) {
    __ call_runtime(entry, getThreadTemp(), phys_reg, cc->args(), info);
  } else {
    __ call_runtime_leaf(entry, getThreadTemp(), phys_reg, cc->args());
  }
  if (result->is_valid()) {
    __ move(phys_reg, result);
  }
  return result;
}


void LIRGenerator::increment_invocation_counter(CodeEmitInfo* info, bool backedge) {
#ifdef TIERED
  if (_compilation->env()->comp_level() == CompLevel_fast_compile &&
      (method()->code_size() >= Tier1BytecodeLimit || backedge)) {
    int limit = InvocationCounter::Tier1InvocationLimit;
    int offset = in_bytes(methodOopDesc::invocation_counter_offset() +
                          InvocationCounter::counter_offset());
    if (backedge) {
      limit = InvocationCounter::Tier1BackEdgeLimit;
      offset = in_bytes(methodOopDesc::backedge_counter_offset() +
                        InvocationCounter::counter_offset());
    }

    LIR_Opr meth = new_register(T_OBJECT);
    __ oop2reg(method()->encoding(), meth);
    LIR_Opr result = increment_and_return_counter(meth, offset, InvocationCounter::count_increment);
    __ cmp(lir_cond_aboveEqual, result, LIR_OprFact::intConst(limit));
    CodeStub* overflow = new CounterOverflowStub(info, info->bci());
    __ branch(lir_cond_aboveEqual, T_INT, overflow);
    __ branch_destination(overflow->continuation());
  }
#endif
}
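// For illustration (not from the original source): under TIERED the
// code above bumps the invocation (or backedge) counter by
// count_increment and compares it against the Tier1 limit; on overflow
// it branches to the CounterOverflowStub, whose slow path can request
// recompilation at a higher tier before resuming at the continuation.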