c1_LIRGenerator.cpp revision 4456:8be1318fbe77
/*
 * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_Instruction.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_LIRGenerator.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArrayKlass.hpp"
#include "ci/ciInstance.hpp"
#include "ci/ciObjArray.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/g1/heapRegion.hpp"
#endif // INCLUDE_ALL_GCS

#ifdef ASSERT
#define __ gen()->lir(__FILE__, __LINE__)->
#else
#define __ gen()->lir()->
#endif

// TODO: ARM - Use some recognizable constant which still fits architectural constraints
#ifdef ARM
#define PATCHED_ADDR (204)
#else
#define PATCHED_ADDR (max_jint)
#endif

void PhiResolverState::reset(int max_vregs) {
  // Initialize array sizes
  _virtual_operands.at_put_grow(max_vregs - 1, NULL, NULL);
  _virtual_operands.trunc_to(0);
  _other_operands.at_put_grow(max_vregs - 1, NULL, NULL);
  _other_operands.trunc_to(0);
  _vreg_table.at_put_grow(max_vregs - 1, NULL, NULL);
  _vreg_table.trunc_to(0);
}


//--------------------------------------------------------------
// PhiResolver

// Resolves cycles:
//
//  r1 := r2  becomes  temp := r1
//  r2 := r1           r1 := r2
//                     r2 := temp
// and orders moves:
//
//  r2 := r3  becomes  r1 := r2
//  r1 := r2           r2 := r3

PhiResolver::PhiResolver(LIRGenerator* gen, int max_vregs)
 : _gen(gen)
 , _state(gen->resolver_state())
 , _temp(LIR_OprFact::illegalOpr)
{
  // reinitialize the shared state arrays
  _state.reset(max_vregs);
}


void PhiResolver::emit_move(LIR_Opr src, LIR_Opr dest) {
  assert(src->is_valid(), "");
  assert(dest->is_valid(), "");
  __ move(src, dest);
}


void PhiResolver::move_temp_to(LIR_Opr dest) {
  assert(_temp->is_valid(), "");
  emit_move(_temp, dest);
  NOT_PRODUCT(_temp = LIR_OprFact::illegalOpr);
}


void PhiResolver::move_to_temp(LIR_Opr src) {
  assert(_temp->is_illegal(), "");
  _temp = _gen->new_register(src->type());
  emit_move(src, _temp);
}


// Traverse the assignment graph in depth-first order and generate moves in post order,
// i.e. for the two assignments b := c, a := b, starting with node c:
//   Call graph: move(NULL, c) -> move(c, b) -> move(b, a)
//   Generates moves in this order: move b to a, then move c to b
// For the cycle a := b, b := a, starting with node a:
//   Call graph: move(NULL, a) -> move(a, b) -> move(b, a)
//   Generates moves in this order: move b to temp, move a to b, move temp to a
void PhiResolver::move(ResolveNode* src, ResolveNode* dest) {
  if (!dest->visited()) {
    dest->set_visited();
    for (int i = dest->no_of_destinations() - 1; i >= 0; i--) {
      move(dest, dest->destination_at(i));
    }
  } else if (!dest->start_node()) {
    // cycle in graph detected
    assert(_loop == NULL, "only one loop valid!");
    _loop = dest;
    move_to_temp(src->operand());
    return;
  } // else dest is a start node

  if (!dest->assigned()) {
    if (_loop == dest) {
      move_temp_to(dest->operand());
      dest->set_assigned();
    } else if (src != NULL) {
      emit_move(src->operand(), dest->operand());
      dest->set_assigned();
    }
  }
}


PhiResolver::~PhiResolver() {
  int i;
  // resolve any cycles in moves from and to virtual registers
  for (i = virtual_operands().length() - 1; i >= 0; i--) {
    ResolveNode* node = virtual_operands()[i];
    if (!node->visited()) {
      _loop = NULL;
      move(NULL, node);
      node->set_start_node();
      assert(_temp->is_illegal(), "move_temp_to() call missing");
    }
  }

  // generate moves from non-virtual registers to arbitrary destinations
  for (i = other_operands().length() - 1; i >= 0; i--) {
    ResolveNode* node = other_operands()[i];
    for (int j = node->no_of_destinations() - 1; j >= 0; j--) {
      emit_move(node->operand(), node->destination_at(j)->operand());
    }
  }
}


ResolveNode* PhiResolver::create_node(LIR_Opr opr, bool source) {
  ResolveNode* node;
  if (opr->is_virtual()) {
    int vreg_num = opr->vreg_number();
    node = vreg_table().at_grow(vreg_num, NULL);
    assert(node == NULL || node->operand() == opr, "");
    if (node == NULL) {
      node = new ResolveNode(opr);
      vreg_table()[vreg_num] = node;
    }
    // Make sure that all virtual operands show up in the list when
    // they are used as the source of a move.
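    // (The destructor starts its depth-first traversal from this list, so a
    // source node that never appeared here would never have its moves emitted.)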
    if (source && !virtual_operands().contains(node)) {
      virtual_operands().append(node);
    }
  } else {
    assert(source, "");
    node = new ResolveNode(opr);
    other_operands().append(node);
  }
  return node;
}


void PhiResolver::move(LIR_Opr src, LIR_Opr dest) {
  assert(dest->is_virtual(), "");
  // tty->print("move "); src->print(); tty->print(" to "); dest->print(); tty->cr();
  assert(src->is_valid(), "");
  assert(dest->is_valid(), "");
  ResolveNode* source = source_node(src);
  source->append(destination_node(dest));
}


//--------------------------------------------------------------
// LIRItem

void LIRItem::set_result(LIR_Opr opr) {
  assert(value()->operand()->is_illegal() || value()->operand()->is_constant(), "operand should never change");
  value()->set_operand(opr);

  if (opr->is_virtual()) {
    _gen->_instruction_for_operand.at_put_grow(opr->vreg_number(), value(), NULL);
  }

  _result = opr;
}

void LIRItem::load_item() {
  if (result()->is_illegal()) {
    // update the item's result
    _result = value()->operand();
  }
  if (!result()->is_register()) {
    LIR_Opr reg = _gen->new_register(value()->type());
    __ move(result(), reg);
    if (result()->is_constant()) {
      _result = reg;
    } else {
      set_result(reg);
    }
  }
}


void LIRItem::load_for_store(BasicType type) {
  if (_gen->can_store_as_constant(value(), type)) {
    _result = value()->operand();
    if (!_result->is_constant()) {
      _result = LIR_OprFact::value_type(value()->type());
    }
  } else if (type == T_BYTE || type == T_BOOLEAN) {
    load_byte_item();
  } else {
    load_item();
  }
}

void LIRItem::load_item_force(LIR_Opr reg) {
  LIR_Opr r = result();
  if (r != reg) {
#if !defined(ARM) && !defined(E500V2)
    if (r->type() != reg->type()) {
      // moves between different types need an intervening spill slot
      r = _gen->force_to_spill(r, reg->type());
    }
#endif
    __ move(r, reg);
    _result = reg;
  }
}

ciObject* LIRItem::get_jobject_constant() const {
  ObjectType* oc = type()->as_ObjectType();
  if (oc) {
    return oc->constant_value();
  }
  return NULL;
}


jint LIRItem::get_jint_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_IntConstant() != NULL, "type check");
  return type()->as_IntConstant()->value();
}


jint LIRItem::get_address_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_AddressConstant() != NULL, "type check");
  return type()->as_AddressConstant()->value();
}


jfloat LIRItem::get_jfloat_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_FloatConstant() != NULL, "type check");
  return type()->as_FloatConstant()->value();
}


jdouble LIRItem::get_jdouble_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_DoubleConstant() != NULL, "type check");
  return type()->as_DoubleConstant()->value();
}


jlong LIRItem::get_jlong_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_LongConstant() != NULL, "type check");
  return type()->as_LongConstant()->value();
}



//--------------------------------------------------------------


void LIRGenerator::init() {
  _bs = Universe::heap()->barrier_set();
}

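// LIR is generated one block at a time: block_do_prolog allocates the block's
// LIR_List, block_do visits the pinned (root) instructions of the block, and
// block_do_epilog clears the block-local constant state so no other block can
// refer to this block's constant registers.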
void LIRGenerator::block_do_prolog(BlockBegin* block) {
#ifndef PRODUCT
  if (PrintIRWithLIR) {
    block->print();
  }
#endif

  // set up the list of LIR instructions
  assert(block->lir() == NULL, "LIR list already computed for this block");
  _lir = new LIR_List(compilation(), block);
  block->set_lir(_lir);

  __ branch_destination(block->label());

  if (LIRTraceExecution &&
      Compilation::current()->hir()->start()->block_id() != block->block_id() &&
      !block->is_set(BlockBegin::exception_entry_flag)) {
    assert(block->lir()->instructions_list()->length() == 1, "should come right after br_dst");
    trace_block_entry(block);
  }
}


void LIRGenerator::block_do_epilog(BlockBegin* block) {
#ifndef PRODUCT
  if (PrintIRWithLIR) {
    tty->cr();
  }
#endif

  // LIR_Oprs for unpinned constants shouldn't be referenced by other
  // blocks, so clear them out after processing the block.
  for (int i = 0; i < _unpinned_constants.length(); i++) {
    _unpinned_constants.at(i)->clear_operand();
  }
  _unpinned_constants.trunc_to(0);

  // clear out any registers for other local constants
  _constants.trunc_to(0);
  _reg_for_constants.trunc_to(0);
}


void LIRGenerator::block_do(BlockBegin* block) {
  CHECK_BAILOUT();

  block_do_prolog(block);
  set_block(block);

  for (Instruction* instr = block; instr != NULL; instr = instr->next()) {
    if (instr->is_pinned()) do_root(instr);
  }

  set_block(NULL);
  block_do_epilog(block);
}


//-------------------------LIRGenerator-----------------------------

// This is where the tree-walk starts; instr must be a root
void LIRGenerator::do_root(Value instr) {
  CHECK_BAILOUT();

  InstructionMark im(compilation(), instr);

  assert(instr->is_pinned(), "use only with roots");
  assert(instr->subst() == instr, "shouldn't have missed substitution");

  instr->visit(this);

  assert(!instr->has_uses() || instr->operand()->is_valid() ||
         instr->as_Constant() != NULL || bailed_out(), "invalid item set");
}


// This is called for each node in the tree; the walk stops if a root is reached
void LIRGenerator::walk(Value instr) {
  InstructionMark im(compilation(), instr);
  // stop the walk when we encounter a root
  if (instr->is_pinned() && instr->as_Phi() == NULL || instr->operand()->is_valid()) {
    assert(instr->operand() != LIR_OprFact::illegalOpr || instr->as_Constant() != NULL, "this root has not yet been visited");
  } else {
    assert(instr->subst() == instr, "shouldn't have missed substitution");
    instr->visit(this);
    // assert(instr->use_count() > 0 || instr->as_Phi() != NULL, "leaf instruction must have a use");
  }
}


CodeEmitInfo* LIRGenerator::state_for(Instruction* x, ValueStack* state, bool ignore_xhandler) {
  assert(state != NULL, "state must be defined");

#ifndef PRODUCT
  state->verify();
#endif

  ValueStack* s = state;
  for_each_state(s) {
    if (s->kind() == ValueStack::EmptyExceptionState) {
      assert(s->stack_size() == 0 && s->locals_size() == 0 && (s->locks_size() == 0 || s->locks_size() == 1), "state must be empty");
      continue;
    }

    int index;
    Value value;
    for_each_stack_value(s, index, value) {
      assert(value->subst() == value, "missed substitution");
      if (!value->is_pinned() && value->as_Constant() == NULL && value->as_Local() == NULL) {
        walk(value);
        assert(value->operand()->is_valid(), "must be evaluated now");
      }
    }

    int bci = s->bci();
    IRScope* scope = s->scope();
    ciMethod* method = scope->method();

    MethodLivenessResult liveness = method->liveness_at_bci(bci);
    if (bci == SynchronizationEntryBCI) {
      if (x->as_ExceptionObject() || x->as_Throw()) {
        // all locals are dead on exit from the synthetic unlocker
        liveness.clear();
      } else {
        assert(x->as_MonitorEnter() || x->as_ProfileInvoke(), "only other cases are MonitorEnter and ProfileInvoke");
      }
    }
    if (!liveness.is_valid()) {
      // Degenerate or breakpointed method.
      bailout("Degenerate or breakpointed method");
    } else {
      assert((int)liveness.size() == s->locals_size(), "error in use of liveness");
      for_each_local_value(s, index, value) {
        assert(value->subst() == value, "missed substitution");
        if (liveness.at(index) && !value->type()->is_illegal()) {
          if (!value->is_pinned() && value->as_Constant() == NULL && value->as_Local() == NULL) {
            walk(value);
            assert(value->operand()->is_valid(), "must be evaluated now");
          }
        } else {
          // NULL out this local so that linear scan can assume that all non-NULL values are live.
          s->invalidate_local(index);
        }
      }
    }
  }

  return new CodeEmitInfo(state, ignore_xhandler ? NULL : x->exception_handlers(), x->check_flag(Instruction::DeoptimizeOnException));
}


CodeEmitInfo* LIRGenerator::state_for(Instruction* x) {
  return state_for(x, x->exception_state());
}


void LIRGenerator::klass2reg_with_patching(LIR_Opr r, ciMetadata* obj, CodeEmitInfo* info) {
  if (!obj->is_loaded() || PatchALot) {
    assert(info != NULL, "info must be set if class is not loaded");
    __ klass2reg_patch(NULL, r, info);
  } else {
    // no patching needed
    __ metadata2reg(obj->constant_encoding(), r);
  }
}


void LIRGenerator::array_range_check(LIR_Opr array, LIR_Opr index,
                                     CodeEmitInfo* null_check_info, CodeEmitInfo* range_check_info) {
  CodeStub* stub = new RangeCheckStub(range_check_info, index);
  if (index->is_constant()) {
    cmp_mem_int(lir_cond_belowEqual, array, arrayOopDesc::length_offset_in_bytes(),
                index->as_jint(), null_check_info);
    __ branch(lir_cond_belowEqual, T_INT, stub); // forward branch
  } else {
    cmp_reg_mem(lir_cond_aboveEqual, index, array,
                arrayOopDesc::length_offset_in_bytes(), T_INT, null_check_info);
    __ branch(lir_cond_aboveEqual, T_INT, stub); // forward branch
  }
}


void LIRGenerator::nio_range_check(LIR_Opr buffer, LIR_Opr index, LIR_Opr result, CodeEmitInfo* info) {
  CodeStub* stub = new RangeCheckStub(info, index, true);
  if (index->is_constant()) {
    cmp_mem_int(lir_cond_belowEqual, buffer, java_nio_Buffer::limit_offset(), index->as_jint(), info);
    __ branch(lir_cond_belowEqual, T_INT, stub); // forward branch
  } else {
    cmp_reg_mem(lir_cond_aboveEqual, index, buffer,
                java_nio_Buffer::limit_offset(), T_INT, info);
    __ branch(lir_cond_aboveEqual, T_INT, stub); // forward branch
  }
  __ move(index, result);
}



void LIRGenerator::arithmetic_op(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, bool is_strictfp, LIR_Opr tmp_op, CodeEmitInfo* info) {
  LIR_Opr result_op = result;
  LIR_Opr left_op   = left;
  LIR_Opr right_op  = right;

  if (TwoOperandLIRForm && left_op != result_op) {
    assert(right_op != result_op, "malformed");
    __ move(left_op, result_op);
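    // Two-operand form (e.g. x86): the destination of an operation must also
    // be its first source, so left is copied into the result register and the
    // operation below works on the result in place. right must not alias the
    // result, or the copy above would have clobbered it (hence the assert).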
    left_op = result_op;
  }

  switch(code) {
    case Bytecodes::_dadd:
    case Bytecodes::_fadd:
    case Bytecodes::_ladd:
    case Bytecodes::_iadd: __ add(left_op, right_op, result_op); break;
    case Bytecodes::_fmul:
    case Bytecodes::_lmul: __ mul(left_op, right_op, result_op); break;

    case Bytecodes::_dmul:
      {
        if (is_strictfp) {
          __ mul_strictfp(left_op, right_op, result_op, tmp_op); break;
        } else {
          __ mul(left_op, right_op, result_op); break;
        }
      }
      break;

    case Bytecodes::_imul:
      {
        bool did_strength_reduce = false;

        if (right->is_constant()) {
          int c = right->as_jint();
          if (is_power_of_2(c)) {
            // do not need tmp here
            __ shift_left(left_op, exact_log2(c), result_op);
            did_strength_reduce = true;
          } else {
            did_strength_reduce = strength_reduce_multiply(left_op, c, result_op, tmp_op);
          }
        }
        // we couldn't strength reduce so just emit the multiply
        if (!did_strength_reduce) {
          __ mul(left_op, right_op, result_op);
        }
      }
      break;

    case Bytecodes::_dsub:
    case Bytecodes::_fsub:
    case Bytecodes::_lsub:
    case Bytecodes::_isub: __ sub(left_op, right_op, result_op); break;

    case Bytecodes::_fdiv: __ div(left_op, right_op, result_op); break;
    // ldiv and lrem are implemented with a direct runtime call

    case Bytecodes::_ddiv:
      {
        if (is_strictfp) {
          __ div_strictfp(left_op, right_op, result_op, tmp_op); break;
        } else {
          __ div(left_op, right_op, result_op); break;
        }
      }
      break;

    case Bytecodes::_drem:
    case Bytecodes::_frem: __ rem(left_op, right_op, result_op); break;

    default: ShouldNotReachHere();
  }
}


void LIRGenerator::arithmetic_op_int(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, LIR_Opr tmp) {
  arithmetic_op(code, result, left, right, false, tmp);
}


void LIRGenerator::arithmetic_op_long(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, CodeEmitInfo* info) {
  arithmetic_op(code, result, left, right, false, LIR_OprFact::illegalOpr, info);
}


void LIRGenerator::arithmetic_op_fpu(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, bool is_strictfp, LIR_Opr tmp) {
  arithmetic_op(code, result, left, right, is_strictfp, tmp);
}


void LIRGenerator::shift_op(Bytecodes::Code code, LIR_Opr result_op, LIR_Opr value, LIR_Opr count, LIR_Opr tmp) {
  if (TwoOperandLIRForm && value != result_op) {
    assert(count != result_op, "malformed");
    __ move(value, result_op);
    value = result_op;
  }

  assert(count->is_constant() || count->is_register(), "must be");
  switch(code) {
    case Bytecodes::_ishl:
    case Bytecodes::_lshl:  __ shift_left(value, count, result_op, tmp); break;
    case Bytecodes::_ishr:
    case Bytecodes::_lshr:  __ shift_right(value, count, result_op, tmp); break;
    case Bytecodes::_iushr:
    case Bytecodes::_lushr: __ unsigned_shift_right(value, count, result_op, tmp); break;
    default: ShouldNotReachHere();
  }
}


void LIRGenerator::logic_op(Bytecodes::Code code, LIR_Opr result_op, LIR_Opr left_op, LIR_Opr right_op) {
  if (TwoOperandLIRForm && left_op != result_op) {
    assert(right_op != result_op, "malformed");
    __ move(left_op, result_op);
    left_op = result_op;
  }

  switch(code) {
    case Bytecodes::_iand:
    case Bytecodes::_land: __ logical_and(left_op, right_op, result_op); break;

    case Bytecodes::_ior:
    case Bytecodes::_lor:  __ logical_or(left_op, right_op, result_op);  break;

    case Bytecodes::_ixor:
    case Bytecodes::_lxor: __ logical_xor(left_op, right_op, result_op); break;

    default: ShouldNotReachHere();
  }
}


void LIRGenerator::monitor_enter(LIR_Opr object, LIR_Opr lock, LIR_Opr hdr, LIR_Opr scratch, int monitor_no, CodeEmitInfo* info_for_exception, CodeEmitInfo* info) {
  if (!GenerateSynchronizationCode) return;
  // for slow path, use debug info for state after successful locking
  CodeStub* slow_path = new MonitorEnterStub(object, lock, info);
  __ load_stack_address_monitor(monitor_no, lock);
  // for handling NullPointerException, use debug info representing just the lock stack before this monitorenter
  __ lock_object(hdr, object, lock, scratch, slow_path, info_for_exception);
}


void LIRGenerator::monitor_exit(LIR_Opr object, LIR_Opr lock, LIR_Opr new_hdr, LIR_Opr scratch, int monitor_no) {
  if (!GenerateSynchronizationCode) return;
  // setup registers
  LIR_Opr hdr = lock;
  lock = new_hdr;
  CodeStub* slow_path = new MonitorExitStub(lock, UseFastLocking, monitor_no);
  __ load_stack_address_monitor(monitor_no, lock);
  __ unlock_object(hdr, object, lock, scratch, slow_path);
}


void LIRGenerator::new_instance(LIR_Opr dst, ciInstanceKlass* klass, LIR_Opr scratch1, LIR_Opr scratch2, LIR_Opr scratch3, LIR_Opr scratch4, LIR_Opr klass_reg, CodeEmitInfo* info) {
  klass2reg_with_patching(klass_reg, klass, info);
  // If klass is not loaded we do not know if the klass has finalizers:
  if (UseFastNewInstance && klass->is_loaded()
      && !Klass::layout_helper_needs_slow_path(klass->layout_helper())) {

    Runtime1::StubID stub_id = klass->is_initialized() ? Runtime1::fast_new_instance_id : Runtime1::fast_new_instance_init_check_id;

    CodeStub* slow_path = new NewInstanceStub(klass_reg, dst, klass, info, stub_id);

    assert(klass->is_loaded(), "must be loaded");
    // allocate space for instance
    assert(klass->size_helper() >= 0, "illegal instance size");
    const int instance_size = align_object_size(klass->size_helper());
    __ allocate_object(dst, scratch1, scratch2, scratch3, scratch4,
                       oopDesc::header_size(), instance_size, klass_reg, !klass->is_initialized(), slow_path);
  } else {
    CodeStub* slow_path = new NewInstanceStub(klass_reg, dst, klass, info, Runtime1::new_instance_id);
    __ branch(lir_cond_always, T_ILLEGAL, slow_path);
    __ branch_destination(slow_path->continuation());
  }
}


static bool is_constant_zero(Instruction* inst) {
  IntConstant* c = inst->type()->as_IntConstant();
  if (c) {
    return (c->value() == 0);
  }
  return false;
}


static bool positive_constant(Instruction* inst) {
  IntConstant* c = inst->type()->as_IntConstant();
  if (c) {
    return (c->value() >= 0);
  }
  return false;
}


static ciArrayKlass* as_array_klass(ciType* type) {
  if (type != NULL && type->is_array_klass() && type->is_loaded()) {
    return (ciArrayKlass*)type;
  } else {
    return NULL;
  }
}

static ciType* phi_declared_type(Phi* phi) {
  ciType* t = phi->operand_at(0)->declared_type();
  if (t == NULL) {
    return NULL;
  }
  for (int i = 1; i < phi->operand_count(); i++) {
    if (t != phi->operand_at(i)->declared_type()) {
      return NULL;
    }
  }
  return t;
}

void LIRGenerator::arraycopy_helper(Intrinsic* x, int* flagsp, ciArrayKlass** expected_typep) {
  Instruction* src     = x->argument_at(0);
  Instruction* src_pos = x->argument_at(1);
  Instruction* dst     = x->argument_at(2);
  Instruction* dst_pos = x->argument_at(3);
  Instruction* length  = x->argument_at(4);

  // first try to identify the likely type of the arrays involved
  ciArrayKlass* expected_type = NULL;
  bool is_exact = false, src_objarray = false, dst_objarray = false;
  {
    ciArrayKlass* src_exact_type    = as_array_klass(src->exact_type());
    ciArrayKlass* src_declared_type = as_array_klass(src->declared_type());
    Phi* phi;
    if (src_declared_type == NULL && (phi = src->as_Phi()) != NULL) {
      src_declared_type = as_array_klass(phi_declared_type(phi));
    }
    ciArrayKlass* dst_exact_type    = as_array_klass(dst->exact_type());
    ciArrayKlass* dst_declared_type = as_array_klass(dst->declared_type());
    if (dst_declared_type == NULL && (phi = dst->as_Phi()) != NULL) {
      dst_declared_type = as_array_klass(phi_declared_type(phi));
    }

    if (src_exact_type != NULL && src_exact_type == dst_exact_type) {
      // the types exactly match so the type is fully known
      is_exact = true;
      expected_type = src_exact_type;
    } else if (dst_exact_type != NULL && dst_exact_type->is_obj_array_klass()) {
      ciArrayKlass* dst_type = (ciArrayKlass*) dst_exact_type;
      ciArrayKlass* src_type = NULL;
      if (src_exact_type != NULL && src_exact_type->is_obj_array_klass()) {
        src_type = (ciArrayKlass*) src_exact_type;
      } else if (src_declared_type != NULL && src_declared_type->is_obj_array_klass()) {
        src_type = (ciArrayKlass*) src_declared_type;
      }
      if (src_type != NULL) {
        if (src_type->element_type()->is_subtype_of(dst_type->element_type())) {
          is_exact = true;
          expected_type = dst_type;
        }
      }
    }
    // at least pass along a good guess
    if (expected_type == NULL) expected_type = dst_exact_type;
    if (expected_type == NULL) expected_type = src_declared_type;
    if (expected_type == NULL) expected_type = dst_declared_type;

    src_objarray = (src_exact_type && src_exact_type->is_obj_array_klass()) || (src_declared_type && src_declared_type->is_obj_array_klass());
    dst_objarray = (dst_exact_type && dst_exact_type->is_obj_array_klass()) || (dst_declared_type && dst_declared_type->is_obj_array_klass());
  }

  // if a probable array type has been identified, figure out if any
  // of the required checks for a fast case can be elided.
  int flags = LIR_OpArrayCopy::all_flags;

  if (!src_objarray)
    flags &= ~LIR_OpArrayCopy::src_objarray;
  if (!dst_objarray)
    flags &= ~LIR_OpArrayCopy::dst_objarray;

  if (!x->arg_needs_null_check(0))
    flags &= ~LIR_OpArrayCopy::src_null_check;
  if (!x->arg_needs_null_check(2))
    flags &= ~LIR_OpArrayCopy::dst_null_check;


  if (expected_type != NULL) {
    Value length_limit = NULL;

    IfOp* ifop = length->as_IfOp();
    if (ifop != NULL) {
      // look for expressions like min(v, a.length) which end up as
      //   x > y ? y : x  or  x >= y ? y : x
      if ((ifop->cond() == If::gtr || ifop->cond() == If::geq) &&
          ifop->x() == ifop->fval() &&
          ifop->y() == ifop->tval()) {
        length_limit = ifop->y();
      }
    }

    // try to skip null checks and range checks
    NewArray* src_array = src->as_NewArray();
    if (src_array != NULL) {
      flags &= ~LIR_OpArrayCopy::src_null_check;
      if (length_limit != NULL &&
          src_array->length() == length_limit &&
          is_constant_zero(src_pos)) {
        flags &= ~LIR_OpArrayCopy::src_range_check;
      }
    }

    NewArray* dst_array = dst->as_NewArray();
    if (dst_array != NULL) {
      flags &= ~LIR_OpArrayCopy::dst_null_check;
      if (length_limit != NULL &&
          dst_array->length() == length_limit &&
          is_constant_zero(dst_pos)) {
        flags &= ~LIR_OpArrayCopy::dst_range_check;
      }
    }

    // check from incoming constant values
    if (positive_constant(src_pos))
      flags &= ~LIR_OpArrayCopy::src_pos_positive_check;
    if (positive_constant(dst_pos))
      flags &= ~LIR_OpArrayCopy::dst_pos_positive_check;
    if (positive_constant(length))
      flags &= ~LIR_OpArrayCopy::length_positive_check;

    // see if the range check can be elided, which might also imply
    // that src or dst is non-null.
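    // e.g. for System.arraycopy(a, 0, b, 0, a.length) the source range check
    // is redundant, and having evaluated a.length already proves a != null.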
    ArrayLength* al = length->as_ArrayLength();
    if (al != NULL) {
      if (al->array() == src) {
        // it's the length of the source array
        flags &= ~LIR_OpArrayCopy::length_positive_check;
        flags &= ~LIR_OpArrayCopy::src_null_check;
        if (is_constant_zero(src_pos))
          flags &= ~LIR_OpArrayCopy::src_range_check;
      }
      if (al->array() == dst) {
        // it's the length of the destination array
        flags &= ~LIR_OpArrayCopy::length_positive_check;
        flags &= ~LIR_OpArrayCopy::dst_null_check;
        if (is_constant_zero(dst_pos))
          flags &= ~LIR_OpArrayCopy::dst_range_check;
      }
    }
    if (is_exact) {
      flags &= ~LIR_OpArrayCopy::type_check;
    }
  }

  IntConstant* src_int = src_pos->type()->as_IntConstant();
  IntConstant* dst_int = dst_pos->type()->as_IntConstant();
  if (src_int && dst_int) {
    int s_offs = src_int->value();
    int d_offs = dst_int->value();
    if (src_int->value() >= dst_int->value()) {
      flags &= ~LIR_OpArrayCopy::overlapping;
    }
    if (expected_type != NULL) {
      BasicType t = expected_type->element_type()->basic_type();
      int element_size = type2aelembytes(t);
      if (((arrayOopDesc::base_offset_in_bytes(t) + s_offs * element_size) % HeapWordSize == 0) &&
          ((arrayOopDesc::base_offset_in_bytes(t) + d_offs * element_size) % HeapWordSize == 0)) {
        flags &= ~LIR_OpArrayCopy::unaligned;
      }
    }
  } else if (src_pos == dst_pos || is_constant_zero(dst_pos)) {
    // src and dest positions are the same, or dst is zero, so assume
    // a nonoverlapping copy.
    flags &= ~LIR_OpArrayCopy::overlapping;
  }

  if (src == dst) {
    // moving within a single array so no type checks are needed
    if (flags & LIR_OpArrayCopy::type_check) {
      flags &= ~LIR_OpArrayCopy::type_check;
    }
  }
  *flagsp = flags;
  *expected_typep = (ciArrayKlass*)expected_type;
}


LIR_Opr LIRGenerator::round_item(LIR_Opr opr) {
  assert(opr->is_register(), "why spill if item is not register?");

  if (RoundFPResults && UseSSE < 1 && opr->is_single_fpu()) {
    LIR_Opr result = new_register(T_FLOAT);
    set_vreg_flag(result, must_start_in_memory);
    assert(opr->is_register(), "only a register can be spilled");
    assert(opr->value_type()->is_float(), "rounding only for floats available");
    __ roundfp(opr, LIR_OprFact::illegalOpr, result);
    return result;
  }
  return opr;
}


LIR_Opr LIRGenerator::force_to_spill(LIR_Opr value, BasicType t) {
  assert(type2size[t] == type2size[value->type()],
         err_msg_res("size mismatch: t=%s, value->type()=%s", type2name(t), type2name(value->type())));
  if (!value->is_register()) {
    // force into a register
    LIR_Opr r = new_register(value->type());
    __ move(value, r);
    value = r;
  }

  // create a spill location
  LIR_Opr tmp = new_register(t);
  set_vreg_flag(tmp, LIRGenerator::must_start_in_memory);

  // move from register to spill
  __ move(value, tmp);
  return tmp;
}

void LIRGenerator::profile_branch(If* if_instr, If::Condition cond) {
  if (if_instr->should_profile()) {
    ciMethod* method = if_instr->profiled_method();
    assert(method != NULL, "method should be set if branch is profiled");
    ciMethodData* md = method->method_data_or_null();
    assert(md != NULL, "Sanity");
    ciProfileData* data = md->bci_to_data(if_instr->profiled_bci());
    assert(data != NULL, "must have profiling data");
    assert(data->is_BranchData(), "need BranchData for two-way branches");
    int taken_count_offset     = md->byte_offset_of_slot(data, BranchData::taken_offset());
    int not_taken_count_offset = md->byte_offset_of_slot(data, BranchData::not_taken_offset());
    if (if_instr->is_swapped()) {
      int t = taken_count_offset;
      taken_count_offset = not_taken_count_offset;
      not_taken_count_offset = t;
    }

    LIR_Opr md_reg = new_register(T_METADATA);
    __ metadata2reg(md->constant_encoding(), md_reg);

    LIR_Opr data_offset_reg = new_pointer_register();
    __ cmove(lir_cond(cond),
             LIR_OprFact::intptrConst(taken_count_offset),
             LIR_OprFact::intptrConst(not_taken_count_offset),
             data_offset_reg, as_BasicType(if_instr->x()->type()));

    // MDO cells are intptr_t, so the data_reg width is arch-dependent.
    LIR_Opr data_reg = new_pointer_register();
    LIR_Address* data_addr = new LIR_Address(md_reg, data_offset_reg, data_reg->type());
    __ move(data_addr, data_reg);
    // Use leal instead of add to avoid destroying condition codes on x86
    LIR_Address* fake_incr_value = new LIR_Address(data_reg, DataLayout::counter_increment, T_INT);
    __ leal(LIR_OprFact::address(fake_incr_value), data_reg);
    __ move(data_reg, data_addr);
  }
}

// Phi technique:
// This is about passing live values from one basic block to the other.
// In code generated from Java it is rather rare that more than one
// value is on the stack from one basic block to the other.
// We optimize our technique for efficient passing of one value
// (of type long, int, double..) but it can be extended.
// When entering or leaving a basic block, all registers and all spill
// slots are released and empty. We use the released registers
// and spill slots to pass the live values from one block
// to the other. The topmost value, i.e., the value on TOS of the expression
// stack, is passed in registers. All other values are stored in the spilling
// area. Every Phi has an index which designates its spill slot.
// At the exit of a basic block, we fill the register(s) and spill slots.
// At the entry of a basic block, the block_prolog sets up the content of phi nodes
// and locks necessary registers and spilling slots.
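//
// For example (a sketch, not actual generated code): if a loop header B3 has
// the phi i3 = [i1 from B1, i2 from B2], then B1 ends with a move of i1's
// operand into i3's operand and B2 with a move of i2's operand into i3's
// operand; PhiResolver orders these moves and breaks any cycles via a temp.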


// move current value to referenced phi function
void LIRGenerator::move_to_phi(PhiResolver* resolver, Value cur_val, Value sux_val) {
  Phi* phi = sux_val->as_Phi();
  // cur_val can be null without phi being null in conjunction with inlining
  if (phi != NULL && cur_val != NULL && cur_val != phi && !phi->is_illegal()) {
    LIR_Opr operand = cur_val->operand();
    if (cur_val->operand()->is_illegal()) {
      assert(cur_val->as_Constant() != NULL || cur_val->as_Local() != NULL,
             "these can be produced lazily");
      operand = operand_for_instruction(cur_val);
    }
    resolver->move(operand, operand_for_instruction(phi));
  }
}


// Moves all stack values into their PHI position
void LIRGenerator::move_to_phi(ValueStack* cur_state) {
  BlockBegin* bb = block();
  if (bb->number_of_sux() == 1) {
    BlockBegin* sux = bb->sux_at(0);
    assert(sux->number_of_preds() > 0, "invalid CFG");

    // a block with only one predecessor never has phi functions
    if (sux->number_of_preds() > 1) {
      int max_phis = cur_state->stack_size() + cur_state->locals_size();
      PhiResolver resolver(this, _virtual_register_number + max_phis * 2);

      ValueStack* sux_state = sux->state();
      Value sux_value;
      int index;

      assert(cur_state->scope() == sux_state->scope(), "not matching");
      assert(cur_state->locals_size() == sux_state->locals_size(), "not matching");
      assert(cur_state->stack_size() == sux_state->stack_size(), "not matching");

      for_each_stack_value(sux_state, index, sux_value) {
        move_to_phi(&resolver, cur_state->stack_at(index), sux_value);
      }

      for_each_local_value(sux_state, index, sux_value) {
        move_to_phi(&resolver, cur_state->local_at(index), sux_value);
      }

      assert(cur_state->caller_state() == sux_state->caller_state(), "caller states must be equal");
    }
  }
}


LIR_Opr LIRGenerator::new_register(BasicType type) {
  int vreg = _virtual_register_number;
  // add a little fudge factor for the bailout, since the bailout is
  // only checked periodically. This gives a few extra registers to
  // hand out before we really run out, which helps us keep from
  // tripping over assertions.
  if (vreg + 20 >= LIR_OprDesc::vreg_max) {
    bailout("out of virtual registers");
    if (vreg + 2 >= LIR_OprDesc::vreg_max) {
      // wrap it around
      _virtual_register_number = LIR_OprDesc::vreg_base;
    }
  }
  _virtual_register_number += 1;
  return LIR_OprFact::virtual_register(vreg, type);
}


// Try to lock using register in hint
LIR_Opr LIRGenerator::rlock(Value instr) {
  return new_register(instr->type());
}


// does an rlock and sets result
LIR_Opr LIRGenerator::rlock_result(Value x) {
  LIR_Opr reg = rlock(x);
  set_result(x, reg);
  return reg;
}


// does an rlock and sets result
LIR_Opr LIRGenerator::rlock_result(Value x, BasicType type) {
  LIR_Opr reg;
  switch (type) {
    case T_BYTE:
    case T_BOOLEAN:
      reg = rlock_byte(type);
      break;
    default:
      reg = rlock(x);
      break;
  }

  set_result(x, reg);
  return reg;
}


//---------------------------------------------------------------------
ciObject* LIRGenerator::get_jobject_constant(Value value) {
  ObjectType* oc = value->type()->as_ObjectType();
  if (oc) {
    return oc->constant_value();
  }
  return NULL;
}


void LIRGenerator::do_ExceptionObject(ExceptionObject* x) {
  assert(block()->is_set(BlockBegin::exception_entry_flag), "ExceptionObject only allowed in exception handler block");
  assert(block()->next() == x, "ExceptionObject must be first instruction of block");

  // no moves are created for phi functions at the beginning of exception
  // handlers, so assign operands manually here
  for_each_phi_fun(block(), phi,
                   operand_for_instruction(phi));

  LIR_Opr thread_reg = getThreadPointer();
  __ move_wide(new LIR_Address(thread_reg, in_bytes(JavaThread::exception_oop_offset()), T_OBJECT),
               exceptionOopOpr());
  __ move_wide(LIR_OprFact::oopConst(NULL),
               new LIR_Address(thread_reg, in_bytes(JavaThread::exception_oop_offset()), T_OBJECT));
  __ move_wide(LIR_OprFact::oopConst(NULL),
               new LIR_Address(thread_reg, in_bytes(JavaThread::exception_pc_offset()), T_OBJECT));

  LIR_Opr result = new_register(T_OBJECT);
  __ move(exceptionOopOpr(), result);
  set_result(x, result);
}


//----------------------------------------------------------------------
//----------------------------------------------------------------------
//----------------------------------------------------------------------
//----------------------------------------------------------------------
// visitor functions
//----------------------------------------------------------------------
//----------------------------------------------------------------------
//----------------------------------------------------------------------
//----------------------------------------------------------------------

void LIRGenerator::do_Phi(Phi* x) {
  // phi functions are never visited directly
  ShouldNotReachHere();
}


// Code for a constant is generated lazily unless the constant is frequently used and can't be inlined.
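// Three cases below: a constant carrying a ValueStack must be patched (its
// class wasn't loaded at compile time); a multi-use constant that can't be
// inlined is materialized into a block-local register via load_constant();
// anything else stays a plain LIR constant and is emitted at its uses.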
void LIRGenerator::do_Constant(Constant* x) {
  if (x->state_before() != NULL) {
    // Any constant with a ValueStack requires patching so emit the patch here
    LIR_Opr reg = rlock_result(x);
    CodeEmitInfo* info = state_for(x, x->state_before());
    __ oop2reg_patch(NULL, reg, info);
  } else if (x->use_count() > 1 && !can_inline_as_constant(x)) {
    if (!x->is_pinned()) {
      // unpinned constants are handled specially so that they can be
      // put into registers when they are used multiple times within a
      // block. After the block completes their operand will be
      // cleared so that other blocks can't refer to that register.
      set_result(x, load_constant(x));
    } else {
      LIR_Opr res = x->operand();
      if (!res->is_valid()) {
        res = LIR_OprFact::value_type(x->type());
      }
      if (res->is_constant()) {
        LIR_Opr reg = rlock_result(x);
        __ move(res, reg);
      } else {
        set_result(x, res);
      }
    }
  } else {
    set_result(x, LIR_OprFact::value_type(x->type()));
  }
}


void LIRGenerator::do_Local(Local* x) {
  // operand_for_instruction has the side effect of setting the result
  // so there's no need to do it here.
  operand_for_instruction(x);
}


void LIRGenerator::do_IfInstanceOf(IfInstanceOf* x) {
  Unimplemented();
}


void LIRGenerator::do_Return(Return* x) {
  if (compilation()->env()->dtrace_method_probes()) {
    BasicTypeList signature;
    signature.append(LP64_ONLY(T_LONG) NOT_LP64(T_INT)); // thread
    signature.append(T_OBJECT);                          // Method*
    LIR_OprList* args = new LIR_OprList();
    args->append(getThreadPointer());
    LIR_Opr meth = new_register(T_METADATA);
    __ metadata2reg(method()->constant_encoding(), meth);
    args->append(meth);
    call_runtime(&signature, args, CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), voidType, NULL);
  }

  if (x->type()->is_void()) {
    __ return_op(LIR_OprFact::illegalOpr);
  } else {
    LIR_Opr reg = result_register_for(x->type(), /*callee=*/true);
    LIRItem result(x->result(), this);

    result.load_item_force(reg);
    __ return_op(result.result());
  }
  set_no_result(x);
}

// Example: ref.get()
// Combination of LoadField and g1 pre-write barrier
void LIRGenerator::do_Reference_get(Intrinsic* x) {

  const int referent_offset = java_lang_ref_Reference::referent_offset;
  guarantee(referent_offset > 0, "referent offset not initialized");

  assert(x->number_of_arguments() == 1, "wrong type");

  LIRItem reference(x->argument_at(0), this);
  reference.load_item();

  // need to perform the null check on the reference object
  CodeEmitInfo* info = NULL;
  if (x->needs_null_check()) {
    info = state_for(x);
  }

  LIR_Address* referent_field_adr =
    new LIR_Address(reference.result(), referent_offset, T_OBJECT);

  LIR_Opr result = rlock_result(x);

  __ load(referent_field_adr, result, info);

  // Register the value in the referent field with the pre-barrier
  pre_barrier(LIR_OprFact::illegalOpr /* addr_opr */,
              result /* pre_val */,
              false  /* do_load */,
              false  /* patch */,
              NULL   /* info */);
}

// Example: clazz.isInstance(object)
void LIRGenerator::do_isInstance(Intrinsic* x) {
  assert(x->number_of_arguments() == 2, "wrong type");

  // TODO could try to substitute this node with an equivalent InstanceOf
  // if clazz is known to be a constant Class. This will pick up newly found
  // constants after HIR construction. I'll leave this to a future change.

  // as a first cut, make a simple leaf call to runtime to stay platform independent.
  // could follow the aastore example in a future change.

  LIRItem clazz(x->argument_at(0), this);
  LIRItem object(x->argument_at(1), this);
  clazz.load_item();
  object.load_item();
  LIR_Opr result = rlock_result(x);

  // need to perform null check on clazz
  if (x->needs_null_check()) {
    CodeEmitInfo* info = state_for(x);
    __ null_check(clazz.result(), info);
  }

  LIR_Opr call_result = call_runtime(clazz.value(), object.value(),
                                     CAST_FROM_FN_PTR(address, Runtime1::is_instance_of),
                                     x->type(),
                                     NULL); // NULL CodeEmitInfo results in a leaf call
  __ move(call_result, result);
}

// Example: object.getClass()
void LIRGenerator::do_getClass(Intrinsic* x) {
  assert(x->number_of_arguments() == 1, "wrong type");

  LIRItem rcvr(x->argument_at(0), this);
  rcvr.load_item();
  LIR_Opr result = rlock_result(x);

  // need to perform the null check on the rcvr
  CodeEmitInfo* info = NULL;
  if (x->needs_null_check()) {
    info = state_for(x);
  }
  __ move(new LIR_Address(rcvr.result(), oopDesc::klass_offset_in_bytes(), T_ADDRESS), result, info);
  __ move_wide(new LIR_Address(result, in_bytes(Klass::java_mirror_offset()), T_OBJECT), result);
}


// Example: Thread.currentThread()
void LIRGenerator::do_currentThread(Intrinsic* x) {
  assert(x->number_of_arguments() == 0, "wrong type");
  LIR_Opr reg = rlock_result(x);
  __ move_wide(new LIR_Address(getThreadPointer(), in_bytes(JavaThread::threadObj_offset()), T_OBJECT), reg);
}


void LIRGenerator::do_RegisterFinalizer(Intrinsic* x) {
  assert(x->number_of_arguments() == 1, "wrong type");
  LIRItem receiver(x->argument_at(0), this);

  receiver.load_item();
  BasicTypeList signature;
  signature.append(T_OBJECT); // receiver
  LIR_OprList* args = new LIR_OprList();
  args->append(receiver.result());
  CodeEmitInfo* info = state_for(x, x->state());
  call_runtime(&signature, args,
               CAST_FROM_FN_PTR(address, Runtime1::entry_for(Runtime1::register_finalizer_id)),
               voidType, info);

  set_no_result(x);
}


//------------------------local access--------------------------------------

LIR_Opr LIRGenerator::operand_for_instruction(Instruction* x) {
  if (x->operand()->is_illegal()) {
    Constant* c = x->as_Constant();
    if (c != NULL) {
      x->set_operand(LIR_OprFact::value_type(c->type()));
    } else {
      assert(x->as_Phi() || x->as_Local() != NULL, "only for Phi and Local");
      // allocate a virtual register for this local or phi
      x->set_operand(rlock(x));
      _instruction_for_operand.at_put_grow(x->operand()->vreg_number(), x, NULL);
    }
  }
  return x->operand();
}


Instruction* LIRGenerator::instruction_for_opr(LIR_Opr opr) {
  if (opr->is_virtual()) {
    return instruction_for_vreg(opr->vreg_number());
  }
  return NULL;
}


Instruction* LIRGenerator::instruction_for_vreg(int reg_num) {
  if (reg_num < _instruction_for_operand.length()) {
    return _instruction_for_operand.at(reg_num);
  }
  return NULL;
}


void LIRGenerator::set_vreg_flag(int vreg_num, VregFlag f) {
  if (_vreg_flags.size_in_bits() == 0) {
    BitMap2D temp(100, num_vreg_flags);
    temp.clear();
    _vreg_flags = temp;
  }
  _vreg_flags.at_put_grow(vreg_num, f, true);
}

bool LIRGenerator::is_vreg_flag_set(int vreg_num, VregFlag f) {
  if (!_vreg_flags.is_valid_index(vreg_num, f)) {
    return false;
  }
  return _vreg_flags.at(vreg_num, f);
}


// Block-local constant handling. This code is useful for keeping
// unpinned constants and constants which aren't exposed in the IR in
// registers. Unpinned Constant instructions have their operands
// cleared when the block is finished so that other blocks can't end
// up referring to their registers.

LIR_Opr LIRGenerator::load_constant(Constant* x) {
  assert(!x->is_pinned(), "only for unpinned constants");
  _unpinned_constants.append(x);
  return load_constant(LIR_OprFact::value_type(x->type())->as_constant_ptr());
}


LIR_Opr LIRGenerator::load_constant(LIR_Const* c) {
  BasicType t = c->type();
  for (int i = 0; i < _constants.length(); i++) {
    LIR_Const* other = _constants.at(i);
    if (t == other->type()) {
      switch (t) {
      case T_INT:
      case T_FLOAT:
        if (c->as_jint_bits() != other->as_jint_bits()) continue;
        break;
      case T_LONG:
      case T_DOUBLE:
        if (c->as_jint_hi_bits() != other->as_jint_hi_bits()) continue;
        if (c->as_jint_lo_bits() != other->as_jint_lo_bits()) continue;
        break;
      case T_OBJECT:
        if (c->as_jobject() != other->as_jobject()) continue;
        break;
      }
      return _reg_for_constants.at(i);
    }
  }

  LIR_Opr result = new_register(t);
  __ move((LIR_Opr)c, result);
  _constants.append(c);
  _reg_for_constants.append(result);
  return result;
}

// Various barriers

void LIRGenerator::pre_barrier(LIR_Opr addr_opr, LIR_Opr pre_val,
                               bool do_load, bool patch, CodeEmitInfo* info) {
  // Do the pre-write barrier, if any.
  switch (_bs->kind()) {
#if INCLUDE_ALL_GCS
    case BarrierSet::G1SATBCT:
    case BarrierSet::G1SATBCTLogging:
      G1SATBCardTableModRef_pre_barrier(addr_opr, pre_val, do_load, patch, info);
      break;
#endif // INCLUDE_ALL_GCS
    case BarrierSet::CardTableModRef:
    case BarrierSet::CardTableExtension:
      // No pre barriers
      break;
    case BarrierSet::ModRef:
    case BarrierSet::Other:
      // No pre barriers
      break;
    default:
      ShouldNotReachHere();
  }
}

void LIRGenerator::post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
  switch (_bs->kind()) {
#if INCLUDE_ALL_GCS
    case BarrierSet::G1SATBCT:
    case BarrierSet::G1SATBCTLogging:
      G1SATBCardTableModRef_post_barrier(addr, new_val);
      break;
#endif // INCLUDE_ALL_GCS
    case BarrierSet::CardTableModRef:
    case BarrierSet::CardTableExtension:
      CardTableModRef_post_barrier(addr, new_val);
      break;
    case BarrierSet::ModRef:
    case BarrierSet::Other:
      // No post barriers
      break;
    default:
      ShouldNotReachHere();
  }
}

////////////////////////////////////////////////////////////////////////
#if INCLUDE_ALL_GCS

void LIRGenerator::G1SATBCardTableModRef_pre_barrier(LIR_Opr addr_opr, LIR_Opr pre_val,
                                                     bool do_load, bool patch, CodeEmitInfo* info) {
  // First we test whether marking is in progress.
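  // Under the SATB (snapshot-at-the-beginning) invariant, while concurrent
  // marking is active every reference about to be overwritten must be logged
  // so the marker still sees the object graph as of the marking snapshot;
  // when marking is inactive the barrier reduces to this flag test and falls
  // through.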
  BasicType flag_type;
  if (in_bytes(PtrQueue::byte_width_of_active()) == 4) {
    flag_type = T_INT;
  } else {
    guarantee(in_bytes(PtrQueue::byte_width_of_active()) == 1,
              "Assumption");
    flag_type = T_BYTE;
  }
  LIR_Opr thrd = getThreadPointer();
  LIR_Address* mark_active_flag_addr =
    new LIR_Address(thrd,
                    in_bytes(JavaThread::satb_mark_queue_offset() +
                             PtrQueue::byte_offset_of_active()),
                    flag_type);
  // Read the marking-in-progress flag.
  LIR_Opr flag_val = new_register(T_INT);
  __ load(mark_active_flag_addr, flag_val);
  __ cmp(lir_cond_notEqual, flag_val, LIR_OprFact::intConst(0));

  LIR_PatchCode pre_val_patch_code = lir_patch_none;

  CodeStub* slow;

  if (do_load) {
    assert(pre_val == LIR_OprFact::illegalOpr, "sanity");
    assert(addr_opr != LIR_OprFact::illegalOpr, "sanity");

    if (patch)
      pre_val_patch_code = lir_patch_normal;

    pre_val = new_register(T_OBJECT);

    if (!addr_opr->is_address()) {
      assert(addr_opr->is_register(), "must be");
      addr_opr = LIR_OprFact::address(new LIR_Address(addr_opr, T_OBJECT));
    }
    slow = new G1PreBarrierStub(addr_opr, pre_val, pre_val_patch_code, info);
  } else {
    assert(addr_opr == LIR_OprFact::illegalOpr, "sanity");
    assert(pre_val->is_register(), "must be");
    assert(pre_val->type() == T_OBJECT, "must be an object");
    assert(info == NULL, "sanity");

    slow = new G1PreBarrierStub(pre_val);
  }

  __ branch(lir_cond_notEqual, T_INT, slow);
  __ branch_destination(slow->continuation());
}

void LIRGenerator::G1SATBCardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
  // If the "new_val" is a constant NULL, no barrier is necessary.
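  // The G1 post-barrier only has to remember stores that create a reference
  // from one heap region into another: the field address is xor-ed with the
  // new value and shifted right by LogOfHRGrainBytes below, so a zero result
  // means "same region" and the slow path (card marking and enqueueing) is
  // skipped.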
  if (new_val->is_constant() &&
      new_val->as_constant_ptr()->as_jobject() == NULL) return;

  if (!new_val->is_register()) {
    LIR_Opr new_val_reg = new_register(T_OBJECT);
    if (new_val->is_constant()) {
      __ move(new_val, new_val_reg);
    } else {
      __ leal(new_val, new_val_reg);
    }
    new_val = new_val_reg;
  }
  assert(new_val->is_register(), "must be a register at this point");

  if (addr->is_address()) {
    LIR_Address* address = addr->as_address_ptr();
    LIR_Opr ptr = new_pointer_register();
    if (!address->index()->is_valid() && address->disp() == 0) {
      __ move(address->base(), ptr);
    } else {
      assert(address->disp() != max_jint, "lea doesn't support patched addresses!");
      __ leal(addr, ptr);
    }
    addr = ptr;
  }
  assert(addr->is_register(), "must be a register at this point");

  LIR_Opr xor_res = new_pointer_register();
  LIR_Opr xor_shift_res = new_pointer_register();
  if (TwoOperandLIRForm) {
    __ move(addr, xor_res);
    __ logical_xor(xor_res, new_val, xor_res);
    __ move(xor_res, xor_shift_res);
    __ unsigned_shift_right(xor_shift_res,
                            LIR_OprFact::intConst(HeapRegion::LogOfHRGrainBytes),
                            xor_shift_res,
                            LIR_OprDesc::illegalOpr());
  } else {
    __ logical_xor(addr, new_val, xor_res);
    __ unsigned_shift_right(xor_res,
                            LIR_OprFact::intConst(HeapRegion::LogOfHRGrainBytes),
                            xor_shift_res,
                            LIR_OprDesc::illegalOpr());
  }

  if (!new_val->is_register()) {
    LIR_Opr new_val_reg = new_register(T_OBJECT);
    __ leal(new_val, new_val_reg);
    new_val = new_val_reg;
  }
  assert(new_val->is_register(), "must be a register at this point");

  __ cmp(lir_cond_notEqual, xor_shift_res, LIR_OprFact::intptrConst(NULL_WORD));

  CodeStub* slow = new G1PostBarrierStub(addr, new_val);
  __ branch(lir_cond_notEqual, LP64_ONLY(T_LONG) NOT_LP64(T_INT), slow);
  __ branch_destination(slow->continuation());
}

#endif // INCLUDE_ALL_GCS
////////////////////////////////////////////////////////////////////////

void LIRGenerator::CardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {

  assert(sizeof(*((CardTableModRefBS*)_bs)->byte_map_base) == sizeof(jbyte), "adjust this code");
  LIR_Const* card_table_base = new LIR_Const(((CardTableModRefBS*)_bs)->byte_map_base);
  if (addr->is_address()) {
    LIR_Address* address = addr->as_address_ptr();
    // ptr cannot be an object because we use this barrier for array card marks
    // and addr can point in the middle of an array.
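    // The card covering addr is dirtied below by computing the card index as
    // addr >> card_shift and storing a zero byte (the dirty value) into the
    // card table at byte_map_base + index.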
    LIR_Opr ptr = new_pointer_register();
    if (!address->index()->is_valid() && address->disp() == 0) {
      __ move(address->base(), ptr);
    } else {
      assert(address->disp() != max_jint, "lea doesn't support patched addresses!");
      __ leal(addr, ptr);
    }
    addr = ptr;
  }
  assert(addr->is_register(), "must be a register at this point");

#ifdef ARM
  // TODO: ARM - move to platform-dependent code
  LIR_Opr tmp = FrameMap::R14_opr;
  if (VM_Version::supports_movw()) {
    __ move((LIR_Opr)card_table_base, tmp);
  } else {
    __ move(new LIR_Address(FrameMap::Rthread_opr, in_bytes(JavaThread::card_table_base_offset()), T_ADDRESS), tmp);
  }

  CardTableModRefBS* ct = (CardTableModRefBS*)_bs;
  LIR_Address* card_addr = new LIR_Address(tmp, addr, (LIR_Address::Scale) -CardTableModRefBS::card_shift, 0, T_BYTE);
  if (((int)ct->byte_map_base & 0xff) == 0) {
    __ move(tmp, card_addr);
  } else {
    LIR_Opr tmp_zero = new_register(T_INT);
    __ move(LIR_OprFact::intConst(0), tmp_zero);
    __ move(tmp_zero, card_addr);
  }
#else // ARM
  LIR_Opr tmp = new_pointer_register();
  if (TwoOperandLIRForm) {
    __ move(addr, tmp);
    __ unsigned_shift_right(tmp, CardTableModRefBS::card_shift, tmp);
  } else {
    __ unsigned_shift_right(addr, CardTableModRefBS::card_shift, tmp);
  }
  if (can_inline_as_constant(card_table_base)) {
    __ move(LIR_OprFact::intConst(0),
            new LIR_Address(tmp, card_table_base->as_jint(), T_BYTE));
  } else {
    __ move(LIR_OprFact::intConst(0),
            new LIR_Address(tmp, load_constant(card_table_base),
                            T_BYTE));
  }
#endif // ARM
}


//------------------------field access--------------------------------------

// Comment copied from templateTable_i486.cpp
// ----------------------------------------------------------------------------
// Volatile variables demand their effects be made known to all CPUs in
// order. Store buffers on most chips allow reads & writes to reorder; the
// JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of
// memory barrier (i.e., it's not sufficient that the interpreter does not
// reorder volatile references, the hardware also must not reorder them).
//
// According to the new Java Memory Model (JMM):
// (1) All volatiles are serialized wrt to each other.
//     ALSO reads & writes act as acquire & release, so:
// (2) A read cannot let unrelated NON-volatile memory refs that happen after
//     the read float up to before the read. It's OK for non-volatile memory refs
//     that happen before the volatile read to float down below it.
// (3) Similarly, a volatile write cannot let unrelated NON-volatile memory refs
//     that happen BEFORE the write float down to after the write. It's OK for
//     non-volatile memory refs that happen after the volatile write to float up
//     before it.
//
// We only put in barriers around volatile refs (they are expensive), not
// _between_ memory refs (that would require us to track the flavor of the
// previous memory refs). Requirements (2) and (3) require some barriers
// before volatile stores and after volatile loads. These nearly cover
// requirement (1) but miss the volatile-store-volatile-load case. This final
// case is placed after volatile-stores although it could just as well go
// before volatile-loads.
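//
// In do_StoreField/do_LoadField below this works out, for a volatile field
// on an MP system, to:
//
//   membar_release; store    (release semantics for the store, rule (3))
//   store; membar            (full fence covering the store-load case, (1))
//   load; membar_acquire     (acquire semantics for the load, rule (2))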
1652 1653 1654void LIRGenerator::do_StoreField(StoreField* x) { 1655 bool needs_patching = x->needs_patching(); 1656 bool is_volatile = x->field()->is_volatile(); 1657 BasicType field_type = x->field_type(); 1658 bool is_oop = (field_type == T_ARRAY || field_type == T_OBJECT); 1659 1660 CodeEmitInfo* info = NULL; 1661 if (needs_patching) { 1662 assert(x->explicit_null_check() == NULL, "can't fold null check into patching field access"); 1663 info = state_for(x, x->state_before()); 1664 } else if (x->needs_null_check()) { 1665 NullCheck* nc = x->explicit_null_check(); 1666 if (nc == NULL) { 1667 info = state_for(x); 1668 } else { 1669 info = state_for(nc); 1670 } 1671 } 1672 1673 1674 LIRItem object(x->obj(), this); 1675 LIRItem value(x->value(), this); 1676 1677 object.load_item(); 1678 1679 if (is_volatile || needs_patching) { 1680 // load item if field is volatile (fewer special cases for volatiles) 1681 // load item if field not initialized 1682 // load item if field not constant 1683 // because of code patching we cannot inline constants 1684 if (field_type == T_BYTE || field_type == T_BOOLEAN) { 1685 value.load_byte_item(); 1686 } else { 1687 value.load_item(); 1688 } 1689 } else { 1690 value.load_for_store(field_type); 1691 } 1692 1693 set_no_result(x); 1694 1695#ifndef PRODUCT 1696 if (PrintNotLoaded && needs_patching) { 1697 tty->print_cr(" ###class not loaded at store_%s bci %d", 1698 x->is_static() ? "static" : "field", x->printable_bci()); 1699 } 1700#endif 1701 1702 if (x->needs_null_check() && 1703 (needs_patching || 1704 MacroAssembler::needs_explicit_null_check(x->offset()))) { 1705 // emit an explicit null check because the offset is too large 1706 __ null_check(object.result(), new CodeEmitInfo(info)); 1707 } 1708 1709 LIR_Address* address; 1710 if (needs_patching) { 1711 // we need to patch the offset in the instruction so don't allow 1712 // generate_address to try to be smart about emitting the -1. 1713 // Otherwise the patching code won't know how to find the 1714 // instruction to patch. 1715 address = new LIR_Address(object.result(), PATCHED_ADDR, field_type); 1716 } else { 1717 address = generate_address(object.result(), x->offset(), field_type); 1718 } 1719 1720 if (is_volatile && os::is_MP()) { 1721 __ membar_release(); 1722 } 1723 1724 if (is_oop) { 1725 // Do the pre-write barrier, if any. 1726 pre_barrier(LIR_OprFact::address(address), 1727 LIR_OprFact::illegalOpr /* pre_val */, 1728 true /* do_load*/, 1729 needs_patching, 1730 (info ? new CodeEmitInfo(info) : NULL)); 1731 } 1732 1733 if (is_volatile && !needs_patching) { 1734 volatile_field_store(value.result(), address, info); 1735 } else { 1736 LIR_PatchCode patch_code = needs_patching ? 
lir_patch_normal : lir_patch_none; 1737 __ store(value.result(), address, info, patch_code); 1738 } 1739 1740 if (is_oop) { 1741 // Store to object so mark the card of the header 1742 post_barrier(object.result(), value.result()); 1743 } 1744 1745 if (is_volatile && os::is_MP()) { 1746 __ membar(); 1747 } 1748} 1749 1750 1751void LIRGenerator::do_LoadField(LoadField* x) { 1752 bool needs_patching = x->needs_patching(); 1753 bool is_volatile = x->field()->is_volatile(); 1754 BasicType field_type = x->field_type(); 1755 1756 CodeEmitInfo* info = NULL; 1757 if (needs_patching) { 1758 assert(x->explicit_null_check() == NULL, "can't fold null check into patching field access"); 1759 info = state_for(x, x->state_before()); 1760 } else if (x->needs_null_check()) { 1761 NullCheck* nc = x->explicit_null_check(); 1762 if (nc == NULL) { 1763 info = state_for(x); 1764 } else { 1765 info = state_for(nc); 1766 } 1767 } 1768 1769 LIRItem object(x->obj(), this); 1770 1771 object.load_item(); 1772 1773#ifndef PRODUCT 1774 if (PrintNotLoaded && needs_patching) { 1775 tty->print_cr(" ###class not loaded at load_%s bci %d", 1776 x->is_static() ? "static" : "field", x->printable_bci()); 1777 } 1778#endif 1779 1780 bool stress_deopt = StressLoopInvariantCodeMotion && info && info->deoptimize_on_exception(); 1781 if (x->needs_null_check() && 1782 (needs_patching || 1783 MacroAssembler::needs_explicit_null_check(x->offset()) || 1784 stress_deopt)) { 1785 LIR_Opr obj = object.result(); 1786 if (stress_deopt) { 1787 obj = new_register(T_OBJECT); 1788 __ move(LIR_OprFact::oopConst(NULL), obj); 1789 } 1790 // emit an explicit null check because the offset is too large 1791 __ null_check(obj, new CodeEmitInfo(info)); 1792 } 1793 1794 LIR_Opr reg = rlock_result(x, field_type); 1795 LIR_Address* address; 1796 if (needs_patching) { 1797 // we need to patch the offset in the instruction so don't allow 1798 // generate_address to try to be smart about emitting the -1. 1799 // Otherwise the patching code won't know how to find the 1800 // instruction to patch. 1801 address = new LIR_Address(object.result(), PATCHED_ADDR, field_type); 1802 } else { 1803 address = generate_address(object.result(), x->offset(), field_type); 1804 } 1805 1806 if (is_volatile && !needs_patching) { 1807 volatile_field_load(address, reg, info); 1808 } else { 1809 LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none; 1810 __ load(address, reg, info, patch_code); 1811 } 1812 1813 if (is_volatile && os::is_MP()) { 1814 __ membar_acquire(); 1815 } 1816} 1817 1818 1819//------------------------java.nio.Buffer.checkIndex------------------------ 1820 1821// int java.nio.Buffer.checkIndex(int) 1822void LIRGenerator::do_NIOCheckIndex(Intrinsic* x) { 1823 // NOTE: by the time we are in checkIndex() we are guaranteed that 1824 // the buffer is non-null (because checkIndex is package-private and 1825 // only called from within other methods in the buffer). 
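// Editorial sketch (not in the original source) of the check emitted
// below when GenerateRangeChecks is set, with 'limit' standing for the
// buffer's limit field:
//
//   if (index >=u limit) goto RangeCheckStub;   // unsigned compare
//   result = index;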
1826 assert(x->number_of_arguments() == 2, "wrong type"); 1827 LIRItem buf (x->argument_at(0), this); 1828 LIRItem index(x->argument_at(1), this); 1829 buf.load_item(); 1830 index.load_item(); 1831 1832 LIR_Opr result = rlock_result(x); 1833 if (GenerateRangeChecks) { 1834 CodeEmitInfo* info = state_for(x); 1835 CodeStub* stub = new RangeCheckStub(info, index.result(), true); 1836 if (index.result()->is_constant()) { 1837 cmp_mem_int(lir_cond_belowEqual, buf.result(), java_nio_Buffer::limit_offset(), index.result()->as_jint(), info); 1838 __ branch(lir_cond_belowEqual, T_INT, stub); 1839 } else { 1840 cmp_reg_mem(lir_cond_aboveEqual, index.result(), buf.result(), 1841 java_nio_Buffer::limit_offset(), T_INT, info); 1842 __ branch(lir_cond_aboveEqual, T_INT, stub); 1843 } 1844 __ move(index.result(), result); 1845 } else { 1846 // Just load the index into the result register 1847 __ move(index.result(), result); 1848 } 1849} 1850 1851 1852//------------------------array access-------------------------------------- 1853 1854 1855void LIRGenerator::do_ArrayLength(ArrayLength* x) { 1856 LIRItem array(x->array(), this); 1857 array.load_item(); 1858 LIR_Opr reg = rlock_result(x); 1859 1860 CodeEmitInfo* info = NULL; 1861 if (x->needs_null_check()) { 1862 NullCheck* nc = x->explicit_null_check(); 1863 if (nc == NULL) { 1864 info = state_for(x); 1865 } else { 1866 info = state_for(nc); 1867 } 1868 if (StressLoopInvariantCodeMotion && info->deoptimize_on_exception()) { 1869 LIR_Opr obj = new_register(T_OBJECT); 1870 __ move(LIR_OprFact::oopConst(NULL), obj); 1871 __ null_check(obj, new CodeEmitInfo(info)); 1872 } 1873 } 1874 __ load(new LIR_Address(array.result(), arrayOopDesc::length_offset_in_bytes(), T_INT), reg, info, lir_patch_none); 1875} 1876 1877 1878void LIRGenerator::do_LoadIndexed(LoadIndexed* x) { 1879 bool use_length = x->length() != NULL; 1880 LIRItem array(x->array(), this); 1881 LIRItem index(x->index(), this); 1882 LIRItem length(this); 1883 bool needs_range_check = x->compute_needs_range_check(); 1884 1885 if (use_length && needs_range_check) { 1886 length.set_instruction(x->length()); 1887 length.load_item(); 1888 } 1889 1890 array.load_item(); 1891 if (index.is_constant() && can_inline_as_constant(x->index())) { 1892 // let it be a constant 1893 index.dont_load_item(); 1894 } else { 1895 index.load_item(); 1896 } 1897 1898 CodeEmitInfo* range_check_info = state_for(x); 1899 CodeEmitInfo* null_check_info = NULL; 1900 if (x->needs_null_check()) { 1901 NullCheck* nc = x->explicit_null_check(); 1902 if (nc != NULL) { 1903 null_check_info = state_for(nc); 1904 } else { 1905 null_check_info = range_check_info; 1906 } 1907 if (StressLoopInvariantCodeMotion && null_check_info->deoptimize_on_exception()) { 1908 LIR_Opr obj = new_register(T_OBJECT); 1909 __ move(LIR_OprFact::oopConst(NULL), obj); 1910 __ null_check(obj, new CodeEmitInfo(null_check_info)); 1911 } 1912 } 1913 1914 // emit array address setup early so it schedules better 1915 LIR_Address* array_addr = emit_array_address(array.result(), index.result(), x->elt_type(), false); 1916 1917 if (GenerateRangeChecks && needs_range_check) { 1918 if (StressLoopInvariantCodeMotion && range_check_info->deoptimize_on_exception()) { 1919 __ branch(lir_cond_always, T_ILLEGAL, new RangeCheckStub(range_check_info, index.result())); 1920 } else if (use_length) { 1921 // TODO: use a (modified) version of array_range_check that does not require a 1922 // constant length to be loaded to a register 1923 __ cmp(lir_cond_belowEqual, length.result(), 
index.result()); 1924 __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result())); 1925 } else { 1926 array_range_check(array.result(), index.result(), null_check_info, range_check_info); 1927 // The range check performs the null check, so clear it out for the load 1928 null_check_info = NULL; 1929 } 1930 } 1931 1932 __ move(array_addr, rlock_result(x, x->elt_type()), null_check_info); 1933} 1934 1935 1936void LIRGenerator::do_NullCheck(NullCheck* x) { 1937 if (x->can_trap()) { 1938 LIRItem value(x->obj(), this); 1939 value.load_item(); 1940 CodeEmitInfo* info = state_for(x); 1941 __ null_check(value.result(), info); 1942 } 1943} 1944 1945 1946void LIRGenerator::do_TypeCast(TypeCast* x) { 1947 LIRItem value(x->obj(), this); 1948 value.load_item(); 1949 // the result is the same as from the node we are casting 1950 set_result(x, value.result()); 1951} 1952 1953 1954void LIRGenerator::do_Throw(Throw* x) { 1955 LIRItem exception(x->exception(), this); 1956 exception.load_item(); 1957 set_no_result(x); 1958 LIR_Opr exception_opr = exception.result(); 1959 CodeEmitInfo* info = state_for(x, x->state()); 1960 1961#ifndef PRODUCT 1962 if (PrintC1Statistics) { 1963 increment_counter(Runtime1::throw_count_address(), T_INT); 1964 } 1965#endif 1966 1967 // check if the instruction has an xhandler in any of the nested scopes 1968 bool unwind = false; 1969 if (info->exception_handlers()->length() == 0) { 1970 // this throw is not inside an xhandler 1971 unwind = true; 1972 } else { 1973 // get some idea of the throw type 1974 bool type_is_exact = true; 1975 ciType* throw_type = x->exception()->exact_type(); 1976 if (throw_type == NULL) { 1977 type_is_exact = false; 1978 throw_type = x->exception()->declared_type(); 1979 } 1980 if (throw_type != NULL && throw_type->is_instance_klass()) { 1981 ciInstanceKlass* throw_klass = (ciInstanceKlass*)throw_type; 1982 unwind = !x->exception_handlers()->could_catch(throw_klass, type_is_exact); 1983 } 1984 } 1985 1986 // do null check before moving exception oop into fixed register 1987 // to avoid a fixed interval with an oop during the null check. 1988 // Use a copy of the CodeEmitInfo because debug information is 1989 // different for null_check and throw. 1990 if (GenerateCompilerNullChecks && 1991 (x->exception()->as_NewInstance() == NULL && x->exception()->as_ExceptionObject() == NULL)) { 1992 // if the exception object wasn't created using new then it might be null. 
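// (Editorial note: NewInstance and ExceptionObject values are provably
// non-null, which is what the condition above tests for; anything else,
// e.g. an exception oop loaded from a field or a local, might be null.)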
1993 __ null_check(exception_opr, new CodeEmitInfo(info, x->state()->copy(ValueStack::ExceptionState, x->state()->bci()))); 1994 } 1995 1996 if (compilation()->env()->jvmti_can_post_on_exceptions()) { 1997 // we need to go through the exception lookup path to get JVMTI 1998 // notification done 1999 unwind = false; 2000 } 2001 2002 // move exception oop into fixed register 2003 __ move(exception_opr, exceptionOopOpr()); 2004 2005 if (unwind) { 2006 __ unwind_exception(exceptionOopOpr()); 2007 } else { 2008 __ throw_exception(exceptionPcOpr(), exceptionOopOpr(), info); 2009 } 2010} 2011 2012 2013void LIRGenerator::do_RoundFP(RoundFP* x) { 2014 LIRItem input(x->input(), this); 2015 input.load_item(); 2016 LIR_Opr input_opr = input.result(); 2017 assert(input_opr->is_register(), "why round if value is not in a register?"); 2018 assert(input_opr->is_single_fpu() || input_opr->is_double_fpu(), "input should be floating-point value"); 2019 if (input_opr->is_single_fpu()) { 2020 set_result(x, round_item(input_opr)); // This code path not currently taken 2021 } else { 2022 LIR_Opr result = new_register(T_DOUBLE); 2023 set_vreg_flag(result, must_start_in_memory); 2024 __ roundfp(input_opr, LIR_OprFact::illegalOpr, result); 2025 set_result(x, result); 2026 } 2027} 2028 2029void LIRGenerator::do_UnsafeGetRaw(UnsafeGetRaw* x) { 2030 LIRItem base(x->base(), this); 2031 LIRItem idx(this); 2032 2033 base.load_item(); 2034 if (x->has_index()) { 2035 idx.set_instruction(x->index()); 2036 idx.load_nonconstant(); 2037 } 2038 2039 LIR_Opr reg = rlock_result(x, x->basic_type()); 2040 2041 int log2_scale = 0; 2042 if (x->has_index()) { 2043 assert(x->index()->type()->tag() == intTag, "should not find non-int index"); 2044 log2_scale = x->log2_scale(); 2045 } 2046 2047 assert(!x->has_index() || idx.value() == x->index(), "should match"); 2048 2049 LIR_Opr base_op = base.result(); 2050#ifndef _LP64 2051 if (x->base()->type()->tag() == longTag) { 2052 base_op = new_register(T_INT); 2053 __ convert(Bytecodes::_l2i, base.result(), base_op); 2054 } else { 2055 assert(x->base()->type()->tag() == intTag, "must be"); 2056 } 2057#endif 2058 2059 BasicType dst_type = x->basic_type(); 2060 LIR_Opr index_op = idx.result(); 2061 2062 LIR_Address* addr; 2063 if (index_op->is_constant()) { 2064 assert(log2_scale == 0, "must not have a scale"); 2065 addr = new LIR_Address(base_op, index_op->as_jint(), dst_type); 2066 } else { 2067#ifdef X86 2068#ifdef _LP64 2069 if (!index_op->is_illegal() && index_op->type() == T_INT) { 2070 LIR_Opr tmp = new_pointer_register(); 2071 __ convert(Bytecodes::_i2l, index_op, tmp); 2072 index_op = tmp; 2073 } 2074#endif 2075 addr = new LIR_Address(base_op, index_op, LIR_Address::Scale(log2_scale), 0, dst_type); 2076#elif defined(ARM) 2077 addr = generate_address(base_op, index_op, log2_scale, 0, dst_type); 2078#else 2079 if (index_op->is_illegal() || log2_scale == 0) { 2080#ifdef _LP64 2081 if (!index_op->is_illegal() && index_op->type() == T_INT) { 2082 LIR_Opr tmp = new_pointer_register(); 2083 __ convert(Bytecodes::_i2l, index_op, tmp); 2084 index_op = tmp; 2085 } 2086#endif 2087 addr = new LIR_Address(base_op, index_op, dst_type); 2088 } else { 2089 LIR_Opr tmp = new_pointer_register(); 2090 __ shift_left(index_op, log2_scale, tmp); 2091 addr = new LIR_Address(base_op, tmp, dst_type); 2092 } 2093#endif 2094 } 2095 2096 if (x->may_be_unaligned() && (dst_type == T_LONG || dst_type == T_DOUBLE)) { 2097 __ unaligned_move(addr, reg); 2098 } else { 2099 if (dst_type == T_OBJECT && x->is_wide()) { 
2100 __ move_wide(addr, reg); 2101 } else { 2102 __ move(addr, reg); 2103 } 2104 } 2105} 2106 2107 2108void LIRGenerator::do_UnsafePutRaw(UnsafePutRaw* x) { 2109 int log2_scale = 0; 2110 BasicType type = x->basic_type(); 2111 2112 if (x->has_index()) { 2113 assert(x->index()->type()->tag() == intTag, "should not find non-int index"); 2114 log2_scale = x->log2_scale(); 2115 } 2116 2117 LIRItem base(x->base(), this); 2118 LIRItem value(x->value(), this); 2119 LIRItem idx(this); 2120 2121 base.load_item(); 2122 if (x->has_index()) { 2123 idx.set_instruction(x->index()); 2124 idx.load_item(); 2125 } 2126 2127 if (type == T_BYTE || type == T_BOOLEAN) { 2128 value.load_byte_item(); 2129 } else { 2130 value.load_item(); 2131 } 2132 2133 set_no_result(x); 2134 2135 LIR_Opr base_op = base.result(); 2136#ifndef _LP64 2137 if (x->base()->type()->tag() == longTag) { 2138 base_op = new_register(T_INT); 2139 __ convert(Bytecodes::_l2i, base.result(), base_op); 2140 } else { 2141 assert(x->base()->type()->tag() == intTag, "must be"); 2142 } 2143#endif 2144 2145 LIR_Opr index_op = idx.result(); 2146 if (log2_scale != 0) { 2147 // temporary fix (platform dependent code without shift on Intel would be better) 2148 index_op = new_pointer_register(); 2149#ifdef _LP64 2150 if(idx.result()->type() == T_INT) { 2151 __ convert(Bytecodes::_i2l, idx.result(), index_op); 2152 } else { 2153#endif 2154 // TODO: ARM also allows embedded shift in the address 2155 __ move(idx.result(), index_op); 2156#ifdef _LP64 2157 } 2158#endif 2159 __ shift_left(index_op, log2_scale, index_op); 2160 } 2161#ifdef _LP64 2162 else if(!index_op->is_illegal() && index_op->type() == T_INT) { 2163 LIR_Opr tmp = new_pointer_register(); 2164 __ convert(Bytecodes::_i2l, index_op, tmp); 2165 index_op = tmp; 2166 } 2167#endif 2168 2169 LIR_Address* addr = new LIR_Address(base_op, index_op, x->basic_type()); 2170 __ move(value.result(), addr); 2171} 2172 2173 2174void LIRGenerator::do_UnsafeGetObject(UnsafeGetObject* x) { 2175 BasicType type = x->basic_type(); 2176 LIRItem src(x->object(), this); 2177 LIRItem off(x->offset(), this); 2178 2179 off.load_item(); 2180 src.load_item(); 2181 2182 LIR_Opr value = rlock_result(x, x->basic_type()); 2183 2184 get_Object_unsafe(value, src.result(), off.result(), type, x->is_volatile()); 2185 2186#if INCLUDE_ALL_GCS 2187 // We might be reading the value of the referent field of a 2188 // Reference object in order to attach it back to the live 2189 // object graph. If G1 is enabled then we need to record 2190 // the value that is being returned in an SATB log buffer. 2191 // 2192 // We need to generate code similar to the following... 2193 // 2194 // if (offset == java_lang_ref_Reference::referent_offset) { 2195 // if (src != NULL) { 2196 // if (klass(src)->reference_type() != REF_NONE) { 2197 // pre_barrier(..., value, ...); 2198 // } 2199 // } 2200 // } 2201 2202 if (UseG1GC && type == T_OBJECT) { 2203 bool gen_pre_barrier = true; // Assume we need to generate pre_barrier. 2204 bool gen_offset_check = true; // Assume we need to generate the offset guard. 2205 bool gen_source_check = true; // Assume we need to check the src object for null. 2206 bool gen_type_check = true; // Assume we need to check the reference_type. 2207 2208 if (off.is_constant()) { 2209 jlong off_con = (off.type()->is_int() ? 
2210 (jlong) off.get_jint_constant() :
2211 off.get_jlong_constant());
2212
2213
2214 if (off_con != (jlong) java_lang_ref_Reference::referent_offset) {
2215 // The constant offset is something other than referent_offset.
2216 // We can skip generating/checking the remaining guards and
2217 // skip generation of the code stub.
2218 gen_pre_barrier = false;
2219 } else {
2220 // The constant offset is the same as referent_offset -
2221 // we do not need to generate a runtime offset check.
2222 gen_offset_check = false;
2223 }
2224 }
2225
2226 // We don't need to generate the stub if the source object is an array
2227 if (gen_pre_barrier && src.type()->is_array()) {
2228 gen_pre_barrier = false;
2229 }
2230
2231 if (gen_pre_barrier) {
2232 // We still need to continue with the checks.
2233 if (src.is_constant()) {
2234 ciObject* src_con = src.get_jobject_constant();
2235
2236 if (src_con->is_null_object()) {
2237 // The constant src object is null - we can skip
2238 // generating the code stub.
2239 gen_pre_barrier = false;
2240 } else {
2241 // Non-null constant source object. We still have to generate
2242 // the slow stub - but we don't need to generate the runtime
2243 // null object check.
2244 gen_source_check = false;
2245 }
2246 }
2247 }
2248 if (gen_pre_barrier && !PatchALot) {
2249 // Can the klass of the object be statically determined to be
2250 // a sub-class of Reference?
2251 ciType* type = src.value()->declared_type();
2252 if ((type != NULL) && type->is_loaded()) {
2253 if (type->is_subtype_of(compilation()->env()->Reference_klass())) {
2254 gen_type_check = false;
2255 } else if (type->is_klass() &&
2256 !compilation()->env()->Object_klass()->is_subtype_of(type->as_klass())) {
2257 // Not Reference and not Object klass.
2258 gen_pre_barrier = false;
2259 }
2260 }
2261 }
2262
2263 if (gen_pre_barrier) {
2264 LabelObj* Lcont = new LabelObj();
2265
2266 // We may have to generate more than one runtime check here. Let's start with
2267 // the offset check.
2268 if (gen_offset_check) {
2269 // if (offset != referent_offset) -> continue
2270 // If offset is an int then we can do the comparison with the
2271 // referent_offset constant; otherwise we need to move
2272 // referent_offset into a temporary register and generate
2273 // a reg-reg compare.
2274
2275 LIR_Opr referent_off;
2276
2277 if (off.type()->is_int()) {
2278 referent_off = LIR_OprFact::intConst(java_lang_ref_Reference::referent_offset);
2279 } else {
2280 assert(off.type()->is_long(), "what else?");
2281 referent_off = new_register(T_LONG);
2282 __ move(LIR_OprFact::longConst(java_lang_ref_Reference::referent_offset), referent_off);
2283 }
2284 __ cmp(lir_cond_notEqual, off.result(), referent_off);
2285 __ branch(lir_cond_notEqual, as_BasicType(off.type()), Lcont->label());
2286 }
2287 if (gen_source_check) {
2288 // offset is a const and equals referent offset
2289 // if (source == null) -> continue
2290 __ cmp(lir_cond_equal, src.result(), LIR_OprFact::oopConst(NULL));
2291 __ branch(lir_cond_equal, T_OBJECT, Lcont->label());
2292 }
2293 LIR_Opr src_klass = new_register(T_OBJECT);
2294 if (gen_type_check) {
2295 // We have determined that offset == referent_offset && src != null.
2296 // if (src->_klass->_reference_type == REF_NONE) -> continue
2297 __ move(new LIR_Address(src.result(), oopDesc::klass_offset_in_bytes(), UseCompressedKlassPointers ?
T_OBJECT : T_ADDRESS), src_klass); 2298 LIR_Address* reference_type_addr = new LIR_Address(src_klass, in_bytes(InstanceKlass::reference_type_offset()), T_BYTE); 2299 LIR_Opr reference_type = new_register(T_INT); 2300 __ move(reference_type_addr, reference_type); 2301 __ cmp(lir_cond_equal, reference_type, LIR_OprFact::intConst(REF_NONE)); 2302 __ branch(lir_cond_equal, T_INT, Lcont->label()); 2303 } 2304 { 2305 // We have determined that src->_klass->_reference_type != REF_NONE 2306 // so register the value in the referent field with the pre-barrier. 2307 pre_barrier(LIR_OprFact::illegalOpr /* addr_opr */, 2308 value /* pre_val */, 2309 false /* do_load */, 2310 false /* patch */, 2311 NULL /* info */); 2312 } 2313 __ branch_destination(Lcont->label()); 2314 } 2315 } 2316#endif // INCLUDE_ALL_GCS 2317 2318 if (x->is_volatile() && os::is_MP()) __ membar_acquire(); 2319} 2320 2321 2322void LIRGenerator::do_UnsafePutObject(UnsafePutObject* x) { 2323 BasicType type = x->basic_type(); 2324 LIRItem src(x->object(), this); 2325 LIRItem off(x->offset(), this); 2326 LIRItem data(x->value(), this); 2327 2328 src.load_item(); 2329 if (type == T_BOOLEAN || type == T_BYTE) { 2330 data.load_byte_item(); 2331 } else { 2332 data.load_item(); 2333 } 2334 off.load_item(); 2335 2336 set_no_result(x); 2337 2338 if (x->is_volatile() && os::is_MP()) __ membar_release(); 2339 put_Object_unsafe(src.result(), off.result(), data.result(), type, x->is_volatile()); 2340 if (x->is_volatile() && os::is_MP()) __ membar(); 2341} 2342 2343 2344void LIRGenerator::do_UnsafePrefetch(UnsafePrefetch* x, bool is_store) { 2345 LIRItem src(x->object(), this); 2346 LIRItem off(x->offset(), this); 2347 2348 src.load_item(); 2349 if (off.is_constant() && can_inline_as_constant(x->offset())) { 2350 // let it be a constant 2351 off.dont_load_item(); 2352 } else { 2353 off.load_item(); 2354 } 2355 2356 set_no_result(x); 2357 2358 LIR_Address* addr = generate_address(src.result(), off.result(), 0, 0, T_BYTE); 2359 __ prefetch(addr, is_store); 2360} 2361 2362 2363void LIRGenerator::do_UnsafePrefetchRead(UnsafePrefetchRead* x) { 2364 do_UnsafePrefetch(x, false); 2365} 2366 2367 2368void LIRGenerator::do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) { 2369 do_UnsafePrefetch(x, true); 2370} 2371 2372 2373void LIRGenerator::do_SwitchRanges(SwitchRangeArray* x, LIR_Opr value, BlockBegin* default_sux) { 2374 int lng = x->length(); 2375 2376 for (int i = 0; i < lng; i++) { 2377 SwitchRange* one_range = x->at(i); 2378 int low_key = one_range->low_key(); 2379 int high_key = one_range->high_key(); 2380 BlockBegin* dest = one_range->sux(); 2381 if (low_key == high_key) { 2382 __ cmp(lir_cond_equal, value, low_key); 2383 __ branch(lir_cond_equal, T_INT, dest); 2384 } else if (high_key - low_key == 1) { 2385 __ cmp(lir_cond_equal, value, low_key); 2386 __ branch(lir_cond_equal, T_INT, dest); 2387 __ cmp(lir_cond_equal, value, high_key); 2388 __ branch(lir_cond_equal, T_INT, dest); 2389 } else { 2390 LabelObj* L = new LabelObj(); 2391 __ cmp(lir_cond_less, value, low_key); 2392 __ branch(lir_cond_less, T_INT, L->label()); 2393 __ cmp(lir_cond_lessEqual, value, high_key); 2394 __ branch(lir_cond_lessEqual, T_INT, dest); 2395 __ branch_destination(L->label()); 2396 } 2397 } 2398 __ jump(default_sux); 2399} 2400 2401 2402SwitchRangeArray* LIRGenerator::create_lookup_ranges(TableSwitch* x) { 2403 SwitchRangeList* res = new SwitchRangeList(); 2404 int len = x->length(); 2405 if (len > 0) { 2406 BlockBegin* sux = x->sux_at(0); 2407 int key = x->lo_key(); 2408 
BlockBegin* default_sux = x->default_sux(); 2409 SwitchRange* range = new SwitchRange(key, sux); 2410 for (int i = 0; i < len; i++, key++) { 2411 BlockBegin* new_sux = x->sux_at(i); 2412 if (sux == new_sux) { 2413 // still in same range 2414 range->set_high_key(key); 2415 } else { 2416 // skip tests which explicitly dispatch to the default 2417 if (sux != default_sux) { 2418 res->append(range); 2419 } 2420 range = new SwitchRange(key, new_sux); 2421 } 2422 sux = new_sux; 2423 } 2424 if (res->length() == 0 || res->last() != range) res->append(range); 2425 } 2426 return res; 2427} 2428 2429 2430// we expect the keys to be sorted by increasing value 2431SwitchRangeArray* LIRGenerator::create_lookup_ranges(LookupSwitch* x) { 2432 SwitchRangeList* res = new SwitchRangeList(); 2433 int len = x->length(); 2434 if (len > 0) { 2435 BlockBegin* default_sux = x->default_sux(); 2436 int key = x->key_at(0); 2437 BlockBegin* sux = x->sux_at(0); 2438 SwitchRange* range = new SwitchRange(key, sux); 2439 for (int i = 1; i < len; i++) { 2440 int new_key = x->key_at(i); 2441 BlockBegin* new_sux = x->sux_at(i); 2442 if (key+1 == new_key && sux == new_sux) { 2443 // still in same range 2444 range->set_high_key(new_key); 2445 } else { 2446 // skip tests which explicitly dispatch to the default 2447 if (range->sux() != default_sux) { 2448 res->append(range); 2449 } 2450 range = new SwitchRange(new_key, new_sux); 2451 } 2452 key = new_key; 2453 sux = new_sux; 2454 } 2455 if (res->length() == 0 || res->last() != range) res->append(range); 2456 } 2457 return res; 2458} 2459 2460 2461void LIRGenerator::do_TableSwitch(TableSwitch* x) { 2462 LIRItem tag(x->tag(), this); 2463 tag.load_item(); 2464 set_no_result(x); 2465 2466 if (x->is_safepoint()) { 2467 __ safepoint(safepoint_poll_register(), state_for(x, x->state_before())); 2468 } 2469 2470 // move values into phi locations 2471 move_to_phi(x->state()); 2472 2473 int lo_key = x->lo_key(); 2474 int hi_key = x->hi_key(); 2475 int len = x->length(); 2476 LIR_Opr value = tag.result(); 2477 if (UseTableRanges) { 2478 do_SwitchRanges(create_lookup_ranges(x), value, x->default_sux()); 2479 } else { 2480 for (int i = 0; i < len; i++) { 2481 __ cmp(lir_cond_equal, value, i + lo_key); 2482 __ branch(lir_cond_equal, T_INT, x->sux_at(i)); 2483 } 2484 __ jump(x->default_sux()); 2485 } 2486} 2487 2488 2489void LIRGenerator::do_LookupSwitch(LookupSwitch* x) { 2490 LIRItem tag(x->tag(), this); 2491 tag.load_item(); 2492 set_no_result(x); 2493 2494 if (x->is_safepoint()) { 2495 __ safepoint(safepoint_poll_register(), state_for(x, x->state_before())); 2496 } 2497 2498 // move values into phi locations 2499 move_to_phi(x->state()); 2500 2501 LIR_Opr value = tag.result(); 2502 if (UseTableRanges) { 2503 do_SwitchRanges(create_lookup_ranges(x), value, x->default_sux()); 2504 } else { 2505 int len = x->length(); 2506 for (int i = 0; i < len; i++) { 2507 __ cmp(lir_cond_equal, value, x->key_at(i)); 2508 __ branch(lir_cond_equal, T_INT, x->sux_at(i)); 2509 } 2510 __ jump(x->default_sux()); 2511 } 2512} 2513 2514 2515void LIRGenerator::do_Goto(Goto* x) { 2516 set_no_result(x); 2517 2518 if (block()->next()->as_OsrEntry()) { 2519 // need to free up storage used for OSR entry point 2520 LIR_Opr osrBuffer = block()->next()->operand(); 2521 BasicTypeList signature; 2522 signature.append(T_INT); 2523 CallingConvention* cc = frame_map()->c_calling_convention(&signature); 2524 __ move(osrBuffer, cc->args()->at(0)); 2525 __ call_runtime_leaf(CAST_FROM_FN_PTR(address, 
SharedRuntime::OSR_migration_end),
2526 getThreadTemp(), LIR_OprFact::illegalOpr, cc->args());
2527 }
2528
2529 if (x->is_safepoint()) {
2530 ValueStack* state = x->state_before() ? x->state_before() : x->state();
2531
2532 // increment backedge counter if needed
2533 CodeEmitInfo* info = state_for(x, state);
2534 increment_backedge_counter(info, x->profiled_bci());
2535 CodeEmitInfo* safepoint_info = state_for(x, state);
2536 __ safepoint(safepoint_poll_register(), safepoint_info);
2537 }
2538
2539 // Gotos can be folded Ifs; handle this case.
2540 if (x->should_profile()) {
2541 ciMethod* method = x->profiled_method();
2542 assert(method != NULL, "method should be set if branch is profiled");
2543 ciMethodData* md = method->method_data_or_null();
2544 assert(md != NULL, "Sanity");
2545 ciProfileData* data = md->bci_to_data(x->profiled_bci());
2546 assert(data != NULL, "must have profiling data");
2547 int offset;
2548 if (x->direction() == Goto::taken) {
2549 assert(data->is_BranchData(), "need BranchData for two-way branches");
2550 offset = md->byte_offset_of_slot(data, BranchData::taken_offset());
2551 } else if (x->direction() == Goto::not_taken) {
2552 assert(data->is_BranchData(), "need BranchData for two-way branches");
2553 offset = md->byte_offset_of_slot(data, BranchData::not_taken_offset());
2554 } else {
2555 assert(data->is_JumpData(), "need JumpData for branches");
2556 offset = md->byte_offset_of_slot(data, JumpData::taken_offset());
2557 }
2558 LIR_Opr md_reg = new_register(T_METADATA);
2559 __ metadata2reg(md->constant_encoding(), md_reg);
2560
2561 increment_counter(new LIR_Address(md_reg, offset,
2562 NOT_LP64(T_INT) LP64_ONLY(T_LONG)), DataLayout::counter_increment);
2563 }
2564
2565 // emit phi-instruction move after safepoint since this simplifies
2566 // describing the state at the safepoint.
2567 move_to_phi(x->state());
2568
2569 __ jump(x->default_sux());
2570 }
2571
2572
2573 void LIRGenerator::do_Base(Base* x) {
2574 __ std_entry(LIR_OprFact::illegalOpr);
2575 // Emit moves from physical registers / stack slots to virtual registers
2576 CallingConvention* args = compilation()->frame_map()->incoming_arguments();
2577 IRScope* irScope = compilation()->hir()->top_scope();
2578 int java_index = 0;
2579 for (int i = 0; i < args->length(); i++) {
2580 LIR_Opr src = args->at(i);
2581 assert(!src->is_illegal(), "check");
2582 BasicType t = src->type();
2583
2584 // Types which are smaller than int are passed as int, so
2585 // correct the type that is passed.
2586 switch (t) {
2587 case T_BYTE:
2588 case T_BOOLEAN:
2589 case T_SHORT:
2590 case T_CHAR:
2591 t = T_INT;
2592 break;
2593 }
2594
2595 LIR_Opr dest = new_register(t);
2596 __ move(src, dest);
2597
2598 // Assign new location to Local instruction for this local
2599 Local* local = x->state()->local_at(java_index)->as_Local();
2600 assert(local != NULL, "Locals for incoming arguments must have been created");
2601 #ifndef __SOFTFP__
2602 // The java calling convention passes double as long and float as int.
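// (Editorial note: the __SOFTFP__ guard exists because under soft-float a
// float or double argument arrives typed as an integer, so the tag
// comparison in the assert below would not hold there.)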
2603 assert(as_ValueType(t)->tag() == local->type()->tag(), "check"); 2604#endif // __SOFTFP__ 2605 local->set_operand(dest); 2606 _instruction_for_operand.at_put_grow(dest->vreg_number(), local, NULL); 2607 java_index += type2size[t]; 2608 } 2609 2610 if (compilation()->env()->dtrace_method_probes()) { 2611 BasicTypeList signature; 2612 signature.append(LP64_ONLY(T_LONG) NOT_LP64(T_INT)); // thread 2613 signature.append(T_OBJECT); // Method* 2614 LIR_OprList* args = new LIR_OprList(); 2615 args->append(getThreadPointer()); 2616 LIR_Opr meth = new_register(T_METADATA); 2617 __ metadata2reg(method()->constant_encoding(), meth); 2618 args->append(meth); 2619 call_runtime(&signature, args, CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry), voidType, NULL); 2620 } 2621 2622 if (method()->is_synchronized()) { 2623 LIR_Opr obj; 2624 if (method()->is_static()) { 2625 obj = new_register(T_OBJECT); 2626 __ oop2reg(method()->holder()->java_mirror()->constant_encoding(), obj); 2627 } else { 2628 Local* receiver = x->state()->local_at(0)->as_Local(); 2629 assert(receiver != NULL, "must already exist"); 2630 obj = receiver->operand(); 2631 } 2632 assert(obj->is_valid(), "must be valid"); 2633 2634 if (method()->is_synchronized() && GenerateSynchronizationCode) { 2635 LIR_Opr lock = new_register(T_INT); 2636 __ load_stack_address_monitor(0, lock); 2637 2638 CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL, x->check_flag(Instruction::DeoptimizeOnException)); 2639 CodeStub* slow_path = new MonitorEnterStub(obj, lock, info); 2640 2641 // receiver is guaranteed non-NULL so don't need CodeEmitInfo 2642 __ lock_object(syncTempOpr(), obj, lock, new_register(T_OBJECT), slow_path, NULL); 2643 } 2644 } 2645 2646 // increment invocation counters if needed 2647 if (!method()->is_accessor()) { // Accessors do not have MDOs, so no counting. 2648 CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL, false); 2649 increment_invocation_counter(info); 2650 } 2651 2652 // all blocks with a successor must end with an unconditional jump 2653 // to the successor even if they are consecutive 2654 __ jump(x->default_sux()); 2655} 2656 2657 2658void LIRGenerator::do_OsrEntry(OsrEntry* x) { 2659 // construct our frame and model the production of incoming pointer 2660 // to the OSR buffer. 2661 __ osr_entry(LIR_Assembler::osrBufferPointer()); 2662 LIR_Opr result = rlock_result(x); 2663 __ move(LIR_Assembler::osrBufferPointer(), result); 2664} 2665 2666 2667void LIRGenerator::invoke_load_arguments(Invoke* x, LIRItemList* args, const LIR_OprList* arg_list) { 2668 assert(args->length() == arg_list->length(), 2669 err_msg_res("args=%d, arg_list=%d", args->length(), arg_list->length())); 2670 for (int i = x->has_receiver() ? 
1 : 0; i < args->length(); i++) {
2671 LIRItem* param = args->at(i);
2672 LIR_Opr loc = arg_list->at(i);
2673 if (loc->is_register()) {
2674 param->load_item_force(loc);
2675 } else {
2676 LIR_Address* addr = loc->as_address_ptr();
2677 param->load_for_store(addr->type());
2678 if (addr->type() == T_OBJECT) {
2679 __ move_wide(param->result(), addr);
2680 } else
2681 if (addr->type() == T_LONG || addr->type() == T_DOUBLE) {
2682 __ unaligned_move(param->result(), addr);
2683 } else {
2684 __ move(param->result(), addr);
2685 }
2686 }
2687 }
2688
2689 if (x->has_receiver()) {
2690 LIRItem* receiver = args->at(0);
2691 LIR_Opr loc = arg_list->at(0);
2692 if (loc->is_register()) {
2693 receiver->load_item_force(loc);
2694 } else {
2695 assert(loc->is_address(), "just checking");
2696 receiver->load_for_store(T_OBJECT);
2697 __ move_wide(receiver->result(), loc->as_address_ptr());
2698 }
2699 }
2700 }
2701
2702
2703 // Visits all arguments, returns appropriate items without loading them
2704 LIRItemList* LIRGenerator::invoke_visit_arguments(Invoke* x) {
2705 LIRItemList* argument_items = new LIRItemList();
2706 if (x->has_receiver()) {
2707 LIRItem* receiver = new LIRItem(x->receiver(), this);
2708 argument_items->append(receiver);
2709 }
2710 for (int i = 0; i < x->number_of_arguments(); i++) {
2711 LIRItem* param = new LIRItem(x->argument_at(i), this);
2712 argument_items->append(param);
2713 }
2714 return argument_items;
2715 }
2716
2717
2718 // The invoke with receiver has the following phases:
2719 // a) traverse and load/lock receiver;
2720 // b) traverse all arguments -> item-array (invoke_visit_arguments)
2721 // c) push receiver on stack
2722 // d) load each of the items and push on stack
2723 // e) unlock receiver
2724 // f) move receiver into receiver-register %o0
2725 // g) lock result registers and emit call operation
2726 //
2727 // Before issuing a call, we must spill-save all values on stack
2728 // that are in caller-save registers. "spill-save" moves those registers
2729 // either into a free callee-save register or spills them if no free
2730 // callee-save register is available.
2731 //
2732 // The problem is where to invoke spill-save.
2733 // - if invoked between e) and f), we may lock a callee-save
2734 // register in "spill-save" that destroys the receiver register
2735 // before f) is executed
2736 // - if we rearrange f) to be earlier, by loading %o0, it
2737 // may destroy a value on the stack that is currently in %o0
2738 // and is waiting to be spilled
2739 // - if we keep the receiver locked while doing spill-save,
2740 // we cannot spill it as it is spill-locked
2741 //
2742 void LIRGenerator::do_Invoke(Invoke* x) {
2743 CallingConvention* cc = frame_map()->java_calling_convention(x->signature(), true);
2744
2745 LIR_OprList* arg_list = cc->args();
2746 LIRItemList* args = invoke_visit_arguments(x);
2747 LIR_Opr receiver = LIR_OprFact::illegalOpr;
2748
2749 // setup result register
2750 LIR_Opr result_register = LIR_OprFact::illegalOpr;
2751 if (x->type() != voidType) {
2752 result_register = result_register_for(x->type());
2753 }
2754
2755 CodeEmitInfo* info = state_for(x, x->state());
2756
2757 invoke_load_arguments(x, args, arg_list);
2758
2759 if (x->has_receiver()) {
2760 args->at(0)->load_item_force(LIR_Assembler::receiverOpr());
2761 receiver = args->at(0)->result();
2762 }
2763
2764 // emit invoke code
2765 bool optimized = x->target_is_loaded() && x->target_is_final();
2766 assert(receiver->is_illegal() || receiver->is_equal(LIR_Assembler::receiverOpr()), "must match");
2767
2768 // JSR 292
2769 // Preserve the SP over MethodHandle call sites.
2770 ciMethod* target = x->target();
2771 bool is_method_handle_invoke = (// %%% FIXME: Are both of these relevant?
2772 target->is_method_handle_intrinsic() ||
2773 target->is_compiled_lambda_form());
2774 if (is_method_handle_invoke) {
2775 info->set_is_method_handle_invoke(true);
2776 __ move(FrameMap::stack_pointer(), FrameMap::method_handle_invoke_SP_save_opr());
2777 }
2778
2779 switch (x->code()) {
2780 case Bytecodes::_invokestatic:
2781 __ call_static(target, result_register,
2782 SharedRuntime::get_resolve_static_call_stub(),
2783 arg_list, info);
2784 break;
2785 case Bytecodes::_invokespecial:
2786 case Bytecodes::_invokevirtual:
2787 case Bytecodes::_invokeinterface:
2788 // for a final target we still produce an inline cache, in order
2789 // to be able to call mixed mode
2790 if (x->code() == Bytecodes::_invokespecial || optimized) {
2791 __ call_opt_virtual(target, receiver, result_register,
2792 SharedRuntime::get_resolve_opt_virtual_call_stub(),
2793 arg_list, info);
2794 } else if (x->vtable_index() < 0) {
2795 __ call_icvirtual(target, receiver, result_register,
2796 SharedRuntime::get_resolve_virtual_call_stub(),
2797 arg_list, info);
2798 } else {
2799 int entry_offset = InstanceKlass::vtable_start_offset() + x->vtable_index() * vtableEntry::size();
2800 int vtable_offset = entry_offset * wordSize + vtableEntry::method_offset_in_bytes();
2801 __ call_virtual(target, receiver, result_register, vtable_offset, arg_list, info);
2802 }
2803 break;
2804 case Bytecodes::_invokedynamic: {
2805 __ call_dynamic(target, receiver, result_register,
2806 SharedRuntime::get_resolve_static_call_stub(),
2807 arg_list, info);
2808 break;
2809 }
2810 default:
2811 fatal(err_msg("unexpected bytecode: %s", Bytecodes::name(x->code())));
2812 break;
2813 }
2814
2815 // JSR 292
2816 // Restore the SP after MethodHandle call sites.
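// (Editorial note: this restore pairs with the save of
// FrameMap::stack_pointer() into method_handle_invoke_SP_save_opr() before
// the call above; a method handle adapter may adjust SP, so the saved
// value is reinstated once the call site completes.)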
2817 if (is_method_handle_invoke) { 2818 __ move(FrameMap::method_handle_invoke_SP_save_opr(), FrameMap::stack_pointer()); 2819 } 2820 2821 if (x->type()->is_float() || x->type()->is_double()) { 2822 // Force rounding of results from non-strictfp when in strictfp 2823 // scope (or when we don't know the strictness of the callee, to 2824 // be safe.) 2825 if (method()->is_strict()) { 2826 if (!x->target_is_loaded() || !x->target_is_strictfp()) { 2827 result_register = round_item(result_register); 2828 } 2829 } 2830 } 2831 2832 if (result_register->is_valid()) { 2833 LIR_Opr result = rlock_result(x); 2834 __ move(result_register, result); 2835 } 2836} 2837 2838 2839void LIRGenerator::do_FPIntrinsics(Intrinsic* x) { 2840 assert(x->number_of_arguments() == 1, "wrong type"); 2841 LIRItem value (x->argument_at(0), this); 2842 LIR_Opr reg = rlock_result(x); 2843 value.load_item(); 2844 LIR_Opr tmp = force_to_spill(value.result(), as_BasicType(x->type())); 2845 __ move(tmp, reg); 2846} 2847 2848 2849 2850// Code for : x->x() {x->cond()} x->y() ? x->tval() : x->fval() 2851void LIRGenerator::do_IfOp(IfOp* x) { 2852#ifdef ASSERT 2853 { 2854 ValueTag xtag = x->x()->type()->tag(); 2855 ValueTag ttag = x->tval()->type()->tag(); 2856 assert(xtag == intTag || xtag == objectTag, "cannot handle others"); 2857 assert(ttag == addressTag || ttag == intTag || ttag == objectTag || ttag == longTag, "cannot handle others"); 2858 assert(ttag == x->fval()->type()->tag(), "cannot handle others"); 2859 } 2860#endif 2861 2862 LIRItem left(x->x(), this); 2863 LIRItem right(x->y(), this); 2864 left.load_item(); 2865 if (can_inline_as_constant(right.value())) { 2866 right.dont_load_item(); 2867 } else { 2868 right.load_item(); 2869 } 2870 2871 LIRItem t_val(x->tval(), this); 2872 LIRItem f_val(x->fval(), this); 2873 t_val.dont_load_item(); 2874 f_val.dont_load_item(); 2875 LIR_Opr reg = rlock_result(x); 2876 2877 __ cmp(lir_cond(x->cond()), left.result(), right.result()); 2878 __ cmove(lir_cond(x->cond()), t_val.result(), f_val.result(), reg, as_BasicType(x->x()->type())); 2879} 2880 2881void LIRGenerator::do_RuntimeCall(address routine, int expected_arguments, Intrinsic* x) { 2882 assert(x->number_of_arguments() == expected_arguments, "wrong type"); 2883 LIR_Opr reg = result_register_for(x->type()); 2884 __ call_runtime_leaf(routine, getThreadTemp(), 2885 reg, new LIR_OprList()); 2886 LIR_Opr result = rlock_result(x); 2887 __ move(reg, result); 2888} 2889 2890#ifdef TRACE_HAVE_INTRINSICS 2891void LIRGenerator::do_ThreadIDIntrinsic(Intrinsic* x) { 2892 LIR_Opr thread = getThreadPointer(); 2893 LIR_Opr osthread = new_pointer_register(); 2894 __ move(new LIR_Address(thread, in_bytes(JavaThread::osthread_offset()), osthread->type()), osthread); 2895 size_t thread_id_size = OSThread::thread_id_size(); 2896 if (thread_id_size == (size_t) BytesPerLong) { 2897 LIR_Opr id = new_register(T_LONG); 2898 __ move(new LIR_Address(osthread, in_bytes(OSThread::thread_id_offset()), T_LONG), id); 2899 __ convert(Bytecodes::_l2i, id, rlock_result(x)); 2900 } else if (thread_id_size == (size_t) BytesPerInt) { 2901 __ move(new LIR_Address(osthread, in_bytes(OSThread::thread_id_offset()), T_INT), rlock_result(x)); 2902 } else { 2903 ShouldNotReachHere(); 2904 } 2905} 2906 2907void LIRGenerator::do_ClassIDIntrinsic(Intrinsic* x) { 2908 CodeEmitInfo* info = state_for(x); 2909 CodeEmitInfo* info2 = new CodeEmitInfo(info); // Clone for the second null check 2910 BasicType klass_pointer_type = NOT_LP64(T_INT) LP64_ONLY(T_LONG); 2911 assert(info != 
NULL, "must have info"); 2912 LIRItem arg(x->argument_at(1), this); 2913 arg.load_item(); 2914 LIR_Opr klass = new_pointer_register(); 2915 __ move(new LIR_Address(arg.result(), java_lang_Class::klass_offset_in_bytes(), klass_pointer_type), klass, info); 2916 LIR_Opr id = new_register(T_LONG); 2917 ByteSize offset = TRACE_ID_OFFSET; 2918 LIR_Address* trace_id_addr = new LIR_Address(klass, in_bytes(offset), T_LONG); 2919 __ move(trace_id_addr, id); 2920 __ logical_or(id, LIR_OprFact::longConst(0x01l), id); 2921 __ store(id, trace_id_addr); 2922 __ logical_and(id, LIR_OprFact::longConst(~0x3l), id); 2923 __ move(id, rlock_result(x)); 2924} 2925#endif 2926 2927void LIRGenerator::do_Intrinsic(Intrinsic* x) { 2928 switch (x->id()) { 2929 case vmIntrinsics::_intBitsToFloat : 2930 case vmIntrinsics::_doubleToRawLongBits : 2931 case vmIntrinsics::_longBitsToDouble : 2932 case vmIntrinsics::_floatToRawIntBits : { 2933 do_FPIntrinsics(x); 2934 break; 2935 } 2936 2937#ifdef TRACE_HAVE_INTRINSICS 2938 case vmIntrinsics::_threadID: do_ThreadIDIntrinsic(x); break; 2939 case vmIntrinsics::_classID: do_ClassIDIntrinsic(x); break; 2940 case vmIntrinsics::_counterTime: 2941 do_RuntimeCall(CAST_FROM_FN_PTR(address, TRACE_TIME_METHOD), 0, x); 2942 break; 2943#endif 2944 2945 case vmIntrinsics::_currentTimeMillis: 2946 do_RuntimeCall(CAST_FROM_FN_PTR(address, os::javaTimeMillis), 0, x); 2947 break; 2948 2949 case vmIntrinsics::_nanoTime: 2950 do_RuntimeCall(CAST_FROM_FN_PTR(address, os::javaTimeNanos), 0, x); 2951 break; 2952 2953 case vmIntrinsics::_Object_init: do_RegisterFinalizer(x); break; 2954 case vmIntrinsics::_isInstance: do_isInstance(x); break; 2955 case vmIntrinsics::_getClass: do_getClass(x); break; 2956 case vmIntrinsics::_currentThread: do_currentThread(x); break; 2957 2958 case vmIntrinsics::_dlog: // fall through 2959 case vmIntrinsics::_dlog10: // fall through 2960 case vmIntrinsics::_dabs: // fall through 2961 case vmIntrinsics::_dsqrt: // fall through 2962 case vmIntrinsics::_dtan: // fall through 2963 case vmIntrinsics::_dsin : // fall through 2964 case vmIntrinsics::_dcos : // fall through 2965 case vmIntrinsics::_dexp : // fall through 2966 case vmIntrinsics::_dpow : do_MathIntrinsic(x); break; 2967 case vmIntrinsics::_arraycopy: do_ArrayCopy(x); break; 2968 2969 // java.nio.Buffer.checkIndex 2970 case vmIntrinsics::_checkIndex: do_NIOCheckIndex(x); break; 2971 2972 case vmIntrinsics::_compareAndSwapObject: 2973 do_CompareAndSwap(x, objectType); 2974 break; 2975 case vmIntrinsics::_compareAndSwapInt: 2976 do_CompareAndSwap(x, intType); 2977 break; 2978 case vmIntrinsics::_compareAndSwapLong: 2979 do_CompareAndSwap(x, longType); 2980 break; 2981 2982 case vmIntrinsics::_loadFence : 2983 if (os::is_MP()) __ membar_acquire(); 2984 break; 2985 case vmIntrinsics::_storeFence: 2986 if (os::is_MP()) __ membar_release(); 2987 break; 2988 case vmIntrinsics::_fullFence : 2989 if (os::is_MP()) __ membar(); 2990 break; 2991 2992 case vmIntrinsics::_Reference_get: 2993 do_Reference_get(x); 2994 break; 2995 2996 default: ShouldNotReachHere(); break; 2997 } 2998} 2999 3000void LIRGenerator::do_ProfileCall(ProfileCall* x) { 3001 // Need recv in a temporary register so it interferes with the other temporaries 3002 LIR_Opr recv = LIR_OprFact::illegalOpr; 3003 LIR_Opr mdo = new_register(T_OBJECT); 3004 // tmp is used to hold the counters on SPARC 3005 LIR_Opr tmp = new_pointer_register(); 3006 if (x->recv() != NULL) { 3007 LIRItem value(x->recv(), this); 3008 value.load_item(); 3009 recv = 
new_register(T_OBJECT);
3010 __ move(value.result(), recv);
3011 }
3012 __ profile_call(x->method(), x->bci_of_invoke(), x->callee(), mdo, recv, tmp, x->known_holder());
3013 }
3014
3015 void LIRGenerator::do_ProfileInvoke(ProfileInvoke* x) {
3016 // We can safely ignore accessors here, since c2 will inline them anyway;
3017 // accessors are also always mature.
3018 if (!x->inlinee()->is_accessor()) {
3019 CodeEmitInfo* info = state_for(x, x->state(), true);
3020 // Notify the runtime very infrequently only to take care of counter overflows
3021 increment_event_counter_impl(info, x->inlinee(), (1 << Tier23InlineeNotifyFreqLog) - 1, InvocationEntryBci, false, true);
3022 }
3023 }
3024
3025 void LIRGenerator::increment_event_counter(CodeEmitInfo* info, int bci, bool backedge) {
3026 int freq_log;
3027 int level = compilation()->env()->comp_level();
3028 if (level == CompLevel_limited_profile) {
3029 freq_log = (backedge ? Tier2BackedgeNotifyFreqLog : Tier2InvokeNotifyFreqLog);
3030 } else if (level == CompLevel_full_profile) {
3031 freq_log = (backedge ? Tier3BackedgeNotifyFreqLog : Tier3InvokeNotifyFreqLog);
3032 } else {
3033 ShouldNotReachHere();
3034 }
3035 // Increment the appropriate invocation/backedge counter and notify the runtime.
3036 increment_event_counter_impl(info, info->scope()->method(), (1 << freq_log) - 1, bci, backedge, true);
3037 }
3038
3039 void LIRGenerator::increment_event_counter_impl(CodeEmitInfo* info,
3040 ciMethod *method, int frequency,
3041 int bci, bool backedge, bool notify) {
3042 assert(frequency == 0 || is_power_of_2(frequency + 1), "Frequency must be 2^n - 1 or 0");
3043 int level = _compilation->env()->comp_level();
3044 assert(level > CompLevel_simple, "Shouldn't be here");
3045
3046 int offset = -1;
3047 LIR_Opr counter_holder = new_register(T_METADATA);
3048 LIR_Opr meth;
3049 if (level == CompLevel_limited_profile) {
3050 offset = in_bytes(backedge ? Method::backedge_counter_offset() :
3051 Method::invocation_counter_offset());
3052 __ metadata2reg(method->constant_encoding(), counter_holder);
3053 meth = counter_holder;
3054 } else if (level == CompLevel_full_profile) {
3055 offset = in_bytes(backedge ?
MethodData::backedge_counter_offset() :
3056 MethodData::invocation_counter_offset());
3057 ciMethodData* md = method->method_data_or_null();
3058 assert(md != NULL, "Sanity");
3059 __ metadata2reg(md->constant_encoding(), counter_holder);
3060 meth = new_register(T_METADATA);
3061 __ metadata2reg(method->constant_encoding(), meth);
3062 } else {
3063 ShouldNotReachHere();
3064 }
3065 LIR_Address* counter = new LIR_Address(counter_holder, offset, T_INT);
3066 LIR_Opr result = new_register(T_INT);
3067 __ load(counter, result);
3068 __ add(result, LIR_OprFact::intConst(InvocationCounter::count_increment), result);
3069 __ store(result, counter);
3070 if (notify) {
3071 LIR_Opr mask = load_immediate(frequency << InvocationCounter::count_shift, T_INT);
3072 __ logical_and(result, mask, result);
3073 __ cmp(lir_cond_equal, result, LIR_OprFact::intConst(0));
3074 // The bci for info can point to the cmp for ifs; we want the if bci
3075 CodeStub* overflow = new CounterOverflowStub(info, bci, meth);
3076 __ branch(lir_cond_equal, T_INT, overflow);
3077 __ branch_destination(overflow->continuation());
3078 }
3079 }
3080
3081 void LIRGenerator::do_RuntimeCall(RuntimeCall* x) {
3082 LIR_OprList* args = new LIR_OprList(x->number_of_arguments());
3083 BasicTypeList* signature = new BasicTypeList(x->number_of_arguments());
3084
3085 if (x->pass_thread()) {
3086 signature->append(T_ADDRESS);
3087 args->append(getThreadPointer());
3088 }
3089
3090 for (int i = 0; i < x->number_of_arguments(); i++) {
3091 Value a = x->argument_at(i);
3092 LIRItem* item = new LIRItem(a, this);
3093 item->load_item();
3094 args->append(item->result());
3095 signature->append(as_BasicType(a->type()));
3096 }
3097
3098 LIR_Opr result = call_runtime(signature, args, x->entry(), x->type(), NULL);
3099 if (x->type() == voidType) {
3100 set_no_result(x);
3101 } else {
3102 __ move(result, rlock_result(x));
3103 }
3104 }
3105
3106 void LIRGenerator::do_Assert(Assert *x) {
3107 #ifdef ASSERT
3108 ValueTag tag = x->x()->type()->tag();
3109 If::Condition cond = x->cond();
3110
3111 LIRItem xitem(x->x(), this);
3112 LIRItem yitem(x->y(), this);
3113 LIRItem* xin = &xitem;
3114 LIRItem* yin = &yitem;
3115
3116 assert(tag == intTag, "Only integer assertions are valid!");
3117
3118 xin->load_item();
3119 yin->dont_load_item();
3120
3121 set_no_result(x);
3122
3123 LIR_Opr left = xin->result();
3124 LIR_Opr right = yin->result();
3125
3126 __ lir_assert(lir_cond(x->cond()), left, right, x->message(), true);
3127 #endif
3128 }
3129
3130
3131 void LIRGenerator::do_RangeCheckPredicate(RangeCheckPredicate *x) {
3132
3133
3134 Instruction *a = x->x();
3135 Instruction *b = x->y();
3136 if (!a || StressRangeCheckElimination) {
3137 assert(!b || StressRangeCheckElimination, "B must also be null");
3138
3139 CodeEmitInfo *info = state_for(x, x->state());
3140 CodeStub* stub = new PredicateFailedStub(info);
3141
3142 __ jump(stub);
3143 } else if (a->type()->as_IntConstant() && b->type()->as_IntConstant()) {
3144 int a_int = a->type()->as_IntConstant()->value();
3145 int b_int = b->type()->as_IntConstant()->value();
3146
3147 bool ok = false;
3148
3149 switch (x->cond()) {
3150 case Instruction::eql: ok = (a_int == b_int); break;
3151 case Instruction::neq: ok = (a_int != b_int); break;
3152 case Instruction::lss: ok = (a_int < b_int); break;
3153 case Instruction::leq: ok = (a_int <= b_int); break;
3154 case Instruction::gtr: ok = (a_int > b_int); break;
3155 case Instruction::geq: ok = (a_int >= b_int); break;
3156 case Instruction::aeq: ok = ((unsigned
int)a_int >= (unsigned int)b_int); break; 3157 case Instruction::beq: ok = ((unsigned int)a_int <= (unsigned int)b_int); break; 3158 default: ShouldNotReachHere(); 3159 } 3160 3161 if (ok) { 3162 3163 CodeEmitInfo *info = state_for(x, x->state()); 3164 CodeStub* stub = new PredicateFailedStub(info); 3165 3166 __ jump(stub); 3167 } 3168 } else { 3169 3170 ValueTag tag = x->x()->type()->tag(); 3171 If::Condition cond = x->cond(); 3172 LIRItem xitem(x->x(), this); 3173 LIRItem yitem(x->y(), this); 3174 LIRItem* xin = &xitem; 3175 LIRItem* yin = &yitem; 3176 3177 assert(tag == intTag, "Only integer deoptimizations are valid!"); 3178 3179 xin->load_item(); 3180 yin->dont_load_item(); 3181 set_no_result(x); 3182 3183 LIR_Opr left = xin->result(); 3184 LIR_Opr right = yin->result(); 3185 3186 CodeEmitInfo *info = state_for(x, x->state()); 3187 CodeStub* stub = new PredicateFailedStub(info); 3188 3189 __ cmp(lir_cond(cond), left, right); 3190 __ branch(lir_cond(cond), right->type(), stub); 3191 } 3192} 3193 3194 3195LIR_Opr LIRGenerator::call_runtime(Value arg1, address entry, ValueType* result_type, CodeEmitInfo* info) { 3196 LIRItemList args(1); 3197 LIRItem value(arg1, this); 3198 args.append(&value); 3199 BasicTypeList signature; 3200 signature.append(as_BasicType(arg1->type())); 3201 3202 return call_runtime(&signature, &args, entry, result_type, info); 3203} 3204 3205 3206LIR_Opr LIRGenerator::call_runtime(Value arg1, Value arg2, address entry, ValueType* result_type, CodeEmitInfo* info) { 3207 LIRItemList args(2); 3208 LIRItem value1(arg1, this); 3209 LIRItem value2(arg2, this); 3210 args.append(&value1); 3211 args.append(&value2); 3212 BasicTypeList signature; 3213 signature.append(as_BasicType(arg1->type())); 3214 signature.append(as_BasicType(arg2->type())); 3215 3216 return call_runtime(&signature, &args, entry, result_type, info); 3217} 3218 3219 3220LIR_Opr LIRGenerator::call_runtime(BasicTypeArray* signature, LIR_OprList* args, 3221 address entry, ValueType* result_type, CodeEmitInfo* info) { 3222 // get a result register 3223 LIR_Opr phys_reg = LIR_OprFact::illegalOpr; 3224 LIR_Opr result = LIR_OprFact::illegalOpr; 3225 if (result_type->tag() != voidTag) { 3226 result = new_register(result_type); 3227 phys_reg = result_register_for(result_type); 3228 } 3229 3230 // move the arguments into the correct location 3231 CallingConvention* cc = frame_map()->c_calling_convention(signature); 3232 assert(cc->length() == args->length(), "argument mismatch"); 3233 for (int i = 0; i < args->length(); i++) { 3234 LIR_Opr arg = args->at(i); 3235 LIR_Opr loc = cc->at(i); 3236 if (loc->is_register()) { 3237 __ move(arg, loc); 3238 } else { 3239 LIR_Address* addr = loc->as_address_ptr(); 3240// if (!can_store_as_constant(arg)) { 3241// LIR_Opr tmp = new_register(arg->type()); 3242// __ move(arg, tmp); 3243// arg = tmp; 3244// } 3245 if (addr->type() == T_LONG || addr->type() == T_DOUBLE) { 3246 __ unaligned_move(arg, addr); 3247 } else { 3248 __ move(arg, addr); 3249 } 3250 } 3251 } 3252 3253 if (info) { 3254 __ call_runtime(entry, getThreadTemp(), phys_reg, cc->args(), info); 3255 } else { 3256 __ call_runtime_leaf(entry, getThreadTemp(), phys_reg, cc->args()); 3257 } 3258 if (result->is_valid()) { 3259 __ move(phys_reg, result); 3260 } 3261 return result; 3262} 3263 3264 3265LIR_Opr LIRGenerator::call_runtime(BasicTypeArray* signature, LIRItemList* args, 3266 address entry, ValueType* result_type, CodeEmitInfo* info) { 3267 // get a result register 3268 LIR_Opr phys_reg = LIR_OprFact::illegalOpr; 
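// (Editorial note: in the variant below, phys_reg becomes the fixed ABI
// result register for result_type while 'result' is a fresh virtual
// register; the value is copied out of phys_reg right after the call so
// the fixed register does not stay live.)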
3269 LIR_Opr result = LIR_OprFact::illegalOpr; 3270 if (result_type->tag() != voidTag) { 3271 result = new_register(result_type); 3272 phys_reg = result_register_for(result_type); 3273 } 3274 3275 // move the arguments into the correct location 3276 CallingConvention* cc = frame_map()->c_calling_convention(signature); 3277 3278 assert(cc->length() == args->length(), "argument mismatch"); 3279 for (int i = 0; i < args->length(); i++) { 3280 LIRItem* arg = args->at(i); 3281 LIR_Opr loc = cc->at(i); 3282 if (loc->is_register()) { 3283 arg->load_item_force(loc); 3284 } else { 3285 LIR_Address* addr = loc->as_address_ptr(); 3286 arg->load_for_store(addr->type()); 3287 if (addr->type() == T_LONG || addr->type() == T_DOUBLE) { 3288 __ unaligned_move(arg->result(), addr); 3289 } else { 3290 __ move(arg->result(), addr); 3291 } 3292 } 3293 } 3294 3295 if (info) { 3296 __ call_runtime(entry, getThreadTemp(), phys_reg, cc->args(), info); 3297 } else { 3298 __ call_runtime_leaf(entry, getThreadTemp(), phys_reg, cc->args()); 3299 } 3300 if (result->is_valid()) { 3301 __ move(phys_reg, result); 3302 } 3303 return result; 3304} 3305 3306void LIRGenerator::do_MemBar(MemBar* x) { 3307 if (os::is_MP()) { 3308 LIR_Code code = x->code(); 3309 switch(code) { 3310 case lir_membar_acquire : __ membar_acquire(); break; 3311 case lir_membar_release : __ membar_release(); break; 3312 case lir_membar : __ membar(); break; 3313 case lir_membar_loadload : __ membar_loadload(); break; 3314 case lir_membar_storestore: __ membar_storestore(); break; 3315 case lir_membar_loadstore : __ membar_loadstore(); break; 3316 case lir_membar_storeload : __ membar_storeload(); break; 3317 default : ShouldNotReachHere(); break; 3318 } 3319 } 3320} 3321