c1_LIRGenerator.cpp revision 2311:d86923d96dca
/*
 * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_Instruction.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_LIRGenerator.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArrayKlass.hpp"
#include "ci/ciCPCache.hpp"
#include "ci/ciInstance.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/bitMap.inline.hpp"
#ifndef SERIALGC
#include "gc_implementation/g1/heapRegion.hpp"
#endif

#ifdef ASSERT
#define __ gen()->lir(__FILE__, __LINE__)->
#else
#define __ gen()->lir()->
#endif

// TODO: ARM - Use some recognizable constant which still fits architectural constraints
#ifdef ARM
#define PATCHED_ADDR  (204)
#else
#define PATCHED_ADDR  (max_jint)
#endif

void PhiResolverState::reset(int max_vregs) {
  // Initialize array sizes
  _virtual_operands.at_put_grow(max_vregs - 1, NULL, NULL);
  _virtual_operands.trunc_to(0);
  _other_operands.at_put_grow(max_vregs - 1, NULL, NULL);
  _other_operands.trunc_to(0);
  _vreg_table.at_put_grow(max_vregs - 1, NULL, NULL);
  _vreg_table.trunc_to(0);
}



//--------------------------------------------------------------
// PhiResolver

// Resolves cycles:
//
//  r1 := r2  becomes  temp := r1
//  r2 := r1           r1 := r2
//                     r2 := temp
// and orders moves:
//
//  r2 := r3  becomes  r1 := r2
//  r1 := r2           r2 := r3

PhiResolver::PhiResolver(LIRGenerator* gen, int max_vregs)
 : _gen(gen)
 , _state(gen->resolver_state())
 , _temp(LIR_OprFact::illegalOpr)
{
  // reinitialize the shared state arrays
  _state.reset(max_vregs);
}


void PhiResolver::emit_move(LIR_Opr src, LIR_Opr dest) {
  assert(src->is_valid(), "");
  assert(dest->is_valid(), "");
  __ move(src, dest);
}


void PhiResolver::move_temp_to(LIR_Opr dest) {
  assert(_temp->is_valid(), "");
  emit_move(_temp, dest);
  NOT_PRODUCT(_temp = LIR_OprFact::illegalOpr);
}


void PhiResolver::move_to_temp(LIR_Opr src) {
  assert(_temp->is_illegal(), "");
  _temp = _gen->new_register(src->type());
  emit_move(src, _temp);
}


// Traverse assignment graph in depth first order and generate moves in post order
// i.e. two assignments: b := c, a := b start with node c:
//   Call graph: move(NULL, c) -> move(c, b) -> move(b, a)
//   Generates moves in this order: move b to a and move c to b
// i.e. cycle a := b, b := a start with node a
//   Call graph: move(NULL, a) -> move(a, b) -> move(b, a)
//   Generates moves in this order: move b to temp, move a to b, move temp to a
void PhiResolver::move(ResolveNode* src, ResolveNode* dest) {
  if (!dest->visited()) {
    dest->set_visited();
    for (int i = dest->no_of_destinations()-1; i >= 0; i --) {
      move(dest, dest->destination_at(i));
    }
  } else if (!dest->start_node()) {
    // cycle in graph detected
    assert(_loop == NULL, "only one loop valid!");
    _loop = dest;
    move_to_temp(src->operand());
    return;
  } // else dest is a start node

  if (!dest->assigned()) {
    if (_loop == dest) {
      move_temp_to(dest->operand());
      dest->set_assigned();
    } else if (src != NULL) {
      emit_move(src->operand(), dest->operand());
      dest->set_assigned();
    }
  }
}


PhiResolver::~PhiResolver() {
  int i;
  // resolve any cycles in moves from and to virtual registers
  for (i = virtual_operands().length() - 1; i >= 0; i --) {
    ResolveNode* node = virtual_operands()[i];
    if (!node->visited()) {
      _loop = NULL;
      move(NULL, node);
      node->set_start_node();
      assert(_temp->is_illegal(), "move_temp_to() call missing");
    }
  }

  // generate moves from non-virtual registers to arbitrary destinations
  for (i = other_operands().length() - 1; i >= 0; i --) {
    ResolveNode* node = other_operands()[i];
    for (int j = node->no_of_destinations() - 1; j >= 0; j --) {
      emit_move(node->operand(), node->destination_at(j)->operand());
    }
  }
}


ResolveNode* PhiResolver::create_node(LIR_Opr opr, bool source) {
  ResolveNode* node;
  if (opr->is_virtual()) {
    int vreg_num = opr->vreg_number();
    node = vreg_table().at_grow(vreg_num, NULL);
    assert(node == NULL || node->operand() == opr, "");
    if (node == NULL) {
      node = new ResolveNode(opr);
      vreg_table()[vreg_num] = node;
    }
    // Make sure that all virtual operands show up in the list when
    // they are used as the source of a move.
    if (source && !virtual_operands().contains(node)) {
      virtual_operands().append(node);
    }
  } else {
    assert(source, "");
    node = new ResolveNode(opr);
    other_operands().append(node);
  }
  return node;
}


void PhiResolver::move(LIR_Opr src, LIR_Opr dest) {
  assert(dest->is_virtual(), "");
  // tty->print("move "); src->print(); tty->print(" to "); dest->print(); tty->cr();
  assert(src->is_valid(), "");
  assert(dest->is_valid(), "");
  ResolveNode* source = source_node(src);
  source->append(destination_node(dest));
}


//--------------------------------------------------------------
// LIRItem

void LIRItem::set_result(LIR_Opr opr) {
  assert(value()->operand()->is_illegal() || value()->operand()->is_constant(), "operand should never change");
  value()->set_operand(opr);

  if (opr->is_virtual()) {
    _gen->_instruction_for_operand.at_put_grow(opr->vreg_number(), value(), NULL);
  }

  _result = opr;
}

void LIRItem::load_item() {
  if (result()->is_illegal()) {
    // update the item's result
    _result = value()->operand();
  }
  if (!result()->is_register()) {
    LIR_Opr reg = _gen->new_register(value()->type());
    __ move(result(), reg);
    if (result()->is_constant()) {
      _result = reg;
    } else {
      set_result(reg);
    }
  }
}


void LIRItem::load_for_store(BasicType type) {
  if (_gen->can_store_as_constant(value(), type)) {
    _result = value()->operand();
    if (!_result->is_constant()) {
      _result = LIR_OprFact::value_type(value()->type());
    }
  } else if (type == T_BYTE || type == T_BOOLEAN) {
    load_byte_item();
  } else {
    load_item();
  }
}

void LIRItem::load_item_force(LIR_Opr reg) {
  LIR_Opr r = result();
  if (r != reg) {
#if !defined(ARM) && !defined(E500V2)
    if (r->type() != reg->type()) {
      // moves between different types need an intervening spill slot
      r = _gen->force_to_spill(r, reg->type());
    }
#endif
    __ move(r, reg);
    _result = reg;
  }
}

ciObject* LIRItem::get_jobject_constant() const {
  ObjectType* oc = type()->as_ObjectType();
  if (oc) {
    return oc->constant_value();
  }
  return NULL;
}


jint LIRItem::get_jint_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_IntConstant() != NULL, "type check");
  return type()->as_IntConstant()->value();
}


jint LIRItem::get_address_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_AddressConstant() != NULL, "type check");
  return type()->as_AddressConstant()->value();
}


jfloat LIRItem::get_jfloat_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_FloatConstant() != NULL, "type check");
  return type()->as_FloatConstant()->value();
}


jdouble LIRItem::get_jdouble_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_DoubleConstant() != NULL, "type check");
  return type()->as_DoubleConstant()->value();
}


jlong LIRItem::get_jlong_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_LongConstant() != NULL, "type check");
  return type()->as_LongConstant()->value();
}



//--------------------------------------------------------------


void LIRGenerator::init() {
  _bs = Universe::heap()->barrier_set();
}

void LIRGenerator::block_do_prolog(BlockBegin* block) {
#ifndef PRODUCT
  if (PrintIRWithLIR) {
    block->print();
  }
#endif

  // set up the list of LIR instructions
  assert(block->lir() == NULL, "LIR list already computed for this block");
  _lir = new LIR_List(compilation(), block);
  block->set_lir(_lir);

  __ branch_destination(block->label());

  if (LIRTraceExecution &&
      Compilation::current()->hir()->start()->block_id() != block->block_id() &&
      !block->is_set(BlockBegin::exception_entry_flag)) {
    assert(block->lir()->instructions_list()->length() == 1, "should come right after br_dst");
    trace_block_entry(block);
  }
}


void LIRGenerator::block_do_epilog(BlockBegin* block) {
#ifndef PRODUCT
  if (PrintIRWithLIR) {
    tty->cr();
  }
#endif

  // LIR_Opr for unpinned constants shouldn't be referenced by other
  // blocks so clear them out after processing the block.
  for (int i = 0; i < _unpinned_constants.length(); i++) {
    _unpinned_constants.at(i)->clear_operand();
  }
  _unpinned_constants.trunc_to(0);

  // clear out any registers for other local constants
  _constants.trunc_to(0);
  _reg_for_constants.trunc_to(0);
}


void LIRGenerator::block_do(BlockBegin* block) {
  CHECK_BAILOUT();

  block_do_prolog(block);
  set_block(block);

  for (Instruction* instr = block; instr != NULL; instr = instr->next()) {
    if (instr->is_pinned()) do_root(instr);
  }

  set_block(NULL);
  block_do_epilog(block);
}


//-------------------------LIRGenerator-----------------------------

// This is where the tree-walk starts; instr must be root.
void LIRGenerator::do_root(Value instr) {
  CHECK_BAILOUT();

  InstructionMark im(compilation(), instr);

  assert(instr->is_pinned(), "use only with roots");
  assert(instr->subst() == instr, "shouldn't have missed substitution");

  instr->visit(this);

  assert(!instr->has_uses() || instr->operand()->is_valid() ||
         instr->as_Constant() != NULL || bailed_out(), "invalid item set");
}


// This is called for each node in the tree; the walk stops if a root is reached
void LIRGenerator::walk(Value instr) {
  InstructionMark im(compilation(), instr);
  // stop walking when we encounter a root
  if (instr->is_pinned() && instr->as_Phi() == NULL || instr->operand()->is_valid()) {
    assert(instr->operand() != LIR_OprFact::illegalOpr || instr->as_Constant() != NULL, "this root has not yet been visited");
  } else {
    assert(instr->subst() == instr, "shouldn't have missed substitution");
    instr->visit(this);
    // assert(instr->use_count() > 0 || instr->as_Phi() != NULL, "leaf instruction must have a use");
  }
}


CodeEmitInfo* LIRGenerator::state_for(Instruction* x, ValueStack* state, bool ignore_xhandler) {
  assert(state != NULL, "state must be defined");

  ValueStack* s = state;
  for_each_state(s) {
    if (s->kind() == ValueStack::EmptyExceptionState) {
      assert(s->stack_size() == 0 && s->locals_size() == 0 && (s->locks_size() == 0 || s->locks_size() == 1), "state must be empty");
      continue;
    }

    int index;
    Value value;
    for_each_stack_value(s, index, value) {
      assert(value->subst() == value, "missed substitution");
      if (!value->is_pinned() && value->as_Constant() == NULL && value->as_Local() == NULL) {
        walk(value);
        assert(value->operand()->is_valid(), "must be evaluated now");
      }
    }

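    // Use the method's liveness information at this bci so that locals which
    // are dead here need not be evaluated or recorded in the debug info;
    // dead slots are invalidated below so that linear scan can assume every
    // remaining non-NULL local is live.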
    int bci = s->bci();
    IRScope* scope = s->scope();
    ciMethod* method = scope->method();

    MethodLivenessResult liveness = method->liveness_at_bci(bci);
    if (bci == SynchronizationEntryBCI) {
      if (x->as_ExceptionObject() || x->as_Throw()) {
        // all locals are dead on exit from the synthetic unlocker
        liveness.clear();
      } else {
        assert(x->as_MonitorEnter(), "only other case is MonitorEnter");
      }
    }
    if (!liveness.is_valid()) {
      // Degenerate or breakpointed method.
      bailout("Degenerate or breakpointed method");
    } else {
      assert((int)liveness.size() == s->locals_size(), "error in use of liveness");
      for_each_local_value(s, index, value) {
        assert(value->subst() == value, "missed substitution");
        if (liveness.at(index) && !value->type()->is_illegal()) {
          if (!value->is_pinned() && value->as_Constant() == NULL && value->as_Local() == NULL) {
            walk(value);
            assert(value->operand()->is_valid(), "must be evaluated now");
          }
        } else {
          // NULL out this local so that linear scan can assume that all non-NULL values are live.
          s->invalidate_local(index);
        }
      }
    }
  }

  return new CodeEmitInfo(state, ignore_xhandler ? NULL : x->exception_handlers());
}


CodeEmitInfo* LIRGenerator::state_for(Instruction* x) {
  return state_for(x, x->exception_state());
}


void LIRGenerator::jobject2reg_with_patching(LIR_Opr r, ciObject* obj, CodeEmitInfo* info) {
  if (!obj->is_loaded() || PatchALot) {
    assert(info != NULL, "info must be set if class is not loaded");
    __ oop2reg_patch(NULL, r, info);
  } else {
    // no patching needed
    __ oop2reg(obj->constant_encoding(), r);
  }
}


void LIRGenerator::array_range_check(LIR_Opr array, LIR_Opr index,
                                     CodeEmitInfo* null_check_info, CodeEmitInfo* range_check_info) {
  CodeStub* stub = new RangeCheckStub(range_check_info, index);
  if (index->is_constant()) {
    cmp_mem_int(lir_cond_belowEqual, array, arrayOopDesc::length_offset_in_bytes(),
                index->as_jint(), null_check_info);
    __ branch(lir_cond_belowEqual, T_INT, stub); // forward branch
  } else {
    cmp_reg_mem(lir_cond_aboveEqual, index, array,
                arrayOopDesc::length_offset_in_bytes(), T_INT, null_check_info);
    __ branch(lir_cond_aboveEqual, T_INT, stub); // forward branch
  }
}


void LIRGenerator::nio_range_check(LIR_Opr buffer, LIR_Opr index, LIR_Opr result, CodeEmitInfo* info) {
  CodeStub* stub = new RangeCheckStub(info, index, true);
  if (index->is_constant()) {
    cmp_mem_int(lir_cond_belowEqual, buffer, java_nio_Buffer::limit_offset(), index->as_jint(), info);
    __ branch(lir_cond_belowEqual, T_INT, stub); // forward branch
  } else {
    cmp_reg_mem(lir_cond_aboveEqual, index, buffer,
                java_nio_Buffer::limit_offset(), T_INT, info);
    __ branch(lir_cond_aboveEqual, T_INT, stub); // forward branch
  }
  __ move(index, result);
}



void LIRGenerator::arithmetic_op(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, bool is_strictfp, LIR_Opr tmp_op, CodeEmitInfo* info) {
  LIR_Opr result_op = result;
  LIR_Opr left_op   = left;
  LIR_Opr right_op  = right;

  if (TwoOperandLIRForm && left_op != result_op) {
    assert(right_op != result_op, "malformed");
    __ move(left_op, result_op);
    left_op = result_op;
  }

  switch(code) {
    case Bytecodes::_dadd:
    case Bytecodes::_fadd:
    case Bytecodes::_ladd:
    case Bytecodes::_iadd:  __ add(left_op, right_op, result_op); break;
    case Bytecodes::_fmul:
    case Bytecodes::_lmul:  __ mul(left_op, right_op, result_op); break;

    case Bytecodes::_dmul:
      {
        if (is_strictfp) {
          __ mul_strictfp(left_op, right_op, result_op, tmp_op); break;
        } else {
          __ mul(left_op, right_op, result_op); break;
        }
      }
      break;

    case Bytecodes::_imul:
      {
        bool did_strength_reduce = false;

        if (right->is_constant()) {
          int c = right->as_jint();
          if (is_power_of_2(c)) {
            // do not need tmp here
            __ shift_left(left_op, exact_log2(c), result_op);
            did_strength_reduce = true;
          } else {
            did_strength_reduce = strength_reduce_multiply(left_op, c, result_op, tmp_op);
          }
        }
        // we couldn't strength reduce so just emit the multiply
        if (!did_strength_reduce) {
          __ mul(left_op, right_op, result_op);
        }
      }
      break;

    case Bytecodes::_dsub:
    case Bytecodes::_fsub:
    case Bytecodes::_lsub:
    case Bytecodes::_isub: __ sub(left_op, right_op, result_op); break;

    case Bytecodes::_fdiv: __ div (left_op, right_op, result_op); break;
    // ldiv and lrem are implemented with a direct runtime call

    case Bytecodes::_ddiv:
      {
        if (is_strictfp) {
          __ div_strictfp (left_op, right_op, result_op, tmp_op); break;
        } else {
          __ div (left_op, right_op, result_op); break;
        }
      }
      break;

    case Bytecodes::_drem:
    case Bytecodes::_frem: __ rem (left_op, right_op, result_op); break;

    default: ShouldNotReachHere();
  }
}


void LIRGenerator::arithmetic_op_int(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, LIR_Opr tmp) {
  arithmetic_op(code, result, left, right, false, tmp);
}


void LIRGenerator::arithmetic_op_long(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, CodeEmitInfo* info) {
  arithmetic_op(code, result, left, right, false, LIR_OprFact::illegalOpr, info);
}


void LIRGenerator::arithmetic_op_fpu(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, bool is_strictfp, LIR_Opr tmp) {
  arithmetic_op(code, result, left, right, is_strictfp, tmp);
}


void LIRGenerator::shift_op(Bytecodes::Code code, LIR_Opr result_op, LIR_Opr value, LIR_Opr count, LIR_Opr tmp) {
  if (TwoOperandLIRForm && value != result_op) {
    assert(count != result_op, "malformed");
    __ move(value, result_op);
    value = result_op;
  }

  assert(count->is_constant() || count->is_register(), "must be");
  switch(code) {
    case Bytecodes::_ishl:
    case Bytecodes::_lshl: __ shift_left(value, count, result_op, tmp); break;
    case Bytecodes::_ishr:
    case Bytecodes::_lshr: __ shift_right(value, count, result_op, tmp); break;
    case Bytecodes::_iushr:
    case Bytecodes::_lushr: __ unsigned_shift_right(value, count, result_op, tmp); break;
    default: ShouldNotReachHere();
  }
}


void LIRGenerator::logic_op (Bytecodes::Code code, LIR_Opr result_op, LIR_Opr left_op, LIR_Opr right_op) {
  if (TwoOperandLIRForm && left_op != result_op) {
    assert(right_op != result_op, "malformed");
    __ move(left_op, result_op);
    left_op = result_op;
  }

  switch(code) {
    case Bytecodes::_iand:
    case Bytecodes::_land: __ logical_and(left_op, right_op, result_op); break;

    case Bytecodes::_ior:
    case Bytecodes::_lor:  __ logical_or(left_op, right_op, result_op);  break;

    case Bytecodes::_ixor:
    case Bytecodes::_lxor: __ logical_xor(left_op, right_op, result_op); break;

    default: ShouldNotReachHere();
  }
}


void LIRGenerator::monitor_enter(LIR_Opr object, LIR_Opr lock, LIR_Opr hdr, LIR_Opr scratch, int monitor_no, CodeEmitInfo* info_for_exception, CodeEmitInfo* info) {
  if (!GenerateSynchronizationCode) return;
  // for slow path, use debug info for state after successful locking
  CodeStub* slow_path = new MonitorEnterStub(object, lock, info);
  __ load_stack_address_monitor(monitor_no, lock);
  // for handling NullPointerException, use debug info representing just the lock stack before this monitorenter
  __ lock_object(hdr, object, lock, scratch, slow_path, info_for_exception);
}


void LIRGenerator::monitor_exit(LIR_Opr object, LIR_Opr lock, LIR_Opr new_hdr, LIR_Opr scratch, int monitor_no) {
  if (!GenerateSynchronizationCode) return;
  // setup registers
  LIR_Opr hdr = lock;
  lock = new_hdr;
  CodeStub* slow_path = new MonitorExitStub(lock, UseFastLocking, monitor_no);
  __ load_stack_address_monitor(monitor_no, lock);
  __ unlock_object(hdr, object, lock, scratch, slow_path);
}


void LIRGenerator::new_instance(LIR_Opr dst, ciInstanceKlass* klass, LIR_Opr scratch1, LIR_Opr scratch2, LIR_Opr scratch3, LIR_Opr scratch4, LIR_Opr klass_reg, CodeEmitInfo* info) {
  jobject2reg_with_patching(klass_reg, klass, info);
  // If klass is not loaded we do not know if the klass has finalizers:
  if (UseFastNewInstance && klass->is_loaded()
      && !Klass::layout_helper_needs_slow_path(klass->layout_helper())) {

    Runtime1::StubID stub_id = klass->is_initialized() ? Runtime1::fast_new_instance_id : Runtime1::fast_new_instance_init_check_id;

    CodeStub* slow_path = new NewInstanceStub(klass_reg, dst, klass, info, stub_id);

    assert(klass->is_loaded(), "must be loaded");
    // allocate space for instance
    assert(klass->size_helper() >= 0, "illegal instance size");
    const int instance_size = align_object_size(klass->size_helper());
    __ allocate_object(dst, scratch1, scratch2, scratch3, scratch4,
                       oopDesc::header_size(), instance_size, klass_reg, !klass->is_initialized(), slow_path);
  } else {
    CodeStub* slow_path = new NewInstanceStub(klass_reg, dst, klass, info, Runtime1::new_instance_id);
    __ branch(lir_cond_always, T_ILLEGAL, slow_path);
    __ branch_destination(slow_path->continuation());
  }
}


static bool is_constant_zero(Instruction* inst) {
  IntConstant* c = inst->type()->as_IntConstant();
  if (c) {
    return (c->value() == 0);
  }
  return false;
}


static bool positive_constant(Instruction* inst) {
  IntConstant* c = inst->type()->as_IntConstant();
  if (c) {
    return (c->value() >= 0);
  }
  return false;
}


static ciArrayKlass* as_array_klass(ciType* type) {
  if (type != NULL && type->is_array_klass() && type->is_loaded()) {
    return (ciArrayKlass*)type;
  } else {
    return NULL;
  }
}

static Value maxvalue(IfOp* ifop) {
  switch (ifop->cond()) {
    case If::eql: return NULL;
    case If::neq: return NULL;
    case If::lss: // x <  y ? x : y
    case If::leq: // x <= y ? x : y
      if (ifop->x() == ifop->tval() &&
          ifop->y() == ifop->fval()) return ifop->y();
      return NULL;

    case If::gtr: // x >  y ? y : x
    case If::geq: // x >= y ? y : x
      if (ifop->x() == ifop->tval() &&
          ifop->y() == ifop->fval()) return ifop->y();
      return NULL;

  }
  return NULL;  // unreachable; all conditions handled above
}

static ciType* phi_declared_type(Phi* phi) {
  ciType* t = phi->operand_at(0)->declared_type();
  if (t == NULL) {
    return NULL;
  }
  for(int i = 1; i < phi->operand_count(); i++) {
    if (t != phi->operand_at(i)->declared_type()) {
      return NULL;
    }
  }
  return t;
}

void LIRGenerator::arraycopy_helper(Intrinsic* x, int* flagsp, ciArrayKlass** expected_typep) {
  Instruction* src     = x->argument_at(0);
  Instruction* src_pos = x->argument_at(1);
  Instruction* dst     = x->argument_at(2);
  Instruction* dst_pos = x->argument_at(3);
  Instruction* length  = x->argument_at(4);

  // first try to identify the likely type of the arrays involved
  ciArrayKlass* expected_type = NULL;
  bool is_exact = false, src_objarray = false, dst_objarray = false;
  {
    ciArrayKlass* src_exact_type    = as_array_klass(src->exact_type());
    ciArrayKlass* src_declared_type = as_array_klass(src->declared_type());
    Phi* phi;
    if (src_declared_type == NULL && (phi = src->as_Phi()) != NULL) {
      src_declared_type = as_array_klass(phi_declared_type(phi));
    }
    ciArrayKlass* dst_exact_type    = as_array_klass(dst->exact_type());
    ciArrayKlass* dst_declared_type = as_array_klass(dst->declared_type());
    if (dst_declared_type == NULL && (phi = dst->as_Phi()) != NULL) {
      dst_declared_type = as_array_klass(phi_declared_type(phi));
    }

    if (src_exact_type != NULL && src_exact_type == dst_exact_type) {
      // the types exactly match so the type is fully known
      is_exact = true;
      expected_type = src_exact_type;
    } else if (dst_exact_type != NULL && dst_exact_type->is_obj_array_klass()) {
      ciArrayKlass* dst_type = (ciArrayKlass*) dst_exact_type;
      ciArrayKlass* src_type = NULL;
      if (src_exact_type != NULL && src_exact_type->is_obj_array_klass()) {
        src_type = (ciArrayKlass*) src_exact_type;
      } else if (src_declared_type != NULL && src_declared_type->is_obj_array_klass()) {
        src_type = (ciArrayKlass*) src_declared_type;
      }
      if (src_type != NULL) {
        if (src_type->element_type()->is_subtype_of(dst_type->element_type())) {
          is_exact = true;
          expected_type = dst_type;
        }
      }
    }
    // at least pass along a good guess
    if (expected_type == NULL) expected_type = dst_exact_type;
    if (expected_type == NULL) expected_type = src_declared_type;
    if (expected_type == NULL) expected_type = dst_declared_type;

    src_objarray = (src_exact_type && src_exact_type->is_obj_array_klass()) || (src_declared_type && src_declared_type->is_obj_array_klass());
    dst_objarray = (dst_exact_type && dst_exact_type->is_obj_array_klass()) || (dst_declared_type && dst_declared_type->is_obj_array_klass());
  }

  // if a probable array type has been identified, figure out if any
  // of the required checks for a fast case can be elided.
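  // Start with every check enabled and clear the individual flags that the
  // analysis below can prove are unnecessary.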
  int flags = LIR_OpArrayCopy::all_flags;

  if (!src_objarray)
    flags &= ~LIR_OpArrayCopy::src_objarray;
  if (!dst_objarray)
    flags &= ~LIR_OpArrayCopy::dst_objarray;

  if (!x->arg_needs_null_check(0))
    flags &= ~LIR_OpArrayCopy::src_null_check;
  if (!x->arg_needs_null_check(2))
    flags &= ~LIR_OpArrayCopy::dst_null_check;


  if (expected_type != NULL) {
    Value length_limit = NULL;

    IfOp* ifop = length->as_IfOp();
    if (ifop != NULL) {
      // look for expressions like min(v, a.length) which ends up as
      //   x > y ? y : x  or  x >= y ? y : x
      if ((ifop->cond() == If::gtr || ifop->cond() == If::geq) &&
          ifop->x() == ifop->fval() &&
          ifop->y() == ifop->tval()) {
        length_limit = ifop->y();
      }
    }

    // try to skip null checks and range checks
    NewArray* src_array = src->as_NewArray();
    if (src_array != NULL) {
      flags &= ~LIR_OpArrayCopy::src_null_check;
      if (length_limit != NULL &&
          src_array->length() == length_limit &&
          is_constant_zero(src_pos)) {
        flags &= ~LIR_OpArrayCopy::src_range_check;
      }
    }

    NewArray* dst_array = dst->as_NewArray();
    if (dst_array != NULL) {
      flags &= ~LIR_OpArrayCopy::dst_null_check;
      if (length_limit != NULL &&
          dst_array->length() == length_limit &&
          is_constant_zero(dst_pos)) {
        flags &= ~LIR_OpArrayCopy::dst_range_check;
      }
    }

    // check from incoming constant values
    if (positive_constant(src_pos))
      flags &= ~LIR_OpArrayCopy::src_pos_positive_check;
    if (positive_constant(dst_pos))
      flags &= ~LIR_OpArrayCopy::dst_pos_positive_check;
    if (positive_constant(length))
      flags &= ~LIR_OpArrayCopy::length_positive_check;

    // see if the range check can be elided, which might also imply
    // that src or dst is non-null.
    ArrayLength* al = length->as_ArrayLength();
    if (al != NULL) {
      if (al->array() == src) {
        // it's the length of the source array
        flags &= ~LIR_OpArrayCopy::length_positive_check;
        flags &= ~LIR_OpArrayCopy::src_null_check;
        if (is_constant_zero(src_pos))
          flags &= ~LIR_OpArrayCopy::src_range_check;
      }
      if (al->array() == dst) {
        // it's the length of the destination array
        flags &= ~LIR_OpArrayCopy::length_positive_check;
        flags &= ~LIR_OpArrayCopy::dst_null_check;
        if (is_constant_zero(dst_pos))
          flags &= ~LIR_OpArrayCopy::dst_range_check;
      }
    }
    if (is_exact) {
      flags &= ~LIR_OpArrayCopy::type_check;
    }
  }

  IntConstant* src_int = src_pos->type()->as_IntConstant();
  IntConstant* dst_int = dst_pos->type()->as_IntConstant();
  if (src_int && dst_int) {
    int s_offs = src_int->value();
    int d_offs = dst_int->value();
    if (src_int->value() >= dst_int->value()) {
      flags &= ~LIR_OpArrayCopy::overlapping;
    }
    if (expected_type != NULL) {
      BasicType t = expected_type->element_type()->basic_type();
      int element_size = type2aelembytes(t);
      if (((arrayOopDesc::base_offset_in_bytes(t) + s_offs * element_size) % HeapWordSize == 0) &&
          ((arrayOopDesc::base_offset_in_bytes(t) + d_offs * element_size) % HeapWordSize == 0)) {
        flags &= ~LIR_OpArrayCopy::unaligned;
      }
    }
  } else if (src_pos == dst_pos || is_constant_zero(dst_pos)) {
    // src and dst positions are the same, or dst_pos is zero, so assume
    // a nonoverlapping copy.
    flags &= ~LIR_OpArrayCopy::overlapping;
  }

  if (src == dst) {
    // moving within a single array so no type checks are needed
    if (flags & LIR_OpArrayCopy::type_check) {
      flags &= ~LIR_OpArrayCopy::type_check;
    }
  }
  *flagsp = flags;
  *expected_typep = (ciArrayKlass*)expected_type;
}


LIR_Opr LIRGenerator::round_item(LIR_Opr opr) {
  assert(opr->is_register(), "why spill if item is not register?");

  if (RoundFPResults && UseSSE < 1 && opr->is_single_fpu()) {
    LIR_Opr result = new_register(T_FLOAT);
    set_vreg_flag(result, must_start_in_memory);
    assert(opr->is_register(), "only a register can be spilled");
    assert(opr->value_type()->is_float(), "rounding only for floats available");
    __ roundfp(opr, LIR_OprFact::illegalOpr, result);
    return result;
  }
  return opr;
}


LIR_Opr LIRGenerator::force_to_spill(LIR_Opr value, BasicType t) {
  assert(type2size[t] == type2size[value->type()], "size mismatch");
  if (!value->is_register()) {
    // force into a register
    LIR_Opr r = new_register(value->type());
    __ move(value, r);
    value = r;
  }

  // create a spill location
  LIR_Opr tmp = new_register(t);
  set_vreg_flag(tmp, LIRGenerator::must_start_in_memory);

  // move from register to spill
  __ move(value, tmp);
  return tmp;
}

void LIRGenerator::profile_branch(If* if_instr, If::Condition cond) {
  if (if_instr->should_profile()) {
    ciMethod* method = if_instr->profiled_method();
    assert(method != NULL, "method should be set if branch is profiled");
    ciMethodData* md = method->method_data_or_null();
    assert(md != NULL, "Sanity");
    ciProfileData* data = md->bci_to_data(if_instr->profiled_bci());
    assert(data != NULL, "must have profiling data");
    assert(data->is_BranchData(), "need BranchData for two-way branches");
    int taken_count_offset     = md->byte_offset_of_slot(data, BranchData::taken_offset());
    int not_taken_count_offset = md->byte_offset_of_slot(data, BranchData::not_taken_offset());
    if (if_instr->is_swapped()) {
      int t = taken_count_offset;
      taken_count_offset = not_taken_count_offset;
      not_taken_count_offset = t;
    }

    LIR_Opr md_reg = new_register(T_OBJECT);
    __ oop2reg(md->constant_encoding(), md_reg);

    LIR_Opr data_offset_reg = new_pointer_register();
    __ cmove(lir_cond(cond),
             LIR_OprFact::intptrConst(taken_count_offset),
             LIR_OprFact::intptrConst(not_taken_count_offset),
             data_offset_reg, as_BasicType(if_instr->x()->type()));

    // MDO cells are intptr_t, so the data_reg width is arch-dependent.
    LIR_Opr data_reg = new_pointer_register();
    LIR_Address* data_addr = new LIR_Address(md_reg, data_offset_reg, data_reg->type());
    __ move(data_addr, data_reg);
    // Use leal instead of add to avoid destroying condition codes on x86
    LIR_Address* fake_incr_value = new LIR_Address(data_reg, DataLayout::counter_increment, T_INT);
    __ leal(LIR_OprFact::address(fake_incr_value), data_reg);
    __ move(data_reg, data_addr);
  }
}

// Phi technique:
// This is about passing live values from one basic block to the other.
// In code generated with Java it is rather rare that more than one
// value is on the stack from one basic block to the other.
// We optimize our technique for efficient passing of one value
// (of type long, int, double..) but it can be extended.
// When entering or leaving a basic block, all registers and all spill
// slots are released and empty. We use the released registers
// and spill slots to pass the live values from one block
// to the other. The topmost value, i.e., the value on TOS of the expression
// stack, is passed in registers. All other values are stored in the spilling
// area. Every Phi has an index which designates its spill slot.
// At exit of a basic block, we fill the register(s) and spill slots.
// At entry of a basic block, the block_prolog sets up the content of phi nodes
// and locks necessary registers and spilling slots.


// move current value to referenced phi function
void LIRGenerator::move_to_phi(PhiResolver* resolver, Value cur_val, Value sux_val) {
  Phi* phi = sux_val->as_Phi();
  // cur_val can be null without phi being null in conjunction with inlining
  if (phi != NULL && cur_val != NULL && cur_val != phi && !phi->is_illegal()) {
    LIR_Opr operand = cur_val->operand();
    if (cur_val->operand()->is_illegal()) {
      assert(cur_val->as_Constant() != NULL || cur_val->as_Local() != NULL,
             "these can be produced lazily");
      operand = operand_for_instruction(cur_val);
    }
    resolver->move(operand, operand_for_instruction(phi));
  }
}


// Moves all stack values into their PHI position
void LIRGenerator::move_to_phi(ValueStack* cur_state) {
  BlockBegin* bb = block();
  if (bb->number_of_sux() == 1) {
    BlockBegin* sux = bb->sux_at(0);
    assert(sux->number_of_preds() > 0, "invalid CFG");

    // a block with only one predecessor never has phi functions
    if (sux->number_of_preds() > 1) {
      int max_phis = cur_state->stack_size() + cur_state->locals_size();
      PhiResolver resolver(this, _virtual_register_number + max_phis * 2);

      ValueStack* sux_state = sux->state();
      Value sux_value;
      int index;

      assert(cur_state->scope() == sux_state->scope(), "not matching");
      assert(cur_state->locals_size() == sux_state->locals_size(), "not matching");
      assert(cur_state->stack_size() == sux_state->stack_size(), "not matching");

      for_each_stack_value(sux_state, index, sux_value) {
        move_to_phi(&resolver, cur_state->stack_at(index), sux_value);
      }

      for_each_local_value(sux_state, index, sux_value) {
        move_to_phi(&resolver, cur_state->local_at(index), sux_value);
      }

      assert(cur_state->caller_state() == sux_state->caller_state(), "caller states must be equal");
    }
  }
}


LIR_Opr LIRGenerator::new_register(BasicType type) {
  int vreg = _virtual_register_number;
  // add a little fudge factor for the bailout, since the bailout is
  // only checked periodically. This gives a few extra registers to
  // hand out before we really run out, which helps us keep from
  // tripping over assertions.
  if (vreg + 20 >= LIR_OprDesc::vreg_max) {
    bailout("out of virtual registers");
    if (vreg + 2 >= LIR_OprDesc::vreg_max) {
      // wrap it around
      _virtual_register_number = LIR_OprDesc::vreg_base;
    }
  }
  _virtual_register_number += 1;
  return LIR_OprFact::virtual_register(vreg, type);
}


// Try to lock using register in hint
LIR_Opr LIRGenerator::rlock(Value instr) {
  return new_register(instr->type());
}


// does an rlock and sets result
LIR_Opr LIRGenerator::rlock_result(Value x) {
  LIR_Opr reg = rlock(x);
  set_result(x, reg);
  return reg;
}


// does an rlock and sets result
LIR_Opr LIRGenerator::rlock_result(Value x, BasicType type) {
  LIR_Opr reg;
  switch (type) {
  case T_BYTE:
  case T_BOOLEAN:
    reg = rlock_byte(type);
    break;
  default:
    reg = rlock(x);
    break;
  }

  set_result(x, reg);
  return reg;
}


//---------------------------------------------------------------------
ciObject* LIRGenerator::get_jobject_constant(Value value) {
  ObjectType* oc = value->type()->as_ObjectType();
  if (oc) {
    return oc->constant_value();
  }
  return NULL;
}


void LIRGenerator::do_ExceptionObject(ExceptionObject* x) {
  assert(block()->is_set(BlockBegin::exception_entry_flag), "ExceptionObject only allowed in exception handler block");
  assert(block()->next() == x, "ExceptionObject must be first instruction of block");

  // no moves are created for phi functions at the begin of exception
  // handlers, so assign operands manually here
  for_each_phi_fun(block(), phi,
                   operand_for_instruction(phi));

  LIR_Opr thread_reg = getThreadPointer();
  __ move_wide(new LIR_Address(thread_reg, in_bytes(JavaThread::exception_oop_offset()), T_OBJECT),
               exceptionOopOpr());
  __ move_wide(LIR_OprFact::oopConst(NULL),
               new LIR_Address(thread_reg, in_bytes(JavaThread::exception_oop_offset()), T_OBJECT));
  __ move_wide(LIR_OprFact::oopConst(NULL),
               new LIR_Address(thread_reg, in_bytes(JavaThread::exception_pc_offset()), T_OBJECT));

  LIR_Opr result = new_register(T_OBJECT);
  __ move(exceptionOopOpr(), result);
  set_result(x, result);
}


//----------------------------------------------------------------------
//----------------------------------------------------------------------
//----------------------------------------------------------------------
//----------------------------------------------------------------------
//                        visitor functions
//----------------------------------------------------------------------
//----------------------------------------------------------------------
//----------------------------------------------------------------------
//----------------------------------------------------------------------

void LIRGenerator::do_Phi(Phi* x) {
  // phi functions are never visited directly
  ShouldNotReachHere();
}


// Code for a constant is generated lazily unless the constant is frequently used and can't be inlined.
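// There are three cases: constants that require patching get a patched
// load; multiply-used, non-inlinable, unpinned constants are cached in a
// block-local register (see load_constant below); everything else is left
// as a constant operand.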
void LIRGenerator::do_Constant(Constant* x) {
  if (x->state_before() != NULL) {
    // Any constant with a ValueStack requires patching so emit the patch here
    LIR_Opr reg = rlock_result(x);
    CodeEmitInfo* info = state_for(x, x->state_before());
    __ oop2reg_patch(NULL, reg, info);
  } else if (x->use_count() > 1 && !can_inline_as_constant(x)) {
    if (!x->is_pinned()) {
      // unpinned constants are handled specially so that they can be
      // put into registers when they are used multiple times within a
      // block. After the block completes their operand will be
      // cleared so that other blocks can't refer to that register.
      set_result(x, load_constant(x));
    } else {
      LIR_Opr res = x->operand();
      if (!res->is_valid()) {
        res = LIR_OprFact::value_type(x->type());
      }
      if (res->is_constant()) {
        LIR_Opr reg = rlock_result(x);
        __ move(res, reg);
      } else {
        set_result(x, res);
      }
    }
  } else {
    set_result(x, LIR_OprFact::value_type(x->type()));
  }
}


void LIRGenerator::do_Local(Local* x) {
  // operand_for_instruction has the side effect of setting the result
  // so there's no need to do it here.
  operand_for_instruction(x);
}


void LIRGenerator::do_IfInstanceOf(IfInstanceOf* x) {
  Unimplemented();
}


void LIRGenerator::do_Return(Return* x) {
  if (compilation()->env()->dtrace_method_probes()) {
    BasicTypeList signature;
    signature.append(LP64_ONLY(T_LONG) NOT_LP64(T_INT));    // thread
    signature.append(T_OBJECT); // methodOop
    LIR_OprList* args = new LIR_OprList();
    args->append(getThreadPointer());
    LIR_Opr meth = new_register(T_OBJECT);
    __ oop2reg(method()->constant_encoding(), meth);
    args->append(meth);
    call_runtime(&signature, args, CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), voidType, NULL);
  }

  if (x->type()->is_void()) {
    __ return_op(LIR_OprFact::illegalOpr);
  } else {
    LIR_Opr reg = result_register_for(x->type(), /*callee=*/true);
    LIRItem result(x->result(), this);

    result.load_item_force(reg);
    __ return_op(result.result());
  }
  set_no_result(x);
}


// Example: object.getClass()
void LIRGenerator::do_getClass(Intrinsic* x) {
  assert(x->number_of_arguments() == 1, "wrong type");

  LIRItem rcvr(x->argument_at(0), this);
  rcvr.load_item();
  LIR_Opr result = rlock_result(x);

  // need to perform the null check on the rcvr
  CodeEmitInfo* info = NULL;
  if (x->needs_null_check()) {
    info = state_for(x);
  }
  __ move(new LIR_Address(rcvr.result(), oopDesc::klass_offset_in_bytes(), T_OBJECT), result, info);
  __ move_wide(new LIR_Address(result, Klass::java_mirror_offset_in_bytes() +
                               klassOopDesc::klass_part_offset_in_bytes(), T_OBJECT), result);
}


// Example: Thread.currentThread()
void LIRGenerator::do_currentThread(Intrinsic* x) {
  assert(x->number_of_arguments() == 0, "wrong type");
  LIR_Opr reg = rlock_result(x);
  __ move_wide(new LIR_Address(getThreadPointer(), in_bytes(JavaThread::threadObj_offset()), T_OBJECT), reg);
}


void LIRGenerator::do_RegisterFinalizer(Intrinsic* x) {
  assert(x->number_of_arguments() == 1, "wrong type");
  LIRItem receiver(x->argument_at(0), this);

  receiver.load_item();
  BasicTypeList signature;
  signature.append(T_OBJECT); // receiver
  LIR_OprList* args = new LIR_OprList();
  args->append(receiver.result());
  CodeEmitInfo* info = state_for(x, x->state());
  call_runtime(&signature, args,
               CAST_FROM_FN_PTR(address, Runtime1::entry_for(Runtime1::register_finalizer_id)),
               voidType, info);

  set_no_result(x);
}


//------------------------local access--------------------------------------

LIR_Opr LIRGenerator::operand_for_instruction(Instruction* x) {
  if (x->operand()->is_illegal()) {
    Constant* c = x->as_Constant();
    if (c != NULL) {
      x->set_operand(LIR_OprFact::value_type(c->type()));
    } else {
      assert(x->as_Phi() || x->as_Local() != NULL, "only for Phi and Local");
      // allocate a virtual register for this local or phi
      x->set_operand(rlock(x));
      _instruction_for_operand.at_put_grow(x->operand()->vreg_number(), x, NULL);
    }
  }
  return x->operand();
}


Instruction* LIRGenerator::instruction_for_opr(LIR_Opr opr) {
  if (opr->is_virtual()) {
    return instruction_for_vreg(opr->vreg_number());
  }
  return NULL;
}


Instruction* LIRGenerator::instruction_for_vreg(int reg_num) {
  if (reg_num < _instruction_for_operand.length()) {
    return _instruction_for_operand.at(reg_num);
  }
  return NULL;
}


void LIRGenerator::set_vreg_flag(int vreg_num, VregFlag f) {
  if (_vreg_flags.size_in_bits() == 0) {
    BitMap2D temp(100, num_vreg_flags);
    temp.clear();
    _vreg_flags = temp;
  }
  _vreg_flags.at_put_grow(vreg_num, f, true);
}

bool LIRGenerator::is_vreg_flag_set(int vreg_num, VregFlag f) {
  if (!_vreg_flags.is_valid_index(vreg_num, f)) {
    return false;
  }
  return _vreg_flags.at(vreg_num, f);
}


// Block local constant handling. This code is useful for keeping
// unpinned constants and constants which aren't exposed in the IR in
// registers. Unpinned Constant instructions have their operands
// cleared when the block is finished so that other blocks can't end
// up referring to their registers.

LIR_Opr LIRGenerator::load_constant(Constant* x) {
  assert(!x->is_pinned(), "only for unpinned constants");
  _unpinned_constants.append(x);
  return load_constant(LIR_OprFact::value_type(x->type())->as_constant_ptr());
}


LIR_Opr LIRGenerator::load_constant(LIR_Const* c) {
  BasicType t = c->type();
  for (int i = 0; i < _constants.length(); i++) {
    LIR_Const* other = _constants.at(i);
    if (t == other->type()) {
      switch (t) {
      case T_INT:
      case T_FLOAT:
        if (c->as_jint_bits() != other->as_jint_bits()) continue;
        break;
      case T_LONG:
      case T_DOUBLE:
        if (c->as_jint_hi_bits() != other->as_jint_hi_bits()) continue;
        if (c->as_jint_lo_bits() != other->as_jint_lo_bits()) continue;
        break;
      case T_OBJECT:
        if (c->as_jobject() != other->as_jobject()) continue;
        break;
      }
      return _reg_for_constants.at(i);
    }
  }

  LIR_Opr result = new_register(t);
  __ move((LIR_Opr)c, result);
  _constants.append(c);
  _reg_for_constants.append(result);
  return result;
}

// Various barriers

void LIRGenerator::pre_barrier(LIR_Opr addr_opr, bool patch, CodeEmitInfo* info) {
  // Do the pre-write barrier, if any.
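  // Only the G1 SATB collectors need pre-write work; the purely
  // card-table-based collectors get by with a post-write barrier alone.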
  switch (_bs->kind()) {
#ifndef SERIALGC
    case BarrierSet::G1SATBCT:
    case BarrierSet::G1SATBCTLogging:
      G1SATBCardTableModRef_pre_barrier(addr_opr, patch, info);
      break;
#endif // SERIALGC
    case BarrierSet::CardTableModRef:
    case BarrierSet::CardTableExtension:
      // No pre barriers
      break;
    case BarrierSet::ModRef:
    case BarrierSet::Other:
      // No pre barriers
      break;
    default:
      ShouldNotReachHere();
  }
}

void LIRGenerator::post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
  switch (_bs->kind()) {
#ifndef SERIALGC
    case BarrierSet::G1SATBCT:
    case BarrierSet::G1SATBCTLogging:
      G1SATBCardTableModRef_post_barrier(addr, new_val);
      break;
#endif // SERIALGC
    case BarrierSet::CardTableModRef:
    case BarrierSet::CardTableExtension:
      CardTableModRef_post_barrier(addr, new_val);
      break;
    case BarrierSet::ModRef:
    case BarrierSet::Other:
      // No post barriers
      break;
    default:
      ShouldNotReachHere();
  }
}

////////////////////////////////////////////////////////////////////////
#ifndef SERIALGC

void LIRGenerator::G1SATBCardTableModRef_pre_barrier(LIR_Opr addr_opr, bool patch, CodeEmitInfo* info) {
  if (G1DisablePreBarrier) return;

  // First we test whether marking is in progress.
  BasicType flag_type;
  if (in_bytes(PtrQueue::byte_width_of_active()) == 4) {
    flag_type = T_INT;
  } else {
    guarantee(in_bytes(PtrQueue::byte_width_of_active()) == 1,
              "Assumption");
    flag_type = T_BYTE;
  }
  LIR_Opr thrd = getThreadPointer();
  LIR_Address* mark_active_flag_addr =
    new LIR_Address(thrd,
                    in_bytes(JavaThread::satb_mark_queue_offset() +
                             PtrQueue::byte_offset_of_active()),
                    flag_type);
  // Read the marking-in-progress flag.
  LIR_Opr flag_val = new_register(T_INT);
  __ load(mark_active_flag_addr, flag_val);

  LIR_PatchCode pre_val_patch_code =
    patch ? lir_patch_normal : lir_patch_none;

  LIR_Opr pre_val = new_register(T_OBJECT);

  __ cmp(lir_cond_notEqual, flag_val, LIR_OprFact::intConst(0));
  if (!addr_opr->is_address()) {
    assert(addr_opr->is_register(), "must be");
    addr_opr = LIR_OprFact::address(new LIR_Address(addr_opr, T_OBJECT));
  }
  CodeStub* slow = new G1PreBarrierStub(addr_opr, pre_val, pre_val_patch_code,
                                        info);
  __ branch(lir_cond_notEqual, T_INT, slow);
  __ branch_destination(slow->continuation());
}

void LIRGenerator::G1SATBCardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
  if (G1DisablePostBarrier) return;

  // If the "new_val" is a constant NULL, no barrier is necessary.
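  // The barrier is also unnecessary when the stored-to field and the new
  // value live in the same heap region: the xor of the two addresses
  // shifted right by LogOfHRGrainBytes (computed below) is zero exactly in
  // that case, and only the non-zero, cross-region case branches to the
  // slow-path stub.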
  if (new_val->is_constant() &&
      new_val->as_constant_ptr()->as_jobject() == NULL) return;

  if (!new_val->is_register()) {
    LIR_Opr new_val_reg = new_register(T_OBJECT);
    if (new_val->is_constant()) {
      __ move(new_val, new_val_reg);
    } else {
      __ leal(new_val, new_val_reg);
    }
    new_val = new_val_reg;
  }
  assert(new_val->is_register(), "must be a register at this point");

  if (addr->is_address()) {
    LIR_Address* address = addr->as_address_ptr();
    LIR_Opr ptr = new_pointer_register();
    if (!address->index()->is_valid() && address->disp() == 0) {
      __ move(address->base(), ptr);
    } else {
      assert(address->disp() != max_jint, "lea doesn't support patched addresses!");
      __ leal(addr, ptr);
    }
    addr = ptr;
  }
  assert(addr->is_register(), "must be a register at this point");

  LIR_Opr xor_res = new_pointer_register();
  LIR_Opr xor_shift_res = new_pointer_register();
  if (TwoOperandLIRForm) {
    __ move(addr, xor_res);
    __ logical_xor(xor_res, new_val, xor_res);
    __ move(xor_res, xor_shift_res);
    __ unsigned_shift_right(xor_shift_res,
                            LIR_OprFact::intConst(HeapRegion::LogOfHRGrainBytes),
                            xor_shift_res,
                            LIR_OprDesc::illegalOpr());
  } else {
    __ logical_xor(addr, new_val, xor_res);
    __ unsigned_shift_right(xor_res,
                            LIR_OprFact::intConst(HeapRegion::LogOfHRGrainBytes),
                            xor_shift_res,
                            LIR_OprDesc::illegalOpr());
  }

  if (!new_val->is_register()) {
    LIR_Opr new_val_reg = new_register(T_OBJECT);
    __ leal(new_val, new_val_reg);
    new_val = new_val_reg;
  }
  assert(new_val->is_register(), "must be a register at this point");

  __ cmp(lir_cond_notEqual, xor_shift_res, LIR_OprFact::intptrConst(NULL_WORD));

  CodeStub* slow = new G1PostBarrierStub(addr, new_val);
  __ branch(lir_cond_notEqual, LP64_ONLY(T_LONG) NOT_LP64(T_INT), slow);
  __ branch_destination(slow->continuation());
}

#endif // SERIALGC
////////////////////////////////////////////////////////////////////////

void LIRGenerator::CardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {

  assert(sizeof(*((CardTableModRefBS*)_bs)->byte_map_base) == sizeof(jbyte), "adjust this code");
  LIR_Const* card_table_base = new LIR_Const(((CardTableModRefBS*)_bs)->byte_map_base);
  if (addr->is_address()) {
    LIR_Address* address = addr->as_address_ptr();
    // ptr cannot be an object because we use this barrier for array card marks
    // and addr can point in the middle of an array.
    LIR_Opr ptr = new_pointer_register();
    if (!address->index()->is_valid() && address->disp() == 0) {
      __ move(address->base(), ptr);
    } else {
      assert(address->disp() != max_jint, "lea doesn't support patched addresses!");
      __ leal(addr, ptr);
    }
    addr = ptr;
  }
  assert(addr->is_register(), "must be a register at this point");

#ifdef ARM
  // TODO: ARM - move to platform-dependent code
  LIR_Opr tmp = FrameMap::R14_opr;
  if (VM_Version::supports_movw()) {
    __ move((LIR_Opr)card_table_base, tmp);
  } else {
    __ move(new LIR_Address(FrameMap::Rthread_opr, in_bytes(JavaThread::card_table_base_offset()), T_ADDRESS), tmp);
  }

  CardTableModRefBS* ct = (CardTableModRefBS*)_bs;
  LIR_Address* card_addr = new LIR_Address(tmp, addr, (LIR_Address::Scale) -CardTableModRefBS::card_shift, 0, T_BYTE);
  if (((int)ct->byte_map_base & 0xff) == 0) {
    __ move(tmp, card_addr);
  } else {
    LIR_Opr tmp_zero = new_register(T_INT);
    __ move(LIR_OprFact::intConst(0), tmp_zero);
    __ move(tmp_zero, card_addr);
  }
#else // ARM
  LIR_Opr tmp = new_pointer_register();
  if (TwoOperandLIRForm) {
    __ move(addr, tmp);
    __ unsigned_shift_right(tmp, CardTableModRefBS::card_shift, tmp);
  } else {
    __ unsigned_shift_right(addr, CardTableModRefBS::card_shift, tmp);
  }
  if (can_inline_as_constant(card_table_base)) {
    __ move(LIR_OprFact::intConst(0),
            new LIR_Address(tmp, card_table_base->as_jint(), T_BYTE));
  } else {
    __ move(LIR_OprFact::intConst(0),
            new LIR_Address(tmp, load_constant(card_table_base),
                            T_BYTE));
  }
#endif // ARM
}


//------------------------field access--------------------------------------

// Comment copied from templateTable_i486.cpp
// ----------------------------------------------------------------------------
// Volatile variables demand their effects be made known to all CPUs in
// order.  Store buffers on most chips allow reads & writes to reorder; the
// JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of
// memory barrier (i.e., it's not sufficient that the interpreter does not
// reorder volatile references, the hardware also must not reorder them).
//
// According to the new Java Memory Model (JMM):
// (1) All volatiles are serialized wrt to each other.
// ALSO reads & writes act as acquire & release, so:
// (2) A read cannot let unrelated NON-volatile memory refs that happen after
//     the read float up to before the read.  It's OK for non-volatile memory refs
//     that happen before the volatile read to float down below it.
// (3) Similarly, a volatile write cannot let unrelated NON-volatile memory refs
//     that happen BEFORE the write float down to after the write.  It's OK for
//     non-volatile memory refs that happen after the volatile write to float up
//     before it.
//
// We only put in barriers around volatile refs (they are expensive), not
// _between_ memory refs (that would require us to track the flavor of the
// previous memory refs).  Requirements (2) and (3) require some barriers
// before volatile stores and after volatile loads.  These nearly cover
// requirement (1) but miss the volatile-store-volatile-load case.  This final
// case is placed after volatile-stores although it could just as well go
// before volatile-loads.
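//
// Concretely, do_StoreField below emits membar_release before a volatile
// store and a trailing membar after it (covering the store-load case),
// while do_LoadField emits membar_acquire after a volatile load.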


void LIRGenerator::do_StoreField(StoreField* x) {
  bool needs_patching = x->needs_patching();
  bool is_volatile = x->field()->is_volatile();
  BasicType field_type = x->field_type();
  bool is_oop = (field_type == T_ARRAY || field_type == T_OBJECT);

  CodeEmitInfo* info = NULL;
  if (needs_patching) {
    assert(x->explicit_null_check() == NULL, "can't fold null check into patching field access");
    info = state_for(x, x->state_before());
  } else if (x->needs_null_check()) {
    NullCheck* nc = x->explicit_null_check();
    if (nc == NULL) {
      info = state_for(x);
    } else {
      info = state_for(nc);
    }
  }


  LIRItem object(x->obj(), this);
  LIRItem value(x->value(), this);

  object.load_item();

  if (is_volatile || needs_patching) {
    // load item if field is volatile (fewer special cases for volatiles)
    // load item if field not initialized
    // load item if field not constant
    // because of code patching we cannot inline constants
    if (field_type == T_BYTE || field_type == T_BOOLEAN) {
      value.load_byte_item();
    } else {
      value.load_item();
    }
  } else {
    value.load_for_store(field_type);
  }

  set_no_result(x);

#ifndef PRODUCT
  if (PrintNotLoaded && needs_patching) {
    tty->print_cr("   ###class not loaded at store_%s bci %d",
                  x->is_static() ? "static" : "field", x->printable_bci());
  }
#endif

  if (x->needs_null_check() &&
      (needs_patching ||
       MacroAssembler::needs_explicit_null_check(x->offset()))) {
    // emit an explicit null check because the offset is too large
    __ null_check(object.result(), new CodeEmitInfo(info));
  }

  LIR_Address* address;
  if (needs_patching) {
    // we need to patch the offset in the instruction so don't allow
    // generate_address to try to be smart about emitting the -1.
    // Otherwise the patching code won't know how to find the
    // instruction to patch.
    address = new LIR_Address(object.result(), PATCHED_ADDR, field_type);
  } else {
    address = generate_address(object.result(), x->offset(), field_type);
  }

  if (is_volatile && os::is_MP()) {
    __ membar_release();
  }

  if (is_oop) {
    // Do the pre-write barrier, if any.
    pre_barrier(LIR_OprFact::address(address),
                needs_patching,
                (info ? new CodeEmitInfo(info) : NULL));
  }

  if (is_volatile && !needs_patching) {
    volatile_field_store(value.result(), address, info);
  } else {
    LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
    __ store(value.result(), address, info, patch_code);
  }

  if (is_oop) {
    // Stored an oop into the object, so mark the card of the object's header
    post_barrier(object.result(), value.result());
  }

  if (is_volatile && os::is_MP()) {
    __ membar();
  }
}


void LIRGenerator::do_LoadField(LoadField* x) {
  bool needs_patching = x->needs_patching();
  bool is_volatile = x->field()->is_volatile();
  BasicType field_type = x->field_type();

  CodeEmitInfo* info = NULL;
  if (needs_patching) {
    assert(x->explicit_null_check() == NULL, "can't fold null check into patching field access");
    info = state_for(x, x->state_before());
  } else if (x->needs_null_check()) {
    NullCheck* nc = x->explicit_null_check();
    if (nc == NULL) {
      info = state_for(x);
    } else {
      info = state_for(nc);
    }
  }

  LIRItem object(x->obj(), this);

  object.load_item();

#ifndef PRODUCT
  if (PrintNotLoaded && needs_patching) {
    tty->print_cr("   ###class not loaded at load_%s bci %d",
                  x->is_static() ? "static" : "field", x->printable_bci());
  }
#endif

  if (x->needs_null_check() &&
      (needs_patching ||
       MacroAssembler::needs_explicit_null_check(x->offset()))) {
    // emit an explicit null check because the offset is too large
    __ null_check(object.result(), new CodeEmitInfo(info));
  }

  LIR_Opr reg = rlock_result(x, field_type);
  LIR_Address* address;
  if (needs_patching) {
    // we need to patch the offset in the instruction so don't allow
    // generate_address to try to be smart about emitting the -1.
    // Otherwise the patching code won't know how to find the
    // instruction to patch.
    address = new LIR_Address(object.result(), PATCHED_ADDR, field_type);
  } else {
    address = generate_address(object.result(), x->offset(), field_type);
  }

  if (is_volatile && !needs_patching) {
    volatile_field_load(address, reg, info);
  } else {
    LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
    __ load(address, reg, info, patch_code);
  }

  if (is_volatile && os::is_MP()) {
    __ membar_acquire();
  }
}


//------------------------java.nio.Buffer.checkIndex------------------------

// int java.nio.Buffer.checkIndex(int)
void LIRGenerator::do_NIOCheckIndex(Intrinsic* x) {
  // NOTE: by the time we are in checkIndex() we are guaranteed that
  // the buffer is non-null (because checkIndex is package-private and
  // only called from within other methods in the buffer).
1755 assert(x->number_of_arguments() == 2, "wrong type"); 1756 LIRItem buf (x->argument_at(0), this); 1757 LIRItem index(x->argument_at(1), this); 1758 buf.load_item(); 1759 index.load_item(); 1760 1761 LIR_Opr result = rlock_result(x); 1762 if (GenerateRangeChecks) { 1763 CodeEmitInfo* info = state_for(x); 1764 CodeStub* stub = new RangeCheckStub(info, index.result(), true); 1765 if (index.result()->is_constant()) { 1766 cmp_mem_int(lir_cond_belowEqual, buf.result(), java_nio_Buffer::limit_offset(), index.result()->as_jint(), info); 1767 __ branch(lir_cond_belowEqual, T_INT, stub); 1768 } else { 1769 cmp_reg_mem(lir_cond_aboveEqual, index.result(), buf.result(), 1770 java_nio_Buffer::limit_offset(), T_INT, info); 1771 __ branch(lir_cond_aboveEqual, T_INT, stub); 1772 } 1773 __ move(index.result(), result); 1774 } else { 1775 // Just load the index into the result register 1776 __ move(index.result(), result); 1777 } 1778} 1779 1780 1781//------------------------array access-------------------------------------- 1782 1783 1784void LIRGenerator::do_ArrayLength(ArrayLength* x) { 1785 LIRItem array(x->array(), this); 1786 array.load_item(); 1787 LIR_Opr reg = rlock_result(x); 1788 1789 CodeEmitInfo* info = NULL; 1790 if (x->needs_null_check()) { 1791 NullCheck* nc = x->explicit_null_check(); 1792 if (nc == NULL) { 1793 info = state_for(x); 1794 } else { 1795 info = state_for(nc); 1796 } 1797 } 1798 __ load(new LIR_Address(array.result(), arrayOopDesc::length_offset_in_bytes(), T_INT), reg, info, lir_patch_none); 1799} 1800 1801 1802void LIRGenerator::do_LoadIndexed(LoadIndexed* x) { 1803 bool use_length = x->length() != NULL; 1804 LIRItem array(x->array(), this); 1805 LIRItem index(x->index(), this); 1806 LIRItem length(this); 1807 bool needs_range_check = true; 1808 1809 if (use_length) { 1810 needs_range_check = x->compute_needs_range_check(); 1811 if (needs_range_check) { 1812 length.set_instruction(x->length()); 1813 length.load_item(); 1814 } 1815 } 1816 1817 array.load_item(); 1818 if (index.is_constant() && can_inline_as_constant(x->index())) { 1819 // let it be a constant 1820 index.dont_load_item(); 1821 } else { 1822 index.load_item(); 1823 } 1824 1825 CodeEmitInfo* range_check_info = state_for(x); 1826 CodeEmitInfo* null_check_info = NULL; 1827 if (x->needs_null_check()) { 1828 NullCheck* nc = x->explicit_null_check(); 1829 if (nc != NULL) { 1830 null_check_info = state_for(nc); 1831 } else { 1832 null_check_info = range_check_info; 1833 } 1834 } 1835 1836 // emit array address setup early so it schedules better 1837 LIR_Address* array_addr = emit_array_address(array.result(), index.result(), x->elt_type(), false); 1838 1839 if (GenerateRangeChecks && needs_range_check) { 1840 if (use_length) { 1841 // TODO: use a (modified) version of array_range_check that does not require a 1842 // constant length to be loaded to a register 1843 __ cmp(lir_cond_belowEqual, length.result(), index.result()); 1844 __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result())); 1845 } else { 1846 array_range_check(array.result(), index.result(), null_check_info, range_check_info); 1847 // The range check performs the null check, so clear it out for the load 1848 null_check_info = NULL; 1849 } 1850 } 1851 1852 __ move(array_addr, rlock_result(x, x->elt_type()), null_check_info); 1853} 1854 1855 1856void LIRGenerator::do_NullCheck(NullCheck* x) { 1857 if (x->can_trap()) { 1858 LIRItem value(x->obj(), this); 1859 value.load_item(); 1860 CodeEmitInfo* info = state_for(x); 
1861 __ null_check(value.result(), info); 1862 } 1863} 1864 1865 1866void LIRGenerator::do_Throw(Throw* x) { 1867 LIRItem exception(x->exception(), this); 1868 exception.load_item(); 1869 set_no_result(x); 1870 LIR_Opr exception_opr = exception.result(); 1871 CodeEmitInfo* info = state_for(x, x->state()); 1872 1873#ifndef PRODUCT 1874 if (PrintC1Statistics) { 1875 increment_counter(Runtime1::throw_count_address(), T_INT); 1876 } 1877#endif 1878 1879 // check if the instruction has an xhandler in any of the nested scopes 1880 bool unwind = false; 1881 if (info->exception_handlers()->length() == 0) { 1882 // this throw is not inside an xhandler 1883 unwind = true; 1884 } else { 1885 // get some idea of the throw type 1886 bool type_is_exact = true; 1887 ciType* throw_type = x->exception()->exact_type(); 1888 if (throw_type == NULL) { 1889 type_is_exact = false; 1890 throw_type = x->exception()->declared_type(); 1891 } 1892 if (throw_type != NULL && throw_type->is_instance_klass()) { 1893 ciInstanceKlass* throw_klass = (ciInstanceKlass*)throw_type; 1894 unwind = !x->exception_handlers()->could_catch(throw_klass, type_is_exact); 1895 } 1896 } 1897 1898 // do null check before moving exception oop into fixed register 1899 // to avoid a fixed interval with an oop during the null check. 1900 // Use a copy of the CodeEmitInfo because debug information is 1901 // different for null_check and throw. 1902 if (GenerateCompilerNullChecks && 1903 (x->exception()->as_NewInstance() == NULL && x->exception()->as_ExceptionObject() == NULL)) { 1904 // if the exception object wasn't created using new then it might be null. 1905 __ null_check(exception_opr, new CodeEmitInfo(info, x->state()->copy(ValueStack::ExceptionState, x->state()->bci()))); 1906 } 1907 1908 if (compilation()->env()->jvmti_can_post_on_exceptions()) { 1909 // we need to go through the exception lookup path to get JVMTI 1910 // notification done 1911 unwind = false; 1912 } 1913 1914 // move exception oop into fixed register 1915 __ move(exception_opr, exceptionOopOpr()); 1916 1917 if (unwind) { 1918 __ unwind_exception(exceptionOopOpr()); 1919 } else { 1920 __ throw_exception(exceptionPcOpr(), exceptionOopOpr(), info); 1921 } 1922} 1923 1924 1925void LIRGenerator::do_RoundFP(RoundFP* x) { 1926 LIRItem input(x->input(), this); 1927 input.load_item(); 1928 LIR_Opr input_opr = input.result(); 1929 assert(input_opr->is_register(), "why round if value is not in a register?"); 1930 assert(input_opr->is_single_fpu() || input_opr->is_double_fpu(), "input should be floating-point value"); 1931 if (input_opr->is_single_fpu()) { 1932 set_result(x, round_item(input_opr)); // This code path not currently taken 1933 } else { 1934 LIR_Opr result = new_register(T_DOUBLE); 1935 set_vreg_flag(result, must_start_in_memory); 1936 __ roundfp(input_opr, LIR_OprFact::illegalOpr, result); 1937 set_result(x, result); 1938 } 1939} 1940 1941void LIRGenerator::do_UnsafeGetRaw(UnsafeGetRaw* x) { 1942 LIRItem base(x->base(), this); 1943 LIRItem idx(this); 1944 1945 base.load_item(); 1946 if (x->has_index()) { 1947 idx.set_instruction(x->index()); 1948 idx.load_nonconstant(); 1949 } 1950 1951 LIR_Opr reg = rlock_result(x, x->basic_type()); 1952 1953 int log2_scale = 0; 1954 if (x->has_index()) { 1955 assert(x->index()->type()->tag() == intTag, "should not find non-int index"); 1956 log2_scale = x->log2_scale(); 1957 } 1958 1959 assert(!x->has_index() || idx.value() == x->index(), "should match"); 1960 1961 LIR_Opr base_op = base.result(); 1962#ifndef _LP64 1963 if 
(x->base()->type()->tag() == longTag) { 1964 base_op = new_register(T_INT); 1965 __ convert(Bytecodes::_l2i, base.result(), base_op); 1966 } else { 1967 assert(x->base()->type()->tag() == intTag, "must be"); 1968 } 1969#endif 1970 1971 BasicType dst_type = x->basic_type(); 1972 LIR_Opr index_op = idx.result(); 1973 1974 LIR_Address* addr; 1975 if (index_op->is_constant()) { 1976 assert(log2_scale == 0, "must not have a scale"); 1977 addr = new LIR_Address(base_op, index_op->as_jint(), dst_type); 1978 } else { 1979#ifdef X86 1980#ifdef _LP64 1981 if (!index_op->is_illegal() && index_op->type() == T_INT) { 1982 LIR_Opr tmp = new_pointer_register(); 1983 __ convert(Bytecodes::_i2l, index_op, tmp); 1984 index_op = tmp; 1985 } 1986#endif 1987 addr = new LIR_Address(base_op, index_op, LIR_Address::Scale(log2_scale), 0, dst_type); 1988#elif defined(ARM) 1989 addr = generate_address(base_op, index_op, log2_scale, 0, dst_type); 1990#else 1991 if (index_op->is_illegal() || log2_scale == 0) { 1992#ifdef _LP64 1993 if (!index_op->is_illegal() && index_op->type() == T_INT) { 1994 LIR_Opr tmp = new_pointer_register(); 1995 __ convert(Bytecodes::_i2l, index_op, tmp); 1996 index_op = tmp; 1997 } 1998#endif 1999 addr = new LIR_Address(base_op, index_op, dst_type); 2000 } else { 2001 LIR_Opr tmp = new_pointer_register(); 2002 __ shift_left(index_op, log2_scale, tmp); 2003 addr = new LIR_Address(base_op, tmp, dst_type); 2004 } 2005#endif 2006 } 2007 2008 if (x->may_be_unaligned() && (dst_type == T_LONG || dst_type == T_DOUBLE)) { 2009 __ unaligned_move(addr, reg); 2010 } else { 2011 if (dst_type == T_OBJECT && x->is_wide()) { 2012 __ move_wide(addr, reg); 2013 } else { 2014 __ move(addr, reg); 2015 } 2016 } 2017} 2018 2019 2020void LIRGenerator::do_UnsafePutRaw(UnsafePutRaw* x) { 2021 int log2_scale = 0; 2022 BasicType type = x->basic_type(); 2023 2024 if (x->has_index()) { 2025 assert(x->index()->type()->tag() == intTag, "should not find non-int index"); 2026 log2_scale = x->log2_scale(); 2027 } 2028 2029 LIRItem base(x->base(), this); 2030 LIRItem value(x->value(), this); 2031 LIRItem idx(this); 2032 2033 base.load_item(); 2034 if (x->has_index()) { 2035 idx.set_instruction(x->index()); 2036 idx.load_item(); 2037 } 2038 2039 if (type == T_BYTE || type == T_BOOLEAN) { 2040 value.load_byte_item(); 2041 } else { 2042 value.load_item(); 2043 } 2044 2045 set_no_result(x); 2046 2047 LIR_Opr base_op = base.result(); 2048#ifndef _LP64 2049 if (x->base()->type()->tag() == longTag) { 2050 base_op = new_register(T_INT); 2051 __ convert(Bytecodes::_l2i, base.result(), base_op); 2052 } else { 2053 assert(x->base()->type()->tag() == intTag, "must be"); 2054 } 2055#endif 2056 2057 LIR_Opr index_op = idx.result(); 2058 if (log2_scale != 0) { 2059 // temporary fix (platform dependent code without shift on Intel would be better) 2060 index_op = new_pointer_register(); 2061#ifdef _LP64 2062 if(idx.result()->type() == T_INT) { 2063 __ convert(Bytecodes::_i2l, idx.result(), index_op); 2064 } else { 2065#endif 2066 // TODO: ARM also allows embedded shift in the address 2067 __ move(idx.result(), index_op); 2068#ifdef _LP64 2069 } 2070#endif 2071 __ shift_left(index_op, log2_scale, index_op); 2072 } 2073#ifdef _LP64 2074 else if(!index_op->is_illegal() && index_op->type() == T_INT) { 2075 LIR_Opr tmp = new_pointer_register(); 2076 __ convert(Bytecodes::_i2l, index_op, tmp); 2077 index_op = tmp; 2078 } 2079#endif 2080 2081 LIR_Address* addr = new LIR_Address(base_op, index_op, x->basic_type()); 2082 __ move(value.result(), addr); 
2083} 2084 2085 2086void LIRGenerator::do_UnsafeGetObject(UnsafeGetObject* x) { 2087  BasicType type = x->basic_type(); 2088  LIRItem src(x->object(), this); 2089  LIRItem off(x->offset(), this); 2090 2091  off.load_item(); 2092  src.load_item(); 2093 2094  LIR_Opr reg = rlock_result(x, x->basic_type()); 2095 2096  get_Object_unsafe(reg, src.result(), off.result(), type, x->is_volatile()); 2097  if (x->is_volatile() && os::is_MP()) __ membar_acquire(); 2098} 2099 2100 2101void LIRGenerator::do_UnsafePutObject(UnsafePutObject* x) { 2102  BasicType type = x->basic_type(); 2103  LIRItem src(x->object(), this); 2104  LIRItem off(x->offset(), this); 2105  LIRItem data(x->value(), this); 2106 2107  src.load_item(); 2108  if (type == T_BOOLEAN || type == T_BYTE) { 2109    data.load_byte_item(); 2110  } else { 2111    data.load_item(); 2112  } 2113  off.load_item(); 2114 2115  set_no_result(x); 2116 2117  if (x->is_volatile() && os::is_MP()) __ membar_release(); 2118  put_Object_unsafe(src.result(), off.result(), data.result(), type, x->is_volatile()); 2119  if (x->is_volatile() && os::is_MP()) __ membar(); 2120} 2121 2122 2123void LIRGenerator::do_UnsafePrefetch(UnsafePrefetch* x, bool is_store) { 2124  LIRItem src(x->object(), this); 2125  LIRItem off(x->offset(), this); 2126 2127  src.load_item(); 2128  if (off.is_constant() && can_inline_as_constant(x->offset())) { 2129    // let it be a constant 2130    off.dont_load_item(); 2131  } else { 2132    off.load_item(); 2133  } 2134 2135  set_no_result(x); 2136 2137  LIR_Address* addr = generate_address(src.result(), off.result(), 0, 0, T_BYTE); 2138  __ prefetch(addr, is_store); 2139} 2140 2141 2142void LIRGenerator::do_UnsafePrefetchRead(UnsafePrefetchRead* x) { 2143  do_UnsafePrefetch(x, false); 2144} 2145 2146 2147void LIRGenerator::do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) { 2148  do_UnsafePrefetch(x, true); 2149} 2150 2151 2152void LIRGenerator::do_SwitchRanges(SwitchRangeArray* x, LIR_Opr value, BlockBegin* default_sux) { 2153  int lng = x->length(); 2154 2155  for (int i = 0; i < lng; i++) { 2156    SwitchRange* one_range = x->at(i); 2157    int low_key = one_range->low_key(); 2158    int high_key = one_range->high_key(); 2159    BlockBegin* dest = one_range->sux(); 2160    if (low_key == high_key) { 2161      __ cmp(lir_cond_equal, value, low_key); 2162      __ branch(lir_cond_equal, T_INT, dest); 2163    } else if (high_key - low_key == 1) { 2164      __ cmp(lir_cond_equal, value, low_key); 2165      __ branch(lir_cond_equal, T_INT, dest); 2166      __ cmp(lir_cond_equal, value, high_key); 2167      __ branch(lir_cond_equal, T_INT, dest); 2168    } else { 2169      LabelObj* L = new LabelObj(); 2170      __ cmp(lir_cond_less, value, low_key); 2171      __ branch(lir_cond_less, L->label()); 2172      __ cmp(lir_cond_lessEqual, value, high_key); 2173      __ branch(lir_cond_lessEqual, T_INT, dest); 2174      __ branch_destination(L->label()); 2175    } 2176  } 2177  __ jump(default_sux); 2178} 2179 2180 2181SwitchRangeArray* LIRGenerator::create_lookup_ranges(TableSwitch* x) { 2182  SwitchRangeList* res = new SwitchRangeList(); 2183  int len = x->length(); 2184  if (len > 0) { 2185    BlockBegin* sux = x->sux_at(0); 2186    int key = x->lo_key(); 2187    BlockBegin* default_sux = x->default_sux(); 2188    SwitchRange* range = new SwitchRange(key, sux); 2189    for (int i = 0; i < len; i++, key++) { 2190      BlockBegin* new_sux = x->sux_at(i); 2191      if (sux == new_sux) { 2192        // still in same range 2193        range->set_high_key(key); 2194      } else { 2195        // skip tests which explicitly dispatch to the default 2196        if (sux != default_sux) { 2197          res->append(range); 2198        } 2199        range = new
SwitchRange(key, new_sux); 2200 } 2201 sux = new_sux; 2202 } 2203 if (res->length() == 0 || res->last() != range) res->append(range); 2204 } 2205 return res; 2206} 2207 2208 2209// we expect the keys to be sorted by increasing value 2210SwitchRangeArray* LIRGenerator::create_lookup_ranges(LookupSwitch* x) { 2211 SwitchRangeList* res = new SwitchRangeList(); 2212 int len = x->length(); 2213 if (len > 0) { 2214 BlockBegin* default_sux = x->default_sux(); 2215 int key = x->key_at(0); 2216 BlockBegin* sux = x->sux_at(0); 2217 SwitchRange* range = new SwitchRange(key, sux); 2218 for (int i = 1; i < len; i++) { 2219 int new_key = x->key_at(i); 2220 BlockBegin* new_sux = x->sux_at(i); 2221 if (key+1 == new_key && sux == new_sux) { 2222 // still in same range 2223 range->set_high_key(new_key); 2224 } else { 2225 // skip tests which explicitly dispatch to the default 2226 if (range->sux() != default_sux) { 2227 res->append(range); 2228 } 2229 range = new SwitchRange(new_key, new_sux); 2230 } 2231 key = new_key; 2232 sux = new_sux; 2233 } 2234 if (res->length() == 0 || res->last() != range) res->append(range); 2235 } 2236 return res; 2237} 2238 2239 2240void LIRGenerator::do_TableSwitch(TableSwitch* x) { 2241 LIRItem tag(x->tag(), this); 2242 tag.load_item(); 2243 set_no_result(x); 2244 2245 if (x->is_safepoint()) { 2246 __ safepoint(safepoint_poll_register(), state_for(x, x->state_before())); 2247 } 2248 2249 // move values into phi locations 2250 move_to_phi(x->state()); 2251 2252 int lo_key = x->lo_key(); 2253 int hi_key = x->hi_key(); 2254 int len = x->length(); 2255 LIR_Opr value = tag.result(); 2256 if (UseTableRanges) { 2257 do_SwitchRanges(create_lookup_ranges(x), value, x->default_sux()); 2258 } else { 2259 for (int i = 0; i < len; i++) { 2260 __ cmp(lir_cond_equal, value, i + lo_key); 2261 __ branch(lir_cond_equal, T_INT, x->sux_at(i)); 2262 } 2263 __ jump(x->default_sux()); 2264 } 2265} 2266 2267 2268void LIRGenerator::do_LookupSwitch(LookupSwitch* x) { 2269 LIRItem tag(x->tag(), this); 2270 tag.load_item(); 2271 set_no_result(x); 2272 2273 if (x->is_safepoint()) { 2274 __ safepoint(safepoint_poll_register(), state_for(x, x->state_before())); 2275 } 2276 2277 // move values into phi locations 2278 move_to_phi(x->state()); 2279 2280 LIR_Opr value = tag.result(); 2281 if (UseTableRanges) { 2282 do_SwitchRanges(create_lookup_ranges(x), value, x->default_sux()); 2283 } else { 2284 int len = x->length(); 2285 for (int i = 0; i < len; i++) { 2286 __ cmp(lir_cond_equal, value, x->key_at(i)); 2287 __ branch(lir_cond_equal, T_INT, x->sux_at(i)); 2288 } 2289 __ jump(x->default_sux()); 2290 } 2291} 2292 2293 2294void LIRGenerator::do_Goto(Goto* x) { 2295 set_no_result(x); 2296 2297 if (block()->next()->as_OsrEntry()) { 2298 // need to free up storage used for OSR entry point 2299 LIR_Opr osrBuffer = block()->next()->operand(); 2300 BasicTypeList signature; 2301 signature.append(T_INT); 2302 CallingConvention* cc = frame_map()->c_calling_convention(&signature); 2303 __ move(osrBuffer, cc->args()->at(0)); 2304 __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_end), 2305 getThreadTemp(), LIR_OprFact::illegalOpr, cc->args()); 2306 } 2307 2308 if (x->is_safepoint()) { 2309 ValueStack* state = x->state_before() ? 
x->state_before() : x->state(); 2310 2311    // increment backedge counter if needed 2312    CodeEmitInfo* info = state_for(x, state); 2313    increment_backedge_counter(info, info->stack()->bci()); 2314    CodeEmitInfo* safepoint_info = state_for(x, state); 2315    __ safepoint(safepoint_poll_register(), safepoint_info); 2316  } 2317 2318  // Gotos can be folded Ifs; handle this case. 2319  if (x->should_profile()) { 2320    ciMethod* method = x->profiled_method(); 2321    assert(method != NULL, "method should be set if branch is profiled"); 2322    ciMethodData* md = method->method_data_or_null(); 2323    assert(md != NULL, "Sanity"); 2324    ciProfileData* data = md->bci_to_data(x->profiled_bci()); 2325    assert(data != NULL, "must have profiling data"); 2326    int offset; 2327    if (x->direction() == Goto::taken) { 2328      assert(data->is_BranchData(), "need BranchData for two-way branches"); 2329      offset = md->byte_offset_of_slot(data, BranchData::taken_offset()); 2330    } else if (x->direction() == Goto::not_taken) { 2331      assert(data->is_BranchData(), "need BranchData for two-way branches"); 2332      offset = md->byte_offset_of_slot(data, BranchData::not_taken_offset()); 2333    } else { 2334      assert(data->is_JumpData(), "need JumpData for branches"); 2335      offset = md->byte_offset_of_slot(data, JumpData::taken_offset()); 2336    } 2337    LIR_Opr md_reg = new_register(T_OBJECT); 2338    __ oop2reg(md->constant_encoding(), md_reg); 2339 2340    increment_counter(new LIR_Address(md_reg, offset, 2341                                      NOT_LP64(T_INT) LP64_ONLY(T_LONG)), DataLayout::counter_increment); 2342  } 2343 2344  // emit phi-instruction move after safepoint since this simplifies 2345  // describing the state at the safepoint. 2346  move_to_phi(x->state()); 2347 2348  __ jump(x->default_sux()); 2349} 2350 2351 2352void LIRGenerator::do_Base(Base* x) { 2353  __ std_entry(LIR_OprFact::illegalOpr); 2354  // Emit moves from physical registers / stack slots to virtual registers 2355  CallingConvention* args = compilation()->frame_map()->incoming_arguments(); 2356  IRScope* irScope = compilation()->hir()->top_scope(); 2357  int java_index = 0; 2358  for (int i = 0; i < args->length(); i++) { 2359    LIR_Opr src = args->at(i); 2360    assert(!src->is_illegal(), "check"); 2361    BasicType t = src->type(); 2362 2363    // Types which are smaller than int are passed as int, so 2364    // correct the type that is passed. 2365    switch (t) { 2366    case T_BYTE: 2367    case T_BOOLEAN: 2368    case T_SHORT: 2369    case T_CHAR: 2370      t = T_INT; 2371      break; 2372    } 2373 2374    LIR_Opr dest = new_register(t); 2375    __ move(src, dest); 2376 2377    // Assign new location to Local instruction for this local 2378    Local* local = x->state()->local_at(java_index)->as_Local(); 2379    assert(local != NULL, "Locals for incoming arguments must have been created"); 2380#ifndef __SOFTFP__ 2381    // The Java calling convention passes double as long and float as int.
2382 assert(as_ValueType(t)->tag() == local->type()->tag(), "check"); 2383#endif // __SOFTFP__ 2384 local->set_operand(dest); 2385 _instruction_for_operand.at_put_grow(dest->vreg_number(), local, NULL); 2386 java_index += type2size[t]; 2387 } 2388 2389 if (compilation()->env()->dtrace_method_probes()) { 2390 BasicTypeList signature; 2391 signature.append(LP64_ONLY(T_LONG) NOT_LP64(T_INT)); // thread 2392 signature.append(T_OBJECT); // methodOop 2393 LIR_OprList* args = new LIR_OprList(); 2394 args->append(getThreadPointer()); 2395 LIR_Opr meth = new_register(T_OBJECT); 2396 __ oop2reg(method()->constant_encoding(), meth); 2397 args->append(meth); 2398 call_runtime(&signature, args, CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry), voidType, NULL); 2399 } 2400 2401 if (method()->is_synchronized()) { 2402 LIR_Opr obj; 2403 if (method()->is_static()) { 2404 obj = new_register(T_OBJECT); 2405 __ oop2reg(method()->holder()->java_mirror()->constant_encoding(), obj); 2406 } else { 2407 Local* receiver = x->state()->local_at(0)->as_Local(); 2408 assert(receiver != NULL, "must already exist"); 2409 obj = receiver->operand(); 2410 } 2411 assert(obj->is_valid(), "must be valid"); 2412 2413 if (method()->is_synchronized() && GenerateSynchronizationCode) { 2414 LIR_Opr lock = new_register(T_INT); 2415 __ load_stack_address_monitor(0, lock); 2416 2417 CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL); 2418 CodeStub* slow_path = new MonitorEnterStub(obj, lock, info); 2419 2420 // receiver is guaranteed non-NULL so don't need CodeEmitInfo 2421 __ lock_object(syncTempOpr(), obj, lock, new_register(T_OBJECT), slow_path, NULL); 2422 } 2423 } 2424 2425 // increment invocation counters if needed 2426 if (!method()->is_accessor()) { // Accessors do not have MDOs, so no counting. 2427 CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL); 2428 increment_invocation_counter(info); 2429 } 2430 2431 // all blocks with a successor must end with an unconditional jump 2432 // to the successor even if they are consecutive 2433 __ jump(x->default_sux()); 2434} 2435 2436 2437void LIRGenerator::do_OsrEntry(OsrEntry* x) { 2438 // construct our frame and model the production of incoming pointer 2439 // to the OSR buffer. 2440 __ osr_entry(LIR_Assembler::osrBufferPointer()); 2441 LIR_Opr result = rlock_result(x); 2442 __ move(LIR_Assembler::osrBufferPointer(), result); 2443} 2444 2445 2446void LIRGenerator::invoke_load_arguments(Invoke* x, LIRItemList* args, const LIR_OprList* arg_list) { 2447 int i = (x->has_receiver() || x->is_invokedynamic()) ? 
1 : 0; 2448  for (; i < args->length(); i++) { 2449    LIRItem* param = args->at(i); 2450    LIR_Opr loc = arg_list->at(i); 2451    if (loc->is_register()) { 2452      param->load_item_force(loc); 2453    } else { 2454      LIR_Address* addr = loc->as_address_ptr(); 2455      param->load_for_store(addr->type()); 2456      if (addr->type() == T_OBJECT) { 2457        __ move_wide(param->result(), addr); 2458      } else 2459        if (addr->type() == T_LONG || addr->type() == T_DOUBLE) { 2460          __ unaligned_move(param->result(), addr); 2461        } else { 2462          __ move(param->result(), addr); 2463        } 2464    } 2465  } 2466 2467  if (x->has_receiver()) { 2468    LIRItem* receiver = args->at(0); 2469    LIR_Opr loc = arg_list->at(0); 2470    if (loc->is_register()) { 2471      receiver->load_item_force(loc); 2472    } else { 2473      assert(loc->is_address(), "just checking"); 2474      receiver->load_for_store(T_OBJECT); 2475      __ move_wide(receiver->result(), loc->as_address_ptr()); 2476    } 2477  } 2478} 2479 2480 2481// Visits all arguments, returns appropriate items without loading them 2482LIRItemList* LIRGenerator::invoke_visit_arguments(Invoke* x) { 2483  LIRItemList* argument_items = new LIRItemList(); 2484  if (x->has_receiver()) { 2485    LIRItem* receiver = new LIRItem(x->receiver(), this); 2486    argument_items->append(receiver); 2487  } 2488  if (x->is_invokedynamic()) { 2489    // Insert a dummy for the synthetic MethodHandle argument. 2490    argument_items->append(NULL); 2491  } 2492  int idx = x->has_receiver() ? 1 : 0; 2493  for (int i = 0; i < x->number_of_arguments(); i++) { 2494    LIRItem* param = new LIRItem(x->argument_at(i), this); 2495    argument_items->append(param); 2496    idx += (param->type()->is_double_word() ? 2 : 1); 2497  } 2498  return argument_items; 2499} 2500 2501 2502// The invoke with receiver has the following phases: 2503//   a) traverse and load/lock receiver; 2504//   b) traverse all arguments -> item-array (invoke_visit_arguments) 2505//   c) push receiver on stack 2506//   d) load each of the items and push on stack 2507//   e) unlock receiver 2508//   f) move receiver into receiver-register %o0 2509//   g) lock result registers and emit call operation 2510// 2511// Before issuing a call, we must spill-save all values on the stack 2512// that are in caller-save registers. "spill-save" moves those registers 2513// either into a free callee-save register or spills them if no free 2514// callee-save register is available. 2515// 2516// The problem is where to invoke spill-save. 2517// - if invoked between e) and f), we may lock a callee-save 2518//   register in "spill-save" that destroys the receiver register 2519//   before f) is executed 2520// - if we rearrange f) to be earlier, by loading %o0, it 2521//   may destroy a value on the stack that is currently in %o0 2522//   and is waiting to be spilled 2523// - if we keep the receiver locked while doing spill-save, 2524//   we cannot spill it as it is spill-locked 2525// 2526void LIRGenerator::do_Invoke(Invoke* x) { 2527  CallingConvention* cc = frame_map()->java_calling_convention(x->signature(), true); 2528 2529  LIR_OprList* arg_list = cc->args(); 2530  LIRItemList* args = invoke_visit_arguments(x); 2531  LIR_Opr receiver = LIR_OprFact::illegalOpr; 2532 2533  // setup result register 2534  LIR_Opr result_register = LIR_OprFact::illegalOpr; 2535  if (x->type() != voidType) { 2536    result_register = result_register_for(x->type()); 2537  } 2538 2539  CodeEmitInfo* info = state_for(x, x->state()); 2540 2541  // invokedynamics can deoptimize. 2542  CodeEmitInfo* deopt_info = x->is_invokedynamic() ?
state_for(x, x->state_before()) : NULL; 2543 2544 invoke_load_arguments(x, args, arg_list); 2545 2546 if (x->has_receiver()) { 2547 args->at(0)->load_item_force(LIR_Assembler::receiverOpr()); 2548 receiver = args->at(0)->result(); 2549 } 2550 2551 // emit invoke code 2552 bool optimized = x->target_is_loaded() && x->target_is_final(); 2553 assert(receiver->is_illegal() || receiver->is_equal(LIR_Assembler::receiverOpr()), "must match"); 2554 2555 // JSR 292 2556 // Preserve the SP over MethodHandle call sites. 2557 ciMethod* target = x->target(); 2558 if (target->is_method_handle_invoke()) { 2559 info->set_is_method_handle_invoke(true); 2560 __ move(FrameMap::stack_pointer(), FrameMap::method_handle_invoke_SP_save_opr()); 2561 } 2562 2563 switch (x->code()) { 2564 case Bytecodes::_invokestatic: 2565 __ call_static(target, result_register, 2566 SharedRuntime::get_resolve_static_call_stub(), 2567 arg_list, info); 2568 break; 2569 case Bytecodes::_invokespecial: 2570 case Bytecodes::_invokevirtual: 2571 case Bytecodes::_invokeinterface: 2572 // for final target we still produce an inline cache, in order 2573 // to be able to call mixed mode 2574 if (x->code() == Bytecodes::_invokespecial || optimized) { 2575 __ call_opt_virtual(target, receiver, result_register, 2576 SharedRuntime::get_resolve_opt_virtual_call_stub(), 2577 arg_list, info); 2578 } else if (x->vtable_index() < 0) { 2579 __ call_icvirtual(target, receiver, result_register, 2580 SharedRuntime::get_resolve_virtual_call_stub(), 2581 arg_list, info); 2582 } else { 2583 int entry_offset = instanceKlass::vtable_start_offset() + x->vtable_index() * vtableEntry::size(); 2584 int vtable_offset = entry_offset * wordSize + vtableEntry::method_offset_in_bytes(); 2585 __ call_virtual(target, receiver, result_register, vtable_offset, arg_list, info); 2586 } 2587 break; 2588 case Bytecodes::_invokedynamic: { 2589 ciBytecodeStream bcs(x->scope()->method()); 2590 bcs.force_bci(x->state()->bci()); 2591 assert(bcs.cur_bc() == Bytecodes::_invokedynamic, "wrong stream"); 2592 ciCPCache* cpcache = bcs.get_cpcache(); 2593 2594 // Get CallSite offset from constant pool cache pointer. 2595 int index = bcs.get_method_index(); 2596 size_t call_site_offset = cpcache->get_f1_offset(index); 2597 2598 // If this invokedynamic call site hasn't been executed yet in 2599 // the interpreter, the CallSite object in the constant pool 2600 // cache is still null and we need to deoptimize. 2601 if (cpcache->is_f1_null_at(index)) { 2602 // Cannot re-use same xhandlers for multiple CodeEmitInfos, so 2603 // clone all handlers. This is handled transparently in other 2604 // places by the CodeEmitInfo cloning logic but is handled 2605 // specially here because a stub isn't being used. 2606 x->set_exception_handlers(new XHandlers(x->exception_handlers())); 2607 2608 DeoptimizeStub* deopt_stub = new DeoptimizeStub(deopt_info); 2609 __ jump(deopt_stub); 2610 } 2611 2612 // Use the receiver register for the synthetic MethodHandle 2613 // argument. 2614 receiver = LIR_Assembler::receiverOpr(); 2615 LIR_Opr tmp = new_register(objectType); 2616 2617 // Load CallSite object from constant pool cache. 2618 __ oop2reg(cpcache->constant_encoding(), tmp); 2619 __ load(new LIR_Address(tmp, call_site_offset, T_OBJECT), tmp); 2620 2621 // Load target MethodHandle from CallSite object. 
2622 __ load(new LIR_Address(tmp, java_lang_invoke_CallSite::target_offset_in_bytes(), T_OBJECT), receiver); 2623 2624 __ call_dynamic(target, receiver, result_register, 2625 SharedRuntime::get_resolve_opt_virtual_call_stub(), 2626 arg_list, info); 2627 break; 2628 } 2629 default: 2630 ShouldNotReachHere(); 2631 break; 2632 } 2633 2634 // JSR 292 2635 // Restore the SP after MethodHandle call sites. 2636 if (target->is_method_handle_invoke()) { 2637 __ move(FrameMap::method_handle_invoke_SP_save_opr(), FrameMap::stack_pointer()); 2638 } 2639 2640 if (x->type()->is_float() || x->type()->is_double()) { 2641 // Force rounding of results from non-strictfp when in strictfp 2642 // scope (or when we don't know the strictness of the callee, to 2643 // be safe.) 2644 if (method()->is_strict()) { 2645 if (!x->target_is_loaded() || !x->target_is_strictfp()) { 2646 result_register = round_item(result_register); 2647 } 2648 } 2649 } 2650 2651 if (result_register->is_valid()) { 2652 LIR_Opr result = rlock_result(x); 2653 __ move(result_register, result); 2654 } 2655} 2656 2657 2658void LIRGenerator::do_FPIntrinsics(Intrinsic* x) { 2659 assert(x->number_of_arguments() == 1, "wrong type"); 2660 LIRItem value (x->argument_at(0), this); 2661 LIR_Opr reg = rlock_result(x); 2662 value.load_item(); 2663 LIR_Opr tmp = force_to_spill(value.result(), as_BasicType(x->type())); 2664 __ move(tmp, reg); 2665} 2666 2667 2668 2669// Code for : x->x() {x->cond()} x->y() ? x->tval() : x->fval() 2670void LIRGenerator::do_IfOp(IfOp* x) { 2671#ifdef ASSERT 2672 { 2673 ValueTag xtag = x->x()->type()->tag(); 2674 ValueTag ttag = x->tval()->type()->tag(); 2675 assert(xtag == intTag || xtag == objectTag, "cannot handle others"); 2676 assert(ttag == addressTag || ttag == intTag || ttag == objectTag || ttag == longTag, "cannot handle others"); 2677 assert(ttag == x->fval()->type()->tag(), "cannot handle others"); 2678 } 2679#endif 2680 2681 LIRItem left(x->x(), this); 2682 LIRItem right(x->y(), this); 2683 left.load_item(); 2684 if (can_inline_as_constant(right.value())) { 2685 right.dont_load_item(); 2686 } else { 2687 right.load_item(); 2688 } 2689 2690 LIRItem t_val(x->tval(), this); 2691 LIRItem f_val(x->fval(), this); 2692 t_val.dont_load_item(); 2693 f_val.dont_load_item(); 2694 LIR_Opr reg = rlock_result(x); 2695 2696 __ cmp(lir_cond(x->cond()), left.result(), right.result()); 2697 __ cmove(lir_cond(x->cond()), t_val.result(), f_val.result(), reg, as_BasicType(x->x()->type())); 2698} 2699 2700 2701void LIRGenerator::do_Intrinsic(Intrinsic* x) { 2702 switch (x->id()) { 2703 case vmIntrinsics::_intBitsToFloat : 2704 case vmIntrinsics::_doubleToRawLongBits : 2705 case vmIntrinsics::_longBitsToDouble : 2706 case vmIntrinsics::_floatToRawIntBits : { 2707 do_FPIntrinsics(x); 2708 break; 2709 } 2710 2711 case vmIntrinsics::_currentTimeMillis: { 2712 assert(x->number_of_arguments() == 0, "wrong type"); 2713 LIR_Opr reg = result_register_for(x->type()); 2714 __ call_runtime_leaf(CAST_FROM_FN_PTR(address, os::javaTimeMillis), getThreadTemp(), 2715 reg, new LIR_OprList()); 2716 LIR_Opr result = rlock_result(x); 2717 __ move(reg, result); 2718 break; 2719 } 2720 2721 case vmIntrinsics::_nanoTime: { 2722 assert(x->number_of_arguments() == 0, "wrong type"); 2723 LIR_Opr reg = result_register_for(x->type()); 2724 __ call_runtime_leaf(CAST_FROM_FN_PTR(address, os::javaTimeNanos), getThreadTemp(), 2725 reg, new LIR_OprList()); 2726 LIR_Opr result = rlock_result(x); 2727 __ move(reg, result); 2728 break; 2729 } 2730 2731 case 
vmIntrinsics::_Object_init:   do_RegisterFinalizer(x); break; 2732  case vmIntrinsics::_getClass:       do_getClass(x);       break; 2733  case vmIntrinsics::_currentThread:  do_currentThread(x);  break; 2734 2735  case vmIntrinsics::_dlog:           // fall through 2736  case vmIntrinsics::_dlog10:         // fall through 2737  case vmIntrinsics::_dabs:           // fall through 2738  case vmIntrinsics::_dsqrt:          // fall through 2739  case vmIntrinsics::_dtan:           // fall through 2740  case vmIntrinsics::_dsin :          // fall through 2741  case vmIntrinsics::_dcos :          do_MathIntrinsic(x); break; 2742  case vmIntrinsics::_arraycopy:      do_ArrayCopy(x);     break; 2743 2744  // java.nio.Buffer.checkIndex 2745  case vmIntrinsics::_checkIndex:     do_NIOCheckIndex(x); break; 2746 2747  case vmIntrinsics::_compareAndSwapObject: 2748    do_CompareAndSwap(x, objectType); 2749    break; 2750  case vmIntrinsics::_compareAndSwapInt: 2751    do_CompareAndSwap(x, intType); 2752    break; 2753  case vmIntrinsics::_compareAndSwapLong: 2754    do_CompareAndSwap(x, longType); 2755    break; 2756 2757    // sun.misc.AtomicLongCSImpl.attemptUpdate 2758  case vmIntrinsics::_attemptUpdate: 2759    do_AttemptUpdate(x); 2760    break; 2761 2762  default: ShouldNotReachHere(); break; 2763  } 2764} 2765 2766void LIRGenerator::do_ProfileCall(ProfileCall* x) { 2767  // Need recv in a temporary register so it interferes with the other temporaries 2768  LIR_Opr recv = LIR_OprFact::illegalOpr; 2769  LIR_Opr mdo = new_register(T_OBJECT); 2770  // tmp is used to hold the counters on SPARC 2771  LIR_Opr tmp = new_pointer_register(); 2772  if (x->recv() != NULL) { 2773    LIRItem value(x->recv(), this); 2774    value.load_item(); 2775    recv = new_register(T_OBJECT); 2776    __ move(value.result(), recv); 2777  } 2778  __ profile_call(x->method(), x->bci_of_invoke(), mdo, recv, tmp, x->known_holder()); 2779} 2780 2781void LIRGenerator::do_ProfileInvoke(ProfileInvoke* x) { 2782  // We can safely ignore accessors here, since c2 will inline them anyway; 2783  // accessors are also always mature. 2784  if (!x->inlinee()->is_accessor()) { 2785    CodeEmitInfo* info = state_for(x, x->state(), true); 2786    // Increment the invocation counter, but don't notify the runtime, because we don't inline loops. 2787    increment_event_counter_impl(info, x->inlinee(), 0, InvocationEntryBci, false, false); 2788  } 2789} 2790 2791void LIRGenerator::increment_event_counter(CodeEmitInfo* info, int bci, bool backedge) { 2792  int freq_log; 2793  int level = compilation()->env()->comp_level(); 2794  if (level == CompLevel_limited_profile) { 2795    freq_log = (backedge ? Tier2BackedgeNotifyFreqLog : Tier2InvokeNotifyFreqLog); 2796  } else if (level == CompLevel_full_profile) { 2797    freq_log = (backedge ? Tier3BackedgeNotifyFreqLog : Tier3InvokeNotifyFreqLog); 2798  } else { 2799    ShouldNotReachHere(); 2800  } 2801  // Increment the appropriate invocation/backedge counter and notify the runtime. 2802  increment_event_counter_impl(info, info->scope()->method(), (1 << freq_log) - 1, bci, backedge, true); 2803} 2804 2805void LIRGenerator::increment_event_counter_impl(CodeEmitInfo* info, 2806                                                ciMethod *method, int frequency, 2807                                                int bci, bool backedge, bool notify) { 2808  assert(frequency == 0 || is_power_of_2(frequency + 1), "Frequency must be 2^x - 1 or 0"); 2809  int level = _compilation->env()->comp_level(); 2810  assert(level > CompLevel_simple, "Shouldn't be here"); 2811 2812  int offset = -1; 2813  LIR_Opr counter_holder = new_register(T_OBJECT); 2814  LIR_Opr meth; 2815  if (level == CompLevel_limited_profile) { 2816    offset = in_bytes(backedge ?
methodOopDesc::backedge_counter_offset() : 2817                                 methodOopDesc::invocation_counter_offset()); 2818    __ oop2reg(method->constant_encoding(), counter_holder); 2819    meth = counter_holder; 2820  } else if (level == CompLevel_full_profile) { 2821    offset = in_bytes(backedge ? methodDataOopDesc::backedge_counter_offset() : 2822                                 methodDataOopDesc::invocation_counter_offset()); 2823    ciMethodData* md = method->method_data_or_null(); 2824    assert(md != NULL, "Sanity"); 2825    __ oop2reg(md->constant_encoding(), counter_holder); 2826    meth = new_register(T_OBJECT); 2827    __ oop2reg(method->constant_encoding(), meth); 2828  } else { 2829    ShouldNotReachHere(); 2830  } 2831  LIR_Address* counter = new LIR_Address(counter_holder, offset, T_INT); 2832  LIR_Opr result = new_register(T_INT); 2833  __ load(counter, result); 2834  __ add(result, LIR_OprFact::intConst(InvocationCounter::count_increment), result); 2835  __ store(result, counter); 2836  if (notify) { 2837    LIR_Opr mask = load_immediate(frequency << InvocationCounter::count_shift, T_INT); 2838    __ logical_and(result, mask, result); 2839    __ cmp(lir_cond_equal, result, LIR_OprFact::intConst(0)); 2840    // The bci for the info can point to the cmp for ifs; we want the if bci. 2841    CodeStub* overflow = new CounterOverflowStub(info, bci, meth); 2842    __ branch(lir_cond_equal, T_INT, overflow); 2843    __ branch_destination(overflow->continuation()); 2844  } 2845} 2846 2847void LIRGenerator::do_RuntimeCall(RuntimeCall* x) { 2848  LIR_OprList* args = new LIR_OprList(x->number_of_arguments()); 2849  BasicTypeList* signature = new BasicTypeList(x->number_of_arguments()); 2850 2851  if (x->pass_thread()) { 2852    signature->append(T_ADDRESS); 2853    args->append(getThreadPointer()); 2854  } 2855 2856  for (int i = 0; i < x->number_of_arguments(); i++) { 2857    Value a = x->argument_at(i); 2858    LIRItem* item = new LIRItem(a, this); 2859    item->load_item(); 2860    args->append(item->result()); 2861    signature->append(as_BasicType(a->type())); 2862  } 2863 2864  LIR_Opr result = call_runtime(signature, args, x->entry(), x->type(), NULL); 2865  if (x->type() == voidType) { 2866    set_no_result(x); 2867  } else { 2868    __ move(result, rlock_result(x)); 2869  } 2870} 2871 2872LIR_Opr LIRGenerator::call_runtime(Value arg1, address entry, ValueType* result_type, CodeEmitInfo* info) { 2873  LIRItemList args(1); 2874  LIRItem value(arg1, this); 2875  args.append(&value); 2876  BasicTypeList signature; 2877  signature.append(as_BasicType(arg1->type())); 2878 2879  return call_runtime(&signature, &args, entry, result_type, info); 2880} 2881 2882 2883LIR_Opr LIRGenerator::call_runtime(Value arg1, Value arg2, address entry, ValueType* result_type, CodeEmitInfo* info) { 2884  LIRItemList args(2); 2885  LIRItem value1(arg1, this); 2886  LIRItem value2(arg2, this); 2887  args.append(&value1); 2888  args.append(&value2); 2889  BasicTypeList signature; 2890  signature.append(as_BasicType(arg1->type())); 2891  signature.append(as_BasicType(arg2->type())); 2892 2893  return call_runtime(&signature, &args, entry, result_type, info); 2894} 2895 2896 2897LIR_Opr LIRGenerator::call_runtime(BasicTypeArray* signature, LIR_OprList* args, 2898                                   address entry, ValueType* result_type, CodeEmitInfo* info) { 2899  // get a result register 2900  LIR_Opr phys_reg = LIR_OprFact::illegalOpr; 2901  LIR_Opr result = LIR_OprFact::illegalOpr; 2902  if (result_type->tag() != voidTag) { 2903    result = new_register(result_type); 2904    phys_reg = result_register_for(result_type); 2905  } 2906 2907  // move the arguments into the correct location 2908  CallingConvention* cc =
frame_map()->c_calling_convention(signature); 2909 assert(cc->length() == args->length(), "argument mismatch"); 2910 for (int i = 0; i < args->length(); i++) { 2911 LIR_Opr arg = args->at(i); 2912 LIR_Opr loc = cc->at(i); 2913 if (loc->is_register()) { 2914 __ move(arg, loc); 2915 } else { 2916 LIR_Address* addr = loc->as_address_ptr(); 2917// if (!can_store_as_constant(arg)) { 2918// LIR_Opr tmp = new_register(arg->type()); 2919// __ move(arg, tmp); 2920// arg = tmp; 2921// } 2922 if (addr->type() == T_LONG || addr->type() == T_DOUBLE) { 2923 __ unaligned_move(arg, addr); 2924 } else { 2925 __ move(arg, addr); 2926 } 2927 } 2928 } 2929 2930 if (info) { 2931 __ call_runtime(entry, getThreadTemp(), phys_reg, cc->args(), info); 2932 } else { 2933 __ call_runtime_leaf(entry, getThreadTemp(), phys_reg, cc->args()); 2934 } 2935 if (result->is_valid()) { 2936 __ move(phys_reg, result); 2937 } 2938 return result; 2939} 2940 2941 2942LIR_Opr LIRGenerator::call_runtime(BasicTypeArray* signature, LIRItemList* args, 2943 address entry, ValueType* result_type, CodeEmitInfo* info) { 2944 // get a result register 2945 LIR_Opr phys_reg = LIR_OprFact::illegalOpr; 2946 LIR_Opr result = LIR_OprFact::illegalOpr; 2947 if (result_type->tag() != voidTag) { 2948 result = new_register(result_type); 2949 phys_reg = result_register_for(result_type); 2950 } 2951 2952 // move the arguments into the correct location 2953 CallingConvention* cc = frame_map()->c_calling_convention(signature); 2954 2955 assert(cc->length() == args->length(), "argument mismatch"); 2956 for (int i = 0; i < args->length(); i++) { 2957 LIRItem* arg = args->at(i); 2958 LIR_Opr loc = cc->at(i); 2959 if (loc->is_register()) { 2960 arg->load_item_force(loc); 2961 } else { 2962 LIR_Address* addr = loc->as_address_ptr(); 2963 arg->load_for_store(addr->type()); 2964 if (addr->type() == T_LONG || addr->type() == T_DOUBLE) { 2965 __ unaligned_move(arg->result(), addr); 2966 } else { 2967 __ move(arg->result(), addr); 2968 } 2969 } 2970 } 2971 2972 if (info) { 2973 __ call_runtime(entry, getThreadTemp(), phys_reg, cc->args(), info); 2974 } else { 2975 __ call_runtime_leaf(entry, getThreadTemp(), phys_reg, cc->args()); 2976 } 2977 if (result->is_valid()) { 2978 __ move(phys_reg, result); 2979 } 2980 return result; 2981} 2982
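//--------------------------------------------------------------------------
// Usage sketch (editorial, not part of this revision): the one- and
// two-argument call_runtime() overloads above simply wrap the list-based
// variant. A hypothetical single-argument leaf call, using the real
// SharedRuntime::dsin entry and passing NULL for the CodeEmitInfo so that
// call_runtime_leaf() is emitted, might look like:
//
//   LIR_Opr res = call_runtime(x->argument_at(0),
//                              CAST_FROM_FN_PTR(address, SharedRuntime::dsin),
//                              doubleType, NULL);
//   set_result(x, res);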