/*
 * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2016 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArrayKlass.hpp"
#include "ci/ciInstance.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/cardTableModRefBS.hpp"
#include "nativeInst_ppc.hpp"
#include "oops/objArrayKlass.hpp"
#include "runtime/sharedRuntime.hpp"

#define __ _masm->


const ConditionRegister LIR_Assembler::BOOL_RESULT = CCR5;


bool LIR_Assembler::is_small_constant(LIR_Opr opr) {
  Unimplemented(); return false; // Currently not used on this platform.
}


LIR_Opr LIR_Assembler::receiverOpr() {
  return FrameMap::R3_oop_opr;
}


LIR_Opr LIR_Assembler::osrBufferPointer() {
  return FrameMap::R3_opr;
}


// This specifies the stack pointer decrement needed to build the frame.
int LIR_Assembler::initial_frame_size_in_bytes() const {
  return in_bytes(frame_map()->framesize_in_bytes());
}


// Inline cache check: the inline cached class is in inline_cache_reg;
// we fetch the class of the receiver and compare it with the cached class.
// If they do not match we jump to slow case.
int LIR_Assembler::check_icache() {
  int offset = __ offset();
  __ inline_cache_check(R3_ARG1, R19_inline_cache_reg);
  return offset;
}


void LIR_Assembler::osr_entry() {
  // On-stack-replacement entry sequence:
  //
  //  1. Create a new compiled activation.
  //  2. Initialize local variables in the compiled activation. The expression
  //     stack must be empty at the osr_bci; it is not initialized.
  //  3. Jump to the continuation address in compiled code to resume execution.

  // OSR entry point
  offsets()->set_value(CodeOffsets::OSR_Entry, code_offset());
  BlockBegin* osr_entry = compilation()->hir()->osr_entry();
  ValueStack* entry_state = osr_entry->end()->state();
  int number_of_locks = entry_state->locks_size();

  // Create a frame for the compiled activation.
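  // Note: build_frame() also performs the stack bang; bang_size_in_bytes()
  // covers at least the frame itself (and, as an assumption here, enough
  // room for the interpreter frames a later deoptimization may need).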
  __ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());

  // OSR buffer is
  //
  // locals[nlocals-1..0]
  // monitors[number_of_locks-1..0]
  //
  // Locals is a direct copy of the interpreter frame so in the osr buffer
  // the first slot in the local array is the last local from the interpreter
  // and the last slot is local[0] (receiver) from the interpreter.
  //
  // Similarly with locks. The first lock slot in the osr buffer is the nth lock
  // from the interpreter frame, and the nth lock slot in the osr buffer is the
  // 0th lock in the interpreter frame (the method lock if a sync method).

  // Initialize monitors in the compiled activation.
  //   R3: pointer to osr buffer
  //
  // All other registers are dead at this point and the locals will be
  // copied into place by code emitted in the IR.

  Register OSR_buf = osrBufferPointer()->as_register();
  { assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
    int monitor_offset = BytesPerWord * method()->max_locals() +
                         (2 * BytesPerWord) * (number_of_locks - 1);
    // SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in
    // the OSR buffer using 2 word entries: first the lock and then
    // the oop.
    for (int i = 0; i < number_of_locks; i++) {
      int slot_offset = monitor_offset - ((i * 2) * BytesPerWord);
#ifdef ASSERT
      // Verify the interpreter's monitor has a non-null object.
      {
        Label L;
        __ ld(R0, slot_offset + 1*BytesPerWord, OSR_buf);
        __ cmpdi(CCR0, R0, 0);
        __ bne(CCR0, L);
        __ stop("locked object is NULL");
        __ bind(L);
      }
#endif // ASSERT
      // Copy the lock field into the compiled activation.
      Address ml = frame_map()->address_for_monitor_lock(i),
              mo = frame_map()->address_for_monitor_object(i);
      assert(ml.index() == noreg && mo.index() == noreg, "sanity");
      __ ld(R0, slot_offset + 0, OSR_buf);
      __ std(R0, ml.disp(), ml.base());
      __ ld(R0, slot_offset + 1*BytesPerWord, OSR_buf);
      __ std(R0, mo.disp(), mo.base());
    }
  }
}


int LIR_Assembler::emit_exception_handler() {
  // If the last instruction is a call (typically to do a throw which
  // is coming at the end after block reordering) the return address
  // must still point into the code area in order to avoid assertion
  // failures when searching for the corresponding bci => add a nop
  // (was bug 5/14/1999 - gri).
  __ nop();

  // Generate code for the exception handler.
  address handler_base = __ start_a_stub(exception_handler_size());

  if (handler_base == NULL) {
    // Not enough space left for the handler.
    bailout("exception handler overflow");
    return -1;
  }

  int offset = code_offset();
  address entry_point = CAST_FROM_FN_PTR(address, Runtime1::entry_for(Runtime1::handle_exception_from_callee_id));
  //__ load_const_optimized(R0, entry_point);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(entry_point));
  __ mtctr(R0);
  __ bctr();

  guarantee(code_offset() - offset <= exception_handler_size(), "overflow");
  __ end_a_stub();

  return offset;
}


// Emit the code to remove the frame from the stack in the exception
// unwind path.
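// Rough flow (see below): fetch the pending exception oop and pc from the
// thread and clear them, unlock the monitor if the method is synchronized
// (preserving the exception around the unlock), then transfer control to
// the unwind_exception runtime stub.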
int LIR_Assembler::emit_unwind_handler() {
  _masm->block_comment("Unwind handler");

  int offset = code_offset();
  bool preserve_exception = method()->is_synchronized() || compilation()->env()->dtrace_method_probes();
  const Register Rexception = R3 /*LIRGenerator::exceptionOopOpr()*/, Rexception_save = R31;

  // Fetch the exception from TLS and clear out exception related thread state.
  __ ld(Rexception, in_bytes(JavaThread::exception_oop_offset()), R16_thread);
  __ li(R0, 0);
  __ std(R0, in_bytes(JavaThread::exception_oop_offset()), R16_thread);
  __ std(R0, in_bytes(JavaThread::exception_pc_offset()), R16_thread);

  __ bind(_unwind_handler_entry);
  __ verify_not_null_oop(Rexception);
  if (preserve_exception) { __ mr(Rexception_save, Rexception); }

  // Perform needed unlocking.
  MonitorExitStub* stub = NULL;
  if (method()->is_synchronized()) {
    monitor_address(0, FrameMap::R4_opr);
    stub = new MonitorExitStub(FrameMap::R4_opr, true, 0);
    __ unlock_object(R5, R6, R4, *stub->entry());
    __ bind(*stub->continuation());
  }

  if (compilation()->env()->dtrace_method_probes()) {
    Unimplemented();
  }

  // Dispatch to the unwind logic.
  address unwind_stub = Runtime1::entry_for(Runtime1::unwind_exception_id);
  //__ load_const_optimized(R0, unwind_stub);
  __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(unwind_stub));
  if (preserve_exception) { __ mr(Rexception, Rexception_save); }
  __ mtctr(R0);
  __ bctr();

  // Emit the slow path assembly.
  if (stub != NULL) {
    stub->emit_code(this);
  }

  return offset;
}


int LIR_Assembler::emit_deopt_handler() {
  // If the last instruction is a call (typically to do a throw which
  // is coming at the end after block reordering) the return address
  // must still point into the code area in order to avoid assertion
  // failures when searching for the corresponding bci => add a nop
  // (was bug 5/14/1999 - gri).
  __ nop();

  // Generate code for deopt handler.
  address handler_base = __ start_a_stub(deopt_handler_size());

  if (handler_base == NULL) {
    // Not enough space left for the handler.
    bailout("deopt handler overflow");
    return -1;
  }

  int offset = code_offset();
  __ bl64_patchable(SharedRuntime::deopt_blob()->unpack(), relocInfo::runtime_call_type);

  guarantee(code_offset() - offset <= deopt_handler_size(), "overflow");
  __ end_a_stub();

  return offset;
}


void LIR_Assembler::jobject2reg(jobject o, Register reg) {
  if (o == NULL) {
    __ li(reg, 0);
  } else {
    AddressLiteral addrlit = __ constant_oop_address(o);
    __ load_const(reg, addrlit, (reg != R0) ? R0 : noreg);
  }
}


void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo *info) {
  // Allocate a new index in table to hold the object once it's been patched.
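  // The constant is emitted as NULL for now; the PatchingStub registered via
  // patching_epilog() rewrites it with the resolved oop at run time.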
  int oop_index = __ oop_recorder()->allocate_oop_index(NULL);
  PatchingStub* patch = new PatchingStub(_masm, patching_id(info), oop_index);

  AddressLiteral addrlit((address)NULL, oop_Relocation::spec(oop_index));
  __ load_const(reg, addrlit, R0);

  patching_epilog(patch, lir_patch_normal, reg, info);
}


void LIR_Assembler::metadata2reg(Metadata* o, Register reg) {
  AddressLiteral md = __ constant_metadata_address(o); // Notify OOP recorder (don't need the relocation)
  __ load_const_optimized(reg, md.value(), (reg != R0) ? R0 : noreg);
}


void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo *info) {
  // Allocate a new index in table to hold the klass once it's been patched.
  int index = __ oop_recorder()->allocate_metadata_index(NULL);
  PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_klass_id, index);

  AddressLiteral addrlit((address)NULL, metadata_Relocation::spec(index));
  assert(addrlit.rspec().type() == relocInfo::metadata_type, "must be a metadata reloc");
  __ load_const(reg, addrlit, R0);

  patching_epilog(patch, lir_patch_normal, reg, info);
}


void LIR_Assembler::arithmetic_idiv(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr temp, LIR_Opr result, CodeEmitInfo* info) {
  const bool is_int = result->is_single_cpu();
  Register Rdividend = is_int ? left->as_register() : left->as_register_lo();
  Register Rdivisor  = noreg;
  Register Rscratch  = temp->as_register();
  Register Rresult   = is_int ? result->as_register() : result->as_register_lo();
  long divisor = -1;

  if (right->is_register()) {
    Rdivisor = is_int ? right->as_register() : right->as_register_lo();
  } else {
    divisor = is_int ? right->as_constant_ptr()->as_jint()
                     : right->as_constant_ptr()->as_jlong();
  }

  assert(Rdividend != Rscratch, "");
  assert(Rdivisor != Rscratch, "");
  assert(code == lir_idiv || code == lir_irem, "Must be irem or idiv");

  if (Rdivisor == noreg) {
    if (divisor == 1) { // stupid, but can happen
      if (code == lir_idiv) {
        __ mr_if_needed(Rresult, Rdividend);
      } else {
        __ li(Rresult, 0);
      }

    } else if (is_power_of_2(divisor)) {
      // Convert division by a power of two into some shifts and logical operations.
      int log2 = log2_intptr(divisor);

      // Round towards 0.
      if (divisor == 2) {
        if (is_int) {
          __ srwi(Rscratch, Rdividend, 31);
        } else {
          __ srdi(Rscratch, Rdividend, 63);
        }
      } else {
        if (is_int) {
          __ srawi(Rscratch, Rdividend, 31);
        } else {
          __ sradi(Rscratch, Rdividend, 63);
        }
        __ clrldi(Rscratch, Rscratch, 64-log2);
      }
      __ add(Rscratch, Rdividend, Rscratch);

      if (code == lir_idiv) {
        if (is_int) {
          __ srawi(Rresult, Rscratch, log2);
        } else {
          __ sradi(Rresult, Rscratch, log2);
        }
      } else { // lir_irem
        __ clrrdi(Rscratch, Rscratch, log2);
        __ sub(Rresult, Rdividend, Rscratch);
      }
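      // Worked example (is_int, divisor = 8, log2 = 3, dividend = -13):
      // the sign mask is -1 and clrldi keeps its low 3 bits, giving bias 7;
      // -13 + 7 = -6, and -6 >> 3 (arithmetic) = -1, matching Java's
      // round-toward-zero -13 / 8. For irem, clrrdi yields -8, and
      // -13 - (-8) = -5 = -13 % 8.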

    } else if (divisor == -1) {
      if (code == lir_idiv) {
        __ neg(Rresult, Rdividend);
      } else {
        __ li(Rresult, 0);
      }

    } else {
      __ load_const_optimized(Rscratch, divisor);
      if (code == lir_idiv) {
        if (is_int) {
          __ divw(Rresult, Rdividend, Rscratch); // Can't divide minint/-1.
        } else {
          __ divd(Rresult, Rdividend, Rscratch); // Can't divide minint/-1.
        }
      } else {
        assert(Rscratch != R0, "need both");
        if (is_int) {
          __ divw(R0, Rdividend, Rscratch); // Can't divide minint/-1.
          __ mullw(Rscratch, R0, Rscratch);
        } else {
          __ divd(R0, Rdividend, Rscratch); // Can't divide minint/-1.
          __ mulld(Rscratch, R0, Rscratch);
        }
        __ sub(Rresult, Rdividend, Rscratch);
      }

    }
    return;
  }

  Label regular, done;
  if (is_int) {
    __ cmpwi(CCR0, Rdivisor, -1);
  } else {
    __ cmpdi(CCR0, Rdivisor, -1);
  }
  __ bne(CCR0, regular);
  if (code == lir_idiv) {
    __ neg(Rresult, Rdividend);
    __ b(done);
    __ bind(regular);
    if (is_int) {
      __ divw(Rresult, Rdividend, Rdivisor); // Can't divide minint/-1.
    } else {
      __ divd(Rresult, Rdividend, Rdivisor); // Can't divide minint/-1.
    }
  } else { // lir_irem
    __ li(Rresult, 0);
    __ b(done);
    __ bind(regular);
    if (is_int) {
      __ divw(Rscratch, Rdividend, Rdivisor); // Can't divide minint/-1.
      __ mullw(Rscratch, Rscratch, Rdivisor);
    } else {
      __ divd(Rscratch, Rdividend, Rdivisor); // Can't divide minint/-1.
      __ mulld(Rscratch, Rscratch, Rdivisor);
    }
    __ sub(Rresult, Rdividend, Rscratch);
  }
  __ bind(done);
}


void LIR_Assembler::emit_op3(LIR_Op3* op) {
  switch (op->code()) {
    case lir_idiv:
    case lir_irem:
      arithmetic_idiv(op->code(), op->in_opr1(), op->in_opr2(), op->in_opr3(),
                      op->result_opr(), op->info());
      break;
    case lir_fmad:
      __ fmadd(op->result_opr()->as_double_reg(), op->in_opr1()->as_double_reg(),
               op->in_opr2()->as_double_reg(), op->in_opr3()->as_double_reg());
      break;
    case lir_fmaf:
      __ fmadds(op->result_opr()->as_float_reg(), op->in_opr1()->as_float_reg(),
                op->in_opr2()->as_float_reg(), op->in_opr3()->as_float_reg());
      break;
    default: ShouldNotReachHere(); break;
  }
}


void LIR_Assembler::emit_opBranch(LIR_OpBranch* op) {
#ifdef ASSERT
  assert(op->block() == NULL || op->block()->label() == op->label(), "wrong label");
  if (op->block() != NULL)  _branch_target_blocks.append(op->block());
  if (op->ublock() != NULL) _branch_target_blocks.append(op->ublock());
  assert(op->info() == NULL, "shouldn't have CodeEmitInfo");
#endif

  Label *L = op->label();
  if (op->cond() == lir_cond_always) {
    __ b(*L);
  } else {
    Label done;
    bool is_unordered = false;
    if (op->code() == lir_cond_float_branch) {
      assert(op->ublock() != NULL, "must have unordered successor");
      is_unordered = true;
    } else {
      assert(op->code() == lir_branch, "just checking");
    }

    bool positive = false;
    Assembler::Condition cond = Assembler::equal;
    switch (op->cond()) {
      case lir_cond_equal:        positive = true ; cond = Assembler::equal  ; is_unordered = false; break;
      case lir_cond_notEqual:     positive = false; cond = Assembler::equal  ; is_unordered = false; break;
      case lir_cond_less:         positive = true ; cond = Assembler::less   ; break;
      case lir_cond_belowEqual:   assert(op->code() != lir_cond_float_branch, ""); // fallthru
      case lir_cond_lessEqual:    positive = false; cond = Assembler::greater; break;
      case lir_cond_greater:      positive = true ; cond = Assembler::greater; break;
      case lir_cond_aboveEqual:   assert(op->code() != lir_cond_float_branch, ""); // fallthru
      case lir_cond_greaterEqual: positive = false; cond = Assembler::less   ; break;
      default:                    ShouldNotReachHere();
    }
    int bo = positive ? Assembler::bcondCRbiIs1 : Assembler::bcondCRbiIs0;
    int bi = Assembler::bi0(BOOL_RESULT, cond);
    if (is_unordered) {
      if (positive) {
        if (op->ublock() == op->block()) {
          __ bc_far_optimized(Assembler::bcondCRbiIs1, __ bi0(BOOL_RESULT, Assembler::summary_overflow), *L);
        }
      } else {
        if (op->ublock() != op->block()) { __ bso(BOOL_RESULT, done); }
      }
    }
    __ bc_far_optimized(bo, bi, *L);
    __ bind(done);
  }
}


void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
  Bytecodes::Code code = op->bytecode();
  LIR_Opr src = op->in_opr(),
          dst = op->result_opr();

  switch(code) {
    case Bytecodes::_i2l: {
      __ extsw(dst->as_register_lo(), src->as_register());
      break;
    }
    case Bytecodes::_l2i: {
      __ mr_if_needed(dst->as_register(), src->as_register_lo()); // high bits are garbage
      break;
    }
    case Bytecodes::_i2b: {
      __ extsb(dst->as_register(), src->as_register());
      break;
    }
    case Bytecodes::_i2c: {
      __ clrldi(dst->as_register(), src->as_register(), 64-16);
      break;
    }
    case Bytecodes::_i2s: {
      __ extsh(dst->as_register(), src->as_register());
      break;
    }
    case Bytecodes::_i2d:
    case Bytecodes::_l2d: {
      __ fcfid(dst->as_double_reg(), src->as_double_reg()); // via mem
      break;
    }
    case Bytecodes::_i2f: {
      FloatRegister rdst = dst->as_float_reg();
      FloatRegister rsrc = src->as_double_reg(); // via mem
      if (VM_Version::has_fcfids()) {
        __ fcfids(rdst, rsrc);
      } else {
        __ fcfid(rdst, rsrc);
        __ frsp(rdst, rdst);
      }
      break;
    }
    case Bytecodes::_l2f: { // >= Power7
      assert(VM_Version::has_fcfids(), "fcfid+frsp needs fixup code to avoid rounding incompatibility");
      __ fcfids(dst->as_float_reg(), src->as_double_reg()); // via mem
      break;
    }
    case Bytecodes::_f2d: {
      __ fmr_if_needed(dst->as_double_reg(), src->as_float_reg());
      break;
    }
    case Bytecodes::_d2f: {
      __ frsp(dst->as_float_reg(), src->as_double_reg());
      break;
    }
    case Bytecodes::_d2i:
    case Bytecodes::_f2i: {
      FloatRegister rsrc = (code == Bytecodes::_d2i) ? src->as_double_reg() : src->as_float_reg();
      Address addr = frame_map()->address_for_slot(dst->double_stack_ix());
      Label L;
      // Result must be 0 if value is NaN; test by comparing value to itself.
      __ fcmpu(CCR0, rsrc, rsrc);
      __ li(R0, 0); // 0 in case of NAN
      __ std(R0, addr.disp(), addr.base());
      __ bso(CCR0, L);
      __ fctiwz(rsrc, rsrc); // USE_KILL
      __ stfd(rsrc, addr.disp(), addr.base());
      __ bind(L);
      break;
    }
    case Bytecodes::_d2l:
    case Bytecodes::_f2l: {
      FloatRegister rsrc = (code == Bytecodes::_d2l) ? src->as_double_reg() : src->as_float_reg();
      Address addr = frame_map()->address_for_slot(dst->double_stack_ix());
      Label L;
      // Result must be 0 if value is NaN; test by comparing value to itself.
      __ fcmpu(CCR0, rsrc, rsrc);
      __ li(R0, 0); // 0 in case of NAN
      __ std(R0, addr.disp(), addr.base());
      __ bso(CCR0, L);
      __ fctidz(rsrc, rsrc); // USE_KILL
      __ stfd(rsrc, addr.disp(), addr.base());
      __ bind(L);
      break;
    }

    default: ShouldNotReachHere();
  }
}


void LIR_Assembler::align_call(LIR_Code) {
  // do nothing since all instructions are word aligned on ppc
}


bool LIR_Assembler::emit_trampoline_stub_for_call(address target, Register Rtoc) {
  int start_offset = __ offset();
  // Put the entry point as a constant into the constant pool.
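  // A direct bl only reaches +/-32 MB, which need not cover the call target.
  // The trampoline stub loads the full 64-bit target from the constant pool
  // (TOC) and branches via CTR; the bl is later patched to either the target
  // or the trampoline.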
  const address entry_point_toc_addr = __ address_constant(target, RelocationHolder::none);
  if (entry_point_toc_addr == NULL) {
    bailout("const section overflow");
    return false;
  }
  const int entry_point_toc_offset = __ offset_to_method_toc(entry_point_toc_addr);

  // Emit the trampoline stub which will be related to the branch-and-link below.
  address stub = __ emit_trampoline_stub(entry_point_toc_offset, start_offset, Rtoc);
  if (!stub) {
    bailout("no space for trampoline stub");
    return false;
  }
  return true;
}


void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
  assert(rtype == relocInfo::opt_virtual_call_type || rtype == relocInfo::static_call_type, "unexpected rtype");

  bool success = emit_trampoline_stub_for_call(op->addr());
  if (!success) { return; }

  __ relocate(rtype);
  // Note: At this point we do not have the address of the trampoline
  // stub, and the entry point might be too far away for bl, so __ pc()
  // serves as dummy and the bl will be patched later.
  __ code()->set_insts_mark();
  __ bl(__ pc());
  add_call_info(code_offset(), op->info());
}


void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
  __ calculate_address_from_global_toc(R2_TOC, __ method_toc());

  // Virtual call relocation will point to ic load.
  address virtual_call_meta_addr = __ pc();
  // Load a clear inline cache.
  AddressLiteral empty_ic((address) Universe::non_oop_word());
  bool success = __ load_const_from_method_toc(R19_inline_cache_reg, empty_ic, R2_TOC);
  if (!success) {
    bailout("const section overflow");
    return;
  }
  // Call to fixup routine. Fixup routine uses ScopeDesc info
  // to determine who we intended to call.
  __ relocate(virtual_call_Relocation::spec(virtual_call_meta_addr));

  success = emit_trampoline_stub_for_call(op->addr(), R2_TOC);
  if (!success) { return; }

  // Note: At this point we do not have the address of the trampoline
  // stub, and the entry point might be too far away for bl, so __ pc()
  // serves as dummy and the bl will be patched later.
  __ bl(__ pc());
  add_call_info(code_offset(), op->info());
}


void LIR_Assembler::vtable_call(LIR_OpJavaCall* op) {
  ShouldNotReachHere(); // ic_call is used instead.
}


void LIR_Assembler::explicit_null_check(Register addr, CodeEmitInfo* info) {
  ImplicitNullCheckStub* stub = new ImplicitNullCheckStub(code_offset(), info);
  __ null_check(addr, stub->entry());
  append_code_stub(stub);
}


// Attention: caller must encode oop if needed
int LIR_Assembler::store(LIR_Opr from_reg, Register base, int offset, BasicType type, bool wide, bool unaligned) {
  int store_offset;
  if (!Assembler::is_simm16(offset)) {
    // For offsets larger than a simm16 we setup the offset.
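    // D-form stores encode only a signed 16-bit displacement, so a larger
    // offset is materialized in R0 and the indexed (X-form) variant of the
    // store is used instead.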
    assert(wide && !from_reg->is_same_register(FrameMap::R0_opr), "large offset only supported in special case");
    __ load_const_optimized(R0, offset);
    store_offset = store(from_reg, base, R0, type, wide);
  } else {
    store_offset = code_offset();
    switch (type) {
      case T_BOOLEAN: // fall through
      case T_BYTE  : __ stb(from_reg->as_register(), offset, base); break;
      case T_CHAR  :
      case T_SHORT : __ sth(from_reg->as_register(), offset, base); break;
      case T_INT   : __ stw(from_reg->as_register(), offset, base); break;
      case T_LONG  : __ std(from_reg->as_register_lo(), offset, base); break;
      case T_ADDRESS:
      case T_METADATA: __ std(from_reg->as_register(), offset, base); break;
      case T_ARRAY : // fall through
      case T_OBJECT:
        {
          if (UseCompressedOops && !wide) {
            // Encoding done in caller
            __ stw(from_reg->as_register(), offset, base);
          } else {
            __ std(from_reg->as_register(), offset, base);
          }
          __ verify_oop(from_reg->as_register());
          break;
        }
      case T_FLOAT : __ stfs(from_reg->as_float_reg(), offset, base); break;
      case T_DOUBLE: __ stfd(from_reg->as_double_reg(), offset, base); break;
      default      : ShouldNotReachHere();
    }
  }
  return store_offset;
}


// Attention: caller must encode oop if needed
int LIR_Assembler::store(LIR_Opr from_reg, Register base, Register disp, BasicType type, bool wide) {
  int store_offset = code_offset();
  switch (type) {
    case T_BOOLEAN: // fall through
    case T_BYTE  : __ stbx(from_reg->as_register(), base, disp); break;
    case T_CHAR  :
    case T_SHORT : __ sthx(from_reg->as_register(), base, disp); break;
    case T_INT   : __ stwx(from_reg->as_register(), base, disp); break;
    case T_LONG  :
#ifdef _LP64
      __ stdx(from_reg->as_register_lo(), base, disp);
#else
      Unimplemented();
#endif
      break;
    case T_ADDRESS:
      __ stdx(from_reg->as_register(), base, disp);
      break;
    case T_ARRAY : // fall through
    case T_OBJECT:
      {
        if (UseCompressedOops && !wide) {
          // Encoding done in caller.
          __ stwx(from_reg->as_register(), base, disp);
        } else {
          __ stdx(from_reg->as_register(), base, disp);
        }
        __ verify_oop(from_reg->as_register()); // kills R0
        break;
      }
    case T_FLOAT : __ stfsx(from_reg->as_float_reg(), base, disp); break;
    case T_DOUBLE: __ stfdx(from_reg->as_double_reg(), base, disp); break;
    default      : ShouldNotReachHere();
  }
  return store_offset;
}


int LIR_Assembler::load(Register base, int offset, LIR_Opr to_reg, BasicType type, bool wide, bool unaligned) {
  int load_offset;
  if (!Assembler::is_simm16(offset)) {
    // For offsets larger than a simm16 we setup the offset.
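    // R0 is usable here only because it ends up as the index (RB) operand
    // of the X-form load; in the base (RA) position it would be read as the
    // constant zero.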
    __ load_const_optimized(R0, offset);
    load_offset = load(base, R0, to_reg, type, wide);
  } else {
    load_offset = code_offset();
    switch(type) {
      case T_BOOLEAN: // fall through
      case T_BYTE  :  __ lbz(to_reg->as_register(), offset, base);
                      __ extsb(to_reg->as_register(), to_reg->as_register()); break;
      case T_CHAR  :  __ lhz(to_reg->as_register(), offset, base); break;
      case T_SHORT :  __ lha(to_reg->as_register(), offset, base); break;
      case T_INT   :  __ lwa(to_reg->as_register(), offset, base); break;
      case T_LONG  :  __ ld(to_reg->as_register_lo(), offset, base); break;
      case T_METADATA: __ ld(to_reg->as_register(), offset, base); break;
      case T_ADDRESS:
        if (offset == oopDesc::klass_offset_in_bytes() && UseCompressedClassPointers) {
          __ lwz(to_reg->as_register(), offset, base);
          __ decode_klass_not_null(to_reg->as_register());
        } else {
          __ ld(to_reg->as_register(), offset, base);
        }
        break;
      case T_ARRAY : // fall through
      case T_OBJECT:
        {
          if (UseCompressedOops && !wide) {
            __ lwz(to_reg->as_register(), offset, base);
            __ decode_heap_oop(to_reg->as_register());
          } else {
            __ ld(to_reg->as_register(), offset, base);
          }
          __ verify_oop(to_reg->as_register());
          break;
        }
      case T_FLOAT:  __ lfs(to_reg->as_float_reg(), offset, base); break;
      case T_DOUBLE: __ lfd(to_reg->as_double_reg(), offset, base); break;
      default      : ShouldNotReachHere();
    }
  }
  return load_offset;
}


int LIR_Assembler::load(Register base, Register disp, LIR_Opr to_reg, BasicType type, bool wide) {
  int load_offset = code_offset();
  switch(type) {
    case T_BOOLEAN: // fall through
    case T_BYTE  :  __ lbzx(to_reg->as_register(), base, disp);
                    __ extsb(to_reg->as_register(), to_reg->as_register()); break;
    case T_CHAR  :  __ lhzx(to_reg->as_register(), base, disp); break;
    case T_SHORT :  __ lhax(to_reg->as_register(), base, disp); break;
    case T_INT   :  __ lwax(to_reg->as_register(), base, disp); break;
    case T_ADDRESS: __ ldx(to_reg->as_register(), base, disp); break;
    case T_ARRAY : // fall through
    case T_OBJECT:
      {
        if (UseCompressedOops && !wide) {
          __ lwzx(to_reg->as_register(), base, disp);
          __ decode_heap_oop(to_reg->as_register());
        } else {
          __ ldx(to_reg->as_register(), base, disp);
        }
        __ verify_oop(to_reg->as_register());
        break;
      }
    case T_FLOAT:  __ lfsx(to_reg->as_float_reg() , base, disp); break;
    case T_DOUBLE: __ lfdx(to_reg->as_double_reg(), base, disp); break;
    case T_LONG  :
#ifdef _LP64
      __ ldx(to_reg->as_register_lo(), base, disp);
#else
      Unimplemented();
#endif
      break;
    default      : ShouldNotReachHere();
  }
  return load_offset;
}


void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
  LIR_Const* c = src->as_constant_ptr();
  Register src_reg = R0;
  switch (c->type()) {
    case T_INT:
    case T_FLOAT: {
      int value = c->as_jint_bits();
      __ load_const_optimized(src_reg, value);
      Address addr = frame_map()->address_for_slot(dest->single_stack_ix());
      __ stw(src_reg, addr.disp(), addr.base());
      break;
    }
    case T_ADDRESS: {
      int value = c->as_jint_bits();
      __ load_const_optimized(src_reg, value);
      Address addr = frame_map()->address_for_slot(dest->single_stack_ix());
      __ std(src_reg, addr.disp(), addr.base());
      break;
    }
    case T_OBJECT: {
      jobject2reg(c->as_jobject(), src_reg);
      Address addr = frame_map()->address_for_slot(dest->single_stack_ix());
      __ std(src_reg, addr.disp(), addr.base());
      break;
    }
    case T_LONG:
    case T_DOUBLE: {
      long value = c->as_jlong_bits();
      __ load_const_optimized(src_reg, value);
      Address addr = frame_map()->address_for_double_slot(dest->double_stack_ix());
      __ std(src_reg, addr.disp(), addr.base());
      break;
    }
    default:
      Unimplemented();
  }
}


void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {
  LIR_Const* c = src->as_constant_ptr();
  LIR_Address* addr = dest->as_address_ptr();
  Register base = addr->base()->as_pointer_register();
  LIR_Opr tmp = LIR_OprFact::illegalOpr;
  int offset = -1;
  // Null check for large offsets in LIRGenerator::do_StoreField.
  bool needs_explicit_null_check = !ImplicitNullChecks;

  if (info != NULL && needs_explicit_null_check) {
    explicit_null_check(base, info);
  }

  switch (c->type()) {
    case T_FLOAT: type = T_INT;
    case T_INT:
    case T_ADDRESS: {
      tmp = FrameMap::R0_opr;
      __ load_const_optimized(tmp->as_register(), c->as_jint_bits());
      break;
    }
    case T_DOUBLE: type = T_LONG;
    case T_LONG: {
      tmp = FrameMap::R0_long_opr;
      __ load_const_optimized(tmp->as_register_lo(), c->as_jlong_bits());
      break;
    }
    case T_OBJECT: {
      tmp = FrameMap::R0_opr;
      if (UseCompressedOops && !wide && c->as_jobject() != NULL) {
        AddressLiteral oop_addr = __ constant_oop_address(c->as_jobject());
        __ lis(R0, oop_addr.value() >> 16); // Don't care about sign extend (will use stw).
        __ relocate(oop_addr.rspec(), /*compressed format*/ 1);
        __ ori(R0, R0, oop_addr.value() & 0xffff);
      } else {
        jobject2reg(c->as_jobject(), R0);
      }
      break;
    }
    default:
      Unimplemented();
  }

  // Handle either reg+reg or reg+disp address.
  if (addr->index()->is_valid()) {
    assert(addr->disp() == 0, "must be zero");
    offset = store(tmp, base, addr->index()->as_pointer_register(), type, wide);
  } else {
    assert(Assembler::is_simm16(addr->disp()), "can't handle larger addresses");
    offset = store(tmp, base, addr->disp(), type, wide, false);
  }

  if (info != NULL) {
    assert(offset != -1, "offset should've been set");
    if (!needs_explicit_null_check) {
      add_debug_info_for_null_check(offset, info);
    }
  }
}


void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info) {
  LIR_Const* c = src->as_constant_ptr();
  LIR_Opr to_reg = dest;

  switch (c->type()) {
    case T_INT: {
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ load_const_optimized(dest->as_register(), c->as_jint(), R0);
      break;
    }
    case T_ADDRESS: {
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ load_const_optimized(dest->as_register(), c->as_jint(), R0);  // Yes, as_jint ...
      break;
    }
    case T_LONG: {
      assert(patch_code == lir_patch_none, "no patching handled here");
      __ load_const_optimized(dest->as_register_lo(), c->as_jlong(), R0);
      break;
    }

    case T_OBJECT: {
      if (patch_code == lir_patch_none) {
        jobject2reg(c->as_jobject(), to_reg->as_register());
      } else {
        jobject2reg_with_patching(to_reg->as_register(), info);
      }
      break;
    }

    case T_METADATA:
      {
        if (patch_code == lir_patch_none) {
          metadata2reg(c->as_metadata(), to_reg->as_register());
        } else {
          klass2reg_with_patching(to_reg->as_register(), info);
        }
      }
      break;

    case T_FLOAT:
      {
        if (to_reg->is_single_fpu()) {
          address const_addr = __ float_constant(c->as_jfloat());
          if (const_addr == NULL) {
            bailout("const section overflow");
            break;
          }
          RelocationHolder rspec = internal_word_Relocation::spec(const_addr);
          __ relocate(rspec);
          __ load_const(R0, const_addr);
          __ lfsx(to_reg->as_float_reg(), R0);
        } else {
          assert(to_reg->is_single_cpu(), "Must be a cpu register.");
          __ load_const_optimized(to_reg->as_register(), jint_cast(c->as_jfloat()), R0);
        }
      }
      break;

    case T_DOUBLE:
      {
        if (to_reg->is_double_fpu()) {
          address const_addr = __ double_constant(c->as_jdouble());
          if (const_addr == NULL) {
            bailout("const section overflow");
            break;
          }
          RelocationHolder rspec = internal_word_Relocation::spec(const_addr);
          __ relocate(rspec);
          __ load_const(R0, const_addr);
          __ lfdx(to_reg->as_double_reg(), R0);
        } else {
          assert(to_reg->is_double_cpu(), "Must be a long register.");
          __ load_const_optimized(to_reg->as_register_lo(), jlong_cast(c->as_jdouble()), R0);
        }
      }
      break;

    default:
      ShouldNotReachHere();
  }
}


Address LIR_Assembler::as_Address(LIR_Address* addr) {
  Unimplemented(); return Address();
}


inline RegisterOrConstant index_or_disp(LIR_Address* addr) {
  if (addr->index()->is_illegal()) {
    return (RegisterOrConstant)(addr->disp());
  } else {
    return (RegisterOrConstant)(addr->index()->as_pointer_register());
  }
}


void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
  const Register tmp = R0;
  switch (type) {
    case T_INT:
    case T_FLOAT: {
      Address from = frame_map()->address_for_slot(src->single_stack_ix());
      Address to   = frame_map()->address_for_slot(dest->single_stack_ix());
      __ lwz(tmp, from.disp(), from.base());
      __ stw(tmp, to.disp(), to.base());
      break;
    }
    case T_ADDRESS:
    case T_OBJECT: {
      Address from = frame_map()->address_for_slot(src->single_stack_ix());
      Address to   = frame_map()->address_for_slot(dest->single_stack_ix());
      __ ld(tmp, from.disp(), from.base());
      __ std(tmp, to.disp(), to.base());
      break;
    }
    case T_LONG:
    case T_DOUBLE: {
      Address from = frame_map()->address_for_double_slot(src->double_stack_ix());
      Address to   = frame_map()->address_for_double_slot(dest->double_stack_ix());
      __ ld(tmp, from.disp(), from.base());
      __ std(tmp, to.disp(), to.base());
      break;
    }

    default:
      ShouldNotReachHere();
  }
}


Address LIR_Assembler::as_Address_hi(LIR_Address* addr) {
  Unimplemented(); return Address();
}


Address LIR_Assembler::as_Address_lo(LIR_Address* addr) {
  Unimplemented(); return Address();
}

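// Load a value from memory into a register, handling large displacements,
// field patching, and the implicit vs. explicit null check variants along
// the way (see the corresponding checks on 'info' below).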
void LIR_Assembler::mem2reg(LIR_Opr src_opr, LIR_Opr dest, BasicType type,
                            LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide, bool unaligned) {

  assert(type != T_METADATA, "load of metadata ptr not supported");
  LIR_Address* addr = src_opr->as_address_ptr();
  LIR_Opr to_reg = dest;

  Register src = addr->base()->as_pointer_register();
  Register disp_reg = noreg;
  int disp_value = addr->disp();
  bool needs_patching = (patch_code != lir_patch_none);
  // Null check for large offsets in LIRGenerator::do_LoadField.
  bool needs_explicit_null_check = !os::zero_page_read_protected() || !ImplicitNullChecks;

  if (info != NULL && needs_explicit_null_check) {
    explicit_null_check(src, info);
  }

  if (addr->base()->type() == T_OBJECT) {
    __ verify_oop(src);
  }

  PatchingStub* patch = NULL;
  if (needs_patching) {
    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
    assert(!to_reg->is_double_cpu() ||
           patch_code == lir_patch_none ||
           patch_code == lir_patch_normal, "patching doesn't match register");
  }

  if (addr->index()->is_illegal()) {
    if (!Assembler::is_simm16(disp_value)) {
      if (needs_patching) {
        __ load_const32(R0, 0); // patchable int
      } else {
        __ load_const_optimized(R0, disp_value);
      }
      disp_reg = R0;
    }
  } else {
    disp_reg = addr->index()->as_pointer_register();
    assert(disp_value == 0, "can't handle 3 operand addresses");
  }

  // Remember the offset of the load. The patching_epilog must be done
  // before the call to add_debug_info, otherwise the PcDescs don't get
  // entered in increasing order.
  int offset;

  if (disp_reg == noreg) {
    assert(Assembler::is_simm16(disp_value), "should have set this up");
    offset = load(src, disp_value, to_reg, type, wide, unaligned);
  } else {
    assert(!unaligned, "unexpected");
    offset = load(src, disp_reg, to_reg, type, wide);
  }

  if (patch != NULL) {
    patching_epilog(patch, patch_code, src, info);
  }
  if (info != NULL && !needs_explicit_null_check) {
    add_debug_info_for_null_check(offset, info);
  }
}


void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
  Address addr;
  if (src->is_single_word()) {
    addr = frame_map()->address_for_slot(src->single_stack_ix());
  } else if (src->is_double_word()) {
    addr = frame_map()->address_for_double_slot(src->double_stack_ix());
  }

  bool unaligned = (addr.disp() - STACK_BIAS) % 8 != 0;
  load(addr.base(), addr.disp(), dest, dest->type(), true /*wide*/, unaligned);
}


void LIR_Assembler::reg2stack(LIR_Opr from_reg, LIR_Opr dest, BasicType type, bool pop_fpu_stack) {
  Address addr;
  if (dest->is_single_word()) {
    addr = frame_map()->address_for_slot(dest->single_stack_ix());
  } else if (dest->is_double_word()) {
    addr = frame_map()->address_for_slot(dest->double_stack_ix());
  }
  bool unaligned = (addr.disp() - STACK_BIAS) % 8 != 0;
  store(from_reg, addr.base(), addr.disp(), from_reg->type(), true /*wide*/, unaligned);
}


void LIR_Assembler::reg2reg(LIR_Opr from_reg, LIR_Opr to_reg) {
  if (from_reg->is_float_kind() && to_reg->is_float_kind()) {
    if (from_reg->is_double_fpu()) {
      // double to double moves
      assert(to_reg->is_double_fpu(), "should match");
      __ fmr_if_needed(to_reg->as_double_reg(), from_reg->as_double_reg());
    } else {
      // float to float moves
      assert(to_reg->is_single_fpu(), "should match");
      __ fmr_if_needed(to_reg->as_float_reg(), from_reg->as_float_reg());
    }
  } else if (!from_reg->is_float_kind() && !to_reg->is_float_kind()) {
    if (from_reg->is_double_cpu()) {
      __ mr_if_needed(to_reg->as_pointer_register(), from_reg->as_pointer_register());
    } else if (to_reg->is_double_cpu()) {
      // int to int moves
      __ mr_if_needed(to_reg->as_register_lo(), from_reg->as_register());
    } else {
      // int to int moves
      __ mr_if_needed(to_reg->as_register(), from_reg->as_register());
    }
  } else {
    ShouldNotReachHere();
  }
  if (to_reg->type() == T_OBJECT || to_reg->type() == T_ARRAY) {
    __ verify_oop(to_reg->as_register());
  }
}


void LIR_Assembler::reg2mem(LIR_Opr from_reg, LIR_Opr dest, BasicType type,
                            LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack,
                            bool wide, bool unaligned) {
  assert(type != T_METADATA, "store of metadata ptr not supported");
  LIR_Address* addr = dest->as_address_ptr();

  Register src = addr->base()->as_pointer_register();
  Register disp_reg = noreg;
  int disp_value = addr->disp();
  bool needs_patching = (patch_code != lir_patch_none);
  bool compress_oop = (type == T_ARRAY || type == T_OBJECT) && UseCompressedOops && !wide &&
                      Universe::narrow_oop_mode() != Universe::UnscaledNarrowOop;
  bool load_disp = addr->index()->is_illegal() && !Assembler::is_simm16(disp_value);
  bool use_R29 = compress_oop && load_disp; // Avoid register conflict, also do null check before killing R29.
  // Null check for large offsets in LIRGenerator::do_StoreField.
  bool needs_explicit_null_check = !ImplicitNullChecks || use_R29;

  if (info != NULL && needs_explicit_null_check) {
    explicit_null_check(src, info);
  }

  if (addr->base()->is_oop_register()) {
    __ verify_oop(src);
  }

  PatchingStub* patch = NULL;
  if (needs_patching) {
    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
    assert(!from_reg->is_double_cpu() ||
           patch_code == lir_patch_none ||
           patch_code == lir_patch_normal, "patching doesn't match register");
  }

  if (addr->index()->is_illegal()) {
    if (load_disp) {
      disp_reg = use_R29 ? R29_TOC : R0;
      if (needs_patching) {
        __ load_const32(disp_reg, 0); // patchable int
      } else {
        __ load_const_optimized(disp_reg, disp_value);
      }
    }
  } else {
    disp_reg = addr->index()->as_pointer_register();
    assert(disp_value == 0, "can't handle 3 operand addresses");
  }

  // Remember the offset of the store. The patching_epilog must be done
  // before the call to add_debug_info_for_null_check, otherwise the PcDescs don't get
  // entered in increasing order.
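  // When the oop is stored compressed, encode_heap_oop below needs R0 for
  // the narrow value, so a large displacement cannot live in R0 as well;
  // R29 (the TOC register) is borrowed for it and reinitialized afterwards.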
  int offset;

  if (compress_oop) {
    Register co = __ encode_heap_oop(R0, from_reg->as_register());
    from_reg = FrameMap::as_opr(co);
  }

  if (disp_reg == noreg) {
    assert(Assembler::is_simm16(disp_value), "should have set this up");
    offset = store(from_reg, src, disp_value, type, wide, unaligned);
  } else {
    assert(!unaligned, "unexpected");
    offset = store(from_reg, src, disp_reg, type, wide);
  }

  if (use_R29) {
    __ load_const_optimized(R29_TOC, MacroAssembler::global_toc(), R0); // reinit
  }

  if (patch != NULL) {
    patching_epilog(patch, patch_code, src, info);
  }

  if (info != NULL && !needs_explicit_null_check) {
    add_debug_info_for_null_check(offset, info);
  }
}


void LIR_Assembler::return_op(LIR_Opr result) {
  const Register return_pc    = R31; // Must survive C-call to enable_stack_reserved_zone().
  const Register polling_page = R12;

  // Pop the stack before the safepoint code.
  int frame_size = initial_frame_size_in_bytes();
  if (Assembler::is_simm(frame_size, 16)) {
    __ addi(R1_SP, R1_SP, frame_size);
  } else {
    __ pop_frame();
  }

  if (LoadPollAddressFromThread) {
    // TODO: PPC port __ ld(polling_page, in_bytes(JavaThread::poll_address_offset()), R16_thread);
    Unimplemented();
  } else {
    __ load_const_optimized(polling_page, (long)(address) os::get_polling_page(), R0); // TODO: PPC port: get_standard_polling_page()
  }

  // Restore return pc relative to callers' sp.
  __ ld(return_pc, _abi(lr), R1_SP);
  // Move return pc to LR.
  __ mtlr(return_pc);

  if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
    __ reserved_stack_check(return_pc);
  }

  // We need to mark the code position where the load from the safepoint
  // polling page was emitted as relocInfo::poll_return_type here.
  __ relocate(relocInfo::poll_return_type);
  __ load_from_polling_page(polling_page);

  // Return.
  __ blr();
}


int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {

  if (LoadPollAddressFromThread) {
    const Register poll_addr = tmp->as_register();
    // TODO: PPC port __ ld(poll_addr, in_bytes(JavaThread::poll_address_offset()), R16_thread);
    Unimplemented();
    __ relocate(relocInfo::poll_type); // XXX
    guarantee(info != NULL, "Shouldn't be NULL");
    int offset = __ offset();
    add_debug_info_for_branch(info);
    __ load_from_polling_page(poll_addr);
    return offset;
  }

  __ load_const_optimized(tmp->as_register(), (intptr_t)os::get_polling_page(), R0); // TODO: PPC port: get_standard_polling_page()
  if (info != NULL) {
    add_debug_info_for_branch(info);
  }
  int offset = __ offset();
  __ relocate(relocInfo::poll_type);
  __ load_from_polling_page(tmp->as_register());

  return offset;
}


void LIR_Assembler::emit_static_call_stub() {
  address call_pc = __ pc();
  address stub = __ start_a_stub(static_call_stub_size());
  if (stub == NULL) {
    bailout("static call stub overflow");
    return;
  }

  // For java_to_interp stubs we use R11_scratch1 as scratch register
  // and in call trampoline stubs we use R12_scratch2. This way we
  // can distinguish them (see is_NativeCallTrampolineStub_at()).
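  // The stub's constants are patched after the call site is resolved, which
  // is presumably why load_const_from_method_toc is asked for a fixed-size
  // sequence below: the layout must stay predictable for the patching code.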
  const Register reg_scratch = R11_scratch1;

  // Create a static stub relocation which relates this stub
  // with the call instruction at insts_call_instruction_offset in the
  // instructions code-section.
  int start = __ offset();
  __ relocate(static_stub_Relocation::spec(call_pc));

  // Now, create the stub's code:
  // - load the TOC
  // - load the inline cache oop from the constant pool
  // - load the call target from the constant pool
  // - call
  __ calculate_address_from_global_toc(reg_scratch, __ method_toc());
  AddressLiteral ic = __ allocate_metadata_address((Metadata *)NULL);
  bool success = __ load_const_from_method_toc(R19_inline_cache_reg, ic, reg_scratch, /*fixed_size*/ true);

  if (ReoptimizeCallSequences) {
    __ b64_patchable((address)-1, relocInfo::none);
  } else {
    AddressLiteral a((address)-1);
    success = success && __ load_const_from_method_toc(reg_scratch, a, reg_scratch, /*fixed_size*/ true);
    __ mtctr(reg_scratch);
    __ bctr();
  }
  if (!success) {
    bailout("const section overflow");
    return;
  }

  assert(__ offset() - start <= static_call_stub_size(), "stub too big");
  __ end_a_stub();
}


void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Op2* op) {
  bool unsigned_comp = (condition == lir_cond_belowEqual || condition == lir_cond_aboveEqual);
  if (opr1->is_single_fpu()) {
    __ fcmpu(BOOL_RESULT, opr1->as_float_reg(), opr2->as_float_reg());
  } else if (opr1->is_double_fpu()) {
    __ fcmpu(BOOL_RESULT, opr1->as_double_reg(), opr2->as_double_reg());
  } else if (opr1->is_single_cpu()) {
    if (opr2->is_constant()) {
      switch (opr2->as_constant_ptr()->type()) {
        case T_INT:
          {
            jint con = opr2->as_constant_ptr()->as_jint();
            if (unsigned_comp) {
              if (Assembler::is_uimm(con, 16)) {
                __ cmplwi(BOOL_RESULT, opr1->as_register(), con);
              } else {
                __ load_const_optimized(R0, con);
                __ cmplw(BOOL_RESULT, opr1->as_register(), R0);
              }
            } else {
              if (Assembler::is_simm(con, 16)) {
                __ cmpwi(BOOL_RESULT, opr1->as_register(), con);
              } else {
                __ load_const_optimized(R0, con);
                __ cmpw(BOOL_RESULT, opr1->as_register(), R0);
              }
            }
          }
          break;

        case T_OBJECT:
          // There are only equal/notequal comparisons on objects.
          {
            assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "oops");
            jobject con = opr2->as_constant_ptr()->as_jobject();
            if (con == NULL) {
              __ cmpdi(BOOL_RESULT, opr1->as_register(), 0);
            } else {
              jobject2reg(con, R0);
              __ cmpd(BOOL_RESULT, opr1->as_register(), R0);
            }
          }
          break;

        default:
          ShouldNotReachHere();
          break;
      }
    } else {
      if (opr2->is_address()) {
        DEBUG_ONLY( Unimplemented(); ) // Seems to be unused at the moment.
        LIR_Address *addr = opr2->as_address_ptr();
        BasicType type = addr->type();
        if (type == T_OBJECT) { __ ld(R0, index_or_disp(addr), addr->base()->as_register()); }
        else                  { __ lwa(R0, index_or_disp(addr), addr->base()->as_register()); }
        __ cmpd(BOOL_RESULT, opr1->as_register(), R0);
      } else {
        if (unsigned_comp) {
          __ cmplw(BOOL_RESULT, opr1->as_register(), opr2->as_register());
        } else {
          __ cmpw(BOOL_RESULT, opr1->as_register(), opr2->as_register());
        }
      }
    }
  } else if (opr1->is_double_cpu()) {
    if (opr2->is_constant()) {
      jlong con = opr2->as_constant_ptr()->as_jlong();
      if (unsigned_comp) {
        if (Assembler::is_uimm(con, 16)) {
          __ cmpldi(BOOL_RESULT, opr1->as_register_lo(), con);
        } else {
          __ load_const_optimized(R0, con);
          __ cmpld(BOOL_RESULT, opr1->as_register_lo(), R0);
        }
      } else {
        if (Assembler::is_simm(con, 16)) {
          __ cmpdi(BOOL_RESULT, opr1->as_register_lo(), con);
        } else {
          __ load_const_optimized(R0, con);
          __ cmpd(BOOL_RESULT, opr1->as_register_lo(), R0);
        }
      }
    } else if (opr2->is_register()) {
      if (unsigned_comp) {
        __ cmpld(BOOL_RESULT, opr1->as_register_lo(), opr2->as_register_lo());
      } else {
        __ cmpd(BOOL_RESULT, opr1->as_register_lo(), opr2->as_register_lo());
      }
    } else {
      ShouldNotReachHere();
    }
  } else if (opr1->is_address()) {
    DEBUG_ONLY( Unimplemented(); ) // Seems to be unused at the moment.
    LIR_Address * addr = opr1->as_address_ptr();
    BasicType type = addr->type();
    assert (opr2->is_constant(), "Checking");
    if (type == T_OBJECT) { __ ld(R0, index_or_disp(addr), addr->base()->as_register()); }
    else                  { __ lwa(R0, index_or_disp(addr), addr->base()->as_register()); }
    __ cmpdi(BOOL_RESULT, R0, opr2->as_constant_ptr()->as_jint());
  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dst, LIR_Op2* op) {
  const Register Rdst = dst->as_register();
  Label done;
  if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) {
    bool is_unordered_less = (code == lir_ucmp_fd2i);
    if (left->is_single_fpu()) {
      __ fcmpu(CCR0, left->as_float_reg(), right->as_float_reg());
    } else if (left->is_double_fpu()) {
      __ fcmpu(CCR0, left->as_double_reg(), right->as_double_reg());
    } else {
      ShouldNotReachHere();
    }
    __ li(Rdst, is_unordered_less ? -1 : 1);
    __ bso(CCR0, done);
  } else if (code == lir_cmp_l2i) {
    __ cmpd(CCR0, left->as_register_lo(), right->as_register_lo());
  } else {
    ShouldNotReachHere();
  }
  __ mfcr(R0); // set bit 32..33 as follows: <: 0b10, =: 0b00, >: 0b01
  __ srwi(Rdst, R0, 30);
  __ srawi(R0, R0, 31);
  __ orr(Rdst, R0, Rdst); // set result as follows: <: -1, =: 0, >: 1
  __ bind(done);
}


inline void load_to_reg(LIR_Assembler *lasm, LIR_Opr src, LIR_Opr dst) {
  if (src->is_constant()) {
    lasm->const2reg(src, dst, lir_patch_none, NULL);
  } else if (src->is_register()) {
    lasm->reg2reg(src, dst);
  } else if (src->is_stack()) {
    lasm->stack2reg(src, dst, dst->type());
  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result, BasicType type) {
  if (opr1->is_equal(opr2) || opr1->is_same_register(opr2)) {
    load_to_reg(this, opr1, result); // Condition doesn't matter.
    return;
  }

  bool positive = false;
  Assembler::Condition cond = Assembler::equal;
  switch (condition) {
    case lir_cond_equal:        positive = true ; cond = Assembler::equal  ; break;
    case lir_cond_notEqual:     positive = false; cond = Assembler::equal  ; break;
    case lir_cond_less:         positive = true ; cond = Assembler::less   ; break;
    case lir_cond_belowEqual:
    case lir_cond_lessEqual:    positive = false; cond = Assembler::greater; break;
    case lir_cond_greater:      positive = true ; cond = Assembler::greater; break;
    case lir_cond_aboveEqual:
    case lir_cond_greaterEqual: positive = false; cond = Assembler::less   ; break;
    default:                    ShouldNotReachHere();
  }

  // Try to use isel on >=Power7.
  if (VM_Version::has_isel() && result->is_cpu_register()) {
    bool o1_is_reg = opr1->is_cpu_register(), o2_is_reg = opr2->is_cpu_register();
    const Register result_reg = result->is_single_cpu() ? result->as_register() : result->as_register_lo();

    // We can use result_reg to load one operand if not already in register.
    Register first  = o1_is_reg ? (opr1->is_single_cpu() ? opr1->as_register() : opr1->as_register_lo()) : result_reg,
             second = o2_is_reg ? (opr2->is_single_cpu() ? opr2->as_register() : opr2->as_register_lo()) : result_reg;

    if (first != second) {
      if (!o1_is_reg) {
        load_to_reg(this, opr1, result);
      }

      if (!o2_is_reg) {
        load_to_reg(this, opr2, result);
      }

      __ isel(result_reg, BOOL_RESULT, cond, !positive, first, second);
      return;
    }
  } // isel

  load_to_reg(this, opr1, result);

  Label skip;
  int bo = positive ? Assembler::bcondCRbiIs1 : Assembler::bcondCRbiIs0;
  int bi = Assembler::bi0(BOOL_RESULT, cond);
  __ bc(bo, bi, skip);

  load_to_reg(this, opr2, result);
  __ bind(skip);
}


void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest,
                             CodeEmitInfo* info, bool pop_fpu_stack) {
  assert(info == NULL, "unused on this code path");
  assert(left->is_register(), "wrong items state");
  assert(dest->is_register(), "wrong items state");

  if (right->is_register()) {
    if (dest->is_float_kind()) {

      FloatRegister lreg, rreg, res;
      if (right->is_single_fpu()) {
        lreg = left->as_float_reg();
        rreg = right->as_float_reg();
        res  = dest->as_float_reg();
        switch (code) {
          case lir_add: __ fadds(res, lreg, rreg); break;
          case lir_sub: __ fsubs(res, lreg, rreg); break;
          case lir_mul: // fall through
          case lir_mul_strictfp: __ fmuls(res, lreg, rreg); break;
          case lir_div: // fall through
          case lir_div_strictfp: __ fdivs(res, lreg, rreg); break;
          default: ShouldNotReachHere();
        }
      } else {
        lreg = left->as_double_reg();
        rreg = right->as_double_reg();
        res  = dest->as_double_reg();
        switch (code) {
          case lir_add: __ fadd(res, lreg, rreg); break;
          case lir_sub: __ fsub(res, lreg, rreg); break;
          case lir_mul: // fall through
          case lir_mul_strictfp: __ fmul(res, lreg, rreg); break;
          case lir_div: // fall through
          case lir_div_strictfp: __ fdiv(res, lreg, rreg); break;
          default: ShouldNotReachHere();
        }
      }

    } else if (dest->is_double_cpu()) {

      Register dst_lo = dest->as_register_lo();
      Register op1_lo = left->as_pointer_register();
      Register op2_lo = right->as_pointer_register();

      switch (code) {
        case lir_add: __ add(dst_lo, op1_lo, op2_lo); break;
        case lir_sub: __ sub(dst_lo, op1_lo, op2_lo); break;
        case lir_mul: __ mulld(dst_lo, op1_lo, op2_lo); break;
        default: ShouldNotReachHere();
      }
    } else {
      assert (right->is_single_cpu(), "Just Checking");

      Register lreg = left->as_register();
      Register res  = dest->as_register();
      Register rreg = right->as_register();
      switch (code) {
        case lir_add: __ add (res, lreg, rreg); break;
        case lir_sub: __ sub (res, lreg, rreg); break;
        case lir_mul: __ mullw(res, lreg, rreg); break;
        default: ShouldNotReachHere();
      }
    }
  } else {
    assert (right->is_constant(), "must be constant");

    if (dest->is_single_cpu()) {
      Register lreg = left->as_register();
      Register res  = dest->as_register();
      int simm16 = right->as_constant_ptr()->as_jint();

      switch (code) {
        case lir_sub: assert(Assembler::is_simm16(-simm16), "cannot encode"); // see do_ArithmeticOp_Int
                      simm16 = -simm16;
        case lir_add: if (res == lreg && simm16 == 0) break;
                      __ addi(res, lreg, simm16); break;
        case lir_mul: if (res == lreg && simm16 == 1) break;
                      __ mulli(res, lreg, simm16); break;
        default: ShouldNotReachHere();
      }
    } else {
      Register lreg = left->as_pointer_register();
      Register res  = dest->as_register_lo();
      long con = right->as_constant_ptr()->as_jlong();
      assert(Assembler::is_simm16(con), "must be simm16");

      switch (code) {
        case lir_sub: assert(Assembler::is_simm16(-con), "cannot encode"); // see do_ArithmeticOp_Long
                      con = -con;
        case lir_add: if (res == lreg && con == 0) break;
                      __ addi(res, lreg, (int)con); break;
addi(res, lreg, (int)con); break; 1665 case lir_mul: if (res == lreg && con == 1) break; 1666 __ mulli(res, lreg, (int)con); break; 1667 default: ShouldNotReachHere(); 1668 } 1669 } 1670 } 1671} 1672 1673 1674void LIR_Assembler::fpop() { 1675 Unimplemented(); 1676 // do nothing 1677} 1678 1679 1680void LIR_Assembler::intrinsic_op(LIR_Code code, LIR_Opr value, LIR_Opr thread, LIR_Opr dest, LIR_Op* op) { 1681 switch (code) { 1682 case lir_sqrt: { 1683 __ fsqrt(dest->as_double_reg(), value->as_double_reg()); 1684 break; 1685 } 1686 case lir_abs: { 1687 __ fabs(dest->as_double_reg(), value->as_double_reg()); 1688 break; 1689 } 1690 default: { 1691 ShouldNotReachHere(); 1692 break; 1693 } 1694 } 1695} 1696 1697 1698void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest) { 1699 if (right->is_constant()) { // see do_LogicOp 1700 long uimm; 1701 Register d, l; 1702 if (dest->is_single_cpu()) { 1703 uimm = right->as_constant_ptr()->as_jint(); 1704 d = dest->as_register(); 1705 l = left->as_register(); 1706 } else { 1707 uimm = right->as_constant_ptr()->as_jlong(); 1708 d = dest->as_register_lo(); 1709 l = left->as_register_lo(); 1710 } 1711 long uimms = (unsigned long)uimm >> 16, 1712 uimmss = (unsigned long)uimm >> 32; 1713 1714 switch (code) { 1715 case lir_logic_and: 1716 if (uimmss != 0 || (uimms != 0 && (uimm & 0xFFFF) != 0) || is_power_of_2_long(uimm)) { 1717 __ andi(d, l, uimm); // special cases 1718 } else if (uimms != 0) { __ andis_(d, l, uimms); } 1719 else { __ andi_(d, l, uimm); } 1720 break; 1721 1722 case lir_logic_or: 1723 if (uimms != 0) { assert((uimm & 0xFFFF) == 0, "sanity"); __ oris(d, l, uimms); } 1724 else { __ ori(d, l, uimm); } 1725 break; 1726 1727 case lir_logic_xor: 1728 if (uimm == -1) { __ nand(d, l, l); } // special case 1729 else if (uimms != 0) { assert((uimm & 0xFFFF) == 0, "sanity"); __ xoris(d, l, uimms); } 1730 else { __ xori(d, l, uimm); } 1731 break; 1732 1733 default: ShouldNotReachHere(); 1734 } 1735 } else { 1736 assert(right->is_register(), "right should be in register"); 1737 1738 if (dest->is_single_cpu()) { 1739 switch (code) { 1740 case lir_logic_and: __ andr(dest->as_register(), left->as_register(), right->as_register()); break; 1741 case lir_logic_or: __ orr (dest->as_register(), left->as_register(), right->as_register()); break; 1742 case lir_logic_xor: __ xorr(dest->as_register(), left->as_register(), right->as_register()); break; 1743 default: ShouldNotReachHere(); 1744 } 1745 } else { 1746 Register l = (left->is_single_cpu() && left->is_oop_register()) ? left->as_register() : 1747 left->as_register_lo(); 1748 Register r = (right->is_single_cpu() && right->is_oop_register()) ? 
right->as_register() : 1749 right->as_register_lo(); 1750 1751 switch (code) { 1752 case lir_logic_and: __ andr(dest->as_register_lo(), l, r); break; 1753 case lir_logic_or: __ orr (dest->as_register_lo(), l, r); break; 1754 case lir_logic_xor: __ xorr(dest->as_register_lo(), l, r); break; 1755 default: ShouldNotReachHere(); 1756 } 1757 } 1758 } 1759} 1760 1761 1762int LIR_Assembler::shift_amount(BasicType t) { 1763 int elem_size = type2aelembytes(t); 1764 switch (elem_size) { 1765 case 1 : return 0; 1766 case 2 : return 1; 1767 case 4 : return 2; 1768 case 8 : return 3; 1769 } 1770 ShouldNotReachHere(); 1771 return -1; 1772} 1773 1774 1775void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) { 1776 info->add_register_oop(exceptionOop); 1777 1778 // Reuse the debug info from the safepoint poll for the throw op itself. 1779 address pc_for_athrow = __ pc(); 1780 int pc_for_athrow_offset = __ offset(); 1781 //RelocationHolder rspec = internal_word_Relocation::spec(pc_for_athrow); 1782 //__ relocate(rspec); 1783 //__ load_const(exceptionPC->as_register(), pc_for_athrow, R0); 1784 __ calculate_address_from_global_toc(exceptionPC->as_register(), pc_for_athrow, true, true, /*add_relocation*/ true); 1785 add_call_info(pc_for_athrow_offset, info); // for exception handler 1786 1787 address stub = Runtime1::entry_for(compilation()->has_fpu_code() ? Runtime1::handle_exception_id 1788 : Runtime1::handle_exception_nofpu_id); 1789 //__ load_const_optimized(R0, stub); 1790 __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub)); 1791 __ mtctr(R0); 1792 __ bctr(); 1793} 1794 1795 1796void LIR_Assembler::unwind_op(LIR_Opr exceptionOop) { 1797 // Note: Not used with EnableDebuggingOnDemand. 1798 assert(exceptionOop->as_register() == R3, "should match"); 1799 __ b(_unwind_handler_entry); 1800} 1801 1802 1803void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) { 1804 Register src = op->src()->as_register(); 1805 Register dst = op->dst()->as_register(); 1806 Register src_pos = op->src_pos()->as_register(); 1807 Register dst_pos = op->dst_pos()->as_register(); 1808 Register length = op->length()->as_register(); 1809 Register tmp = op->tmp()->as_register(); 1810 Register tmp2 = R0; 1811 1812 int flags = op->flags(); 1813 ciArrayKlass* default_type = op->expected_type(); 1814 BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL; 1815 if (basic_type == T_ARRAY) basic_type = T_OBJECT; 1816 1817 // Set up the arraycopy stub information. 1818 ArrayCopyStub* stub = op->stub(); 1819 const int frame_resize = frame::abi_reg_args_size - sizeof(frame::jit_abi); // C calls need larger frame. 1820 1821 // Always do stub if no type information is available. It's ok if 1822 // the known type isn't loaded since the code sanity checks 1823 // in debug mode and the type isn't required when we know the exact type 1824 // also check that the type is an array type. 1825 if (op->expected_type() == NULL) { 1826 assert(src->is_nonvolatile() && src_pos->is_nonvolatile() && dst->is_nonvolatile() && dst_pos->is_nonvolatile() && 1827 length->is_nonvolatile(), "must preserve"); 1828 // 3 parms are int. Convert to long. 1829 __ mr(R3_ARG1, src); 1830 __ extsw(R4_ARG2, src_pos); 1831 __ mr(R5_ARG3, dst); 1832 __ extsw(R6_ARG4, dst_pos); 1833 __ extsw(R7_ARG5, length); 1834 address copyfunc_addr = StubRoutines::generic_arraycopy(); 1835 1836 if (copyfunc_addr == NULL) { // Use C version if stub was not generated. 
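      // Note: Runtime1::arraycopy is an ordinary C function, so it must be
      // called with the enlarged C ABI frame (frame_resize, computed above);
      // the generated stub in the else branch runs on the unextended JIT frame.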
      address entry = CAST_FROM_FN_PTR(address, Runtime1::arraycopy);
      __ call_c_with_frame_resize(entry, frame_resize);
    } else {
#ifndef PRODUCT
      if (PrintC1Statistics) {
        address counter = (address)&Runtime1::_generic_arraycopystub_cnt;
        int simm16_offs = __ load_const_optimized(tmp, counter, tmp2, true);
        __ lwz(R11_scratch1, simm16_offs, tmp);
        __ addi(R11_scratch1, R11_scratch1, 1);
        __ stw(R11_scratch1, simm16_offs, tmp);
      }
#endif
      __ call_c_with_frame_resize(copyfunc_addr, /*stub does not need resized frame*/ 0);

      __ nand(tmp, R3_RET, R3_RET);
      __ subf(length, tmp, length);
      __ add(src_pos, tmp, src_pos);
      __ add(dst_pos, tmp, dst_pos);
    }

    __ cmpwi(CCR0, R3_RET, 0);
    __ bc_far_optimized(Assembler::bcondCRbiIs1, __ bi0(CCR0, Assembler::less), *stub->entry());
    __ bind(*stub->continuation());
    return;
  }

  assert(default_type != NULL && default_type->is_array_klass(), "must be true at this point");
  Label cont, slow, copyfunc;

  bool simple_check_flag_set = flags & (LIR_OpArrayCopy::src_null_check |
                                        LIR_OpArrayCopy::dst_null_check |
                                        LIR_OpArrayCopy::src_pos_positive_check |
                                        LIR_OpArrayCopy::dst_pos_positive_check |
                                        LIR_OpArrayCopy::length_positive_check);

  // Use only one conditional branch for simple checks.
  if (simple_check_flag_set) {
    ConditionRegister combined_check = CCR1, tmp_check = CCR1;

    // Make sure src and dst are non-null.
    if (flags & LIR_OpArrayCopy::src_null_check) {
      __ cmpdi(combined_check, src, 0);
      tmp_check = CCR0;
    }

    if (flags & LIR_OpArrayCopy::dst_null_check) {
      __ cmpdi(tmp_check, dst, 0);
      if (tmp_check != combined_check) {
        __ cror(combined_check, Assembler::equal, tmp_check, Assembler::equal);
      }
      tmp_check = CCR0;
    }

    // Clear combined_check.eq if not already used.
    if (tmp_check == combined_check) {
      __ crandc(combined_check, Assembler::equal, combined_check, Assembler::equal);
      tmp_check = CCR0;
    }

    if (flags & LIR_OpArrayCopy::src_pos_positive_check) {
      // Test src_pos register.
      __ cmpwi(tmp_check, src_pos, 0);
      __ cror(combined_check, Assembler::equal, tmp_check, Assembler::less);
    }

    if (flags & LIR_OpArrayCopy::dst_pos_positive_check) {
      // Test dst_pos register.
      __ cmpwi(tmp_check, dst_pos, 0);
      __ cror(combined_check, Assembler::equal, tmp_check, Assembler::less);
    }

    if (flags & LIR_OpArrayCopy::length_positive_check) {
      // Make sure length isn't negative.
      __ cmpwi(tmp_check, length, 0);
      __ cror(combined_check, Assembler::equal, tmp_check, Assembler::less);
    }

    __ beq(combined_check, slow);
  }

  // If the compiler was not able to prove that the exact type of the source
  // or the destination of the arraycopy is an array type, check at runtime
  // whether the source or the destination is an instance type.
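  // Array klasses have a negative layout helper, while instance klasses have
  // a value >= Klass::_lh_neutral_value (see Klass::layout_helper), so one
  // signed compare per operand routes any non-array to the slow path.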
  if (flags & LIR_OpArrayCopy::type_check) {
    if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
      __ load_klass(tmp, dst);
      __ lwz(tmp2, in_bytes(Klass::layout_helper_offset()), tmp);
      __ cmpwi(CCR0, tmp2, Klass::_lh_neutral_value);
      __ bge(CCR0, slow);
    }

    if (!(flags & LIR_OpArrayCopy::src_objarray)) {
      __ load_klass(tmp, src);
      __ lwz(tmp2, in_bytes(Klass::layout_helper_offset()), tmp);
      __ cmpwi(CCR0, tmp2, Klass::_lh_neutral_value);
      __ bge(CCR0, slow);
    }
  }

  // The higher 32 bits must be zero.
  __ extsw(length, length);

  __ extsw(src_pos, src_pos);
  if (flags & LIR_OpArrayCopy::src_range_check) {
    __ lwz(tmp2, arrayOopDesc::length_offset_in_bytes(), src);
    __ add(tmp, length, src_pos);
    __ cmpld(CCR0, tmp2, tmp);
    __ ble(CCR0, slow);
  }

  __ extsw(dst_pos, dst_pos);
  if (flags & LIR_OpArrayCopy::dst_range_check) {
    __ lwz(tmp2, arrayOopDesc::length_offset_in_bytes(), dst);
    __ add(tmp, length, dst_pos);
    __ cmpld(CCR0, tmp2, tmp);
    __ ble(CCR0, slow);
  }

  int shift = shift_amount(basic_type);

  if (!(flags & LIR_OpArrayCopy::type_check)) {
    __ b(cont);
  } else {
    // We don't know the array types are compatible.
    if (basic_type != T_OBJECT) {
      // Simple test for basic type arrays.
      if (UseCompressedClassPointers) {
        // We don't need decode because we just need to compare.
        __ lwz(tmp, oopDesc::klass_offset_in_bytes(), src);
        __ lwz(tmp2, oopDesc::klass_offset_in_bytes(), dst);
        __ cmpw(CCR0, tmp, tmp2);
      } else {
        __ ld(tmp, oopDesc::klass_offset_in_bytes(), src);
        __ ld(tmp2, oopDesc::klass_offset_in_bytes(), dst);
        __ cmpd(CCR0, tmp, tmp2);
      }
      __ beq(CCR0, cont);
    } else {
      // For object arrays, if src is a sub class of dst then we can
      // safely do the copy.
      address copyfunc_addr = StubRoutines::checkcast_arraycopy();

      const Register sub_klass = R5, super_klass = R4; // like CheckCast/InstanceOf
      assert_different_registers(tmp, tmp2, sub_klass, super_klass);

      __ load_klass(sub_klass, src);
      __ load_klass(super_klass, dst);

      __ check_klass_subtype_fast_path(sub_klass, super_klass, tmp, tmp2,
                                       &cont, copyfunc_addr != NULL ? &copyfunc : &slow, NULL);

      address slow_stc = Runtime1::entry_for(Runtime1::slow_subtype_check_id);
      //__ load_const_optimized(tmp, slow_stc, tmp2);
      __ calculate_address_from_global_toc(tmp, slow_stc, true, true, false);
      __ mtctr(tmp);
      __ bctrl(); // sets CR0
      __ beq(CCR0, cont);

      if (copyfunc_addr != NULL) { // Use stub if available.
        __ bind(copyfunc);
        // Src is not a sub class of dst so we have to do a
        // per-element check.
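        // Stub contract (see StubRoutines::checkcast_arraycopy): it returns 0
        // when the whole copy succeeded, otherwise the ones' complement of the
        // number of elements already copied. The nand below recovers that
        // count so only the remaining tail is redone via the slow path.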
        int mask = LIR_OpArrayCopy::src_objarray|LIR_OpArrayCopy::dst_objarray;
        if ((flags & mask) != mask) {
          assert(flags & mask, "one of the two should be known to be an object array");

          if (!(flags & LIR_OpArrayCopy::src_objarray)) {
            __ load_klass(tmp, src);
          } else if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
            __ load_klass(tmp, dst);
          }

          __ lwz(tmp2, in_bytes(Klass::layout_helper_offset()), tmp);

          jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
          __ load_const_optimized(tmp, objArray_lh);
          __ cmpw(CCR0, tmp, tmp2);
          __ bne(CCR0, slow);
        }

        Register src_ptr = R3_ARG1;
        Register dst_ptr = R4_ARG2;
        Register len     = R5_ARG3;
        Register chk_off = R6_ARG4;
        Register super_k = R7_ARG5;

        __ addi(src_ptr, src, arrayOopDesc::base_offset_in_bytes(basic_type));
        __ addi(dst_ptr, dst, arrayOopDesc::base_offset_in_bytes(basic_type));
        if (shift == 0) {
          __ add(src_ptr, src_pos, src_ptr);
          __ add(dst_ptr, dst_pos, dst_ptr);
        } else {
          __ sldi(tmp, src_pos, shift);
          __ sldi(tmp2, dst_pos, shift);
          __ add(src_ptr, tmp, src_ptr);
          __ add(dst_ptr, tmp2, dst_ptr);
        }

        __ load_klass(tmp, dst);
        __ mr(len, length);

        int ek_offset = in_bytes(ObjArrayKlass::element_klass_offset());
        __ ld(super_k, ek_offset, tmp);

        int sco_offset = in_bytes(Klass::super_check_offset_offset());
        __ lwz(chk_off, sco_offset, super_k);

        __ call_c_with_frame_resize(copyfunc_addr, /*stub does not need resized frame*/ 0);

#ifndef PRODUCT
        if (PrintC1Statistics) {
          Label failed;
          __ cmpwi(CCR0, R3_RET, 0);
          __ bne(CCR0, failed);
          address counter = (address)&Runtime1::_arraycopy_checkcast_cnt;
          int simm16_offs = __ load_const_optimized(tmp, counter, tmp2, true);
          __ lwz(R11_scratch1, simm16_offs, tmp);
          __ addi(R11_scratch1, R11_scratch1, 1);
          __ stw(R11_scratch1, simm16_offs, tmp);
          __ bind(failed);
        }
#endif

        __ nand(tmp, R3_RET, R3_RET);
        __ cmpwi(CCR0, R3_RET, 0);
        __ beq(CCR0, *stub->continuation());

#ifndef PRODUCT
        if (PrintC1Statistics) {
          address counter = (address)&Runtime1::_arraycopy_checkcast_attempt_cnt;
          int simm16_offs = __ load_const_optimized(tmp, counter, tmp2, true);
          __ lwz(R11_scratch1, simm16_offs, tmp);
          __ addi(R11_scratch1, R11_scratch1, 1);
          __ stw(R11_scratch1, simm16_offs, tmp);
        }
#endif

        __ subf(length, tmp, length);
        __ add(src_pos, tmp, src_pos);
        __ add(dst_pos, tmp, dst_pos);
      }
    }
  }
  __ bind(slow);
  __ b(*stub->entry());
  __ bind(cont);

#ifdef ASSERT
  if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) {
    // Sanity check the known type with the incoming class. For the
    // primitive case the types must match exactly with src.klass and
    // dst.klass each exactly matching the default type. For the
    // object array case, if no type check is needed then either the
    // dst type is exactly the expected type and the src type is a
    // subtype which we can't check or src is the same array as dst
    // but not necessarily exactly of type default_type.
    Label known_ok, halt;
    metadata2reg(op->expected_type()->constant_encoding(), tmp);
    if (UseCompressedClassPointers) {
      // Tmp holds the default type. It currently comes uncompressed after the
      // load of a constant, so encode it.
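      // Comparing the compressed forms is sufficient here: the narrow-klass
      // encoding is injective, so equal narrow values imply equal Klass*.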
      __ encode_klass_not_null(tmp);
      // Load the raw value of the dst klass, since we will be comparing
      // uncompressed values directly.
      __ lwz(tmp2, oopDesc::klass_offset_in_bytes(), dst);
      __ cmpw(CCR0, tmp, tmp2);
      if (basic_type != T_OBJECT) {
        __ bne(CCR0, halt);
        // Load the raw value of the src klass.
        __ lwz(tmp2, oopDesc::klass_offset_in_bytes(), src);
        __ cmpw(CCR0, tmp, tmp2);
        __ beq(CCR0, known_ok);
      } else {
        __ beq(CCR0, known_ok);
        __ cmpw(CCR0, src, dst);
        __ beq(CCR0, known_ok);
      }
    } else {
      __ ld(tmp2, oopDesc::klass_offset_in_bytes(), dst);
      __ cmpd(CCR0, tmp, tmp2);
      if (basic_type != T_OBJECT) {
        __ bne(CCR0, halt);
        // Load the raw value of the src klass.
        __ ld(tmp2, oopDesc::klass_offset_in_bytes(), src);
        __ cmpd(CCR0, tmp, tmp2);
        __ beq(CCR0, known_ok);
      } else {
        __ beq(CCR0, known_ok);
        __ cmpd(CCR0, src, dst);
        __ beq(CCR0, known_ok);
      }
    }
    __ bind(halt);
    __ stop("incorrect type information in arraycopy");
    __ bind(known_ok);
  }
#endif

#ifndef PRODUCT
  if (PrintC1Statistics) {
    address counter = Runtime1::arraycopy_count_address(basic_type);
    int simm16_offs = __ load_const_optimized(tmp, counter, tmp2, true);
    __ lwz(R11_scratch1, simm16_offs, tmp);
    __ addi(R11_scratch1, R11_scratch1, 1);
    __ stw(R11_scratch1, simm16_offs, tmp);
  }
#endif

  Register src_ptr = R3_ARG1;
  Register dst_ptr = R4_ARG2;
  Register len     = R5_ARG3;

  __ addi(src_ptr, src, arrayOopDesc::base_offset_in_bytes(basic_type));
  __ addi(dst_ptr, dst, arrayOopDesc::base_offset_in_bytes(basic_type));
  if (shift == 0) {
    __ add(src_ptr, src_pos, src_ptr);
    __ add(dst_ptr, dst_pos, dst_ptr);
  } else {
    __ sldi(tmp, src_pos, shift);
    __ sldi(tmp2, dst_pos, shift);
    __ add(src_ptr, tmp, src_ptr);
    __ add(dst_ptr, tmp2, dst_ptr);
  }

  bool disjoint = (flags & LIR_OpArrayCopy::overlapping) == 0;
  bool aligned = (flags & LIR_OpArrayCopy::unaligned) == 0;
  const char *name;
  address entry = StubRoutines::select_arraycopy_function(basic_type, aligned, disjoint, name, false);

  // Arraycopy stubs take a length in number of elements, so don't scale it.
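  // (select_arraycopy_function above chose a disjoint or conjoint flavor from
  // the overlapping flag; src_ptr/dst_ptr are already element-scaled, only
  // the length remains in elements.)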
  __ mr(len, length);
  __ call_c_with_frame_resize(entry, /*stub does not need resized frame*/ 0);

  __ bind(*stub->continuation());
}


void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr dest, LIR_Opr tmp) {
  if (dest->is_single_cpu()) {
    __ rldicl(tmp->as_register(), count->as_register(), 0, 64-5);
#ifdef _LP64
    if (left->type() == T_OBJECT) {
      switch (code) {
        case lir_shl:  __ sld(dest->as_register(), left->as_register(), tmp->as_register()); break;
        case lir_shr:  __ srad(dest->as_register(), left->as_register(), tmp->as_register()); break;
        case lir_ushr: __ srd(dest->as_register(), left->as_register(), tmp->as_register()); break;
        default: ShouldNotReachHere();
      }
    } else
#endif
    switch (code) {
      case lir_shl:  __ slw(dest->as_register(), left->as_register(), tmp->as_register()); break;
      case lir_shr:  __ sraw(dest->as_register(), left->as_register(), tmp->as_register()); break;
      case lir_ushr: __ srw(dest->as_register(), left->as_register(), tmp->as_register()); break;
      default: ShouldNotReachHere();
    }
  } else {
    __ rldicl(tmp->as_register(), count->as_register(), 0, 64-6);
    switch (code) {
      case lir_shl:  __ sld(dest->as_register_lo(), left->as_register_lo(), tmp->as_register()); break;
      case lir_shr:  __ srad(dest->as_register_lo(), left->as_register_lo(), tmp->as_register()); break;
      case lir_ushr: __ srd(dest->as_register_lo(), left->as_register_lo(), tmp->as_register()); break;
      default: ShouldNotReachHere();
    }
  }
}


void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr dest) {
#ifdef _LP64
  if (left->type() == T_OBJECT) {
    count = count & 63; // Shouldn't shift by more than sizeof(intptr_t).
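    // A shift count of zero still needs a move, since dest and left may
    // differ; mr_if_needed elides the mr when they are the same register.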
    if (count == 0) { __ mr_if_needed(dest->as_register_lo(), left->as_register()); }
    else {
      switch (code) {
        case lir_shl:  __ sldi(dest->as_register_lo(), left->as_register(), count); break;
        case lir_shr:  __ sradi(dest->as_register_lo(), left->as_register(), count); break;
        case lir_ushr: __ srdi(dest->as_register_lo(), left->as_register(), count); break;
        default: ShouldNotReachHere();
      }
    }
    return;
  }
#endif

  if (dest->is_single_cpu()) {
    count = count & 0x1F; // Java spec
    if (count == 0) { __ mr_if_needed(dest->as_register(), left->as_register()); }
    else {
      switch (code) {
        case lir_shl:  __ slwi(dest->as_register(), left->as_register(), count); break;
        case lir_shr:  __ srawi(dest->as_register(), left->as_register(), count); break;
        case lir_ushr: __ srwi(dest->as_register(), left->as_register(), count); break;
        default: ShouldNotReachHere();
      }
    }
  } else if (dest->is_double_cpu()) {
    count = count & 63; // Java spec
    if (count == 0) { __ mr_if_needed(dest->as_pointer_register(), left->as_pointer_register()); }
    else {
      switch (code) {
        case lir_shl:  __ sldi(dest->as_pointer_register(), left->as_pointer_register(), count); break;
        case lir_shr:  __ sradi(dest->as_pointer_register(), left->as_pointer_register(), count); break;
        case lir_ushr: __ srdi(dest->as_pointer_register(), left->as_pointer_register(), count); break;
        default: ShouldNotReachHere();
      }
    }
  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) {
  if (op->init_check()) {
    if (!os::zero_page_read_protected() || !ImplicitNullChecks) {
      explicit_null_check(op->klass()->as_register(), op->stub()->info());
    } else {
      add_debug_info_for_null_check_here(op->stub()->info());
    }
    __ lbz(op->tmp1()->as_register(),
           in_bytes(InstanceKlass::init_state_offset()), op->klass()->as_register());
    __ cmpwi(CCR0, op->tmp1()->as_register(), InstanceKlass::fully_initialized);
    __ bc_far_optimized(Assembler::bcondCRbiIs0, __ bi0(CCR0, Assembler::equal), *op->stub()->entry());
  }
  __ allocate_object(op->obj()->as_register(),
                     op->tmp1()->as_register(),
                     op->tmp2()->as_register(),
                     op->tmp3()->as_register(),
                     op->header_size(),
                     op->object_size(),
                     op->klass()->as_register(),
                     *op->stub()->entry());

  __ bind(*op->stub()->continuation());
  __ verify_oop(op->obj()->as_register());
}


void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
  LP64_ONLY( __ extsw(op->len()->as_register(), op->len()->as_register()); )
  if (UseSlowPath ||
      (!UseFastNewObjectArray && (op->type() == T_OBJECT || op->type() == T_ARRAY)) ||
      (!UseFastNewTypeArray   && (op->type() != T_OBJECT && op->type() != T_ARRAY))) {
    __ b(*op->stub()->entry());
  } else {
    __ allocate_array(op->obj()->as_register(),
                      op->len()->as_register(),
                      op->tmp1()->as_register(),
                      op->tmp2()->as_register(),
                      op->tmp3()->as_register(),
                      arrayOopDesc::header_size(op->type()),
                      type2aelembytes(op->type()),
                      op->klass()->as_register(),
                      *op->stub()->entry());
  }
  __ bind(*op->stub()->continuation());
}


void LIR_Assembler::type_profile_helper(Register mdo, int mdo_offset_bias,
                                        ciMethodData *md, ciProfileData *data,
                                        Register recv, Register tmp1, Label* update_done) {
  uint i;
  for (i = 0; i < VirtualCallData::row_limit(); i++) {
    Label next_test;
    // See if the receiver is receiver[n].
    __ ld(tmp1, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)) - mdo_offset_bias, mdo);
    __ verify_klass_ptr(tmp1);
    __ cmpd(CCR0, recv, tmp1);
    __ bne(CCR0, next_test);

    __ ld(tmp1, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)) - mdo_offset_bias, mdo);
    __ addi(tmp1, tmp1, DataLayout::counter_increment);
    __ std(tmp1, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)) - mdo_offset_bias, mdo);
    __ b(*update_done);

    __ bind(next_test);
  }

  // Didn't find receiver; find next empty slot and fill it in.
  for (i = 0; i < VirtualCallData::row_limit(); i++) {
    Label next_test;
    __ ld(tmp1, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)) - mdo_offset_bias, mdo);
    __ cmpdi(CCR0, tmp1, 0);
    __ bne(CCR0, next_test);
    __ li(tmp1, DataLayout::counter_increment);
    __ std(recv, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)) - mdo_offset_bias, mdo);
    __ std(tmp1, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)) - mdo_offset_bias, mdo);
    __ b(*update_done);

    __ bind(next_test);
  }
}


void LIR_Assembler::setup_md_access(ciMethod* method, int bci,
                                    ciMethodData*& md, ciProfileData*& data, int& mdo_offset_bias) {
  md = method->method_data_or_null();
  assert(md != NULL, "Sanity");
  data = md->bci_to_data(bci);
  assert(data != NULL, "need data for checkcast");
  assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
  if (!Assembler::is_simm16(md->byte_offset_of_slot(data, DataLayout::header_offset()) + data->size_in_bytes())) {
    // The offset is large so bias the mdo by the base of the slot so
    // that the ld can use simm16s to reference the slots of the data.
    mdo_offset_bias = md->byte_offset_of_slot(data, DataLayout::header_offset());
  }
}


void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) {
  Register obj = op->object()->as_register();
  Register k_RInfo = op->tmp1()->as_register();
  Register klass_RInfo = op->tmp2()->as_register();
  Register Rtmp1 = op->tmp3()->as_register();
  Register dst = op->result_opr()->as_register();
  ciKlass* k = op->klass();
  bool should_profile = op->should_profile();
  bool move_obj_to_dst = (op->code() == lir_checkcast);
  // Attention: do_temp(opTypeCheck->_object) is not used, i.e. obj may be the same as one of the temps.
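  // If obj aliases one of the temps, move it to dst up front so the temps may
  // be clobbered; on checkcast failure the original value is restored from
  // dst at the end (see restore_obj below).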
  bool reg_conflict = (obj == k_RInfo || obj == klass_RInfo || obj == Rtmp1);
  bool restore_obj = move_obj_to_dst && reg_conflict;

  __ cmpdi(CCR0, obj, 0);
  if (move_obj_to_dst || reg_conflict) {
    __ mr_if_needed(dst, obj);
    if (reg_conflict) { obj = dst; }
  }

  ciMethodData* md;
  ciProfileData* data;
  int mdo_offset_bias = 0;
  if (should_profile) {
    ciMethod* method = op->profiled_method();
    assert(method != NULL, "Should have method");
    setup_md_access(method, op->profiled_bci(), md, data, mdo_offset_bias);

    Register mdo = k_RInfo;
    Register data_val = Rtmp1;
    Label not_null;
    __ bne(CCR0, not_null);
    metadata2reg(md->constant_encoding(), mdo);
    __ add_const_optimized(mdo, mdo, mdo_offset_bias, R0);
    __ lbz(data_val, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias, mdo);
    __ ori(data_val, data_val, BitData::null_seen_byte_constant());
    __ stb(data_val, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias, mdo);
    __ b(*obj_is_null);
    __ bind(not_null);
  } else {
    __ beq(CCR0, *obj_is_null);
  }

  // get object class
  __ load_klass(klass_RInfo, obj);

  if (k->is_loaded()) {
    metadata2reg(k->constant_encoding(), k_RInfo);
  } else {
    klass2reg_with_patching(k_RInfo, op->info_for_patch());
  }

  Label profile_cast_failure, failure_restore_obj, profile_cast_success;
  Label *failure_target = should_profile ? &profile_cast_failure : failure;
  Label *success_target = should_profile ? &profile_cast_success : success;

  if (op->fast_check()) {
    assert_different_registers(klass_RInfo, k_RInfo);
    __ cmpd(CCR0, k_RInfo, klass_RInfo);
    if (should_profile) {
      __ bne(CCR0, *failure_target);
      // Fall through to success case.
    } else {
      __ beq(CCR0, *success);
      // Fall through to failure case.
    }
  } else {
    bool need_slow_path = true;
    if (k->is_loaded()) {
      if ((int) k->super_check_offset() != in_bytes(Klass::secondary_super_cache_offset())) {
        need_slow_path = false;
      }
      // Perform the fast part of the checking logic.
      __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, R0, (need_slow_path ? success_target : NULL),
                                       failure_target, NULL, RegisterOrConstant(k->super_check_offset()));
    } else {
      // Perform the fast part of the checking logic.
      __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, R0, success_target, failure_target);
    }
    if (!need_slow_path) {
      if (!should_profile) { __ b(*success); }
    } else {
      // Call out-of-line instance of __ check_klass_subtype_slow_path(...):
      address entry = Runtime1::entry_for(Runtime1::slow_subtype_check_id);
      //__ load_const_optimized(Rtmp1, entry, R0);
      __ calculate_address_from_global_toc(Rtmp1, entry, true, true, false);
      __ mtctr(Rtmp1);
      __ bctrl(); // sets CR0
      if (should_profile) {
        __ bne(CCR0, *failure_target);
        // Fall through to success case.
      } else {
        __ beq(CCR0, *success);
        // Fall through to failure case.
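        // (The failure label is bound at the end of this method, where obj is
        // restored from dst if necessary.)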
      }
    }
  }

  if (should_profile) {
    Register mdo = k_RInfo, recv = klass_RInfo;
    assert_different_registers(mdo, recv, Rtmp1);
    __ bind(profile_cast_success);
    metadata2reg(md->constant_encoding(), mdo);
    __ add_const_optimized(mdo, mdo, mdo_offset_bias, R0);
    type_profile_helper(mdo, mdo_offset_bias, md, data, recv, Rtmp1, success);
    __ b(*success);

    // Cast failure case.
    __ bind(profile_cast_failure);
    metadata2reg(md->constant_encoding(), mdo);
    __ add_const_optimized(mdo, mdo, mdo_offset_bias, R0);
    __ ld(Rtmp1, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias, mdo);
    __ addi(Rtmp1, Rtmp1, -DataLayout::counter_increment);
    __ std(Rtmp1, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias, mdo);
  }

  __ bind(*failure);

  if (restore_obj) {
    __ mr(op->object()->as_register(), dst);
    // Fall through to failure case.
  }
}


void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
  LIR_Code code = op->code();
  if (code == lir_store_check) {
    Register value = op->object()->as_register();
    Register array = op->array()->as_register();
    Register k_RInfo = op->tmp1()->as_register();
    Register klass_RInfo = op->tmp2()->as_register();
    Register Rtmp1 = op->tmp3()->as_register();
    bool should_profile = op->should_profile();

    __ verify_oop(value);
    CodeStub* stub = op->stub();
    // Check if it needs to be profiled.
    ciMethodData* md;
    ciProfileData* data;
    int mdo_offset_bias = 0;
    if (should_profile) {
      ciMethod* method = op->profiled_method();
      assert(method != NULL, "Should have method");
      setup_md_access(method, op->profiled_bci(), md, data, mdo_offset_bias);
    }
    Label profile_cast_success, failure, done;
    Label *success_target = should_profile ? &profile_cast_success : &done;

    __ cmpdi(CCR0, value, 0);
    if (should_profile) {
      Label not_null;
      __ bne(CCR0, not_null);
      Register mdo = k_RInfo;
      Register data_val = Rtmp1;
      metadata2reg(md->constant_encoding(), mdo);
      __ add_const_optimized(mdo, mdo, mdo_offset_bias, R0);
      __ lbz(data_val, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias, mdo);
      __ ori(data_val, data_val, BitData::null_seen_byte_constant());
      __ stb(data_val, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias, mdo);
      __ b(done);
      __ bind(not_null);
    } else {
      __ beq(CCR0, done);
    }
    if (!os::zero_page_read_protected() || !ImplicitNullChecks) {
      explicit_null_check(array, op->info_for_exception());
    } else {
      add_debug_info_for_null_check_here(op->info_for_exception());
    }
    __ load_klass(k_RInfo, array);
    __ load_klass(klass_RInfo, value);

    // Get instance klass.
    __ ld(k_RInfo, in_bytes(ObjArrayKlass::element_klass_offset()), k_RInfo);
    // Perform the fast part of the checking logic.
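    // The fast path probes the super_check_offset slot and the secondary
    // supers cache; only on a miss do we reach the out-of-line slow call
    // below, which reports its verdict in CR0 (eq means subtype).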
    __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, R0, success_target, &failure, NULL);

    // Call out-of-line instance of __ check_klass_subtype_slow_path(...):
    const address slow_path = Runtime1::entry_for(Runtime1::slow_subtype_check_id);
    //__ load_const_optimized(R0, slow_path);
    __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(slow_path));
    __ mtctr(R0);
    __ bctrl(); // sets CR0
    if (!should_profile) {
      __ beq(CCR0, done);
      __ bind(failure);
    } else {
      __ bne(CCR0, failure);
      // Fall through to the success case.

      Register mdo = klass_RInfo, recv = k_RInfo, tmp1 = Rtmp1;
      assert_different_registers(value, mdo, recv, tmp1);
      __ bind(profile_cast_success);
      metadata2reg(md->constant_encoding(), mdo);
      __ add_const_optimized(mdo, mdo, mdo_offset_bias, R0);
      __ load_klass(recv, value);
      type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, &done);
      __ b(done);

      // Cast failure case.
      __ bind(failure);
      metadata2reg(md->constant_encoding(), mdo);
      __ add_const_optimized(mdo, mdo, mdo_offset_bias, R0);
      Address data_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias);
      __ ld(tmp1, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias, mdo);
      __ addi(tmp1, tmp1, -DataLayout::counter_increment);
      __ std(tmp1, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias, mdo);
    }
    __ b(*stub->entry());
    __ bind(done);

  } else if (code == lir_checkcast) {
    Label success, failure;
    emit_typecheck_helper(op, &success, /*fallthru*/&failure, &success); // Moves obj to dst.
    __ b(*op->stub()->entry());
    __ align(32, 12);
    __ bind(success);
  } else if (code == lir_instanceof) {
    Register dst = op->result_opr()->as_register();
    Label success, failure, done;
    emit_typecheck_helper(op, &success, /*fallthru*/&failure, &failure);
    __ li(dst, 0);
    __ b(done);
    __ align(32, 12);
    __ bind(success);
    __ li(dst, 1);
    __ bind(done);
  } else {
    ShouldNotReachHere();
  }
}


void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
  Register addr = op->addr()->as_pointer_register();
  Register cmp_value = noreg, new_value = noreg;
  bool is_64bit = false;

  if (op->code() == lir_cas_long) {
    cmp_value = op->cmp_value()->as_register_lo();
    new_value = op->new_value()->as_register_lo();
    is_64bit = true;
  } else if (op->code() == lir_cas_int || op->code() == lir_cas_obj) {
    cmp_value = op->cmp_value()->as_register();
    new_value = op->new_value()->as_register();
    if (op->code() == lir_cas_obj) {
      if (UseCompressedOops) {
        Register t1 = op->tmp1()->as_register();
        Register t2 = op->tmp2()->as_register();
        cmp_value = __ encode_heap_oop(t1, cmp_value);
        new_value = __ encode_heap_oop(t2, new_value);
      } else {
        is_64bit = true;
      }
    }
  } else {
    Unimplemented();
  }

  if (is_64bit) {
    __ cmpxchgd(BOOL_RESULT, /*current_value=*/R0, cmp_value, new_value, addr,
                MacroAssembler::MemBarNone,
                MacroAssembler::cmpxchgx_hint_atomic_update(),
                noreg, NULL, /*check without ldarx first*/true);
  } else {
    __ cmpxchgw(BOOL_RESULT, /*current_value=*/R0, cmp_value, new_value, addr,
                MacroAssembler::MemBarNone,
                MacroAssembler::cmpxchgx_hint_atomic_update(),
                noreg, /*check without ldarx first*/true);
  }

  if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
    __ isync();
  } else {
    __ sync();
  }
}


void LIR_Assembler::set_24bit_FPU() {
  Unimplemented();
}

void LIR_Assembler::reset_FPU() {
  Unimplemented();
}


void LIR_Assembler::breakpoint() {
  __ illtrap();
}


void LIR_Assembler::push(LIR_Opr opr) {
  Unimplemented();
}

void LIR_Assembler::pop(LIR_Opr opr) {
  Unimplemented();
}


void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst_opr) {
  Address mon_addr = frame_map()->address_for_monitor_lock(monitor_no);
  Register dst = dst_opr->as_register();
  Register reg = mon_addr.base();
  int offset = mon_addr.disp();
  // Compute pointer to BasicLock.
  __ add_const_optimized(dst, reg, offset);
}


void LIR_Assembler::emit_lock(LIR_OpLock* op) {
  Register obj = op->obj_opr()->as_register();
  Register hdr = op->hdr_opr()->as_register();
  Register lock = op->lock_opr()->as_register();

  // Obj may not be an oop.
  if (op->code() == lir_lock) {
    MonitorEnterStub* stub = (MonitorEnterStub*)op->stub();
    if (UseFastLocking) {
      assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
      // Add debug info for NullPointerException only if one is possible.
      if (op->info() != NULL) {
        if (!os::zero_page_read_protected() || !ImplicitNullChecks) {
          explicit_null_check(obj, op->info());
        } else {
          add_debug_info_for_null_check_here(op->info());
        }
      }
      __ lock_object(hdr, obj, lock, op->scratch_opr()->as_register(), *op->stub()->entry());
    } else {
      // always do slow locking
      // note: The slow locking code could be inlined here, however if we use
      //       slow locking, speed doesn't matter anyway and this solution is
      //       simpler and requires less duplicated code - additionally, the
      //       slow locking code is the same in either case which simplifies
      //       debugging.
      __ b(*op->stub()->entry());
    }
  } else {
    assert (op->code() == lir_unlock, "Invalid code, expected lir_unlock");
    if (UseFastLocking) {
      assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
      __ unlock_object(hdr, obj, lock, *op->stub()->entry());
    } else {
      // always do slow unlocking
      // note: The slow unlocking code could be inlined here, however if we use
      //       slow unlocking, speed doesn't matter anyway and this solution is
      //       simpler and requires less duplicated code - additionally, the
      //       slow unlocking code is the same in either case which simplifies
      //       debugging.
      __ b(*op->stub()->entry());
    }
  }
  __ bind(*op->stub()->continuation());
}


void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
  ciMethod* method = op->profiled_method();
  int bci = op->profiled_bci();
  ciMethod* callee = op->profiled_callee();

  // Update counter for all call types.
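  // The MDO slot for a call site is essentially a CounterData count; for
  // virtual/interface sites it is a VirtualCallData that adds row_limit()
  // (receiver klass, count) pairs, which the code below updates.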
  ciMethodData* md = method->method_data_or_null();
  assert(md != NULL, "Sanity");
  ciProfileData* data = md->bci_to_data(bci);
  assert(data->is_CounterData(), "need CounterData for calls");
  assert(op->mdo()->is_single_cpu(), "mdo must be allocated");
  Register mdo = op->mdo()->as_register();
#ifdef _LP64
  assert(op->tmp1()->is_double_cpu(), "tmp1 must be allocated");
  Register tmp1 = op->tmp1()->as_register_lo();
#else
  assert(op->tmp1()->is_single_cpu(), "tmp1 must be allocated");
  Register tmp1 = op->tmp1()->as_register();
#endif
  metadata2reg(md->constant_encoding(), mdo);
  int mdo_offset_bias = 0;
  if (!Assembler::is_simm16(md->byte_offset_of_slot(data, CounterData::count_offset()) +
                            data->size_in_bytes())) {
    // The offset is large so bias the mdo by the base of the slot so
    // that the ld can use simm16s to reference the slots of the data.
    mdo_offset_bias = md->byte_offset_of_slot(data, CounterData::count_offset());
    __ add_const_optimized(mdo, mdo, mdo_offset_bias, R0);
  }

  Bytecodes::Code bc = method->java_code_at_bci(bci);
  const bool callee_is_static = callee->is_loaded() && callee->is_static();
  // Perform additional virtual call profiling for invokevirtual and
  // invokeinterface bytecodes.
  if ((bc == Bytecodes::_invokevirtual || bc == Bytecodes::_invokeinterface) &&
      !callee_is_static &&  // Required for optimized MH invokes.
      C1ProfileVirtualCalls) {
    assert(op->recv()->is_single_cpu(), "recv must be allocated");
    Register recv = op->recv()->as_register();
    assert_different_registers(mdo, tmp1, recv);
    assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls");
    ciKlass* known_klass = op->known_holder();
    if (C1OptimizeVirtualCallProfiling && known_klass != NULL) {
      // We know the type that will be seen at this call site; we can
      // statically update the MethodData* rather than needing to do
      // dynamic tests on the receiver type.

      // NOTE: we should probably put a lock around this search to
      // avoid collisions by concurrent compilations.
      ciVirtualCallData* vc_data = (ciVirtualCallData*) data;
      uint i;
      for (i = 0; i < VirtualCallData::row_limit(); i++) {
        ciKlass* receiver = vc_data->receiver(i);
        if (known_klass->equals(receiver)) {
          __ ld(tmp1, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) - mdo_offset_bias, mdo);
          __ addi(tmp1, tmp1, DataLayout::counter_increment);
          __ std(tmp1, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) - mdo_offset_bias, mdo);
          return;
        }
      }

      // Receiver type not found in profile data; select an empty slot.

      // Note that this is less efficient than it should be because it
      // always does a write to the receiver part of the
      // VirtualCallData rather than just the first time.
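      // Claim the first empty row: store the known receiver klass and give
      // its counter its initial increment.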
      for (i = 0; i < VirtualCallData::row_limit(); i++) {
        ciKlass* receiver = vc_data->receiver(i);
        if (receiver == NULL) {
          metadata2reg(known_klass->constant_encoding(), tmp1);
          __ std(tmp1, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)) - mdo_offset_bias, mdo);

          __ ld(tmp1, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) - mdo_offset_bias, mdo);
          __ addi(tmp1, tmp1, DataLayout::counter_increment);
          __ std(tmp1, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) - mdo_offset_bias, mdo);
          return;
        }
      }
    } else {
      __ load_klass(recv, recv);
      Label update_done;
      type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, &update_done);
      // Receiver did not match any saved receiver and there is no empty row for it.
      // Increment total counter to indicate polymorphic case.
      __ ld(tmp1, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias, mdo);
      __ addi(tmp1, tmp1, DataLayout::counter_increment);
      __ std(tmp1, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias, mdo);

      __ bind(update_done);
    }
  } else {
    // Static call
    __ ld(tmp1, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias, mdo);
    __ addi(tmp1, tmp1, DataLayout::counter_increment);
    __ std(tmp1, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias, mdo);
  }
}


void LIR_Assembler::align_backward_branch_target() {
  __ align(32, 12); // Insert up to 3 nops to align with 32 byte boundary.
}


void LIR_Assembler::emit_delay(LIR_OpDelay* op) {
  Unimplemented();
}


void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest) {
  assert(left->is_register(), "can only handle registers");

  if (left->is_single_cpu()) {
    __ neg(dest->as_register(), left->as_register());
  } else if (left->is_single_fpu()) {
    __ fneg(dest->as_float_reg(), left->as_float_reg());
  } else if (left->is_double_fpu()) {
    __ fneg(dest->as_double_reg(), left->as_double_reg());
  } else {
    assert (left->is_double_cpu(), "Must be a long");
    __ neg(dest->as_register_lo(), left->as_register_lo());
  }
}


void LIR_Assembler::fxch(int i) {
  Unimplemented();
}

void LIR_Assembler::fld(int i) {
  Unimplemented();
}

void LIR_Assembler::ffree(int i) {
  Unimplemented();
}


void LIR_Assembler::rt_call(LIR_Opr result, address dest,
                            const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) {
  // Stubs: Called via rt_call, but dest is a stub address (no function descriptor).
  if (dest == Runtime1::entry_for(Runtime1::register_finalizer_id) ||
      dest == Runtime1::entry_for(Runtime1::new_multi_array_id   )) {
    //__ load_const_optimized(R0, dest);
    __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(dest));
    __ mtctr(R0);
    __ bctrl();
    assert(info != NULL, "sanity");
    add_call_info_here(info);
    return;
  }

  __ call_c_with_frame_resize(dest, /*no resizing*/ 0);
  if (info != NULL) {
    add_call_info_here(info);
  }
}


void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) {
  ShouldNotReachHere(); // Not needed on _LP64.
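  // On PPC64, 64-bit loads and stores are single atomic instructions, so no
  // special sequence is required for volatile long/double moves.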
}

void LIR_Assembler::membar() {
  __ fence();
}

void LIR_Assembler::membar_acquire() {
  __ acquire();
}

void LIR_Assembler::membar_release() {
  __ release();
}

void LIR_Assembler::membar_loadload() {
  __ membar(Assembler::LoadLoad);
}

void LIR_Assembler::membar_storestore() {
  __ membar(Assembler::StoreStore);
}

void LIR_Assembler::membar_loadstore() {
  __ membar(Assembler::LoadStore);
}

void LIR_Assembler::membar_storeload() {
  __ membar(Assembler::StoreLoad);
}

void LIR_Assembler::on_spin_wait() {
  Unimplemented();
}

void LIR_Assembler::leal(LIR_Opr addr_opr, LIR_Opr dest) {
  LIR_Address* addr = addr_opr->as_address_ptr();
  assert(addr->scale() == LIR_Address::times_1, "no scaling on this platform");
  if (addr->index()->is_illegal()) {
    __ add_const_optimized(dest->as_pointer_register(), addr->base()->as_pointer_register(), addr->disp());
  } else {
    assert(addr->disp() == 0, "can't have both: index and disp");
    __ add(dest->as_pointer_register(), addr->index()->as_pointer_register(), addr->base()->as_pointer_register());
  }
}


void LIR_Assembler::get_thread(LIR_Opr result_reg) {
  ShouldNotReachHere();
}


#ifdef ASSERT
// Emit run-time assertion.
void LIR_Assembler::emit_assert(LIR_OpAssert* op) {
  Unimplemented();
}
#endif


void LIR_Assembler::peephole(LIR_List* lir) {
  // Optimize instruction pairs before emitting.
  LIR_OpList* inst = lir->instructions_list();
  for (int i = 1; i < inst->length(); i++) {
    LIR_Op* op = inst->at(i);

    // 2 register-register-moves
    if (op->code() == lir_move) {
      LIR_Opr in2  = ((LIR_Op1*)op)->in_opr(),
              res2 = ((LIR_Op1*)op)->result_opr();
      if (in2->is_register() && res2->is_register()) {
        LIR_Op* prev = inst->at(i - 1);
        if (prev && prev->code() == lir_move) {
          LIR_Opr in1  = ((LIR_Op1*)prev)->in_opr(),
                  res1 = ((LIR_Op1*)prev)->result_opr();
          if (in1->is_same_register(res2) && in2->is_same_register(res1)) {
            inst->remove_at(i);
          }
        }
      }
    }

  }
  return;
}


void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp) {
  const Register Rptr = src->as_pointer_register(),
                 Rtmp = tmp->as_register();
  Register Rco = noreg;
  if (UseCompressedOops && data->is_oop()) {
    Rco = __ encode_heap_oop(Rtmp, data->as_register());
  }

  Label Lretry;
  __ bind(Lretry);

  if (data->type() == T_INT) {
    const Register Rold = dest->as_register(),
                   Rsrc = data->as_register();
    assert_different_registers(Rptr, Rtmp, Rold, Rsrc);
    __ lwarx(Rold, Rptr, MacroAssembler::cmpxchgx_hint_atomic_update());
    if (code == lir_xadd) {
      __ add(Rtmp, Rsrc, Rold);
      __ stwcx_(Rtmp, Rptr);
    } else {
      __ stwcx_(Rsrc, Rptr);
    }
  } else if (data->is_oop()) {
    assert(code == lir_xchg, "xadd for oops");
    const Register Rold = dest->as_register();
    if (UseCompressedOops) {
      assert_different_registers(Rptr, Rold, Rco);
      __ lwarx(Rold, Rptr, MacroAssembler::cmpxchgx_hint_atomic_update());
      __ stwcx_(Rco, Rptr);
    } else {
      const Register Robj = data->as_register();
      assert_different_registers(Rptr, Rold, Robj);
      __ ldarx(Rold, Rptr, MacroAssembler::cmpxchgx_hint_atomic_update());
      __ stdcx_(Robj, Rptr);
    }
  } else if (data->type() == T_LONG) {
    const Register Rold = dest->as_register_lo(),
                   Rsrc = data->as_register_lo();
    assert_different_registers(Rptr, Rtmp, Rold, Rsrc);
    __ ldarx(Rold, Rptr, MacroAssembler::cmpxchgx_hint_atomic_update());
    if (code == lir_xadd) {
      __ add(Rtmp, Rsrc, Rold);
      __ stdcx_(Rtmp, Rptr);
    } else {
      __ stdcx_(Rsrc, Rptr);
    }
  } else {
    ShouldNotReachHere();
  }

  if (UseStaticBranchPredictionInCompareAndSwapPPC64) {
    __ bne_predict_not_taken(CCR0, Lretry);
  } else {
    __ bne(                  CCR0, Lretry);
  }

  if (UseCompressedOops && data->is_oop()) {
    __ decode_heap_oop(dest->as_register());
  }
}


void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
  Register obj = op->obj()->as_register();
  Register tmp = op->tmp()->as_pointer_register();
  LIR_Address* mdo_addr = op->mdp()->as_address_ptr();
  ciKlass* exact_klass = op->exact_klass();
  intptr_t current_klass = op->current_klass();
  bool not_null = op->not_null();
  bool no_conflict = op->no_conflict();

  Label Lupdate, Ldo_update, Ldone;

  bool do_null = !not_null;
  bool exact_klass_set = exact_klass != NULL && ciTypeEntries::valid_ciklass(current_klass) == exact_klass;
  bool do_update = !TypeEntries::is_type_unknown(current_klass) && !exact_klass_set;

  assert(do_null || do_update, "why are we here?");
  assert(!TypeEntries::was_null_seen(current_klass) || do_update, "why are we here?");

  __ verify_oop(obj);

  if (do_null) {
    if (!TypeEntries::was_null_seen(current_klass)) {
      __ cmpdi(CCR0, obj, 0);
      __ bne(CCR0, Lupdate);
      __ ld(R0, index_or_disp(mdo_addr), mdo_addr->base()->as_pointer_register());
      __ ori(R0, R0, TypeEntries::null_seen);
      if (do_update) {
        __ b(Ldo_update);
      } else {
        __ std(R0, index_or_disp(mdo_addr), mdo_addr->base()->as_pointer_register());
      }
    } else {
      if (do_update) {
        __ cmpdi(CCR0, obj, 0);
        __ beq(CCR0, Ldone);
      }
    }
#ifdef ASSERT
  } else {
    __ cmpdi(CCR0, obj, 0);
    __ bne(CCR0, Lupdate);
    __ stop("unexpected null obj", 0x9652);
#endif
  }

  __ bind(Lupdate);
  if (do_update) {
    Label Lnext;
    const Register klass = R29_TOC; // kill and reload
    bool klass_reg_used = false;
#ifdef ASSERT
    if (exact_klass != NULL) {
      Label ok;
      klass_reg_used = true;
      __ load_klass(klass, obj);
      metadata2reg(exact_klass->constant_encoding(), R0);
      __ cmpd(CCR0, klass, R0);
      __ beq(CCR0, ok);
      __ stop("exact klass and actual klass differ", 0x8564);
      __ bind(ok);
    }
#endif

    if (!no_conflict) {
      if (exact_klass == NULL || TypeEntries::is_type_none(current_klass)) {
        klass_reg_used = true;
        if (exact_klass != NULL) {
          __ ld(tmp, index_or_disp(mdo_addr), mdo_addr->base()->as_pointer_register());
          metadata2reg(exact_klass->constant_encoding(), klass);
        } else {
          __ load_klass(klass, obj);
          __ ld(tmp, index_or_disp(mdo_addr), mdo_addr->base()->as_pointer_register()); // may kill obj
        }

        // Like InterpreterMacroAssembler::profile_obj_type
        __ clrrdi(R0, tmp, exact_log2(-TypeEntries::type_klass_mask));
        // Basically same as andi(R0, tmp, TypeEntries::type_klass_mask);
        __ cmpd(CCR1, R0, klass);
        // Klass seen before, nothing to do (regardless of unknown bit).
        //beq(CCR1, do_nothing);

        __ andi_(R0, klass, TypeEntries::type_unknown);
        // Already unknown. Nothing to do anymore.
        //bne(CCR0, do_nothing);
        __ crorc(CCR0, Assembler::equal, CCR1, Assembler::equal); // cr0 eq = cr1 eq or cr0 ne
        __ beq(CCR0, Lnext);

        if (TypeEntries::is_type_none(current_klass)) {
          __ clrrdi_(R0, tmp, exact_log2(-TypeEntries::type_mask));
          __ orr(R0, klass, tmp); // Combine klass and null_seen bit (only used if (tmp & type_mask)==0).
          __ beq(CCR0, Ldo_update); // First time here. Set profile type.
        }

      } else {
        assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
               ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "conflict only");

        __ ld(tmp, index_or_disp(mdo_addr), mdo_addr->base()->as_pointer_register());
        __ andi_(R0, tmp, TypeEntries::type_unknown);
        // Already unknown. Nothing to do anymore.
        __ bne(CCR0, Lnext);
      }

      // Different than before. Cannot keep accurate profile.
      __ ori(R0, tmp, TypeEntries::type_unknown);
    } else {
      // There's a single possible klass at this profile point.
      assert(exact_klass != NULL, "should be");
      __ ld(tmp, index_or_disp(mdo_addr), mdo_addr->base()->as_pointer_register());

      if (TypeEntries::is_type_none(current_klass)) {
        klass_reg_used = true;
        metadata2reg(exact_klass->constant_encoding(), klass);

        __ clrrdi(R0, tmp, exact_log2(-TypeEntries::type_klass_mask));
        // Basically same as andi(R0, tmp, TypeEntries::type_klass_mask);
        __ cmpd(CCR1, R0, klass);
        // Klass seen before, nothing to do (regardless of unknown bit).
        __ beq(CCR1, Lnext);
#ifdef ASSERT
        {
          Label ok;
          __ clrrdi_(R0, tmp, exact_log2(-TypeEntries::type_mask));
          __ beq(CCR0, ok); // First time here.

          __ stop("unexpected profiling mismatch", 0x7865);
          __ bind(ok);
        }
#endif
        // First time here. Set profile type.
        __ orr(R0, klass, tmp); // Combine klass and null_seen bit (only used if (tmp & type_mask)==0).
      } else {
        assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
               ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");

        // Already unknown. Nothing to do anymore.
        __ andi_(R0, tmp, TypeEntries::type_unknown);
        __ bne(CCR0, Lnext);

        // Different than before. Cannot keep accurate profile.
        __ ori(R0, tmp, TypeEntries::type_unknown);
      }
    }

    __ bind(Ldo_update);
    __ std(R0, index_or_disp(mdo_addr), mdo_addr->base()->as_pointer_register());

    __ bind(Lnext);
    if (klass_reg_used) { __ load_const_optimized(R29_TOC, MacroAssembler::global_toc(), R0); } // reinit
  }
  __ bind(Ldone);
}


void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) {
  assert(op->crc()->is_single_cpu(), "crc must be register");
  assert(op->val()->is_single_cpu(), "byte value must be register");
  assert(op->result_opr()->is_single_cpu(), "result must be register");
  Register crc = op->crc()->as_register();
  Register val = op->val()->as_register();
  Register res = op->result_opr()->as_register();

  assert_different_registers(val, crc, res);

  __ load_const_optimized(res, StubRoutines::crc_table_addr(), R0);
  __ nand(crc, crc, crc); // ~crc
  __ update_byte_crc32(crc, val, res);
  __ nand(res, crc, crc); // ~crc
}

#undef __