/*
 * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2013, 2014 SAP AG. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/templateInterpreter.hpp"
#include "interpreter/templateTable.hpp"
#include "memory/universe.inline.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "utilities/macros.hpp"

#ifndef CC_INTERP

#undef __
#define __ _masm->

// ============================================================================
// Misc helpers

// Do an oop store like *(base + index) = val OR *(base + offset) = val
// (only one of the two variants is possible at a time).
// Index can be noreg.
// Kills:
//   Rbase, Rtmp
static void do_oop_store(InterpreterMacroAssembler* _masm,
                         Register           Rbase,
                         RegisterOrConstant offset,
                         Register           Rval,         // Noreg means always null.
                         Register           Rtmp1,
                         Register           Rtmp2,
                         Register           Rtmp3,
                         BarrierSet::Name   barrier,
                         bool               precise,
                         bool               check_null) {
  assert_different_registers(Rtmp1, Rtmp2, Rtmp3, Rval, Rbase);

  switch (barrier) {
#if INCLUDE_ALL_GCS
    case BarrierSet::G1SATBCT:
    case BarrierSet::G1SATBCTLogging:
      {
        // Load and record the previous value.
        __ g1_write_barrier_pre(Rbase, offset,
                                Rtmp3, /* holder of pre_val ? */
                                Rtmp1, Rtmp2, false /* frame */);

        Label Lnull, Ldone;
        if (Rval != noreg) {
          if (check_null) {
            __ cmpdi(CCR0, Rval, 0);
            __ beq(CCR0, Lnull);
          }
          __ store_heap_oop_not_null(Rval, offset, Rbase, /*Rval must stay uncompressed.*/ Rtmp1);
          // Mark the card.
          if (!(offset.is_constant() && offset.as_constant() == 0) && precise) {
            __ add(Rbase, offset, Rbase);
          }
          __ g1_write_barrier_post(Rbase, Rval, Rtmp1, Rtmp2, Rtmp3, /*filtered (fast path)*/ &Ldone);
          if (check_null) { __ b(Ldone); }
        }

        if (Rval == noreg || check_null) { // Store null oop.
          Register Rnull = Rval;
          __ bind(Lnull);
          if (Rval == noreg) {
            Rnull = Rtmp1;
            __ li(Rnull, 0);
          }
          if (UseCompressedOops) {
            __ stw(Rnull, offset, Rbase);
          } else {
            __ std(Rnull, offset, Rbase);
          }
        }
        __ bind(Ldone);
      }
      break;
#endif // INCLUDE_ALL_GCS
    case BarrierSet::CardTableModRef:
    case BarrierSet::CardTableExtension:
      {
        Label Lnull, Ldone;
        if (Rval != noreg) {
          if (check_null) {
            __ cmpdi(CCR0, Rval, 0);
            __ beq(CCR0, Lnull);
          }
          __ store_heap_oop_not_null(Rval, offset, Rbase, /*Rval should better stay uncompressed.*/ Rtmp1);
          // Mark the card.
          if (!(offset.is_constant() && offset.as_constant() == 0) && precise) {
            __ add(Rbase, offset, Rbase);
          }
          __ card_write_barrier_post(Rbase, Rval, Rtmp1);
          if (check_null) {
            __ b(Ldone);
          }
        }

        if (Rval == noreg || check_null) { // Store null oop.
          Register Rnull = Rval;
          __ bind(Lnull);
          if (Rval == noreg) {
            Rnull = Rtmp1;
            __ li(Rnull, 0);
          }
          if (UseCompressedOops) {
            __ stw(Rnull, offset, Rbase);
          } else {
            __ std(Rnull, offset, Rbase);
          }
        }
        __ bind(Ldone);
      }
      break;
    case BarrierSet::ModRef:
    case BarrierSet::Other:
      ShouldNotReachHere();
      break;
    default:
      ShouldNotReachHere();
  }
}
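
// For orientation, the sequence emitted above corresponds roughly to the
// following pseudocode (a sketch only; pre_barrier/post_barrier stand in
// for the barrier-set specific operations):
//
//   pre_barrier(base + offset);        // G1 only: log the previous value.
//   if (check_null && val == NULL) goto store_null;
//   *(base + offset) = compress(val);  // stw or std, depending on UseCompressedOops.
//   post_barrier(base + offset, val);  // Card mark resp. G1 post barrier.
//   goto done;
// store_null:
//   *(base + offset) = 0;              // A null store needs no post barrier.
// done: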

// ============================================================================
// Platform-dependent initialization

void TemplateTable::pd_initialize() {
  // No ppc64 specific initialization.
}

Address TemplateTable::at_bcp(int offset) {
  // Not used on ppc.
  ShouldNotReachHere();
  return Address();
}
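
// Bytecode quickening: once an instruction has been resolved, it is
// overwritten in the bytecode stream by a _fast variant so that later
// executions skip the resolution path.  A note on the put_code check in
// the helper below: the ConstantPoolCacheEntry 'indices' field is a
// machine word holding the bytecodes in byte (1 + byte_no), counted from
// the least significant byte; on big-endian PPC64 that byte sits at
// offset 7 - (1 + byte_no) from the field's base, which is exactly the
// displacement the lbz below uses.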

// Patches the current bytecode (ptr to it located in bcp)
// in the bytecode stream with a new one.
void TemplateTable::patch_bytecode(Bytecodes::Code new_bc, Register Rnew_bc, Register Rtemp, bool load_bc_into_bc_reg /*=true*/, int byte_no) {
  // With sharing on, may need to test method flag.
  if (!RewriteBytecodes) return;
  Label L_patch_done;

  switch (new_bc) {
    case Bytecodes::_fast_aputfield:
    case Bytecodes::_fast_bputfield:
    case Bytecodes::_fast_cputfield:
    case Bytecodes::_fast_dputfield:
    case Bytecodes::_fast_fputfield:
    case Bytecodes::_fast_iputfield:
    case Bytecodes::_fast_lputfield:
    case Bytecodes::_fast_sputfield:
    {
      // We skip bytecode quickening for putfield instructions when
      // the put_code written to the constant pool cache is zero.
      // This is required so that every execution of this instruction
      // calls out to InterpreterRuntime::resolve_get_put to do
      // additional, required work.
      assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
      assert(load_bc_into_bc_reg, "we use bc_reg as temp");
      __ get_cache_and_index_at_bcp(Rtemp /* dst = cache */, 1);
      // Big Endian: ((*(cache+indices))>>((1+byte_no)*8))&0xFF
      __ lbz(Rnew_bc, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + 7 - (1 + byte_no), Rtemp);
      __ cmpwi(CCR0, Rnew_bc, 0);
      __ li(Rnew_bc, (unsigned int)(unsigned char)new_bc);
      __ beq(CCR0, L_patch_done);
      // __ isync(); // acquire not needed
      break;
    }

    default:
      assert(byte_no == -1, "sanity");
      if (load_bc_into_bc_reg) {
        __ li(Rnew_bc, (unsigned int)(unsigned char)new_bc);
      }
  }

  if (JvmtiExport::can_post_breakpoint()) {
    Label L_fast_patch;
    __ lbz(Rtemp, 0, R14_bcp);
    __ cmpwi(CCR0, Rtemp, (unsigned int)(unsigned char)Bytecodes::_breakpoint);
    __ bne(CCR0, L_fast_patch);
    // Perform the quickening, slowly, in the bowels of the breakpoint table.
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), R19_method, R14_bcp, Rnew_bc);
    __ b(L_patch_done);
    __ bind(L_fast_patch);
  }

  // Patch bytecode.
  __ stb(Rnew_bc, 0, R14_bcp);

  __ bind(L_patch_done);
}

// ============================================================================
// Individual instructions

void TemplateTable::nop() {
  transition(vtos, vtos);
  // Nothing to do.
}

void TemplateTable::shouldnotreachhere() {
  transition(vtos, vtos);
  __ stop("shouldnotreachhere bytecode");
}

void TemplateTable::aconst_null() {
  transition(vtos, atos);
  __ li(R17_tos, 0);
}

void TemplateTable::iconst(int value) {
  transition(vtos, itos);
  assert(value >= -1 && value <= 5, "");
  __ li(R17_tos, value);
}

void TemplateTable::lconst(int value) {
  transition(vtos, ltos);
  assert(value >= -1 && value <= 5, "");
  __ li(R17_tos, value);
}

void TemplateTable::fconst(int value) {
  transition(vtos, ftos);
  static float zero = 0.0;
  static float one  = 1.0;
  static float two  = 2.0;
  switch (value) {
    default: ShouldNotReachHere();
    case 0: {
      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&zero, R0, true);
      __ lfs(F15_ftos, simm16_offset, R11_scratch1);
      break;
    }
    case 1: {
      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&one, R0, true);
      __ lfs(F15_ftos, simm16_offset, R11_scratch1);
      break;
    }
    case 2: {
      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&two, R0, true);
      __ lfs(F15_ftos, simm16_offset, R11_scratch1);
      break;
    }
  }
}

void TemplateTable::dconst(int value) {
  transition(vtos, dtos);
  static double zero = 0.0;
  static double one  = 1.0;
  switch (value) {
    case 0: {
      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&zero, R0, true);
      __ lfd(F15_ftos, simm16_offset, R11_scratch1);
      break;
    }
    case 1: {
      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&one, R0, true);
      __ lfd(F15_ftos, simm16_offset, R11_scratch1);
      break;
    }
    default: ShouldNotReachHere();
  }
}

void TemplateTable::bipush() {
  transition(vtos, itos);
  __ lbz(R17_tos, 1, R14_bcp);
  __ extsb(R17_tos, R17_tos);
}

void TemplateTable::sipush() {
  transition(vtos, itos);
  __ get_2_byte_integer_at_bcp(1, R17_tos, InterpreterMacroAssembler::Signed);
}
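
// A note on the isync() uses in ldc and ldc2_w below: the constant must
// not be loaded ahead of the tag that guards it.  On PPC, a conditional
// branch that depends on the tag load, followed by isync, prevents the
// subsequent constant load from being performed early, which yields the
// required load-load (acquire) ordering without a full sync.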

void TemplateTable::ldc(bool wide) {
  Register Rscratch1 = R11_scratch1,
           Rscratch2 = R12_scratch2,
           Rcpool    = R3_ARG1;

  transition(vtos, vtos);
  Label notInt, notClass, exit;

  __ get_cpool_and_tags(Rcpool, Rscratch2); // Set Rscratch2 = &tags.
  if (wide) { // Read index.
    __ get_2_byte_integer_at_bcp(1, Rscratch1, InterpreterMacroAssembler::Unsigned);
  } else {
    __ lbz(Rscratch1, 1, R14_bcp);
  }

  const int base_offset = ConstantPool::header_size() * wordSize;
  const int tags_offset = Array<u1>::base_offset_in_bytes();

  // Get type from tags.
  __ addi(Rscratch2, Rscratch2, tags_offset);
  __ lbzx(Rscratch2, Rscratch2, Rscratch1);

  __ cmpwi(CCR0, Rscratch2, JVM_CONSTANT_UnresolvedClass); // Unresolved class?
  __ cmpwi(CCR1, Rscratch2, JVM_CONSTANT_UnresolvedClassInError); // Unresolved class in error state?
  __ cror(/*CR0 eq*/2, /*CR1 eq*/4+2, /*CR0 eq*/2);

  // Resolved class - need to call vm to get java mirror of the class.
  __ cmpwi(CCR1, Rscratch2, JVM_CONSTANT_Class);
  __ crnor(/*CR0 eq*/2, /*CR1 eq*/4+2, /*CR0 eq*/2); // Neither resolved class nor unresolved case from above?
  __ beq(CCR0, notClass);

  __ li(R4, wide ? 1 : 0);
  call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), R4);
  __ push(atos);
  __ b(exit);

  __ align(32, 12);
  __ bind(notClass);
  __ addi(Rcpool, Rcpool, base_offset);
  __ sldi(Rscratch1, Rscratch1, LogBytesPerWord);
  __ cmpdi(CCR0, Rscratch2, JVM_CONSTANT_Integer);
  __ bne(CCR0, notInt);
  __ isync(); // Order load of constant wrt. tags.
  __ lwax(R17_tos, Rcpool, Rscratch1);
  __ push(itos);
  __ b(exit);

  __ align(32, 12);
  __ bind(notInt);
#ifdef ASSERT
  // String and Object are rewritten to fast_aldc
  __ cmpdi(CCR0, Rscratch2, JVM_CONSTANT_Float);
  __ asm_assert_eq("unexpected type", 0x8765);
#endif
  __ isync(); // Order load of constant wrt. tags.
  __ lfsx(F15_ftos, Rcpool, Rscratch1);
  __ push(ftos);

  __ align(32, 12);
  __ bind(exit);
}

// Fast path for caching oop constants.
void TemplateTable::fast_aldc(bool wide) {
  transition(vtos, atos);

  int index_size = wide ? sizeof(u2) : sizeof(u1);
  const Register Rscratch = R11_scratch1;
  Label resolved;

  // We are resolved if the resolved reference cache entry contains a
  // non-null object (CallSite, etc.)
  __ get_cache_index_at_bcp(Rscratch, 1, index_size);  // Load index.
  __ load_resolved_reference_at_index(R17_tos, Rscratch);
  __ cmpdi(CCR0, R17_tos, 0);
  __ bne(CCR0, resolved);
  __ load_const_optimized(R3_ARG1, (int)bytecode());

  address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);

  // First time invocation - must resolve first.
  __ call_VM(R17_tos, entry, R3_ARG1);

  __ align(32, 12);
  __ bind(resolved);
  __ verify_oop(R17_tos);
}

void TemplateTable::ldc2_w() {
  transition(vtos, vtos);
  Label Llong, Lexit;

  Register Rindex = R11_scratch1,
           Rcpool = R12_scratch2,
           Rtag   = R3_ARG1;
  __ get_cpool_and_tags(Rcpool, Rtag);
  __ get_2_byte_integer_at_bcp(1, Rindex, InterpreterMacroAssembler::Unsigned);

  const int base_offset = ConstantPool::header_size() * wordSize;
  const int tags_offset = Array<u1>::base_offset_in_bytes();
  // Get type from tags.
  __ addi(Rcpool, Rcpool, base_offset);
  __ addi(Rtag, Rtag, tags_offset);

  __ lbzx(Rtag, Rtag, Rindex);

  __ sldi(Rindex, Rindex, LogBytesPerWord);
  __ cmpdi(CCR0, Rtag, JVM_CONSTANT_Double);
  __ bne(CCR0, Llong);
  // A double can be placed at word-aligned locations in the constant pool.
  // Check out Conversions.java for an example.
  // Also ConstantPool::header_size() is 20, which makes it very difficult
  // to double-align double on the constant pool. SG, 11/7/97
  __ isync(); // Order load of constant wrt. tags.
  __ lfdx(F15_ftos, Rcpool, Rindex);
  __ push(dtos);
  __ b(Lexit);

  __ bind(Llong);
  __ isync(); // Order load of constant wrt. tags.
  __ ldx(R17_tos, Rcpool, Rindex);
  __ push(ltos);

  __ bind(Lexit);
}

// Get the locals index located in the bytecode stream at bcp + offset.
void TemplateTable::locals_index(Register Rdst, int offset) {
  __ lbz(Rdst, offset, R14_bcp);
}

void TemplateTable::iload() {
  transition(vtos, itos);

  // Get the local value into tos
  const Register Rindex = R22_tmp2;
  locals_index(Rindex);

  // Rewrite iload,iload  pair into fast_iload2
  //         iload,caload pair into fast_icaload
  if (RewriteFrequentPairs) {
    Label Lrewrite, Ldone;
    Register Rnext_byte  = R3_ARG1,
             Rrewrite_to = R6_ARG4,
             Rscratch    = R11_scratch1;

    // get next byte
    __ lbz(Rnext_byte, Bytecodes::length_for(Bytecodes::_iload), R14_bcp);

    // if _iload, wait to rewrite to iload2. We only want to rewrite the
    // last two iloads in a pair. Comparing against fast_iload means that
    // the next bytecode is neither an iload nor a caload, and therefore
    // an iload pair.
    __ cmpwi(CCR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_iload);
    __ beq(CCR0, Ldone);

    __ cmpwi(CCR1, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_iload);
    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_iload2);
    __ beq(CCR1, Lrewrite);

    __ cmpwi(CCR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_caload);
    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_icaload);
    __ beq(CCR0, Lrewrite);

    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_iload);

    __ bind(Lrewrite);
    patch_bytecode(Bytecodes::_iload, Rrewrite_to, Rscratch, false);
    __ bind(Ldone);
  }

  __ load_local_int(R17_tos, Rindex, Rindex);
}

// Load 2 integers in a row without dispatching
void TemplateTable::fast_iload2() {
  transition(vtos, itos);

  __ lbz(R3_ARG1, 1, R14_bcp);
  __ lbz(R17_tos, Bytecodes::length_for(Bytecodes::_iload) + 1, R14_bcp);

  __ load_local_int(R3_ARG1, R11_scratch1, R3_ARG1);
  __ load_local_int(R17_tos, R12_scratch2, R17_tos);
  __ push_i(R3_ARG1);
}

void TemplateTable::fast_iload() {
  transition(vtos, itos);
  // Get the local value into tos

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ load_local_int(R17_tos, Rindex, Rindex);
}

// Load a local variable type long from locals area to TOS cache register.
// Local index resides in the bytecode stream.
void TemplateTable::lload() {
  transition(vtos, ltos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ load_local_long(R17_tos, Rindex, Rindex);
}

void TemplateTable::fload() {
  transition(vtos, ftos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ load_local_float(F15_ftos, Rindex, Rindex);
}

void TemplateTable::dload() {
  transition(vtos, dtos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ load_local_double(F15_ftos, Rindex, Rindex);
}

void TemplateTable::aload() {
  transition(vtos, atos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ load_local_ptr(R17_tos, Rindex, Rindex);
}

void TemplateTable::locals_index_wide(Register Rdst) {
  // Offset is 2, not 1, because Lbcp points to wide prefix code.
  __ get_2_byte_integer_at_bcp(2, Rdst, InterpreterMacroAssembler::Unsigned);
}

void TemplateTable::wide_iload() {
  // Get the local value into tos.

  const Register Rindex = R11_scratch1;
  locals_index_wide(Rindex);
  __ load_local_int(R17_tos, Rindex, Rindex);
}

void TemplateTable::wide_lload() {
  transition(vtos, ltos);

  const Register Rindex = R11_scratch1;
  locals_index_wide(Rindex);
  __ load_local_long(R17_tos, Rindex, Rindex);
}

void TemplateTable::wide_fload() {
  transition(vtos, ftos);

  const Register Rindex = R11_scratch1;
  locals_index_wide(Rindex);
  __ load_local_float(F15_ftos, Rindex, Rindex);
}

void TemplateTable::wide_dload() {
  transition(vtos, dtos);

  const Register Rindex = R11_scratch1;
  locals_index_wide(Rindex);
  __ load_local_double(F15_ftos, Rindex, Rindex);
}

void TemplateTable::wide_aload() {
  transition(vtos, atos);

  const Register Rindex = R11_scratch1;
  locals_index_wide(Rindex);
  __ load_local_ptr(R17_tos, Rindex, Rindex);
}
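
// Array load bytecodes.  The index_check helper (see the interpreter
// macro assembler) pops the array, checks the index against the array
// length (throwing ArrayIndexOutOfBoundsException if it is out of
// range), and computes array + (index << shift) into its last register
// argument; the constant header offset of the element area is then
// added by the load instruction itself.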

void TemplateTable::iaload() {
  transition(itos, itos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3;
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerInt, Rtemp, Rload_addr);
  __ lwa(R17_tos, arrayOopDesc::base_offset_in_bytes(T_INT), Rload_addr);
}

void TemplateTable::laload() {
  transition(itos, ltos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3;
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerLong, Rtemp, Rload_addr);
  __ ld(R17_tos, arrayOopDesc::base_offset_in_bytes(T_LONG), Rload_addr);
}

void TemplateTable::faload() {
  transition(itos, ftos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3;
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerInt, Rtemp, Rload_addr);
  __ lfs(F15_ftos, arrayOopDesc::base_offset_in_bytes(T_FLOAT), Rload_addr);
}

void TemplateTable::daload() {
  transition(itos, dtos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3;
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerLong, Rtemp, Rload_addr);
  __ lfd(F15_ftos, arrayOopDesc::base_offset_in_bytes(T_DOUBLE), Rload_addr);
}

void TemplateTable::aaload() {
  transition(itos, atos);

  // tos: index
  // result tos: array element
  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3;
  __ index_check(Rarray, R17_tos /* index */, UseCompressedOops ? 2 : LogBytesPerWord, Rtemp, Rload_addr);
  __ load_heap_oop(R17_tos, arrayOopDesc::base_offset_in_bytes(T_OBJECT), Rload_addr);
  __ verify_oop(R17_tos);
  //__ dcbt(R17_tos); // prefetch
}
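
// With compressed oops the elements of an object array are 4-byte
// narrow oops rather than 8-byte pointers, hence the index shift of 2
// instead of LogBytesPerWord in aaload above (aastore below does the
// same).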

void TemplateTable::baload() {
  transition(itos, itos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3;
  __ index_check(Rarray, R17_tos /* index */, 0, Rtemp, Rload_addr);
  __ lbz(R17_tos, arrayOopDesc::base_offset_in_bytes(T_BYTE), Rload_addr);
  __ extsb(R17_tos, R17_tos);
}

void TemplateTable::caload() {
  transition(itos, itos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3;
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerShort, Rtemp, Rload_addr);
  __ lhz(R17_tos, arrayOopDesc::base_offset_in_bytes(T_CHAR), Rload_addr);
}

// Iload followed by caload frequent pair.
void TemplateTable::fast_icaload() {
  transition(vtos, itos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R11_scratch1;

  locals_index(R17_tos);
  __ load_local_int(R17_tos, Rtemp, R17_tos);
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerShort, Rtemp, Rload_addr);
  __ lhz(R17_tos, arrayOopDesc::base_offset_in_bytes(T_CHAR), Rload_addr);
}

void TemplateTable::saload() {
  transition(itos, itos);

  const Register Rload_addr = R11_scratch1,
                 Rarray     = R12_scratch2,
                 Rtemp      = R3_ARG1;
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerShort, Rtemp, Rload_addr);
  __ lha(R17_tos, arrayOopDesc::base_offset_in_bytes(T_SHORT), Rload_addr);
}

void TemplateTable::iload(int n) {
  transition(vtos, itos);

  __ lwz(R17_tos, Interpreter::local_offset_in_bytes(n), R18_locals);
}

void TemplateTable::lload(int n) {
  transition(vtos, ltos);

  __ ld(R17_tos, Interpreter::local_offset_in_bytes(n + 1), R18_locals);
}

void TemplateTable::fload(int n) {
  transition(vtos, ftos);

  __ lfs(F15_ftos, Interpreter::local_offset_in_bytes(n), R18_locals);
}

void TemplateTable::dload(int n) {
  transition(vtos, dtos);

  __ lfd(F15_ftos, Interpreter::local_offset_in_bytes(n + 1), R18_locals);
}

void TemplateTable::aload(int n) {
  transition(vtos, atos);

  __ ld(R17_tos, Interpreter::local_offset_in_bytes(n), R18_locals);
}

void TemplateTable::aload_0() {
  transition(vtos, atos);
  // According to bytecode histograms, the pairs:
  //
  // _aload_0, _fast_igetfield
  // _aload_0, _fast_agetfield
  // _aload_0, _fast_fgetfield
  //
  // occur frequently. If RewriteFrequentPairs is set, the (slow)
  // _aload_0 bytecode checks if the next bytecode is either
  // _fast_igetfield, _fast_agetfield or _fast_fgetfield and then
  // rewrites the current bytecode into a pair bytecode; otherwise it
  // rewrites the current bytecode into _fast_aload_0 that doesn't do
  // the pair check anymore.
  //
  // Note: If the next bytecode is _getfield, the rewrite must be
  // delayed, otherwise we may miss an opportunity for a pair.
  //
  // Also rewrite frequent pairs
  //   aload_0, aload_1
  //   aload_0, iload_1
  // These bytecodes with a small amount of code are most profitable
  // to rewrite.

  if (RewriteFrequentPairs) {

    Label Lrewrite, Ldont_rewrite;
    Register Rnext_byte  = R3_ARG1,
             Rrewrite_to = R6_ARG4,
             Rscratch    = R11_scratch1;

    // Get next byte.
    __ lbz(Rnext_byte, Bytecodes::length_for(Bytecodes::_aload_0), R14_bcp);

    // If _getfield, wait to rewrite. We only want to rewrite the last two bytecodes in a pair.
    __ cmpwi(CCR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_getfield);
    __ beq(CCR0, Ldont_rewrite);

    __ cmpwi(CCR1, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_igetfield);
    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_iaccess_0);
    __ beq(CCR1, Lrewrite);

    __ cmpwi(CCR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_agetfield);
    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_aaccess_0);
    __ beq(CCR0, Lrewrite);

    __ cmpwi(CCR1, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_fgetfield);
    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_faccess_0);
    __ beq(CCR1, Lrewrite);

    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_aload_0);

    __ bind(Lrewrite);
    patch_bytecode(Bytecodes::_aload_0, Rrewrite_to, Rscratch, false);
    __ bind(Ldont_rewrite);
  }

  // Do actual aload_0 (must do this after patch_bytecode which might call VM and GC might change oop).
  aload(0);
}

void TemplateTable::istore() {
  transition(itos, vtos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ store_local_int(R17_tos, Rindex);
}

void TemplateTable::lstore() {
  transition(ltos, vtos);
  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ store_local_long(R17_tos, Rindex);
}

void TemplateTable::fstore() {
  transition(ftos, vtos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ store_local_float(F15_ftos, Rindex);
}

void TemplateTable::dstore() {
  transition(dtos, vtos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ store_local_double(F15_ftos, Rindex);
}

void TemplateTable::astore() {
  transition(vtos, vtos);

  const Register Rindex = R11_scratch1;
  __ pop_ptr();
  __ verify_oop_or_return_address(R17_tos, Rindex);
  locals_index(Rindex);
  __ store_local_ptr(R17_tos, Rindex);
}

void TemplateTable::wide_istore() {
  transition(vtos, vtos);

  const Register Rindex = R11_scratch1;
  __ pop_i();
  locals_index_wide(Rindex);
  __ store_local_int(R17_tos, Rindex);
}

void TemplateTable::wide_lstore() {
  transition(vtos, vtos);

  const Register Rindex = R11_scratch1;
  __ pop_l();
  locals_index_wide(Rindex);
  __ store_local_long(R17_tos, Rindex);
}

void TemplateTable::wide_fstore() {
  transition(vtos, vtos);

  const Register Rindex = R11_scratch1;
  __ pop_f();
  locals_index_wide(Rindex);
  __ store_local_float(F15_ftos, Rindex);
}

void TemplateTable::wide_dstore() {
  transition(vtos, vtos);

  const Register Rindex = R11_scratch1;
  __ pop_d();
  locals_index_wide(Rindex);
  __ store_local_double(F15_ftos, Rindex);
}

void TemplateTable::wide_astore() {
  transition(vtos, vtos);

  const Register Rindex = R11_scratch1;
  __ pop_ptr();
  __ verify_oop_or_return_address(R17_tos, Rindex);
  locals_index_wide(Rindex);
  __ store_local_ptr(R17_tos, Rindex);
}

void TemplateTable::iastore() {
  transition(itos, vtos);

  const Register Rindex      = R3_ARG1,
                 Rstore_addr = R4_ARG2,
                 Rarray      = R5_ARG3,
                 Rtemp       = R6_ARG4;
  __ pop_i(Rindex);
  __ index_check(Rarray, Rindex, LogBytesPerInt, Rtemp, Rstore_addr);
  __ stw(R17_tos, arrayOopDesc::base_offset_in_bytes(T_INT), Rstore_addr);
}

void TemplateTable::lastore() {
  transition(ltos, vtos);

  const Register Rindex      = R3_ARG1,
                 Rstore_addr = R4_ARG2,
                 Rarray      = R5_ARG3,
                 Rtemp       = R6_ARG4;
  __ pop_i(Rindex);
  __ index_check(Rarray, Rindex, LogBytesPerLong, Rtemp, Rstore_addr);
  __ std(R17_tos, arrayOopDesc::base_offset_in_bytes(T_LONG), Rstore_addr);
}

void TemplateTable::fastore() {
  transition(ftos, vtos);

  const Register Rindex      = R3_ARG1,
                 Rstore_addr = R4_ARG2,
                 Rarray      = R5_ARG3,
                 Rtemp       = R6_ARG4;
  __ pop_i(Rindex);
  __ index_check(Rarray, Rindex, LogBytesPerInt, Rtemp, Rstore_addr);
  __ stfs(F15_ftos, arrayOopDesc::base_offset_in_bytes(T_FLOAT), Rstore_addr);
}

void TemplateTable::dastore() {
  transition(dtos, vtos);

  const Register Rindex      = R3_ARG1,
                 Rstore_addr = R4_ARG2,
                 Rarray      = R5_ARG3,
                 Rtemp       = R6_ARG4;
  __ pop_i(Rindex);
  __ index_check(Rarray, Rindex, LogBytesPerLong, Rtemp, Rstore_addr);
  __ stfd(F15_ftos, arrayOopDesc::base_offset_in_bytes(T_DOUBLE), Rstore_addr);
}

// Pop 3 values from the stack and...
void TemplateTable::aastore() {
  transition(vtos, vtos);

  Label Lstore_ok, Lis_null, Ldone;
  const Register Rindex    = R3_ARG1,
                 Rarray    = R4_ARG2,
                 Rscratch  = R11_scratch1,
                 Rscratch2 = R12_scratch2,
                 Rarray_klass = R5_ARG3,
                 Rarray_element_klass = Rarray_klass,
                 Rvalue_klass = R6_ARG4,
                 Rstore_addr = R31;  // Use register which survives VM call.

  __ ld(R17_tos, Interpreter::expr_offset_in_bytes(0), R15_esp); // Get value to store.
  __ lwz(Rindex, Interpreter::expr_offset_in_bytes(1), R15_esp); // Get index.
  __ ld(Rarray, Interpreter::expr_offset_in_bytes(2), R15_esp);  // Get array.

  __ verify_oop(R17_tos);
  __ index_check_without_pop(Rarray, Rindex, UseCompressedOops ? 2 : LogBytesPerWord, Rscratch, Rstore_addr);
  // Rindex is dead!
  Register Rscratch3 = Rindex;

  // Do array store check - check for NULL value first.
  __ cmpdi(CCR0, R17_tos, 0);
  __ beq(CCR0, Lis_null);

  __ load_klass(Rarray_klass, Rarray);
  __ load_klass(Rvalue_klass, R17_tos);

  // Do fast instanceof cache test.
  __ ld(Rarray_element_klass, in_bytes(ObjArrayKlass::element_klass_offset()), Rarray_klass);

  // Generate a fast subtype check. Branch to store_ok if no failure. Throw if failure.
  __ gen_subtype_check(Rvalue_klass /*subklass*/, Rarray_element_klass /*superklass*/, Rscratch, Rscratch2, Rscratch3, Lstore_ok);

  // Fell through: subtype check failed => throw an exception.
  __ load_dispatch_table(R11_scratch1, (address*)Interpreter::_throw_ArrayStoreException_entry);
  __ mtctr(R11_scratch1);
  __ bctr();

  __ bind(Lis_null);
  do_oop_store(_masm, Rstore_addr, arrayOopDesc::base_offset_in_bytes(T_OBJECT), noreg /* 0 */,
               Rscratch, Rscratch2, Rscratch3, _bs->kind(), true /* precise */, false /* check_null */);
  __ profile_null_seen(Rscratch, Rscratch2);
  __ b(Ldone);

  // Store is OK.
  __ bind(Lstore_ok);
  do_oop_store(_masm, Rstore_addr, arrayOopDesc::base_offset_in_bytes(T_OBJECT), R17_tos /* value */,
               Rscratch, Rscratch2, Rscratch3, _bs->kind(), true /* precise */, false /* check_null */);

  __ bind(Ldone);
  // Adjust sp (pops array, index and value).
  __ addi(R15_esp, R15_esp, 3 * Interpreter::stackElementSize);
}
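
// In Java terms, the sequence above implements:
//
//   if (value == null) {
//     array[index] = null;             // No store check needed.
//   } else if (element type assignable from value's type) {
//     array[index] = value;            // Plus GC write barriers.
//   } else {
//     throw new ArrayStoreException();
//   }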

void TemplateTable::bastore() {
  transition(itos, vtos);

  const Register Rindex   = R11_scratch1,
                 Rarray   = R12_scratch2,
                 Rscratch = R3_ARG1;
  __ pop_i(Rindex);
  // tos: val
  // Rarray: array ptr (popped by index_check)
  __ index_check(Rarray, Rindex, 0, Rscratch, Rarray);
  __ stb(R17_tos, arrayOopDesc::base_offset_in_bytes(T_BYTE), Rarray);
}

void TemplateTable::castore() {
  transition(itos, vtos);

  const Register Rindex   = R11_scratch1,
                 Rarray   = R12_scratch2,
                 Rscratch = R3_ARG1;
  __ pop_i(Rindex);
  // tos: val
  // Rarray: array ptr (popped by index_check)
  __ index_check(Rarray, Rindex, LogBytesPerShort, Rscratch, Rarray);
  __ sth(R17_tos, arrayOopDesc::base_offset_in_bytes(T_CHAR), Rarray);
}

void TemplateTable::sastore() {
  castore();
}

void TemplateTable::istore(int n) {
  transition(itos, vtos);
  __ stw(R17_tos, Interpreter::local_offset_in_bytes(n), R18_locals);
}

void TemplateTable::lstore(int n) {
  transition(ltos, vtos);
  __ std(R17_tos, Interpreter::local_offset_in_bytes(n + 1), R18_locals);
}

void TemplateTable::fstore(int n) {
  transition(ftos, vtos);
  __ stfs(F15_ftos, Interpreter::local_offset_in_bytes(n), R18_locals);
}

void TemplateTable::dstore(int n) {
  transition(dtos, vtos);
  __ stfd(F15_ftos, Interpreter::local_offset_in_bytes(n + 1), R18_locals);
}

void TemplateTable::astore(int n) {
  transition(vtos, vtos);

  __ pop_ptr();
  __ verify_oop_or_return_address(R17_tos, R11_scratch1);
  __ std(R17_tos, Interpreter::local_offset_in_bytes(n), R18_locals);
}

void TemplateTable::pop() {
  transition(vtos, vtos);

  __ addi(R15_esp, R15_esp, Interpreter::stackElementSize);
}

void TemplateTable::pop2() {
  transition(vtos, vtos);

  __ addi(R15_esp, R15_esp, Interpreter::stackElementSize * 2);
}

void TemplateTable::dup() {
  transition(vtos, vtos);

  __ ld(R11_scratch1, Interpreter::stackElementSize, R15_esp);
  __ push_ptr(R11_scratch1);
}

void TemplateTable::dup_x1() {
  transition(vtos, vtos);

  Register Ra = R11_scratch1,
           Rb = R12_scratch2;
  // stack: ..., a, b
  __ ld(Rb, Interpreter::stackElementSize,     R15_esp);
  __ ld(Ra, Interpreter::stackElementSize * 2, R15_esp);
  __ std(Rb, Interpreter::stackElementSize * 2, R15_esp);
  __ std(Ra, Interpreter::stackElementSize,     R15_esp);
  __ push_ptr(Rb);
  // stack: ..., b, a, b
}

void TemplateTable::dup_x2() {
  transition(vtos, vtos);

  Register Ra = R11_scratch1,
           Rb = R12_scratch2,
           Rc = R3_ARG1;

  // stack: ..., a, b, c
  __ ld(Rc, Interpreter::stackElementSize,     R15_esp); // load c
  __ ld(Ra, Interpreter::stackElementSize * 3, R15_esp); // load a
  __ std(Rc, Interpreter::stackElementSize * 3, R15_esp); // store c in a
  __ ld(Rb, Interpreter::stackElementSize * 2, R15_esp); // load b
  // stack: ..., c, b, c
  __ std(Ra, Interpreter::stackElementSize * 2, R15_esp); // store a in b
  // stack: ..., c, a, c
  __ std(Rb, Interpreter::stackElementSize,     R15_esp); // store b in c
  __ push_ptr(Rc);                                        // push c
  // stack: ..., c, a, b, c
}

void TemplateTable::dup2() {
  transition(vtos, vtos);

  Register Ra = R11_scratch1,
           Rb = R12_scratch2;
  // stack: ..., a, b
  __ ld(Rb, Interpreter::stackElementSize,     R15_esp);
  __ ld(Ra, Interpreter::stackElementSize * 2, R15_esp);
  __ push_2ptrs(Ra, Rb);
  // stack: ..., a, b, a, b
}

void TemplateTable::dup2_x1() {
  transition(vtos, vtos);

  Register Ra = R11_scratch1,
           Rb = R12_scratch2,
           Rc = R3_ARG1;
  // stack: ..., a, b, c
  __ ld(Rc, Interpreter::stackElementSize,     R15_esp);
  __ ld(Rb, Interpreter::stackElementSize * 2, R15_esp);
  __ std(Rc, Interpreter::stackElementSize * 2, R15_esp);
  __ ld(Ra, Interpreter::stackElementSize * 3, R15_esp);
  __ std(Ra, Interpreter::stackElementSize,     R15_esp);
  __ std(Rb, Interpreter::stackElementSize * 3, R15_esp);
  // stack: ..., b, c, a
  __ push_2ptrs(Rb, Rc);
  // stack: ..., b, c, a, b, c
}

void TemplateTable::dup2_x2() {
  transition(vtos, vtos);

  Register Ra = R11_scratch1,
           Rb = R12_scratch2,
           Rc = R3_ARG1,
           Rd = R4_ARG2;
  // stack: ..., a, b, c, d
  __ ld(Rb, Interpreter::stackElementSize * 3, R15_esp);
  __ ld(Rd, Interpreter::stackElementSize,     R15_esp);
  __ std(Rb, Interpreter::stackElementSize,     R15_esp); // store b in d
  __ std(Rd, Interpreter::stackElementSize * 3, R15_esp); // store d in b
  __ ld(Ra, Interpreter::stackElementSize * 4, R15_esp);
  __ ld(Rc, Interpreter::stackElementSize * 2, R15_esp);
  __ std(Ra, Interpreter::stackElementSize * 2, R15_esp); // store a in c
  __ std(Rc, Interpreter::stackElementSize * 4, R15_esp); // store c in a
  // stack: ..., c, d, a, b
  __ push_2ptrs(Rc, Rd);
  // stack: ..., c, d, a, b, c, d
}

void TemplateTable::swap() {
  transition(vtos, vtos);

  Register Ra = R11_scratch1,
           Rb = R12_scratch2;
  // stack: ..., a, b
  __ ld(Rb, Interpreter::stackElementSize,     R15_esp);
  __ ld(Ra, Interpreter::stackElementSize * 2, R15_esp);
  __ std(Rb, Interpreter::stackElementSize * 2, R15_esp);
  __ std(Ra, Interpreter::stackElementSize,     R15_esp);
  // stack: ..., b, a
}

void TemplateTable::iop2(Operation op) {
  transition(itos, itos);

  Register Rscratch = R11_scratch1;

  __ pop_i(Rscratch);
  // tos  = number of bits to shift
  // Rscratch = value to shift
  switch (op) {
    case  add:  __ add(R17_tos, Rscratch, R17_tos); break;
    case  sub:  __ sub(R17_tos, Rscratch, R17_tos); break;
    case  mul:  __ mullw(R17_tos, Rscratch, R17_tos); break;
    case  _and: __ andr(R17_tos, Rscratch, R17_tos); break;
    case  _or:  __ orr(R17_tos, Rscratch, R17_tos); break;
    case  _xor: __ xorr(R17_tos, Rscratch, R17_tos); break;
    case  shl:  __ rldicl(R17_tos, R17_tos, 0, 64-5); __ slw(R17_tos, Rscratch, R17_tos); break;
    case  shr:  __ rldicl(R17_tos, R17_tos, 0, 64-5); __ sraw(R17_tos, Rscratch, R17_tos); break;
    case  ushr: __ rldicl(R17_tos, R17_tos, 0, 64-5); __ srw(R17_tos, Rscratch, R17_tos); break;
    default: ShouldNotReachHere();
  }
}
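
// The rldicl(..., 0, 64-5) above clears all but the low 5 bits of the
// shift count, as the JVM spec requires for int shifts (s & 0x1f), so
// e.g. (i << 33) == (i << 1).  The long shifts lshl/lshr/lushr further
// below mask the count to 6 bits (s & 0x3f) with rldicl(..., 0, 64-6).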

void TemplateTable::lop2(Operation op) {
  transition(ltos, ltos);

  Register Rscratch = R11_scratch1;
  __ pop_l(Rscratch);
  switch (op) {
    case  add:  __ add(R17_tos, Rscratch, R17_tos); break;
    case  sub:  __ sub(R17_tos, Rscratch, R17_tos); break;
    case  _and: __ andr(R17_tos, Rscratch, R17_tos); break;
    case  _or:  __ orr(R17_tos, Rscratch, R17_tos); break;
    case  _xor: __ xorr(R17_tos, Rscratch, R17_tos); break;
    default: ShouldNotReachHere();
  }
}
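
// idiv/ldiv special cases: divw/divd leave the result undefined for
// min_int/-1 (resp. min_long/-1) and for division by zero, so divisors
// -1, 0 and +1 are filtered out first: the unsigned comparison of
// divisor + 1 against 2 maps exactly these three divisors onto
// {0, 1, 2}.  Division by zero throws; division by +/-1 is done as a
// multiply, which is also correct for min_int/-1 where the result wraps
// back to min_int.  irem/lrem reuse idiv/ldiv and compute the remainder
// as dividend - quotient * divisor.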

void TemplateTable::idiv() {
  transition(itos, itos);

  Label Lnormal, Lexception, Ldone;
  Register Rdividend = R11_scratch1; // Used by irem.

  __ addi(R0, R17_tos, 1);
  __ cmplwi(CCR0, R0, 2);
  __ bgt(CCR0, Lnormal); // divisor <-1 or >1

  __ cmpwi(CCR1, R17_tos, 0);
  __ beq(CCR1, Lexception); // divisor == 0

  __ pop_i(Rdividend);
  __ mullw(R17_tos, Rdividend, R17_tos); // div by +/-1
  __ b(Ldone);

  __ bind(Lexception);
  __ load_dispatch_table(R11_scratch1, (address*)Interpreter::_throw_ArithmeticException_entry);
  __ mtctr(R11_scratch1);
  __ bctr();

  __ align(32, 12);
  __ bind(Lnormal);
  __ pop_i(Rdividend);
  __ divw(R17_tos, Rdividend, R17_tos); // Can't divide minint/-1.
  __ bind(Ldone);
}

void TemplateTable::irem() {
  transition(itos, itos);

  __ mr(R12_scratch2, R17_tos);
  idiv();
  __ mullw(R17_tos, R17_tos, R12_scratch2);
  __ subf(R17_tos, R17_tos, R11_scratch1); // Dividend set by idiv.
}

void TemplateTable::lmul() {
  transition(ltos, ltos);

  __ pop_l(R11_scratch1);
  __ mulld(R17_tos, R11_scratch1, R17_tos);
}

void TemplateTable::ldiv() {
  transition(ltos, ltos);

  Label Lnormal, Lexception, Ldone;
  Register Rdividend = R11_scratch1; // Used by lrem.

  __ addi(R0, R17_tos, 1);
  __ cmpldi(CCR0, R0, 2);
  __ bgt(CCR0, Lnormal); // divisor <-1 or >1

  __ cmpdi(CCR1, R17_tos, 0);
  __ beq(CCR1, Lexception); // divisor == 0

  __ pop_l(Rdividend);
  __ mulld(R17_tos, Rdividend, R17_tos); // div by +/-1
  __ b(Ldone);

  __ bind(Lexception);
  __ load_dispatch_table(R11_scratch1, (address*)Interpreter::_throw_ArithmeticException_entry);
  __ mtctr(R11_scratch1);
  __ bctr();

  __ align(32, 12);
  __ bind(Lnormal);
  __ pop_l(Rdividend);
  __ divd(R17_tos, Rdividend, R17_tos); // Can't divide minint/-1.
  __ bind(Ldone);
}

void TemplateTable::lrem() {
  transition(ltos, ltos);

  __ mr(R12_scratch2, R17_tos);
  ldiv();
  __ mulld(R17_tos, R17_tos, R12_scratch2);
  __ subf(R17_tos, R17_tos, R11_scratch1); // Dividend set by ldiv.
}

void TemplateTable::lshl() {
  transition(itos, ltos);

  __ rldicl(R17_tos, R17_tos, 0, 64-6); // Extract least significant bits.
  __ pop_l(R11_scratch1);
  __ sld(R17_tos, R11_scratch1, R17_tos);
}

void TemplateTable::lshr() {
  transition(itos, ltos);

  __ rldicl(R17_tos, R17_tos, 0, 64-6); // Extract least significant bits.
  __ pop_l(R11_scratch1);
  __ srad(R17_tos, R11_scratch1, R17_tos);
}

void TemplateTable::lushr() {
  transition(itos, ltos);

  __ rldicl(R17_tos, R17_tos, 0, 64-6); // Extract least significant bits.
  __ pop_l(R11_scratch1);
  __ srd(R17_tos, R11_scratch1, R17_tos);
}

void TemplateTable::fop2(Operation op) {
  transition(ftos, ftos);

  switch (op) {
    case add: __ pop_f(F0_SCRATCH); __ fadds(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case sub: __ pop_f(F0_SCRATCH); __ fsubs(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case mul: __ pop_f(F0_SCRATCH); __ fmuls(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case div: __ pop_f(F0_SCRATCH); __ fdivs(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case rem:
      __ pop_f(F1_ARG1);
      __ fmr(F2_ARG2, F15_ftos);
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem));
      __ fmr(F15_ftos, F1_RET);
      break;

    default: ShouldNotReachHere();
  }
}

void TemplateTable::dop2(Operation op) {
  transition(dtos, dtos);

  switch (op) {
    case add: __ pop_d(F0_SCRATCH); __ fadd(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case sub: __ pop_d(F0_SCRATCH); __ fsub(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case mul: __ pop_d(F0_SCRATCH); __ fmul(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case div: __ pop_d(F0_SCRATCH); __ fdiv(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case rem:
      __ pop_d(F1_ARG1);
      __ fmr(F2_ARG2, F15_ftos);
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem));
      __ fmr(F15_ftos, F1_RET);
      break;

    default: ShouldNotReachHere();
  }
}

// Negate the value in the TOS cache.
void TemplateTable::ineg() {
  transition(itos, itos);

  __ neg(R17_tos, R17_tos);
}

// Negate the value in the TOS cache.
void TemplateTable::lneg() {
  transition(ltos, ltos);

  __ neg(R17_tos, R17_tos);
}

void TemplateTable::fneg() {
  transition(ftos, ftos);

  __ fneg(F15_ftos, F15_ftos);
}

void TemplateTable::dneg() {
  transition(dtos, dtos);

  __ fneg(F15_ftos, F15_ftos);
}

// Increments a local variable in place.
void TemplateTable::iinc() {
  transition(vtos, vtos);

  const Register Rindex     = R11_scratch1,
                 Rincrement = R0,
                 Rvalue     = R12_scratch2;

  locals_index(Rindex);           // Load locals index from bytecode stream.
  __ lbz(Rincrement, 2, R14_bcp); // Load increment from the bytecode stream.
  __ extsb(Rincrement, Rincrement);

  __ load_local_int(Rvalue, Rindex, Rindex); // Puts address of local into Rindex.

  __ add(Rvalue, Rincrement, Rvalue);
  __ stw(Rvalue, 0, Rindex);
}

void TemplateTable::wide_iinc() {
  transition(vtos, vtos);

  Register Rindex       = R11_scratch1,
           Rlocals_addr = Rindex,
           Rincr        = R12_scratch2;
  locals_index_wide(Rindex);
  __ get_2_byte_integer_at_bcp(4, Rincr, InterpreterMacroAssembler::Signed);
  __ load_local_int(R17_tos, Rlocals_addr, Rindex);
  __ add(R17_tos, Rincr, R17_tos);
  __ stw(R17_tos, 0, Rlocals_addr);
}

void TemplateTable::convert() {
  // %%%%% Factor this first part across platforms
#ifdef ASSERT
  TosState tos_in  = ilgl;
  TosState tos_out = ilgl;
  switch (bytecode()) {
    case Bytecodes::_i2l: // fall through
    case Bytecodes::_i2f: // fall through
    case Bytecodes::_i2d: // fall through
    case Bytecodes::_i2b: // fall through
    case Bytecodes::_i2c: // fall through
    case Bytecodes::_i2s: tos_in = itos; break;
    case Bytecodes::_l2i: // fall through
    case Bytecodes::_l2f: // fall through
    case Bytecodes::_l2d: tos_in = ltos; break;
    case Bytecodes::_f2i: // fall through
    case Bytecodes::_f2l: // fall through
    case Bytecodes::_f2d: tos_in = ftos; break;
    case Bytecodes::_d2i: // fall through
    case Bytecodes::_d2l: // fall through
    case Bytecodes::_d2f: tos_in = dtos; break;
    default : ShouldNotReachHere();
  }
  switch (bytecode()) {
    case Bytecodes::_l2i: // fall through
    case Bytecodes::_f2i: // fall through
    case Bytecodes::_d2i: // fall through
    case Bytecodes::_i2b: // fall through
    case Bytecodes::_i2c: // fall through
    case Bytecodes::_i2s: tos_out = itos; break;
    case Bytecodes::_i2l: // fall through
    case Bytecodes::_f2l: // fall through
    case Bytecodes::_d2l: tos_out = ltos; break;
    case Bytecodes::_i2f: // fall through
    case Bytecodes::_l2f: // fall through
    case Bytecodes::_d2f: tos_out = ftos; break;
    case Bytecodes::_i2d: // fall through
    case Bytecodes::_l2d: // fall through
    case Bytecodes::_f2d: tos_out = dtos; break;
    default : ShouldNotReachHere();
  }
  transition(tos_in, tos_out);
#endif

  // Conversion
  Label done;
  switch (bytecode()) {
    case Bytecodes::_i2l:
      __ extsw(R17_tos, R17_tos);
      break;

    case Bytecodes::_l2i:
      // Nothing to do, we'll continue to work with the lower bits.
      break;

    case Bytecodes::_i2b:
      __ extsb(R17_tos, R17_tos);
      break;

    case Bytecodes::_i2c:
      __ rldicl(R17_tos, R17_tos, 0, 64-2*8);
      break;

    case Bytecodes::_i2s:
      __ extsh(R17_tos, R17_tos);
      break;

    case Bytecodes::_i2d:
      __ extsw(R17_tos, R17_tos);
      // fall through
    case Bytecodes::_l2d:
      __ push_l_pop_d();
      __ fcfid(F15_ftos, F15_ftos);
      break;

    case Bytecodes::_i2f:
      __ extsw(R17_tos, R17_tos);
      __ push_l_pop_d();
      if (VM_Version::has_fcfids()) { // fcfids is >= Power7 only
        // Comment: alternatively, load with sign extend could be done by lfiwax.
        __ fcfids(F15_ftos, F15_ftos);
      } else {
        __ fcfid(F15_ftos, F15_ftos);
        __ frsp(F15_ftos, F15_ftos);
      }
      break;

    case Bytecodes::_l2f:
      if (VM_Version::has_fcfids()) { // fcfids is >= Power7 only
        __ push_l_pop_d();
        __ fcfids(F15_ftos, F15_ftos);
      } else {
        // Avoid rounding problem when result should be 0x3f800001: need fixup code before fcfid+frsp.
        __ mr(R3_ARG1, R17_tos);
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::l2f));
        __ fmr(F15_ftos, F1_RET);
      }
      break;

    case Bytecodes::_f2d:
      // empty
      break;

    case Bytecodes::_d2f:
      __ frsp(F15_ftos, F15_ftos);
      break;

    case Bytecodes::_d2i:
    case Bytecodes::_f2i:
      __ fcmpu(CCR0, F15_ftos, F15_ftos);
      __ li(R17_tos, 0); // 0 in case of NAN
      __ bso(CCR0, done);
      __ fctiwz(F15_ftos, F15_ftos);
      __ push_d_pop_l();
      break;

    case Bytecodes::_d2l:
    case Bytecodes::_f2l:
      __ fcmpu(CCR0, F15_ftos, F15_ftos);
      __ li(R17_tos, 0); // 0 in case of NAN
      __ bso(CCR0, done);
      __ fctidz(F15_ftos, F15_ftos);
      __ push_d_pop_l();
      break;

    default: ShouldNotReachHere();
  }
  __ bind(done);
}
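
// lcmp (and float_cmp below) produce -1/0/+1 without branching: after
// the compare, mfcr copies the condition register into a GPR, where
// CCR0's 'less' bit ends up as the most significant bit of the low word
// and 'greater' as the bit below it.  srawi by 31 smears the 'less' bit
// into 0 or -1, srwi by 30 brings 'greater' down to the low bits, and
// or-ing the two yields -1, 0 or +1 (the 'less' case dominates since -1
// is all ones).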

// Long compare
void TemplateTable::lcmp() {
  transition(ltos, itos);

  const Register Rscratch = R11_scratch1;
  __ pop_l(Rscratch); // first operand, deeper in stack

  __ cmpd(CCR0, Rscratch, R17_tos); // compare
  __ mfcr(R17_tos); // set bit 32..33 as follows: <: 0b10, =: 0b00, >: 0b01
  __ srwi(Rscratch, R17_tos, 30);
  __ srawi(R17_tos, R17_tos, 31);
  __ orr(R17_tos, Rscratch, R17_tos); // set result as follows: <: -1, =: 0, >: 1
}

// fcmpl/fcmpg and dcmpl/dcmpg bytecodes
// unordered_result == -1 => fcmpl or dcmpl
// unordered_result ==  1 => fcmpg or dcmpg
void TemplateTable::float_cmp(bool is_float, int unordered_result) {
  const FloatRegister Rfirst  = F0_SCRATCH,
                      Rsecond = F15_ftos;
  const Register Rscratch = R11_scratch1;

  if (is_float) {
    __ pop_f(Rfirst);
  } else {
    __ pop_d(Rfirst);
  }

  Label Lunordered, Ldone;
  __ fcmpu(CCR0, Rfirst, Rsecond); // compare
  if (unordered_result) {
    __ bso(CCR0, Lunordered);
  }
  __ mfcr(R17_tos); // set bit 32..33 as follows: <: 0b10, =: 0b00, >: 0b01
  __ srwi(Rscratch, R17_tos, 30);
  __ srawi(R17_tos, R17_tos, 31);
  __ orr(R17_tos, Rscratch, R17_tos); // set result as follows: <: -1, =: 0, >: 1
  if (unordered_result) {
    __ b(Ldone);
    __ bind(Lunordered);
    __ load_const_optimized(R17_tos, unordered_result);
  }
  __ bind(Ldone);
}

// Branch_conditional which takes TemplateTable::Condition.
void TemplateTable::branch_conditional(ConditionRegister crx, TemplateTable::Condition cc, Label& L, bool invert) {
  bool positive = false;
  Assembler::Condition cond = Assembler::equal;
  switch (cc) {
    case TemplateTable::equal:         positive = true ; cond = Assembler::equal  ; break;
    case TemplateTable::not_equal:     positive = false; cond = Assembler::equal  ; break;
    case TemplateTable::less:          positive = true ; cond = Assembler::less   ; break;
    case TemplateTable::less_equal:    positive = false; cond = Assembler::greater; break;
    case TemplateTable::greater:       positive = true ; cond = Assembler::greater; break;
    case TemplateTable::greater_equal: positive = false; cond = Assembler::less   ; break;
    default: ShouldNotReachHere();
  }
  int bo = (positive != invert) ? Assembler::bcondCRbiIs1 : Assembler::bcondCRbiIs0;
  int bi = Assembler::bi0(crx, cond);
  __ bc(bo, bi, L);
}
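
// branch() implements goto/jsr and is the common tail of all
// conditional branches (via if_cmp_common).  For backward branches it
// also bumps the backedge counter; on overflow it calls
// InterpreterRuntime::frequency_counter_overflow, which may return an
// OSR nmethod, in which case the interpreter frame is migrated off the
// stack and execution continues at the nmethod's OSR entry point.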

void TemplateTable::branch(bool is_jsr, bool is_wide) {

  // Note: on SPARC, we use InterpreterMacroAssembler::if_cmp also.
  __ verify_thread();

  const Register Rscratch1    = R11_scratch1,
                 Rscratch2    = R12_scratch2,
                 Rscratch3    = R3_ARG1,
                 R4_counters  = R4_ARG2,
                 bumped_count = R31,
                 Rdisp        = R22_tmp2;

  __ profile_taken_branch(Rscratch1, bumped_count);

  // Get (wide) offset.
  if (is_wide) {
    __ get_4_byte_integer_at_bcp(1, Rdisp, InterpreterMacroAssembler::Signed);
  } else {
    __ get_2_byte_integer_at_bcp(1, Rdisp, InterpreterMacroAssembler::Signed);
  }

  // --------------------------------------------------------------------------
  // Handle all the JSR stuff here, then exit.
  // It's much shorter and cleaner than intermingling with the
  // non-JSR normal-branch stuff occurring below.
  if (is_jsr) {
    // Compute return address as bci in R17_tos.
    __ ld(Rscratch1, in_bytes(Method::const_offset()), R19_method);
    __ addi(Rscratch2, R14_bcp, -in_bytes(ConstMethod::codes_offset()) + (is_wide ? 5 : 3));
    __ subf(R17_tos, Rscratch1, Rscratch2);

    // Bump bcp to target of JSR.
    __ add(R14_bcp, Rdisp, R14_bcp);
    // Push returnAddress for "ret" on stack.
    __ push_ptr(R17_tos);
    // And away we go!
    __ dispatch_next(vtos);
    return;
  }

  // --------------------------------------------------------------------------
  // Normal (non-jsr) branch handling

  const bool increment_invocation_counter_for_backward_branches = UseCompiler && UseLoopCounter;
  if (increment_invocation_counter_for_backward_branches) {
    //__ unimplemented("branch invocation counter");

    Label Lforward;
    __ add(R14_bcp, Rdisp, R14_bcp); // Add to bc addr.

    // Check branch direction.
    __ cmpdi(CCR0, Rdisp, 0);
    __ bgt(CCR0, Lforward);

    __ get_method_counters(R19_method, R4_counters, Lforward);

    if (TieredCompilation) {
      Label Lno_mdo, Loverflow;
      const int increment = InvocationCounter::count_increment;
      const int mask = ((1 << Tier0BackedgeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
      if (ProfileInterpreter) {
        Register Rmdo = Rscratch1;

        // If no method data exists, go to profile_continue.
        __ ld(Rmdo, in_bytes(Method::method_data_offset()), R19_method);
        __ cmpdi(CCR0, Rmdo, 0);
        __ beq(CCR0, Lno_mdo);

        // Increment backedge counter in the MDO.
        const int mdo_bc_offs = in_bytes(MethodData::backedge_counter_offset()) + in_bytes(InvocationCounter::counter_offset());
        __ lwz(Rscratch2, mdo_bc_offs, Rmdo);
        __ load_const_optimized(Rscratch3, mask, R0);
        __ addi(Rscratch2, Rscratch2, increment);
        __ stw(Rscratch2, mdo_bc_offs, Rmdo);
        __ and_(Rscratch3, Rscratch2, Rscratch3);
        __ bne(CCR0, Lforward);
        __ b(Loverflow);
      }

      // If there's no MDO, increment counter in method.
      const int mo_bc_offs = in_bytes(MethodCounters::backedge_counter_offset()) + in_bytes(InvocationCounter::counter_offset());
      __ bind(Lno_mdo);
      __ lwz(Rscratch2, mo_bc_offs, R4_counters);
      __ load_const_optimized(Rscratch3, mask, R0);
      __ addi(Rscratch2, Rscratch2, increment);
      __ stw(Rscratch2, mo_bc_offs, R4_counters);
      __ and_(Rscratch3, Rscratch2, Rscratch3);
      __ bne(CCR0, Lforward);

      __ bind(Loverflow);

      // Notify point for loop, pass branch bytecode.
      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), R14_bcp, true);

      // Was an OSR adapter generated?
      // R3_RET = osr nmethod
      __ cmpdi(CCR0, R3_RET, 0);
      __ beq(CCR0, Lforward);

      // Has the nmethod been invalidated already?
      __ lwz(R0, nmethod::entry_bci_offset(), R3_RET);
      __ cmpwi(CCR0, R0, InvalidOSREntryBci);
      __ beq(CCR0, Lforward);

      // Migrate the interpreter frame off of the stack.
      // We can use all registers because we will not return to interpreter from this point.

      // Save nmethod.
      const Register osr_nmethod = R31;
      __ mr(osr_nmethod, R3_RET);
      __ set_top_ijava_frame_at_SP_as_last_Java_frame(R1_SP, R11_scratch1);
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin), R16_thread);
      __ reset_last_Java_frame();
      // OSR buffer is in ARG1.

      // Remove the interpreter frame.
      __ merge_frames(/*top_frame_sp*/ R21_sender_SP, /*return_pc*/ R0, R11_scratch1, R12_scratch2);

      // Jump to the osr code.
      __ ld(R11_scratch1, nmethod::osr_entry_point_offset(), osr_nmethod);
      __ mtlr(R0);
      __ mtctr(R11_scratch1);
      __ bctr();

    } else {

      const Register invoke_ctr = Rscratch1;
      // Update Backedge branch separately from invocations.
      __ increment_backedge_counter(R4_counters, invoke_ctr, Rscratch2, Rscratch3);

      if (ProfileInterpreter) {
        __ test_invocation_counter_for_mdp(invoke_ctr, Rscratch2, Lforward);
        if (UseOnStackReplacement) {
          __ test_backedge_count_for_osr(bumped_count, R14_bcp, Rscratch2);
        }
      } else {
        if (UseOnStackReplacement) {
          __ test_backedge_count_for_osr(invoke_ctr, R14_bcp, Rscratch2);
        }
      }
    }

    __ bind(Lforward);

  } else {
    // Bump bytecode pointer by displacement (take the branch).
    __ add(R14_bcp, Rdisp, R14_bcp); // Add to bc addr.
  }
  // Continue with bytecode @ target.
  // %%%%% Like Intel, could speed things up by moving bytecode fetch to code above,
  // %%%%% and changing dispatch_next to dispatch_only.
  __ dispatch_next(vtos);
}

// Helper function for if_cmp* methods below.
// Factored out common compare and branch code.
void TemplateTable::if_cmp_common(Register Rfirst, Register Rsecond, Register Rscratch1, Register Rscratch2, Condition cc, bool is_jint, bool cmp0) {
  Label Lnot_taken;
  // Note: The condition code we get is the condition under which we
  // *fall through*! So we have to invert the CC here.

  if (is_jint) {
    if (cmp0) {
      __ cmpwi(CCR0, Rfirst, 0);
    } else {
      __ cmpw(CCR0, Rfirst, Rsecond);
    }
  } else {
    if (cmp0) {
      __ cmpdi(CCR0, Rfirst, 0);
    } else {
      __ cmpd(CCR0, Rfirst, Rsecond);
    }
  }
  branch_conditional(CCR0, cc, Lnot_taken, /*invert*/ true);

  // Condition is false => Jump!
  branch(false, false);

  // Condition is not true => Continue.
  __ align(32, 12);
  __ bind(Lnot_taken);
  __ profile_not_taken_branch(Rscratch1, Rscratch2);
}

// Compare integer values with zero and fall through if CC holds, branch away otherwise.
void TemplateTable::if_0cmp(Condition cc) {
  transition(itos, vtos);

  if_cmp_common(R17_tos, noreg, R11_scratch1, R12_scratch2, cc, true, true);
}

// Compare integer values and fall through if CC holds, branch away otherwise.
//
// Interface:
//  - Rfirst: First operand (older stack value)
//  - tos:    Second operand (younger stack value)
void TemplateTable::if_icmp(Condition cc) {
  transition(itos, vtos);

  const Register Rfirst  = R0,
                 Rsecond = R17_tos;

  __ pop_i(Rfirst);
  if_cmp_common(Rfirst, Rsecond, R11_scratch1, R12_scratch2, cc, true, false);
}

void TemplateTable::if_nullcmp(Condition cc) {
  transition(atos, vtos);

  if_cmp_common(R17_tos, noreg, R11_scratch1, R12_scratch2, cc, false, true);
}

void TemplateTable::if_acmp(Condition cc) {
  transition(atos, vtos);

  const Register Rfirst  = R0,
                 Rsecond = R17_tos;

  __ pop_ptr(Rfirst);
  if_cmp_common(Rfirst, Rsecond, R11_scratch1, R12_scratch2, cc, false, false);
}

void TemplateTable::ret() {
  locals_index(R11_scratch1);
  __ load_local_ptr(R17_tos, R11_scratch1, R11_scratch1);

  __ profile_ret(vtos, R17_tos, R11_scratch1, R12_scratch2);

  __ ld(R11_scratch1, in_bytes(Method::const_offset()), R19_method);
  __ add(R11_scratch1, R17_tos, R11_scratch1);
  __ addi(R14_bcp, R11_scratch1, in_bytes(ConstMethod::codes_offset()));
  __ dispatch_next(vtos);
}

void TemplateTable::wide_ret() {
  transition(vtos, vtos);

  const Register Rindex    = R3_ARG1,
                 Rscratch1 = R11_scratch1,
                 Rscratch2 = R12_scratch2;

  locals_index_wide(Rindex);
  __ load_local_ptr(R17_tos, R17_tos, Rindex);
  __ profile_ret(vtos, R17_tos, Rscratch1, R12_scratch2);
  // Tos now contains the bci, compute the bcp from that.
  __ ld(Rscratch1, in_bytes(Method::const_offset()), R19_method);
  __ addi(Rscratch2, R17_tos, in_bytes(ConstMethod::codes_offset()));
  __ add(R14_bcp, Rscratch1, Rscratch2);
  __ dispatch_next(vtos);
}
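
// tableswitch bytecode stream format (all values big-endian):
//   opcode (1) | 0-3 byte padding up to 4-byte alignment |
//   default offset (4) | low (4) | high (4) |
//   (high - low + 1) jump offsets (4 bytes each), indexed by value - low.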

void TemplateTable::tableswitch() {
  transition(itos, vtos);

  Label Ldispatch, Ldefault_case;
  Register Rlow_byte        = R3_ARG1,
           Rindex           = Rlow_byte,
           Rhigh_byte       = R4_ARG2,
           Rdef_offset_addr = R5_ARG3, // is going to contain address of default offset
           Rscratch1        = R11_scratch1,
           Rscratch2        = R12_scratch2,
           Roffset          = R6_ARG4;

  // Align bcp.
  __ addi(Rdef_offset_addr, R14_bcp, BytesPerInt);
  __ clrrdi(Rdef_offset_addr, Rdef_offset_addr, log2_long((jlong)BytesPerInt));

  // Load lo & hi.
  __ lwz(Rlow_byte, BytesPerInt, Rdef_offset_addr);
  __ lwz(Rhigh_byte, BytesPerInt * 2, Rdef_offset_addr);

  // Check for default case (=index outside [low,high]).
  __ cmpw(CCR0, R17_tos, Rlow_byte);
  __ cmpw(CCR1, R17_tos, Rhigh_byte);
  __ blt(CCR0, Ldefault_case);
  __ bgt(CCR1, Ldefault_case);

  // Lookup dispatch offset.
  __ sub(Rindex, R17_tos, Rlow_byte);
  __ extsw(Rindex, Rindex);
  __ profile_switch_case(Rindex, Rhigh_byte /* scratch */, Rscratch1, Rscratch2);
  __ sldi(Rindex, Rindex, LogBytesPerInt);
  __ addi(Rindex, Rindex, 3 * BytesPerInt);
  __ lwax(Roffset, Rdef_offset_addr, Rindex);
  __ b(Ldispatch);

  __ bind(Ldefault_case);
  __ profile_switch_default(Rhigh_byte, Rscratch1);
  __ lwa(Roffset, 0, Rdef_offset_addr);

  __ bind(Ldispatch);

  __ add(R14_bcp, Roffset, R14_bcp);
  __ dispatch_next(vtos);
}

void TemplateTable::lookupswitch() {
  transition(itos, itos);
  __ stop("lookupswitch bytecode should have been rewritten");
}

// Table switch using linear search through cases.
// Bytecode stream format:
// Bytecode (1) | 4-byte padding | default offset (4) | count (4) | value/offset pair1 (8) | value/offset pair2 (8) | ...
// Note: Everything is big-endian format here. So on little endian machines, we have to reverse offset and count and cmp value.
void TemplateTable::fast_linearswitch() {
  transition(itos, vtos);

  Label Lloop_entry, Lsearch_loop, Lfound, Lcontinue_execution, Ldefault_case;

  Register Rcount           = R3_ARG1,
           Rcurrent_pair    = R4_ARG2,
           Rdef_offset_addr = R5_ARG3, // Is going to contain address of default offset.
           Roffset          = R31,     // Might need to survive C call.
           Rvalue           = R12_scratch2,
           Rscratch         = R11_scratch1,
           Rcmp_value       = R17_tos;

  // Align bcp.
  __ addi(Rdef_offset_addr, R14_bcp, BytesPerInt);
  __ clrrdi(Rdef_offset_addr, Rdef_offset_addr, log2_long((jlong)BytesPerInt));

  // Setup loop counter and limit.
  __ lwz(Rcount, BytesPerInt, Rdef_offset_addr);             // Load count.
  __ addi(Rcurrent_pair, Rdef_offset_addr, 2 * BytesPerInt); // Rcurrent_pair now points to first pair.

  // Set up search loop.
  __ cmpwi(CCR0, Rcount, 0);
  __ beq(CCR0, Ldefault_case);

  __ mtctr(Rcount);

  // linear table search
  __ bind(Lsearch_loop);

  __ lwz(Rvalue, 0, Rcurrent_pair);
  __ lwa(Roffset, 1 * BytesPerInt, Rcurrent_pair);

  __ cmpw(CCR0, Rvalue, Rcmp_value);
  __ beq(CCR0, Lfound);

  __ addi(Rcurrent_pair, Rcurrent_pair, 2 * BytesPerInt);
  __ bdnz(Lsearch_loop);

  // default case
  __ bind(Ldefault_case);

  __ lwa(Roffset, 0, Rdef_offset_addr);
  if (ProfileInterpreter) {
    __ profile_switch_default(Rdef_offset_addr, Rcount/* scratch */);
    __ b(Lcontinue_execution);
  }

  // Entry found, skip Roffset bytecodes and continue.
  __ bind(Lfound);
  if (ProfileInterpreter) {
    // Calc the num of the pair we hit. Careful, Rcurrent_pair points 2 ints
    // beyond the actual current pair due to the auto update load above!
    __ sub(Rcurrent_pair, Rcurrent_pair, Rdef_offset_addr);
    __ addi(Rcurrent_pair, Rcurrent_pair, - 2 * BytesPerInt);
    __ srdi(Rcurrent_pair, Rcurrent_pair, LogBytesPerInt + 1);
    __ profile_switch_case(Rcurrent_pair, Rcount /*scratch*/, Rdef_offset_addr/*scratch*/, Rscratch);
    __ bind(Lcontinue_execution);
  }
  __ add(R14_bcp, Roffset, R14_bcp);
  __ dispatch_next(vtos);
}
  __ sub(Rindex, R17_tos, Rlow_byte);
  __ extsw(Rindex, Rindex);
  __ profile_switch_case(Rindex, Rhigh_byte /* scratch */, Rscratch1, Rscratch2);
  __ sldi(Rindex, Rindex, LogBytesPerInt);
  __ addi(Rindex, Rindex, 3 * BytesPerInt);
  __ lwax(Roffset, Rdef_offset_addr, Rindex);
  __ b(Ldispatch);

  __ bind(Ldefault_case);
  __ profile_switch_default(Rhigh_byte, Rscratch1);
  __ lwa(Roffset, 0, Rdef_offset_addr);

  __ bind(Ldispatch);

  __ add(R14_bcp, Roffset, R14_bcp);
  __ dispatch_next(vtos);
}

void TemplateTable::lookupswitch() {
  transition(itos, itos);
  __ stop("lookupswitch bytecode should have been rewritten");
}

// Table switch using linear search through cases.
// Bytecode stream format:
// Bytecode (1) | 4-byte padding | default offset (4) | count (4) | value/offset pair1 (8) | value/offset pair2 (8) | ...
// Note: Everything is in big-endian format here. So on little-endian machines, we have to reverse the offsets, the count, and the compare values.
void TemplateTable::fast_linearswitch() {
  transition(itos, vtos);

  Label Lloop_entry, Lsearch_loop, Lfound, Lcontinue_execution, Ldefault_case;

  Register Rcount           = R3_ARG1,
           Rcurrent_pair    = R4_ARG2,
           Rdef_offset_addr = R5_ARG3, // Is going to contain address of default offset.
           Roffset          = R31,     // Might need to survive C call.
           Rvalue           = R12_scratch2,
           Rscratch         = R11_scratch1,
           Rcmp_value       = R17_tos;

  // Align bcp.
  __ addi(Rdef_offset_addr, R14_bcp, BytesPerInt);
  __ clrrdi(Rdef_offset_addr, Rdef_offset_addr, log2_long((jlong)BytesPerInt));

  // Set up loop counter and limit.
  __ lwz(Rcount, BytesPerInt, Rdef_offset_addr);           // Load count.
  __ addi(Rcurrent_pair, Rdef_offset_addr, 2 * BytesPerInt); // Rcurrent_pair now points to first pair.

  // Set up search loop.
  __ cmpwi(CCR0, Rcount, 0);
  __ beq(CCR0, Ldefault_case);

  __ mtctr(Rcount);

  // Linear table search.
  __ bind(Lsearch_loop);

  __ lwz(Rvalue, 0, Rcurrent_pair);
  __ lwa(Roffset, 1 * BytesPerInt, Rcurrent_pair);

  __ cmpw(CCR0, Rvalue, Rcmp_value);
  __ beq(CCR0, Lfound);

  __ addi(Rcurrent_pair, Rcurrent_pair, 2 * BytesPerInt);
  __ bdnz(Lsearch_loop);

  // Default case.
  __ bind(Ldefault_case);

  __ lwa(Roffset, 0, Rdef_offset_addr);
  if (ProfileInterpreter) {
    __ profile_switch_default(Rdef_offset_addr, Rcount /* scratch */);
    __ b(Lcontinue_execution);
  }

  // Entry found, skip Roffset bytecodes and continue.
  __ bind(Lfound);
  if (ProfileInterpreter) {
    // Calc the num of the pair we hit. Careful, Rcurrent_pair points 2 ints
    // beyond the actual current pair because of the pointer update in the loop above!
    __ sub(Rcurrent_pair, Rcurrent_pair, Rdef_offset_addr);
    __ addi(Rcurrent_pair, Rcurrent_pair, -2 * BytesPerInt);
    __ srdi(Rcurrent_pair, Rcurrent_pair, LogBytesPerInt + 1);
    __ profile_switch_case(Rcurrent_pair, Rcount /* scratch */, Rdef_offset_addr /* scratch */, Rscratch);
    __ bind(Lcontinue_execution);
  }
  __ add(R14_bcp, Roffset, R14_bcp);
  __ dispatch_next(vtos);
}

// Table switch using binary search (value/offset pairs are ordered).
// Bytecode stream format:
// Bytecode (1) | 4-byte padding | default offset (4) | count (4) | value/offset pair1 (8) | value/offset pair2 (8) | ...
// Note: Everything is in big-endian format here. So on little-endian machines, we have to reverse the offsets, the count, and the compare values.
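//
// A worked example of this layout (all values invented purely for
// illustration): a lookupswitch opcode at bci 3 is followed by padding up to
// the next 4-byte-aligned bci 4, a 4-byte default offset at bci 4..7, the
// 4-byte pair count npairs = 2 at bci 8..11, and then the sorted pairs
// {5 -> +20} and {42 -> +44}, each stored as two big-endian 4-byte words.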
void TemplateTable::fast_binaryswitch() {

  transition(itos, vtos);
  // Implementation using the following core algorithm: (copied from Intel)
  //
  // int binary_search(int key, LookupswitchPair* array, int n) {
  //   // Binary search according to "Methodik des Programmierens" by
  //   // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
  //   int i = 0;
  //   int j = n;
  //   while (i+1 < j) {
  //     // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
  //     // with      Q: for all i: 0 <= i < n: key < a[i]
  //     // where a stands for the array and assuming that the (nonexistent)
  //     // element a[n] is infinitely big.
  //     int h = (i + j) >> 1;
  //     // i < h < j
  //     if (key < array[h].fast_match()) {
  //       j = h;
  //     } else {
  //       i = h;
  //     }
  //   }
  //   // R: a[i] <= key < a[i+1] or Q
  //   // (i.e., if key is within array, i is the correct index)
  //   return i;
  // }

  // Register allocation.
  const Register Rkey     = R17_tos; // Already set (tosca).
  const Register Rarray   = R3_ARG1;
  const Register Ri       = R4_ARG2;
  const Register Rj       = R5_ARG3;
  const Register Rh       = R6_ARG4;
  const Register Rscratch = R11_scratch1;

  const int log_entry_size = 3;
  const int entry_size = 1 << log_entry_size;

  Label found;

  // Find array start.
  __ addi(Rarray, R14_bcp, 3 * BytesPerInt);
  __ clrrdi(Rarray, Rarray, log2_long((jlong)BytesPerInt));

  // Initialize i & j.
  __ li(Ri, 0);
  __ lwz(Rj, -BytesPerInt, Rarray);

  // And start.
  Label entry;
  __ b(entry);

  // Binary search loop.
  { Label loop;
    __ bind(loop);
    // int h = (i + j) >> 1;
    __ srdi(Rh, Rh, 1);
    // if (key < array[h].fast_match()) {
    //   j = h;
    // } else {
    //   i = h;
    // }
    __ sldi(Rscratch, Rh, log_entry_size);
    __ lwzx(Rscratch, Rscratch, Rarray);

    // if (key < current value)
    //   Rj = Rh
    // else
    //   Ri = Rh
    Label Lgreater;
    __ cmpw(CCR0, Rkey, Rscratch);
    __ bge(CCR0, Lgreater);
    __ mr(Rj, Rh);
    __ b(entry);
    __ bind(Lgreater);
    __ mr(Ri, Rh);

    // while (i+1 < j)
    __ bind(entry);
    __ addi(Rscratch, Ri, 1);
    __ cmpw(CCR0, Rscratch, Rj);
    __ add(Rh, Ri, Rj); // Start h = (i + j) >> 1; the shift happens at the loop head.

    __ blt(CCR0, loop);
  }

  // End of binary search, result index is i (must check again!).
  Label default_case;
  Label continue_execution;
  if (ProfileInterpreter) {
    __ mr(Rh, Ri); // Save index in i for profiling.
  }
  // Ri = address of the value of pair i.
  __ sldi(Ri, Ri, log_entry_size);
  __ add(Ri, Ri, Rarray);
  __ lwz(Rscratch, 0, Ri);

  // Check whether the value of pair i matches the key.
  __ cmpw(CCR0, Rkey, Rscratch);
  __ beq(CCR0, found);
  // Entry not found -> j = default offset.
  __ lwz(Rj, -2 * BytesPerInt, Rarray);
  __ b(default_case);

  __ bind(found);
  // Entry found -> j = offset.
  __ profile_switch_case(Rh, Rj, Rscratch, Rkey);
  __ lwz(Rj, BytesPerInt, Ri);

  if (ProfileInterpreter) {
    __ b(continue_execution);
  }

  __ bind(default_case); // Fall through (if not profiling).
  __ profile_switch_default(Ri, Rscratch);

  __ bind(continue_execution);

  __ extsw(Rj, Rj);
  __ add(R14_bcp, Rj, R14_bcp);
  __ dispatch_next(vtos);
}

void TemplateTable::_return(TosState state) {
  transition(state, state);
  assert(_desc->calls_vm(),
         "inconsistent calls_vm information"); // call in remove_activation

  if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {

    Register Rscratch     = R11_scratch1,
             Rklass       = R12_scratch2,
             Rklass_flags = Rklass;
    Label Lskip_register_finalizer;

    // Check if the method has the FINALIZER flag set and call into the VM to finalize in this case.
    assert(state == vtos, "only valid state");
    __ ld(R17_tos, 0, R18_locals);

    // Load klass of this obj.
    __ load_klass(Rklass, R17_tos);
    __ lwz(Rklass_flags, in_bytes(Klass::access_flags_offset()), Rklass);
    __ testbitdi(CCR0, R0, Rklass_flags, exact_log2(JVM_ACC_HAS_FINALIZER));
    __ bfalse(CCR0, Lskip_register_finalizer);

    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), R17_tos /* obj */);

    __ align(32, 12);
    __ bind(Lskip_register_finalizer);
  }

  // Move the result value into the correct register and remove the memory stack frame.
  __ remove_activation(state, /* throw_monitor_exception */ true);
  // Restoration of lr done by remove_activation.
  switch (state) {
    case ltos:
    case btos:
    case ctos:
    case stos:
    case atos:
    case itos: __ mr(R3_RET, R17_tos); break;
    case ftos:
    case dtos: __ fmr(F1_RET, F15_ftos); break;
    case vtos: // This might be a constructor. Final fields (and volatile fields on PPC64) need
               // to get visible before the reference to the object gets stored anywhere.
               __ membar(Assembler::StoreStore); break;
    default  : ShouldNotReachHere();
  }
  __ blr();
}

// ============================================================================
// Constant pool cache access
//
// Memory ordering:
//
// As in the C++ interpreter, we load the fields
//   - _indices
//   - _f12_oop
// with acquire semantics, because they are inspected to decide whether the
// cache entry is already resolved; we don't want later loads to float above
// that check.
// See also the comments in ConstantPoolCacheEntry::bytecode_1(),
// ConstantPoolCacheEntry::bytecode_2() and ConstantPoolCacheEntry::f1().

// Call into the VM if the call site is not yet resolved.
//
// Input regs:
//   - None; all passed regs are outputs.
//
// Returns:
//   - Rcache: The constant pool cache entry that contains the resolved result.
2140// 2141// Kills: 2142// - Rscratch 2143void TemplateTable::resolve_cache_and_index(int byte_no, Register Rcache, Register Rscratch, size_t index_size) { 2144 2145 __ get_cache_and_index_at_bcp(Rcache, 1, index_size); 2146 Label Lresolved, Ldone; 2147 2148 assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range"); 2149 // We are resolved if the indices offset contains the current bytecode. 2150 // Big Endian: 2151 __ lbz(Rscratch, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + 7 - (byte_no + 1), Rcache); 2152 // Acquire by cmp-br-isync (see below). 2153 __ cmpdi(CCR0, Rscratch, (int)bytecode()); 2154 __ beq(CCR0, Lresolved); 2155 2156 address entry = NULL; 2157 switch (bytecode()) { 2158 case Bytecodes::_getstatic : // fall through 2159 case Bytecodes::_putstatic : // fall through 2160 case Bytecodes::_getfield : // fall through 2161 case Bytecodes::_putfield : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_get_put); break; 2162 case Bytecodes::_invokevirtual : // fall through 2163 case Bytecodes::_invokespecial : // fall through 2164 case Bytecodes::_invokestatic : // fall through 2165 case Bytecodes::_invokeinterface: entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke); break; 2166 case Bytecodes::_invokehandle : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokehandle); break; 2167 case Bytecodes::_invokedynamic : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic); break; 2168 default : ShouldNotReachHere(); break; 2169 } 2170 __ li(R4_ARG2, (int)bytecode()); 2171 __ call_VM(noreg, entry, R4_ARG2, true); 2172 2173 // Update registers with resolved info. 2174 __ get_cache_and_index_at_bcp(Rcache, 1, index_size); 2175 __ b(Ldone); 2176 2177 __ bind(Lresolved); 2178 __ isync(); // Order load wrt. succeeding loads. 2179 __ bind(Ldone); 2180} 2181 2182// Load the constant pool cache entry at field accesses into registers. 2183// The Rcache and Rindex registers must be set before call. 2184// Input: 2185// - Rcache, Rindex 2186// Output: 2187// - Robj, Roffset, Rflags 2188void TemplateTable::load_field_cp_cache_entry(Register Robj, 2189 Register Rcache, 2190 Register Rindex /* unused on PPC64 */, 2191 Register Roffset, 2192 Register Rflags, 2193 bool is_static = false) { 2194 assert_different_registers(Rcache, Rflags, Roffset); 2195 // assert(Rindex == noreg, "parameter not used on PPC64"); 2196 2197 ByteSize cp_base_offset = ConstantPoolCache::base_offset(); 2198 __ ld(Rflags, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::flags_offset()), Rcache); 2199 __ ld(Roffset, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::f2_offset()), Rcache); 2200 if (is_static) { 2201 __ ld(Robj, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::f1_offset()), Rcache); 2202 __ ld(Robj, in_bytes(Klass::java_mirror_offset()), Robj); 2203 // Acquire not needed here. Following access has an address dependency on this value. 2204 } 2205} 2206 2207// Load the constant pool cache entry at invokes into registers. 2208// Resolve if necessary. 

// Input Registers:
//   - None; bcp is used, though.
//
// Return registers:
//   - Rmethod       (f1 field, or f2 if invokevirtual)
//   - Ritable_index (f2 field)
//   - Rflags        (flags field)
//
// Kills:
//   - R21
//
void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
                                               Register Rmethod,
                                               Register Ritable_index,
                                               Register Rflags,
                                               bool is_invokevirtual,
                                               bool is_invokevfinal,
                                               bool is_invokedynamic) {

  ByteSize cp_base_offset = ConstantPoolCache::base_offset();
  // Determine constant pool cache field offsets.
  assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant");
  const int method_offset = in_bytes(cp_base_offset + (is_invokevirtual ? ConstantPoolCacheEntry::f2_offset() : ConstantPoolCacheEntry::f1_offset()));
  const int flags_offset  = in_bytes(cp_base_offset + ConstantPoolCacheEntry::flags_offset());
  // Access constant pool cache fields.
  const int index_offset  = in_bytes(cp_base_offset + ConstantPoolCacheEntry::f2_offset());

  Register Rcache = R21_tmp1; // Note: same register as R21_sender_SP.

  if (is_invokevfinal) {
    assert(Ritable_index == noreg, "register not used");
    // Already resolved.
    __ get_cache_and_index_at_bcp(Rcache, 1);
  } else {
    resolve_cache_and_index(byte_no, Rcache, R0, is_invokedynamic ? sizeof(u4) : sizeof(u2));
  }

  __ ld(Rmethod, method_offset, Rcache);
  __ ld(Rflags, flags_offset, Rcache);

  if (Ritable_index != noreg) {
    __ ld(Ritable_index, index_offset, Rcache);
  }
}

// ============================================================================
// Field access

// Volatile variables demand their effects be made known to all CPUs
// in order. Store buffers on most chips allow reads & writes to
// reorder; the JMM's ReadAfterWrite.java test fails in -Xint mode
// without some kind of memory barrier (i.e., it's not sufficient that
// the interpreter does not reorder volatile references, the hardware
// also must not reorder them).
//
// According to the new Java Memory Model (JMM):
// (1) All volatiles are serialized wrt to each other. ALSO reads &
//     writes act as acquire & release, so:
// (2) A read cannot let unrelated NON-volatile memory refs that
//     happen after the read float up to before the read. It's OK for
//     non-volatile memory refs that happen before the volatile read to
//     float down below it.
// (3) Similarly, a volatile write cannot let unrelated NON-volatile
//     memory refs that happen BEFORE the write float down to after the
//     write. It's OK for non-volatile memory refs that happen after the
//     volatile write to float up before it.
//
// We only put in barriers around volatile refs (they are expensive),
// not _between_ memory refs (that would require us to track the
// flavor of the previous memory refs). Requirements (2) and (3)
// require some barriers before volatile stores and after volatile
// loads. These nearly cover requirement (1) but miss the
// volatile-store-volatile-load case. This final case is placed after
// volatile-stores although it could just as well go before
// volatile-loads.

// The registers cache and index are expected to be set before the call.
// Correct values of the cache and index registers are preserved.
// Kills:
//   Rcache (if has_tos)
//   Rscratch
void TemplateTable::jvmti_post_field_access(Register Rcache, Register Rscratch, bool is_static, bool has_tos) {

  assert_different_registers(Rcache, Rscratch);

  if (JvmtiExport::can_post_field_access()) {
    ByteSize cp_base_offset = ConstantPoolCache::base_offset();
    Label Lno_field_access_post;

    // Check if post field access is enabled.
    int offs = __ load_const_optimized(Rscratch, JvmtiExport::get_field_access_count_addr(), R0, true);
    __ lwz(Rscratch, offs, Rscratch);

    __ cmpwi(CCR0, Rscratch, 0);
    __ beq(CCR0, Lno_field_access_post);

    // Post access enabled - do it!
    __ addi(Rcache, Rcache, in_bytes(cp_base_offset));
    if (is_static) {
      __ li(R17_tos, 0);
    } else {
      if (has_tos) {
        // The fast bytecode versions have the obj ptr in a register.
        // Thus, save the object pointer before call_VM() clobbers it:
        // put the object on the tos, where GC wants it.
        __ push_ptr(R17_tos);
      } else {
        // Load top of stack (do not pop the value off the stack).
        __ ld(R17_tos, Interpreter::expr_offset_in_bytes(0), R15_esp);
      }
      __ verify_oop(R17_tos);
    }
    // tos:   object pointer or NULL if static
    // cache: cache entry pointer
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access), R17_tos, Rcache);
    if (!is_static && has_tos) {
      // Restore object pointer.
      __ pop_ptr(R17_tos);
      __ verify_oop(R17_tos);
    } else {
      // Cache is still needed to get the class or obj.
      __ get_cache_and_index_at_bcp(Rcache, 1);
    }

    __ align(32, 12);
    __ bind(Lno_field_access_post);
  }
}

// Kills R11_scratch1.
void TemplateTable::pop_and_check_object(Register Roop) {
  Register Rtmp = R11_scratch1;

  assert_different_registers(Rtmp, Roop);
  __ pop_ptr(Roop);
  // For field access we must check the obj.
  __ null_check_throw(Roop, -1, Rtmp);
  __ verify_oop(Roop);
}

// PPC64: implement volatile loads as fence-load-acquire.
void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
  transition(vtos, vtos);

  Label Lacquire, Lisync;

  const Register Rcache        = R3_ARG1,
                 Rclass_or_obj = R22_tmp2,
                 Roffset       = R23_tmp3,
                 Rflags        = R31,
                 Rbtable       = R5_ARG3,
                 Rbc           = R6_ARG4,
                 Rscratch      = R12_scratch2;

  static address field_branch_table[number_of_states],
                 static_branch_table[number_of_states];

  address* branch_table = is_static ? static_branch_table : field_branch_table;

  // Get field offset.
  resolve_cache_and_index(byte_no, Rcache, Rscratch, sizeof(u2));

  // JVMTI support
  jvmti_post_field_access(Rcache, Rscratch, is_static, false);

  // Load after possible GC.
  load_field_cp_cache_entry(Rclass_or_obj, Rcache, noreg, Roffset, Rflags, is_static);

  // Load pointer to branch table.
  __ load_const_optimized(Rbtable, (address)branch_table, Rscratch);

  // Get volatile flag.
  __ rldicl(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit.
  // Note: sync is needed before volatile load on PPC64.

  // Check field type.
  __ rldicl(Rflags, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);

#ifdef ASSERT
  Label LFlagInvalid;
  __ cmpldi(CCR0, Rflags, number_of_states);
  __ bge(CCR0, LFlagInvalid);
#endif

  // Load from branch table and dispatch (volatile case: one instruction ahead).
  __ sldi(Rflags, Rflags, LogBytesPerWord);
  __ cmpwi(CCR6, Rscratch, 1); // Volatile?
  if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
    __ sldi(Rscratch, Rscratch, exact_log2(BytesPerInstWord)); // Volatile ? size of 1 instruction : 0.
  }
  __ ldx(Rbtable, Rbtable, Rflags);

  // Get the obj from the stack.
  if (!is_static) {
    pop_and_check_object(Rclass_or_obj); // Kills R11_scratch1.
  } else {
    __ verify_oop(Rclass_or_obj);
  }

  if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
    __ subf(Rbtable, Rscratch, Rbtable); // Point to volatile/non-volatile entry point.
  }
  __ mtctr(Rbtable);
  __ bctr();

#ifdef ASSERT
  __ bind(LFlagInvalid);
  __ stop("got invalid flag", 0x654);

  // __ bind(Lvtos);
  address pc_before_fence = __ pc();
  __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
  assert(__ pc() - pc_before_fence == (ptrdiff_t)BytesPerInstWord, "must be single instruction");
  assert(branch_table[vtos] == 0, "can't compute twice");
  branch_table[vtos] = __ pc(); // non-volatile_entry point
  __ stop("vtos unexpected", 0x655);
#endif

  __ align(32, 28, 28); // Align load.
  // __ bind(Ldtos);
  __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
  assert(branch_table[dtos] == 0, "can't compute twice");
  branch_table[dtos] = __ pc(); // non-volatile_entry point
  __ lfdx(F15_ftos, Rclass_or_obj, Roffset);
  __ push(dtos);
  if (!is_static) patch_bytecode(Bytecodes::_fast_dgetfield, Rbc, Rscratch);
  {
    Label acquire_double;
    __ beq(CCR6, acquire_double); // Volatile?
    __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));

    __ bind(acquire_double);
    __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync.
    __ beq_predict_taken(CCR0, Lisync);
    __ b(Lisync); // In case of NaN.
  }

  __ align(32, 28, 28); // Align load.
  // __ bind(Lftos);
  __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
  assert(branch_table[ftos] == 0, "can't compute twice");
  branch_table[ftos] = __ pc(); // non-volatile_entry point
  __ lfsx(F15_ftos, Rclass_or_obj, Roffset);
  __ push(ftos);
  if (!is_static) { patch_bytecode(Bytecodes::_fast_fgetfield, Rbc, Rscratch); }
  {
    Label acquire_float;
    __ beq(CCR6, acquire_float); // Volatile?
    __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));

    __ bind(acquire_float);
    __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync.
    __ beq_predict_taken(CCR0, Lisync);
    __ b(Lisync); // In case of NaN.
  }

  __ align(32, 28, 28); // Align load.
  // __ bind(Litos);
  __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
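// (Explanatory note on the handler pattern used throughout this function;
// nothing here is emitted.) Every type handler has two entry points: the
// volatile entry begins at the fence instruction, and the non-volatile entry
// begins exactly one instruction (BytesPerInstWord) later; the asserts above
// and below record and check this. When support_IRIW_for_not_multiple_copy_atomic_cpu
// is set, the dispatch sequence selects the entry by subtracting either
// BytesPerInstWord or 0 from the branch-table value; otherwise only the
// non-volatile entry is used and volatility is handled by the CCR6-guarded
// acquire at each handler's tail.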
2469 assert(branch_table[itos] == 0, "can't compute twice"); 2470 branch_table[itos] = __ pc(); // non-volatile_entry point 2471 __ lwax(R17_tos, Rclass_or_obj, Roffset); 2472 __ push(itos); 2473 if (!is_static) patch_bytecode(Bytecodes::_fast_igetfield, Rbc, Rscratch); 2474 __ beq(CCR6, Lacquire); // Volatile? 2475 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2476 2477 __ align(32, 28, 28); // Align load. 2478 // __ bind(Lltos); 2479 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point). 2480 assert(branch_table[ltos] == 0, "can't compute twice"); 2481 branch_table[ltos] = __ pc(); // non-volatile_entry point 2482 __ ldx(R17_tos, Rclass_or_obj, Roffset); 2483 __ push(ltos); 2484 if (!is_static) patch_bytecode(Bytecodes::_fast_lgetfield, Rbc, Rscratch); 2485 __ beq(CCR6, Lacquire); // Volatile? 2486 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2487 2488 __ align(32, 28, 28); // Align load. 2489 // __ bind(Lbtos); 2490 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point). 2491 assert(branch_table[btos] == 0, "can't compute twice"); 2492 branch_table[btos] = __ pc(); // non-volatile_entry point 2493 __ lbzx(R17_tos, Rclass_or_obj, Roffset); 2494 __ extsb(R17_tos, R17_tos); 2495 __ push(btos); 2496 if (!is_static) patch_bytecode(Bytecodes::_fast_bgetfield, Rbc, Rscratch); 2497 __ beq(CCR6, Lacquire); // Volatile? 2498 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2499 2500 __ align(32, 28, 28); // Align load. 2501 // __ bind(Lctos); 2502 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point). 2503 assert(branch_table[ctos] == 0, "can't compute twice"); 2504 branch_table[ctos] = __ pc(); // non-volatile_entry point 2505 __ lhzx(R17_tos, Rclass_or_obj, Roffset); 2506 __ push(ctos); 2507 if (!is_static) patch_bytecode(Bytecodes::_fast_cgetfield, Rbc, Rscratch); 2508 __ beq(CCR6, Lacquire); // Volatile? 2509 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2510 2511 __ align(32, 28, 28); // Align load. 2512 // __ bind(Lstos); 2513 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point). 2514 assert(branch_table[stos] == 0, "can't compute twice"); 2515 branch_table[stos] = __ pc(); // non-volatile_entry point 2516 __ lhax(R17_tos, Rclass_or_obj, Roffset); 2517 __ push(stos); 2518 if (!is_static) patch_bytecode(Bytecodes::_fast_sgetfield, Rbc, Rscratch); 2519 __ beq(CCR6, Lacquire); // Volatile? 2520 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2521 2522 __ align(32, 28, 28); // Align load. 2523 // __ bind(Latos); 2524 __ fence(); // Volatile entry point (one instruction before non-volatile_entry point). 2525 assert(branch_table[atos] == 0, "can't compute twice"); 2526 branch_table[atos] = __ pc(); // non-volatile_entry point 2527 __ load_heap_oop(R17_tos, (RegisterOrConstant)Roffset, Rclass_or_obj); 2528 __ verify_oop(R17_tos); 2529 __ push(atos); 2530 //__ dcbt(R17_tos); // prefetch 2531 if (!is_static) patch_bytecode(Bytecodes::_fast_agetfield, Rbc, Rscratch); 2532 __ beq(CCR6, Lacquire); // Volatile? 
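// (Explanatory note; nothing is emitted here.) CCR6 holds the result of the
// "volatile?" compare made once before dispatch and is not clobbered by the
// handlers, so each integer-typed handler can end with a single
// beq(CCR6, Lacquire): non-volatile loads fall through into their
// dispatch_epilog, while volatile ones branch to the shared Lacquire/Lisync
// tail below, which issues twi_0 on the loaded value followed by isync.
// That is the PPC64 load-acquire idiom: the trap never fires, it only
// creates the data dependency on the load that the isync then orders.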
  __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));

  __ align(32, 12);
  __ bind(Lacquire);
  __ twi_0(R17_tos);
  __ bind(Lisync);
  __ isync(); // acquire

#ifdef ASSERT
  for (int i = 0; i < number_of_states; ++i) {
    assert(branch_table[i], "get initialization");
    //tty->print_cr("get: %s_branch_table[%d] = 0x%llx (opcode 0x%llx)",
    //              is_static ? "static" : "field", i, branch_table[i], *((unsigned int*)branch_table[i]));
  }
#endif
}

void TemplateTable::getfield(int byte_no) {
  getfield_or_static(byte_no, false);
}

void TemplateTable::getstatic(int byte_no) {
  getfield_or_static(byte_no, true);
}

// The registers cache and index are expected to be set before the call.
// The function may destroy various registers, just not the cache and index registers.
void TemplateTable::jvmti_post_field_mod(Register Rcache, Register Rscratch, bool is_static) {

  assert_different_registers(Rcache, Rscratch, R6_ARG4);

  if (JvmtiExport::can_post_field_modification()) {
    Label Lno_field_mod_post;

    // Check if post field modification is enabled.
    int offs = __ load_const_optimized(Rscratch, JvmtiExport::get_field_modification_count_addr(), R0, true);
    __ lwz(Rscratch, offs, Rscratch);

    __ cmpwi(CCR0, Rscratch, 0);
    __ beq(CCR0, Lno_field_mod_post);

    // Do the post.
    ByteSize cp_base_offset = ConstantPoolCache::base_offset();
    const Register Robj = Rscratch;

    __ addi(Rcache, Rcache, in_bytes(cp_base_offset));
    if (is_static) {
      // Life is simple. Null out the object pointer.
      __ li(Robj, 0);
    } else {
      // In case of the fast versions, the value lives in registers => put it back on the tos.
      int offs = Interpreter::expr_offset_in_bytes(0);
      Register base = R15_esp;
      switch(bytecode()) {
        case Bytecodes::_fast_aputfield: __ push_ptr(); offs +=   Interpreter::stackElementSize; break;
        case Bytecodes::_fast_iputfield: // Fall through.
        case Bytecodes::_fast_bputfield: // Fall through.
        case Bytecodes::_fast_cputfield: // Fall through.
        case Bytecodes::_fast_sputfield: __ push_i();   offs +=   Interpreter::stackElementSize; break;
        case Bytecodes::_fast_lputfield: __ push_l();   offs += 2*Interpreter::stackElementSize; break;
        case Bytecodes::_fast_fputfield: __ push_f();   offs +=   Interpreter::stackElementSize; break;
        case Bytecodes::_fast_dputfield: __ push_d();   offs += 2*Interpreter::stackElementSize; break;
        default: {
          offs = 0;
          base = Robj;
          const Register Rflags = Robj;
          Label is_one_slot;
          // Life is harder. The stack holds the value on top, followed by the
          // object. We don't know the size of the value, though; it could be
          // one or two words depending on its type. As a result, we must find
          // the type to determine where the object is.
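          // (Illustrative sketch of the two cases handled below; slot 0 is
          // the top of the expression stack, nothing here is emitted:)
          //
          //   one-slot value:            two-slot value (long/double):
          //     slot 0: value              slots 0..1: value
          //     slot 1: obj                slot  2:    obj
          //
          // Hence base becomes expr_offset_in_bytes(1) or expr_offset_in_bytes(2).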
2604 __ ld(Rflags, in_bytes(ConstantPoolCacheEntry::flags_offset()), Rcache); // Big Endian 2605 __ rldicl(Rflags, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits); 2606 2607 __ cmpwi(CCR0, Rflags, ltos); 2608 __ cmpwi(CCR1, Rflags, dtos); 2609 __ addi(base, R15_esp, Interpreter::expr_offset_in_bytes(1)); 2610 __ crnor(/*CR0 eq*/2, /*CR1 eq*/4+2, /*CR0 eq*/2); 2611 __ beq(CCR0, is_one_slot); 2612 __ addi(base, R15_esp, Interpreter::expr_offset_in_bytes(2)); 2613 __ bind(is_one_slot); 2614 break; 2615 } 2616 } 2617 __ ld(Robj, offs, base); 2618 __ verify_oop(Robj); 2619 } 2620 2621 __ addi(R6_ARG4, R15_esp, Interpreter::expr_offset_in_bytes(0)); 2622 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), Robj, Rcache, R6_ARG4); 2623 __ get_cache_and_index_at_bcp(Rcache, 1); 2624 2625 // In case of the fast versions, value lives in registers => put it back on tos. 2626 switch(bytecode()) { 2627 case Bytecodes::_fast_aputfield: __ pop_ptr(); break; 2628 case Bytecodes::_fast_iputfield: // Fall through 2629 case Bytecodes::_fast_bputfield: // Fall through 2630 case Bytecodes::_fast_cputfield: // Fall through 2631 case Bytecodes::_fast_sputfield: __ pop_i(); break; 2632 case Bytecodes::_fast_lputfield: __ pop_l(); break; 2633 case Bytecodes::_fast_fputfield: __ pop_f(); break; 2634 case Bytecodes::_fast_dputfield: __ pop_d(); break; 2635 default: break; // Nothin' to do. 2636 } 2637 2638 __ align(32, 12); 2639 __ bind(Lno_field_mod_post); 2640 } 2641} 2642 2643// PPC64: implement volatile stores as release-store (return bytecode contains an additional release). 2644void TemplateTable::putfield_or_static(int byte_no, bool is_static) { 2645 Label Lvolatile; 2646 2647 const Register Rcache = R5_ARG3, // Do not use ARG1/2 (causes trouble in jvmti_post_field_mod). 2648 Rclass_or_obj = R31, // Needs to survive C call. 2649 Roffset = R22_tmp2, // Needs to survive C call. 2650 Rflags = R3_ARG1, 2651 Rbtable = R4_ARG2, 2652 Rscratch = R11_scratch1, 2653 Rscratch2 = R12_scratch2, 2654 Rscratch3 = R6_ARG4, 2655 Rbc = Rscratch3; 2656 const ConditionRegister CR_is_vol = CCR2; // Non-volatile condition register (survives runtime call in do_oop_store). 2657 2658 static address field_branch_table[number_of_states], 2659 static_branch_table[number_of_states]; 2660 2661 address* branch_table = is_static ? static_branch_table : field_branch_table; 2662 2663 // Stack (grows up): 2664 // value 2665 // obj 2666 2667 // Load the field offset. 2668 resolve_cache_and_index(byte_no, Rcache, Rscratch, sizeof(u2)); 2669 jvmti_post_field_mod(Rcache, Rscratch, is_static); 2670 load_field_cp_cache_entry(Rclass_or_obj, Rcache, noreg, Roffset, Rflags, is_static); 2671 2672 // Load pointer to branch table. 2673 __ load_const_optimized(Rbtable, (address)branch_table, Rscratch); 2674 2675 // Get volatile flag. 2676 __ rldicl(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit. 2677 2678 // Check the field type. 2679 __ rldicl(Rflags, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits); 2680 2681#ifdef ASSERT 2682 Label LFlagInvalid; 2683 __ cmpldi(CCR0, Rflags, number_of_states); 2684 __ bge(CCR0, LFlagInvalid); 2685#endif 2686 2687 // Load from branch table and dispatch (volatile case: one instruction ahead). 2688 __ sldi(Rflags, Rflags, LogBytesPerWord); 2689 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { __ cmpwi(CR_is_vol, Rscratch, 1); } // Volatile? 
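// (Explanatory note; nothing is emitted here.) CCR2 is picked for CR_is_vol
// because CR2 is non-volatile in the PPC64 ABI, so the "field is volatile"
// answer survives the runtime call that do_oop_store can make in the atos
// case below. Conceptually a volatile putfield executes
//   release(); store;
// and, on platforms where volatile loads do not fence themselves
// (!support_IRIW_for_not_multiple_copy_atomic_cpu), a trailing fence() for
// the volatile-store/volatile-load case; the non-volatile path enters one
// instruction after the release and dispatches before reaching the fence.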
  __ sldi(Rscratch, Rscratch, exact_log2(BytesPerInstWord)); // Volatile ? size of 1 instruction : 0.
  __ ldx(Rbtable, Rbtable, Rflags);

  __ subf(Rbtable, Rscratch, Rbtable); // Point to volatile/non-volatile entry point.
  __ mtctr(Rbtable);
  __ bctr();

#ifdef ASSERT
  __ bind(LFlagInvalid);
  __ stop("got invalid flag", 0x656);

  // __ bind(Lvtos);
  address pc_before_release = __ pc();
  __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
  assert(__ pc() - pc_before_release == (ptrdiff_t)BytesPerInstWord, "must be single instruction");
  assert(branch_table[vtos] == 0, "can't compute twice");
  branch_table[vtos] = __ pc(); // non-volatile_entry point
  __ stop("vtos unexpected", 0x657);
#endif

  __ align(32, 28, 28); // Align pop.
  // __ bind(Ldtos);
  __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
  assert(branch_table[dtos] == 0, "can't compute twice");
  branch_table[dtos] = __ pc(); // non-volatile_entry point
  __ pop(dtos);
  if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
  __ stfdx(F15_ftos, Rclass_or_obj, Roffset);
  if (!is_static) { patch_bytecode(Bytecodes::_fast_dputfield, Rbc, Rscratch, true, byte_no); }
  if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
    __ beq(CR_is_vol, Lvolatile); // Volatile?
  }
  __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));

  __ align(32, 28, 28); // Align pop.
  // __ bind(Lftos);
  __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
  assert(branch_table[ftos] == 0, "can't compute twice");
  branch_table[ftos] = __ pc(); // non-volatile_entry point
  __ pop(ftos);
  if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
  __ stfsx(F15_ftos, Rclass_or_obj, Roffset);
  if (!is_static) { patch_bytecode(Bytecodes::_fast_fputfield, Rbc, Rscratch, true, byte_no); }
  if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
    __ beq(CR_is_vol, Lvolatile); // Volatile?
  }
  __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));

  __ align(32, 28, 28); // Align pop.
  // __ bind(Litos);
  __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
  assert(branch_table[itos] == 0, "can't compute twice");
  branch_table[itos] = __ pc(); // non-volatile_entry point
  __ pop(itos);
  if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
  __ stwx(R17_tos, Rclass_or_obj, Roffset);
  if (!is_static) { patch_bytecode(Bytecodes::_fast_iputfield, Rbc, Rscratch, true, byte_no); }
  if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
    __ beq(CR_is_vol, Lvolatile); // Volatile?
  }
  __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));

  __ align(32, 28, 28); // Align pop.
  // __ bind(Lltos);
  __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
  assert(branch_table[ltos] == 0, "can't compute twice");
  branch_table[ltos] = __ pc(); // non-volatile_entry point
  __ pop(ltos);
  if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
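  // (Note; nothing is emitted here.) The pop order above is fixed by the
  // operand layout: the value sits on top of the expression stack and is
  // popped first; pop_and_check_object then pops and null-checks the object
  // reference that was beneath it.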
  __ stdx(R17_tos, Rclass_or_obj, Roffset);
  if (!is_static) { patch_bytecode(Bytecodes::_fast_lputfield, Rbc, Rscratch, true, byte_no); }
  if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
    __ beq(CR_is_vol, Lvolatile); // Volatile?
  }
  __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));

  __ align(32, 28, 28); // Align pop.
  // __ bind(Lbtos);
  __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
  assert(branch_table[btos] == 0, "can't compute twice");
  branch_table[btos] = __ pc(); // non-volatile_entry point
  __ pop(btos);
  if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
  __ stbx(R17_tos, Rclass_or_obj, Roffset);
  if (!is_static) { patch_bytecode(Bytecodes::_fast_bputfield, Rbc, Rscratch, true, byte_no); }
  if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
    __ beq(CR_is_vol, Lvolatile); // Volatile?
  }
  __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));

  __ align(32, 28, 28); // Align pop.
  // __ bind(Lctos);
  __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
  assert(branch_table[ctos] == 0, "can't compute twice");
  branch_table[ctos] = __ pc(); // non-volatile_entry point
  __ pop(ctos);
  if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
  __ sthx(R17_tos, Rclass_or_obj, Roffset);
  if (!is_static) { patch_bytecode(Bytecodes::_fast_cputfield, Rbc, Rscratch, true, byte_no); }
  if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
    __ beq(CR_is_vol, Lvolatile); // Volatile?
  }
  __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));

  __ align(32, 28, 28); // Align pop.
  // __ bind(Lstos);
  __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
  assert(branch_table[stos] == 0, "can't compute twice");
  branch_table[stos] = __ pc(); // non-volatile_entry point
  __ pop(stos);
  if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
  __ sthx(R17_tos, Rclass_or_obj, Roffset);
  if (!is_static) { patch_bytecode(Bytecodes::_fast_sputfield, Rbc, Rscratch, true, byte_no); }
  if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
    __ beq(CR_is_vol, Lvolatile); // Volatile?
  }
  __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));

  __ align(32, 28, 28); // Align pop.
  // __ bind(Latos);
  __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
  assert(branch_table[atos] == 0, "can't compute twice");
  branch_table[atos] = __ pc(); // non-volatile_entry point
  __ pop(atos);
  if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
  do_oop_store(_masm, Rclass_or_obj, Roffset, R17_tos, Rscratch, Rscratch2, Rscratch3, _bs->kind(), false /* precise */, true /* check null */);
  if (!is_static) { patch_bytecode(Bytecodes::_fast_aputfield, Rbc, Rscratch, true, byte_no); }
  if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
    __ beq(CR_is_vol, Lvolatile); // Volatile?
2819 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2820 2821 __ align(32, 12); 2822 __ bind(Lvolatile); 2823 __ fence(); 2824 } 2825 // fallthru: __ b(Lexit); 2826 2827#ifdef ASSERT 2828 for (int i = 0; i<number_of_states; ++i) { 2829 assert(branch_table[i], "put initialization"); 2830 //tty->print_cr("put: %s_branch_table[%d] = 0x%llx (opcode 0x%llx)", 2831 // is_static ? "static" : "field", i, branch_table[i], *((unsigned int*)branch_table[i])); 2832 } 2833#endif 2834} 2835 2836void TemplateTable::putfield(int byte_no) { 2837 putfield_or_static(byte_no, false); 2838} 2839 2840void TemplateTable::putstatic(int byte_no) { 2841 putfield_or_static(byte_no, true); 2842} 2843 2844// See SPARC. On PPC64, we have a different jvmti_post_field_mod which does the job. 2845void TemplateTable::jvmti_post_fast_field_mod() { 2846 __ should_not_reach_here(); 2847} 2848 2849void TemplateTable::fast_storefield(TosState state) { 2850 transition(state, vtos); 2851 2852 const Register Rcache = R5_ARG3, // Do not use ARG1/2 (causes trouble in jvmti_post_field_mod). 2853 Rclass_or_obj = R31, // Needs to survive C call. 2854 Roffset = R22_tmp2, // Needs to survive C call. 2855 Rflags = R3_ARG1, 2856 Rscratch = R11_scratch1, 2857 Rscratch2 = R12_scratch2, 2858 Rscratch3 = R4_ARG2; 2859 const ConditionRegister CR_is_vol = CCR2; // Non-volatile condition register (survives runtime call in do_oop_store). 2860 2861 // Constant pool already resolved => Load flags and offset of field. 2862 __ get_cache_and_index_at_bcp(Rcache, 1); 2863 jvmti_post_field_mod(Rcache, Rscratch, false /* not static */); 2864 load_field_cp_cache_entry(noreg, Rcache, noreg, Roffset, Rflags, false); 2865 2866 // Get the obj and the final store addr. 2867 pop_and_check_object(Rclass_or_obj); // Kills R11_scratch1. 2868 2869 // Get volatile flag. 2870 __ rldicl_(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit. 2871 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { __ cmpdi(CR_is_vol, Rscratch, 1); } 2872 { 2873 Label LnotVolatile; 2874 __ beq(CCR0, LnotVolatile); 2875 __ release(); 2876 __ align(32, 12); 2877 __ bind(LnotVolatile); 2878 } 2879 2880 // Do the store and fencing. 2881 switch(bytecode()) { 2882 case Bytecodes::_fast_aputfield: 2883 // Store into the field. 
2884 do_oop_store(_masm, Rclass_or_obj, Roffset, R17_tos, Rscratch, Rscratch2, Rscratch3, _bs->kind(), false /* precise */, true /* check null */); 2885 break; 2886 2887 case Bytecodes::_fast_iputfield: 2888 __ stwx(R17_tos, Rclass_or_obj, Roffset); 2889 break; 2890 2891 case Bytecodes::_fast_lputfield: 2892 __ stdx(R17_tos, Rclass_or_obj, Roffset); 2893 break; 2894 2895 case Bytecodes::_fast_bputfield: 2896 __ stbx(R17_tos, Rclass_or_obj, Roffset); 2897 break; 2898 2899 case Bytecodes::_fast_cputfield: 2900 case Bytecodes::_fast_sputfield: 2901 __ sthx(R17_tos, Rclass_or_obj, Roffset); 2902 break; 2903 2904 case Bytecodes::_fast_fputfield: 2905 __ stfsx(F15_ftos, Rclass_or_obj, Roffset); 2906 break; 2907 2908 case Bytecodes::_fast_dputfield: 2909 __ stfdx(F15_ftos, Rclass_or_obj, Roffset); 2910 break; 2911 2912 default: ShouldNotReachHere(); 2913 } 2914 2915 if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { 2916 Label LVolatile; 2917 __ beq(CR_is_vol, LVolatile); 2918 __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode())); 2919 2920 __ align(32, 12); 2921 __ bind(LVolatile); 2922 __ fence(); 2923 } 2924} 2925 2926void TemplateTable::fast_accessfield(TosState state) { 2927 transition(atos, state); 2928 2929 Label LisVolatile; 2930 ByteSize cp_base_offset = ConstantPoolCache::base_offset(); 2931 2932 const Register Rcache = R3_ARG1, 2933 Rclass_or_obj = R17_tos, 2934 Roffset = R22_tmp2, 2935 Rflags = R23_tmp3, 2936 Rscratch = R12_scratch2; 2937 2938 // Constant pool already resolved. Get the field offset. 2939 __ get_cache_and_index_at_bcp(Rcache, 1); 2940 load_field_cp_cache_entry(noreg, Rcache, noreg, Roffset, Rflags, false); 2941 2942 // JVMTI support 2943 jvmti_post_field_access(Rcache, Rscratch, false, true); 2944 2945 // Get the load address. 2946 __ null_check_throw(Rclass_or_obj, -1, Rscratch); 2947 2948 // Get volatile flag. 2949 __ rldicl_(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit. 
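// (Explanatory note on the record form used above; nothing is emitted.)
// rldicl_ rotates Rflags left so that the is_volatile bit lands in bit
// position 63 and clears every other bit, leaving 0 or 1 in Rscratch; the
// record ("_") form additionally compares that result with zero into CCR0,
// which is exactly what the bne below consumes to take the LisVolatile path.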
2950 __ bne(CCR0, LisVolatile); 2951 2952 switch(bytecode()) { 2953 case Bytecodes::_fast_agetfield: 2954 { 2955 __ load_heap_oop(R17_tos, (RegisterOrConstant)Roffset, Rclass_or_obj); 2956 __ verify_oop(R17_tos); 2957 __ dispatch_epilog(state, Bytecodes::length_for(bytecode())); 2958 2959 __ bind(LisVolatile); 2960 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); } 2961 __ load_heap_oop(R17_tos, (RegisterOrConstant)Roffset, Rclass_or_obj); 2962 __ verify_oop(R17_tos); 2963 __ twi_0(R17_tos); 2964 __ isync(); 2965 break; 2966 } 2967 case Bytecodes::_fast_igetfield: 2968 { 2969 __ lwax(R17_tos, Rclass_or_obj, Roffset); 2970 __ dispatch_epilog(state, Bytecodes::length_for(bytecode())); 2971 2972 __ bind(LisVolatile); 2973 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); } 2974 __ lwax(R17_tos, Rclass_or_obj, Roffset); 2975 __ twi_0(R17_tos); 2976 __ isync(); 2977 break; 2978 } 2979 case Bytecodes::_fast_lgetfield: 2980 { 2981 __ ldx(R17_tos, Rclass_or_obj, Roffset); 2982 __ dispatch_epilog(state, Bytecodes::length_for(bytecode())); 2983 2984 __ bind(LisVolatile); 2985 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); } 2986 __ ldx(R17_tos, Rclass_or_obj, Roffset); 2987 __ twi_0(R17_tos); 2988 __ isync(); 2989 break; 2990 } 2991 case Bytecodes::_fast_bgetfield: 2992 { 2993 __ lbzx(R17_tos, Rclass_or_obj, Roffset); 2994 __ extsb(R17_tos, R17_tos); 2995 __ dispatch_epilog(state, Bytecodes::length_for(bytecode())); 2996 2997 __ bind(LisVolatile); 2998 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); } 2999 __ lbzx(R17_tos, Rclass_or_obj, Roffset); 3000 __ twi_0(R17_tos); 3001 __ extsb(R17_tos, R17_tos); 3002 __ isync(); 3003 break; 3004 } 3005 case Bytecodes::_fast_cgetfield: 3006 { 3007 __ lhzx(R17_tos, Rclass_or_obj, Roffset); 3008 __ dispatch_epilog(state, Bytecodes::length_for(bytecode())); 3009 3010 __ bind(LisVolatile); 3011 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); } 3012 __ lhzx(R17_tos, Rclass_or_obj, Roffset); 3013 __ twi_0(R17_tos); 3014 __ isync(); 3015 break; 3016 } 3017 case Bytecodes::_fast_sgetfield: 3018 { 3019 __ lhax(R17_tos, Rclass_or_obj, Roffset); 3020 __ dispatch_epilog(state, Bytecodes::length_for(bytecode())); 3021 3022 __ bind(LisVolatile); 3023 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); } 3024 __ lhax(R17_tos, Rclass_or_obj, Roffset); 3025 __ twi_0(R17_tos); 3026 __ isync(); 3027 break; 3028 } 3029 case Bytecodes::_fast_fgetfield: 3030 { 3031 __ lfsx(F15_ftos, Rclass_or_obj, Roffset); 3032 __ dispatch_epilog(state, Bytecodes::length_for(bytecode())); 3033 3034 __ bind(LisVolatile); 3035 Label Ldummy; 3036 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); } 3037 __ lfsx(F15_ftos, Rclass_or_obj, Roffset); 3038 __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync. 3039 __ bne_predict_not_taken(CCR0, Ldummy); 3040 __ bind(Ldummy); 3041 __ isync(); 3042 break; 3043 } 3044 case Bytecodes::_fast_dgetfield: 3045 { 3046 __ lfdx(F15_ftos, Rclass_or_obj, Roffset); 3047 __ dispatch_epilog(state, Bytecodes::length_for(bytecode())); 3048 3049 __ bind(LisVolatile); 3050 Label Ldummy; 3051 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); } 3052 __ lfdx(F15_ftos, Rclass_or_obj, Roffset); 3053 __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync. 
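// (Explanatory note on the surrounding sequence; nothing is emitted.) The
// cmp-br-isync acquire idiom used for floating-point loads reads:
//   lfdx/lfsx f, obj, off     // the (volatile) load
//   fcmpu     cr0, f, f       // compare the loaded value with itself
//   bne       cr0, Ldummy     // branch depends on the loaded data ...
//   isync                     // ... so no later load can be satisfied early
// The compare is unequal only for a NaN value, hence the branch is predicted
// not taken, and Ldummy sits immediately after it either way.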
3054 __ bne_predict_not_taken(CCR0, Ldummy); 3055 __ bind(Ldummy); 3056 __ isync(); 3057 break; 3058 } 3059 default: ShouldNotReachHere(); 3060 } 3061} 3062 3063void TemplateTable::fast_xaccess(TosState state) { 3064 transition(vtos, state); 3065 3066 Label LisVolatile; 3067 ByteSize cp_base_offset = ConstantPoolCache::base_offset(); 3068 const Register Rcache = R3_ARG1, 3069 Rclass_or_obj = R17_tos, 3070 Roffset = R22_tmp2, 3071 Rflags = R23_tmp3, 3072 Rscratch = R12_scratch2; 3073 3074 __ ld(Rclass_or_obj, 0, R18_locals); 3075 3076 // Constant pool already resolved. Get the field offset. 3077 __ get_cache_and_index_at_bcp(Rcache, 2); 3078 load_field_cp_cache_entry(noreg, Rcache, noreg, Roffset, Rflags, false); 3079 3080 // JVMTI support not needed, since we switch back to single bytecode as soon as debugger attaches. 3081 3082 // Needed to report exception at the correct bcp. 3083 __ addi(R14_bcp, R14_bcp, 1); 3084 3085 // Get the load address. 3086 __ null_check_throw(Rclass_or_obj, -1, Rscratch); 3087 3088 // Get volatile flag. 3089 __ rldicl_(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit. 3090 __ bne(CCR0, LisVolatile); 3091 3092 switch(state) { 3093 case atos: 3094 { 3095 __ load_heap_oop(R17_tos, (RegisterOrConstant)Roffset, Rclass_or_obj); 3096 __ verify_oop(R17_tos); 3097 __ dispatch_epilog(state, Bytecodes::length_for(bytecode()) - 1); // Undo bcp increment. 3098 3099 __ bind(LisVolatile); 3100 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); } 3101 __ load_heap_oop(R17_tos, (RegisterOrConstant)Roffset, Rclass_or_obj); 3102 __ verify_oop(R17_tos); 3103 __ twi_0(R17_tos); 3104 __ isync(); 3105 break; 3106 } 3107 case itos: 3108 { 3109 __ lwax(R17_tos, Rclass_or_obj, Roffset); 3110 __ dispatch_epilog(state, Bytecodes::length_for(bytecode()) - 1); // Undo bcp increment. 3111 3112 __ bind(LisVolatile); 3113 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); } 3114 __ lwax(R17_tos, Rclass_or_obj, Roffset); 3115 __ twi_0(R17_tos); 3116 __ isync(); 3117 break; 3118 } 3119 case ftos: 3120 { 3121 __ lfsx(F15_ftos, Rclass_or_obj, Roffset); 3122 __ dispatch_epilog(state, Bytecodes::length_for(bytecode()) - 1); // Undo bcp increment. 3123 3124 __ bind(LisVolatile); 3125 Label Ldummy; 3126 if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); } 3127 __ lfsx(F15_ftos, Rclass_or_obj, Roffset); 3128 __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync. 3129 __ bne_predict_not_taken(CCR0, Ldummy); 3130 __ bind(Ldummy); 3131 __ isync(); 3132 break; 3133 } 3134 default: ShouldNotReachHere(); 3135 } 3136 __ addi(R14_bcp, R14_bcp, -1); 3137} 3138 3139// ============================================================================ 3140// Calls 3141 3142// Common code for invoke 3143// 3144// Input: 3145// - byte_no 3146// 3147// Output: 3148// - Rmethod: The method to invoke next. 3149// - Rret_addr: The return address to return to. 3150// - Rindex: MethodType (invokehandle) or CallSite obj (invokedynamic) 3151// - Rrecv: Cache for "this" pointer, might be noreg if static call. 3152// - Rflags: Method flags from const pool cache. 3153// 3154// Kills: 3155// - Rscratch1 3156// 3157void TemplateTable::prepare_invoke(int byte_no, 3158 Register Rmethod, // linked method (or i-klass) 3159 Register Rret_addr,// return address 3160 Register Rindex, // itable index, MethodType, etc. 3161 Register Rrecv, // If caller wants to see it. 3162 Register Rflags, // If caller wants to test it. 
                                   Register Rscratch
                                   ) {
  // Determine flags.
  const Bytecodes::Code code = bytecode();
  const bool is_invokeinterface = code == Bytecodes::_invokeinterface;
  const bool is_invokedynamic   = code == Bytecodes::_invokedynamic;
  const bool is_invokehandle    = code == Bytecodes::_invokehandle;
  const bool is_invokevirtual   = code == Bytecodes::_invokevirtual;
  const bool is_invokespecial   = code == Bytecodes::_invokespecial;
  const bool load_receiver      = (Rrecv != noreg);
  assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), "");

  assert_different_registers(Rmethod, Rindex, Rflags, Rscratch);
  assert_different_registers(Rmethod, Rrecv, Rflags, Rscratch);
  assert_different_registers(Rret_addr, Rscratch);

  load_invoke_cp_cache_entry(byte_no, Rmethod, Rindex, Rflags, is_invokevirtual, false, is_invokedynamic);

  // Saving of SP done in call_from_interpreter.

  // Maybe push "appendix" to arguments.
  if (is_invokedynamic || is_invokehandle) {
    Label Ldone;
    __ rldicl_(R0, Rflags, 64-ConstantPoolCacheEntry::has_appendix_shift, 63);
    __ beq(CCR0, Ldone);
    // Push "appendix" (MethodType, CallSite, etc.).
    // This must be done before we get the receiver,
    // since the parameter_size includes it.
    __ load_resolved_reference_at_index(Rscratch, Rindex);
    __ verify_oop(Rscratch);
    __ push_ptr(Rscratch);
    __ bind(Ldone);
  }

  // Load receiver if needed (after appendix is pushed so parameter size is correct).
  if (load_receiver) {
    const Register Rparam_count = Rscratch;
    __ andi(Rparam_count, Rflags, ConstantPoolCacheEntry::parameter_size_mask);
    __ load_receiver(Rparam_count, Rrecv);
    __ verify_oop(Rrecv);
  }

  // Get return address.
  {
    Register Rtable_addr = Rscratch;
    Register Rret_type = Rret_addr;
    address table_addr = (address) Interpreter::invoke_return_entry_table_for(code);

    // Get return type. It's coded into the upper 4 bits of the lower half of the 64 bit value.
    __ rldicl(Rret_type, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);
    __ load_dispatch_table(Rtable_addr, (address*)table_addr);
    __ sldi(Rret_type, Rret_type, LogBytesPerWord);
    // Get return address.
    __ ldx(Rret_addr, Rtable_addr, Rret_type);
  }
}

// Helper for virtual calls. Load target out of vtable and jump off!
// Kills all passed registers.
void TemplateTable::generate_vtable_call(Register Rrecv_klass, Register Rindex, Register Rret, Register Rtemp) {

  assert_different_registers(Rrecv_klass, Rtemp, Rret);
  const Register Rtarget_method = Rindex;

  // Get target method & entry point.
  const int base = InstanceKlass::vtable_start_offset() * wordSize;
  // Calc vtable addr: scale the vtable index by the vtable entry size (8 bytes).
  __ sldi(Rindex, Rindex, exact_log2(vtableEntry::size() * wordSize));
  // Load target.
  __ addi(Rrecv_klass, Rrecv_klass, base + vtableEntry::method_offset_in_bytes());
  __ ldx(Rtarget_method, Rindex, Rrecv_klass);
  __ call_from_interpreter(Rtarget_method, Rret, Rrecv_klass /* scratch1 */, Rtemp /* scratch2 */);
}

// Virtual or final call. Final calls are rewritten on the fly to run through
// "_fast_invokevfinal" next time.
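// (Added sketch of the decision implemented below, in C-like pseudocode with
// descriptive names only -- not actual VM code:)
//   if (cp_cache_entry->flags & (1 << is_vfinal_shift)) {
//     // Target is known statically; f2 already holds the Method*.
//     rewrite_to(_fast_invokevfinal); call(f2_method);
//   } else {
//     // f2 holds a vtable index; fetch the Method* from the receiver's vtable.
//     call(receiver_klass->vtable[f2_index]);
//   }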
3238void TemplateTable::invokevirtual(int byte_no) { 3239 transition(vtos, vtos); 3240 3241 Register Rtable_addr = R11_scratch1, 3242 Rret_type = R12_scratch2, 3243 Rret_addr = R5_ARG3, 3244 Rflags = R22_tmp2, // Should survive C call. 3245 Rrecv = R3_ARG1, 3246 Rrecv_klass = Rrecv, 3247 Rvtableindex_or_method = R31, // Should survive C call. 3248 Rnum_params = R4_ARG2, 3249 Rnew_bc = R6_ARG4; 3250 3251 Label LnotFinal; 3252 3253 load_invoke_cp_cache_entry(byte_no, Rvtableindex_or_method, noreg, Rflags, /*virtual*/ true, false, false); 3254 3255 __ testbitdi(CCR0, R0, Rflags, ConstantPoolCacheEntry::is_vfinal_shift); 3256 __ bfalse(CCR0, LnotFinal); 3257 3258 patch_bytecode(Bytecodes::_fast_invokevfinal, Rnew_bc, R12_scratch2); 3259 invokevfinal_helper(Rvtableindex_or_method, Rflags, R11_scratch1, R12_scratch2); 3260 3261 __ align(32, 12); 3262 __ bind(LnotFinal); 3263 // Load "this" pointer (receiver). 3264 __ rldicl(Rnum_params, Rflags, 64, 48); 3265 __ load_receiver(Rnum_params, Rrecv); 3266 __ verify_oop(Rrecv); 3267 3268 // Get return type. It's coded into the upper 4 bits of the lower half of the 64 bit value. 3269 __ rldicl(Rret_type, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits); 3270 __ load_dispatch_table(Rtable_addr, Interpreter::invoke_return_entry_table()); 3271 __ sldi(Rret_type, Rret_type, LogBytesPerWord); 3272 __ ldx(Rret_addr, Rret_type, Rtable_addr); 3273 __ null_check_throw(Rrecv, oopDesc::klass_offset_in_bytes(), R11_scratch1); 3274 __ load_klass(Rrecv_klass, Rrecv); 3275 __ verify_klass_ptr(Rrecv_klass); 3276 __ profile_virtual_call(Rrecv_klass, R11_scratch1, R12_scratch2, false); 3277 3278 generate_vtable_call(Rrecv_klass, Rvtableindex_or_method, Rret_addr, R11_scratch1); 3279} 3280 3281void TemplateTable::fast_invokevfinal(int byte_no) { 3282 transition(vtos, vtos); 3283 3284 assert(byte_no == f2_byte, "use this argument"); 3285 Register Rflags = R22_tmp2, 3286 Rmethod = R31; 3287 load_invoke_cp_cache_entry(byte_no, Rmethod, noreg, Rflags, /*virtual*/ true, /*is_invokevfinal*/ true, false); 3288 invokevfinal_helper(Rmethod, Rflags, R11_scratch1, R12_scratch2); 3289} 3290 3291void TemplateTable::invokevfinal_helper(Register Rmethod, Register Rflags, Register Rscratch1, Register Rscratch2) { 3292 3293 assert_different_registers(Rmethod, Rflags, Rscratch1, Rscratch2); 3294 3295 // Load receiver from stack slot. 3296 Register Rrecv = Rscratch2; 3297 Register Rnum_params = Rrecv; 3298 3299 __ ld(Rnum_params, in_bytes(Method::const_offset()), Rmethod); 3300 __ lhz(Rnum_params /* number of params */, in_bytes(ConstMethod::size_of_parameters_offset()), Rnum_params); 3301 3302 // Get return address. 3303 Register Rtable_addr = Rscratch1, 3304 Rret_addr = Rflags, 3305 Rret_type = Rret_addr; 3306 // Get return type. It's coded into the upper 4 bits of the lower half of the 64 bit value. 3307 __ rldicl(Rret_type, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits); 3308 __ load_dispatch_table(Rtable_addr, Interpreter::invoke_return_entry_table()); 3309 __ sldi(Rret_type, Rret_type, LogBytesPerWord); 3310 __ ldx(Rret_addr, Rret_type, Rtable_addr); 3311 3312 // Load receiver and receiver NULL check. 3313 __ load_receiver(Rnum_params, Rrecv); 3314 __ null_check_throw(Rrecv, -1, Rscratch1); 3315 3316 __ profile_final_call(Rrecv, Rscratch1); 3317 3318 // Do the call. 
  __ call_from_interpreter(Rmethod, Rret_addr, Rscratch1, Rscratch2);
}

void TemplateTable::invokespecial(int byte_no) {
  assert(byte_no == f1_byte, "use this argument");
  transition(vtos, vtos);

  Register Rtable_addr = R3_ARG1,
           Rret_addr = R4_ARG2,
           Rflags = R5_ARG3,
           Rreceiver = R6_ARG4,
           Rmethod = R31;

  prepare_invoke(byte_no, Rmethod, Rret_addr, noreg, Rreceiver, Rflags, R11_scratch1);

  // Receiver NULL check.
  __ null_check_throw(Rreceiver, -1, R11_scratch1);

  __ profile_call(R11_scratch1, R12_scratch2);
  __ call_from_interpreter(Rmethod, Rret_addr, R11_scratch1, R12_scratch2);
}

void TemplateTable::invokestatic(int byte_no) {
  assert(byte_no == f1_byte, "use this argument");
  transition(vtos, vtos);

  Register Rtable_addr = R3_ARG1,
           Rret_addr = R4_ARG2,
           Rflags = R5_ARG3;

  prepare_invoke(byte_no, R19_method, Rret_addr, noreg, noreg, Rflags, R11_scratch1);

  __ profile_call(R11_scratch1, R12_scratch2);
  __ call_from_interpreter(R19_method, Rret_addr, R11_scratch1, R12_scratch2);
}

void TemplateTable::invokeinterface_object_method(Register Rrecv_klass,
                                                  Register Rret,
                                                  Register Rflags,
                                                  Register Rindex,
                                                  Register Rtemp1,
                                                  Register Rtemp2) {

  assert_different_registers(Rindex, Rret, Rrecv_klass, Rflags, Rtemp1, Rtemp2);
  Label LnotFinal;

  // Check for vfinal.
  __ testbitdi(CCR0, R0, Rflags, ConstantPoolCacheEntry::is_vfinal_shift);
  __ bfalse(CCR0, LnotFinal);

  Register Rscratch = Rflags; // Rflags is dead now.

  // Final call case.
  __ profile_final_call(Rtemp1, Rscratch);
  // Do the final call - the index (f2) contains the method.
  __ call_from_interpreter(Rindex, Rret, Rscratch, Rrecv_klass /* scratch */);

  // Non-final call case.
  __ bind(LnotFinal);
  __ profile_virtual_call(Rrecv_klass, Rtemp1, Rscratch, false);
  generate_vtable_call(Rrecv_klass, Rindex, Rret, Rscratch);
}

void TemplateTable::invokeinterface(int byte_no) {
  assert(byte_no == f1_byte, "use this argument");
  transition(vtos, vtos);

  const Register Rscratch1 = R11_scratch1,
                 Rscratch2 = R12_scratch2,
                 Rscratch3 = R9_ARG7,
                 Rscratch4 = R10_ARG8,
                 Rtable_addr = Rscratch2,
                 Rinterface_klass = R5_ARG3,
                 Rret_type = R8_ARG6,
                 Rret_addr = Rret_type,
                 Rindex = R6_ARG4,
                 Rreceiver = R4_ARG2,
                 Rrecv_klass = Rreceiver,
                 Rflags = R7_ARG5;

  prepare_invoke(byte_no, Rinterface_klass, Rret_addr, Rindex, Rreceiver, Rflags, Rscratch1);

  // Get receiver klass.
  __ null_check_throw(Rreceiver, oopDesc::klass_offset_in_bytes(), Rscratch3);
  __ load_klass(Rrecv_klass, Rreceiver);

  // Check corner case object method.
  Label LobjectMethod;

  __ testbitdi(CCR0, R0, Rflags, ConstantPoolCacheEntry::is_forced_virtual_shift);
  __ btrue(CCR0, LobjectMethod);

  // Fallthrough: The normal invokeinterface case.
  __ profile_virtual_call(Rrecv_klass, Rscratch1, Rscratch2, false);

  // Find entry point to call.
  Label Lthrow_icc, Lthrow_ame;
  // Result will be returned in Rindex.
  __ mr(Rscratch4, Rrecv_klass);
  __ mr(Rscratch3, Rindex);
  __ lookup_interface_method(Rrecv_klass, Rinterface_klass, Rindex, Rindex, Rscratch1, Rscratch2, Lthrow_icc);

  __ cmpdi(CCR0, Rindex, 0);
  __ beq(CCR0, Lthrow_ame);
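  // A rough C-like sketch of the itable scan performed by
  // lookup_interface_method above (illustration only; the accessor names are
  // hypothetical):
  //
  //   itableOffsetEntry* ioe = recv_klass->start_of_itable();
  //   while (ioe->interface_klass() != interface_klass) {
  //     if (ioe->interface_klass() == NULL) goto Lthrow_icc; // not implemented
  //     ioe++;
  //   }
  //   Method* m = *(Method**)((address)recv_klass + ioe->offset()
  //                           + itable_index * itableMethodEntry::size() * wordSize);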
  // Found entry. Jump off!
  __ call_from_interpreter(Rindex, Rret_addr, Rscratch1, Rscratch2);

  // Vtable entry was NULL => Throw abstract method error.
  __ bind(Lthrow_ame);
  __ mr(Rrecv_klass, Rscratch4);
  __ mr(Rindex, Rscratch3);
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));

  // Interface was not found => Throw incompatible class change error.
  __ bind(Lthrow_icc);
  __ mr(Rrecv_klass, Rscratch4);
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_IncompatibleClassChangeError));

  __ should_not_reach_here();

  // Special case of invokeinterface called for virtual method of
  // java.lang.Object. See ConstantPoolCacheEntry::set_method() for details:
  // The invokeinterface was rewritten to an invokevirtual, hence we have
  // to handle this corner case. This code isn't produced by javac, but could
  // be produced by another compliant Java compiler.
  __ bind(LobjectMethod);
  invokeinterface_object_method(Rrecv_klass, Rret_addr, Rflags, Rindex, Rscratch1, Rscratch2);
}

void TemplateTable::invokedynamic(int byte_no) {
  transition(vtos, vtos);

  const Register Rret_addr = R3_ARG1,
                 Rflags = R4_ARG2,
                 Rmethod = R22_tmp2,
                 Rscratch1 = R11_scratch1,
                 Rscratch2 = R12_scratch2;

  prepare_invoke(byte_no, Rmethod, Rret_addr, Rscratch1, noreg, Rflags, Rscratch2);

  // Profile this call.
  __ profile_call(Rscratch1, Rscratch2);

  // Off we go. With the new method handles, we don't jump to a method handle
  // entry any more. Instead, we pushed an "appendix" in prepare_invoke, which happens
  // to be the CallSite object the bootstrap method returned. This is passed to a
  // "link" method which does the dispatch (most likely it just grabs the MethodHandle
  // stored inside the CallSite and does an invokehandle).
  __ call_from_interpreter(Rmethod, Rret_addr, Rscratch1 /* scratch1 */, Rscratch2 /* scratch2 */);
}

void TemplateTable::invokehandle(int byte_no) {
  transition(vtos, vtos);

  const Register Rret_addr = R3_ARG1,
                 Rflags = R4_ARG2,
                 Rrecv = R5_ARG3,
                 Rmethod = R22_tmp2,
                 Rscratch1 = R11_scratch1,
                 Rscratch2 = R12_scratch2;

  prepare_invoke(byte_no, Rmethod, Rret_addr, Rscratch1, Rrecv, Rflags, Rscratch2);
  __ verify_method_ptr(Rmethod);
  __ null_check_throw(Rrecv, -1, Rscratch2);

  __ profile_final_call(Rrecv, Rscratch1);

  // Still no call from handle => We call the method handle interpreter here.
  __ call_from_interpreter(Rmethod, Rret_addr, Rscratch1 /* scratch1 */, Rscratch2 /* scratch2 */);
}

// =============================================================================
// Allocation

// Puts allocated obj ref onto the expression stack.
void TemplateTable::_new() {
  transition(vtos, atos);

  Label Lslow_case,
        Ldone,
        Linitialize_header,
        Lallocate_shared,
        Linitialize_object; // Including clearing the fields.
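  // Rough shape of the fast path generated below, in C-like pseudocode
  // (illustration only; the helper names are hypothetical):
  //
  //   if (cp_tag(index) == JVM_CONSTANT_Class
  //       && klass->init_state == fully_initialized
  //       && !layout_helper_slow_path_bit(klass)) {
  //     new_top = tlab_top + instance_size;
  //     if (new_top <= tlab_end) {                  // bump-pointer TLAB alloc
  //       obj = tlab_top; tlab_top = new_top;
  //       clear_fields(obj); init_mark_and_klass(obj);
  //       goto done;
  //     }
  //     // TLAB too small: adjust the refill-waste bookkeeping, then ...
  //   }
  //   obj = InterpreterRuntime::_new(cpool, index); // ... slow case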
  const Register RallocatedObject = R17_tos,
                 RinstanceKlass = R9_ARG7,
                 Rscratch = R11_scratch1,
                 Roffset = R8_ARG6,
                 Rinstance_size = Roffset,
                 Rcpool = R4_ARG2,
                 Rtags = R3_ARG1,
                 Rindex = R5_ARG3;

  const bool allow_shared_alloc = Universe::heap()->supports_inline_contig_alloc() && !CMSIncrementalMode;

  // --------------------------------------------------------------------------
  // Check if fast case is possible.

  // Load pointers to const pool and const pool's tags array.
  __ get_cpool_and_tags(Rcpool, Rtags);
  // Load index of constant pool entry.
  __ get_2_byte_integer_at_bcp(1, Rindex, InterpreterMacroAssembler::Unsigned);

  if (UseTLAB) {
    // Make sure the class we're about to instantiate has been resolved.
    // This is done before loading the InstanceKlass to be consistent with the order
    // in which the constant pool is updated (see ConstantPoolCache::klass_at_put).
    __ addi(Rtags, Rtags, Array<u1>::base_offset_in_bytes());
    __ lbzx(Rtags, Rindex, Rtags);

    __ cmpdi(CCR0, Rtags, JVM_CONSTANT_Class);
    __ bne(CCR0, Lslow_case);

    // Get InstanceKlass (load from Rcpool + sizeof(ConstantPool) + Rindex*BytesPerWord).
    __ sldi(Roffset, Rindex, LogBytesPerWord);
    __ addi(Rscratch, Rcpool, sizeof(ConstantPool));
    __ isync(); // Order load of instance Klass wrt. tags.
    __ ldx(RinstanceKlass, Roffset, Rscratch);

    // Make sure klass is fully initialized and get instance_size.
    __ lbz(Rscratch, in_bytes(InstanceKlass::init_state_offset()), RinstanceKlass);
    __ lwz(Rinstance_size, in_bytes(Klass::layout_helper_offset()), RinstanceKlass);

    __ cmpdi(CCR1, Rscratch, InstanceKlass::fully_initialized);
    // Make sure klass does not have a finalizer and is not abstract, an interface, or java/lang/Class.
    __ andi_(R0, Rinstance_size, Klass::_lh_instance_slow_path_bit); // slow path bit equals 0?

    __ crnand(/*CR0 eq*/2, /*CR1 eq*/4+2, /*CR0 eq*/2); // slow path bit set or not fully initialized?
    __ beq(CCR0, Lslow_case);

    // --------------------------------------------------------------------------
    // Fast case:
    // Allocate the instance.
    // 1) Try to allocate in the TLAB.
    // 2) If that fails, and the TLAB is not full enough to discard, allocate in the shared eden.
    // 3) If the above fails (or is not applicable), go to a slow case (creates a new TLAB, etc.).

    Register RoldTopValue = RallocatedObject; // Object will be allocated here if it fits.
    Register RnewTopValue = R6_ARG4;
    Register RendValue = R7_ARG5;

    // Check if we can allocate in the TLAB.
    __ ld(RoldTopValue, in_bytes(JavaThread::tlab_top_offset()), R16_thread);
    __ ld(RendValue, in_bytes(JavaThread::tlab_end_offset()), R16_thread);

    __ add(RnewTopValue, Rinstance_size, RoldTopValue);

    // If there is enough space, we do not CAS and do not clear.
    __ cmpld(CCR0, RnewTopValue, RendValue);
    __ bgt(CCR0, allow_shared_alloc ? Lallocate_shared : Lslow_case);

    __ std(RnewTopValue, in_bytes(JavaThread::tlab_top_offset()), R16_thread);

    if (ZeroTLAB) {
      // The fields have already been cleared.
      __ b(Linitialize_header);
    } else {
      // Initialize both the header and fields.
      __ b(Linitialize_object);
    }

    // Fall through: TLAB was too small.
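    // This is the usual TLAB retirement heuristic: if the TLAB's remaining
    // free space is already at or below refill_waste_limit, the slow path may
    // retire and refill the TLAB; otherwise the limit is raised so a TLAB
    // that is still mostly usable is kept. Either way, this particular
    // allocation is completed by InterpreterRuntime::_new below.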
    if (allow_shared_alloc) {
      Register RtlabWasteLimitValue = R10_ARG8;
      Register RfreeValue = RnewTopValue;

      __ bind(Lallocate_shared);
      // Check if tlab should be discarded (refill_waste_limit >= free).
      __ ld(RtlabWasteLimitValue, in_bytes(JavaThread::tlab_refill_waste_limit_offset()), R16_thread);
      __ subf(RfreeValue, RoldTopValue, RendValue);
      __ srdi(RfreeValue, RfreeValue, LogHeapWordSize); // in dwords
      __ cmpld(CCR0, RtlabWasteLimitValue, RfreeValue);
      __ bge(CCR0, Lslow_case);

      // Increment waste limit to prevent getting stuck on this slow path.
      __ addi(RtlabWasteLimitValue, RtlabWasteLimitValue, (int)ThreadLocalAllocBuffer::refill_waste_limit_increment());
      __ std(RtlabWasteLimitValue, in_bytes(JavaThread::tlab_refill_waste_limit_offset()), R16_thread);
    }
    // else: No allocation in the shared eden. // fallthru: __ b(Lslow_case);
  }
  // else: Always go the slow path.

  // --------------------------------------------------------------------------
  // slow case
  __ bind(Lslow_case);
  call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), Rcpool, Rindex);

  if (UseTLAB) {
    __ b(Ldone);
    // --------------------------------------------------------------------------
    // Init1: Zero out newly allocated memory.

    if (!ZeroTLAB || allow_shared_alloc) {
      // Clear object fields.
      __ bind(Linitialize_object);

      // Initialize remaining object fields.
      Register Rbase = Rtags;
      // Compute (instance_size - header size) in doublewords, rounding up.
      __ addi(Rinstance_size, Rinstance_size, 7 - (int)sizeof(oopDesc));
      __ addi(Rbase, RallocatedObject, sizeof(oopDesc));
      __ srdi(Rinstance_size, Rinstance_size, 3);

      // Clear out object, skipping the header. Also takes care of the zero-length case.
      __ clear_memory_doubleword(Rbase, Rinstance_size);
      // fallthru: __ b(Linitialize_header);
    }

    // --------------------------------------------------------------------------
    // Init2: Initialize the header: mark, klass
    __ bind(Linitialize_header);

    // Init mark.
    if (UseBiasedLocking) {
      __ ld(Rscratch, in_bytes(Klass::prototype_header_offset()), RinstanceKlass);
    } else {
      __ load_const_optimized(Rscratch, markOopDesc::prototype(), R0);
    }
    __ std(Rscratch, oopDesc::mark_offset_in_bytes(), RallocatedObject);

    // Init klass.
    __ store_klass_gap(RallocatedObject);
    __ store_klass(RallocatedObject, RinstanceKlass, Rscratch); // klass (last for CMS)

    // Check and trigger dtrace event.
    {
      SkipIfEqualZero skip_if(_masm, Rscratch, &DTraceAllocProbes);
      __ push(atos);
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc));
      __ pop(atos);
    }
  }

  // continue
  __ bind(Ldone);

  // Must prevent reordering of stores for object initialization with stores that publish the new object.
  __ membar(Assembler::StoreStore);
}

void TemplateTable::newarray() {
  transition(itos, atos);

  __ lbz(R4, 1, R14_bcp);  // type
  __ extsw(R5, R17_tos);
  call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray), R4 /* type */, R5 /* size */);

  // Must prevent reordering of stores for object initialization with stores that publish the new object.
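  // (A StoreStore barrier, presumably emitted as lwsync on PPC64, is enough
  // here: the initializing stores only have to become visible before any
  // later store that publishes the new array.)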
  __ membar(Assembler::StoreStore);
}

void TemplateTable::anewarray() {
  transition(itos, atos);

  __ get_constant_pool(R4);
  __ get_2_byte_integer_at_bcp(1, R5, InterpreterMacroAssembler::Unsigned);
  __ extsw(R6, R17_tos); // size
  call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray), R4 /* pool */, R5 /* index */, R6 /* size */);

  // Must prevent reordering of stores for object initialization with stores that publish the new object.
  __ membar(Assembler::StoreStore);
}

// Allocate a multi dimensional array.
void TemplateTable::multianewarray() {
  transition(vtos, atos);

  Register Rptr = R31; // Needs to survive C call.

  // Put ndims * wordSize into Rptr.
  __ lbz(Rptr, 3, R14_bcp);
  __ sldi(Rptr, Rptr, Interpreter::logStackElementSize);
  // Esp points past last_dim, so set R4 to first_dim address.
  __ add(R4, Rptr, R15_esp);
  call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray), R4 /* first_size_address */);
  // Pop all dimensions off the stack.
  __ add(R15_esp, Rptr, R15_esp);

  // Must prevent reordering of stores for object initialization with stores that publish the new object.
  __ membar(Assembler::StoreStore);
}

void TemplateTable::arraylength() {
  transition(atos, itos);

  Label LnoException;
  __ verify_oop(R17_tos);
  __ null_check_throw(R17_tos, arrayOopDesc::length_offset_in_bytes(), R11_scratch1);
  __ lwa(R17_tos, arrayOopDesc::length_offset_in_bytes(), R17_tos);
}

// ============================================================================
// Typechecks

void TemplateTable::checkcast() {
  transition(atos, atos);

  Label Ldone, Lis_null, Lquicked, Lresolved;
  Register Roffset = R6_ARG4,
           RobjKlass = R4_ARG2,
           RspecifiedKlass = R5_ARG3, // Generate_ClassCastException_verbose_handler will read value from this register.
           Rcpool = R11_scratch1,
           Rtags = R12_scratch2;

  // Null does not pass.
  __ cmpdi(CCR0, R17_tos, 0);
  __ beq(CCR0, Lis_null);

  // Get constant pool tag to find out if the bytecode has already been "quickened".
  __ get_cpool_and_tags(Rcpool, Rtags);

  __ get_2_byte_integer_at_bcp(1, Roffset, InterpreterMacroAssembler::Unsigned);

  __ addi(Rtags, Rtags, Array<u1>::base_offset_in_bytes());
  __ lbzx(Rtags, Rtags, Roffset);

  __ cmpdi(CCR0, Rtags, JVM_CONSTANT_Class);
  __ beq(CCR0, Lquicked);

  // Call into the VM to "quicken" checkcast.
  __ push_ptr();  // for GC
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
  __ get_vm_result_2(RspecifiedKlass);
  __ pop_ptr();   // Restore receiver.
  __ b(Lresolved);

  // Extract target class from constant pool.
  __ bind(Lquicked);
  __ sldi(Roffset, Roffset, LogBytesPerWord);
  __ addi(Rcpool, Rcpool, sizeof(ConstantPool));
  __ isync(); // Order load of specified Klass wrt. tags.
  __ ldx(RspecifiedKlass, Rcpool, Roffset);

  // Do the checkcast.
  __ bind(Lresolved);
  // Get value klass in RobjKlass.
  __ load_klass(RobjKlass, R17_tos);
  // Generate a fast subtype check; branches to Ldone if the object is a subtype,
  // otherwise falls through to the exception throw below.
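  // The fast/slow subtype protocol is expected to be roughly (C-like sketch,
  // illustration only, simplified field names):
  //
  //   if (obj_klass == specified_klass) goto Ldone;          // exact match
  //   off = specified_klass->super_check_offset;
  //   if (*(Klass**)((address)obj_klass + off) == specified_klass) goto Ldone;
  //   // If off is the secondary-supers offset, linearly scan
  //   // obj_klass->secondary_supers, caching a hit in
  //   // obj_klass->secondary_super_cache; on a miss, fall through (failure).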
  __ gen_subtype_check(RobjKlass, RspecifiedKlass, /*3 temp regs*/ Roffset, Rcpool, Rtags, /*target if subtype*/ Ldone);

  // Not a subtype; throw the ClassCastException.
  // Target class oop is in register R6_ARG4 == RspecifiedKlass by convention.
  __ load_dispatch_table(R11_scratch1, (address*)Interpreter::_throw_ClassCastException_entry);
  __ mtctr(R11_scratch1);
  __ bctr();

  // Profile the null case.
  __ align(32, 12);
  __ bind(Lis_null);
  __ profile_null_seen(R11_scratch1, Rtags); // Rtags used as scratch.

  __ align(32, 12);
  __ bind(Ldone);
}

// Output:
// - tos == 0: Obj was null or not an instance of class.
// - tos == 1: Obj was an instance of class.
void TemplateTable::instanceof() {
  transition(atos, itos);

  Label Ldone, Lis_null, Lquicked, Lresolved;
  Register Roffset = R5_ARG3,
           RobjKlass = R4_ARG2,
           RspecifiedKlass = R6_ARG4, // Generate_ClassCastException_verbose_handler will expect the value in this register.
           Rcpool = R11_scratch1,
           Rtags = R12_scratch2;

  // Null does not pass.
  __ cmpdi(CCR0, R17_tos, 0);
  __ beq(CCR0, Lis_null);

  // Get constant pool tag to find out if the bytecode has already been "quickened".
  __ get_cpool_and_tags(Rcpool, Rtags);

  __ get_2_byte_integer_at_bcp(1, Roffset, InterpreterMacroAssembler::Unsigned);

  __ addi(Rtags, Rtags, Array<u1>::base_offset_in_bytes());
  __ lbzx(Rtags, Rtags, Roffset);

  __ cmpdi(CCR0, Rtags, JVM_CONSTANT_Class);
  __ beq(CCR0, Lquicked);

  // Call into the VM to "quicken" instanceof.
  __ push_ptr();  // for GC
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
  __ get_vm_result_2(RspecifiedKlass);
  __ pop_ptr();   // Restore receiver.
  __ b(Lresolved);

  // Extract target class from constant pool.
  __ bind(Lquicked);
  __ sldi(Roffset, Roffset, LogBytesPerWord);
  __ addi(Rcpool, Rcpool, sizeof(ConstantPool));
  __ isync(); // Order load of specified Klass wrt. tags.
  __ ldx(RspecifiedKlass, Rcpool, Roffset);

  // Do the instanceof check.
  __ bind(Lresolved);
  // Get value klass in RobjKlass.
  __ load_klass(RobjKlass, R17_tos);
  // Generate a fast subtype check. Preset tos to 1 (subtype); a branch to Ldone
  // keeps it, falling through resets it to 0 (not a subtype).
  __ li(R17_tos, 1);
  __ gen_subtype_check(RobjKlass, RspecifiedKlass, /*3 temp regs*/ Roffset, Rcpool, Rtags, /*target if subtype*/ Ldone);
  __ li(R17_tos, 0);

  if (ProfileInterpreter) {
    // Skip the null-case profiling code below; tos already holds the result.
    __ b(Ldone);
  }

  // Profile the null case.
  __ align(32, 12);
  __ bind(Lis_null);
  __ profile_null_seen(Rcpool, Rtags); // Rcpool and Rtags used as scratch.

  __ align(32, 12);
  __ bind(Ldone);
}

// =============================================================================
// Breakpoints

void TemplateTable::_breakpoint() {
  transition(vtos, vtos);

  // Get the unpatched byte code.
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::get_original_bytecode_at), R19_method, R14_bcp);
  __ mr(R31, R3_RET);

  // Post the breakpoint event.
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint), R19_method, R14_bcp);

  // Complete the execution of the original bytecode.
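  // (Dispatch on the original bytecode saved in R31 rather than re-reading it
  // from the bytecode stream, since *bcp still contains the breakpoint
  // bytecode.)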
  __ dispatch_Lbyte_code(vtos, R31, Interpreter::normal_table(vtos));
}

// =============================================================================
// Exceptions

void TemplateTable::athrow() {
  transition(atos, vtos);

  // Exception oop is in tos.
  __ verify_oop(R17_tos);

  __ null_check_throw(R17_tos, -1, R11_scratch1);

  // Throw exception interpreter entry expects exception oop to be in R3.
  __ mr(R3_RET, R17_tos);
  __ load_dispatch_table(R11_scratch1, (address*)Interpreter::throw_exception_entry());
  __ mtctr(R11_scratch1);
  __ bctr();
}

// =============================================================================
// Synchronization
// Searches the basic object lock list on the stack for a free slot
// and uses it to lock the object in tos.
//
// Recursive locking is enabled by exiting the search if the same
// object is already found in the list. Thus, a new BasicObjectLock
// is allocated "higher up" in the stack and is therefore found first
// at the next monitor exit.
void TemplateTable::monitorenter() {
  transition(atos, vtos);

  __ verify_oop(R17_tos);

  Register Rcurrent_monitor = R11_scratch1,
           Rcurrent_obj = R12_scratch2,
           Robj_to_lock = R17_tos,
           Rscratch1 = R3_ARG1,
           Rscratch2 = R4_ARG2,
           Rscratch3 = R5_ARG3,
           Rcurrent_obj_addr = R6_ARG4;

  // ------------------------------------------------------------------------------
  // Null pointer exception.
  __ null_check_throw(Robj_to_lock, -1, R11_scratch1);

  // Try to acquire a lock on the object.
  // Repeat until succeeded (i.e., until monitorenter returns true).

  // ------------------------------------------------------------------------------
  // Find a free slot in the monitor block.
  Label Lfound, Lexit, Lallocate_new;
  ConditionRegister found_free_slot = CCR0,
                    found_same_obj = CCR1,
                    reached_limit = CCR6;
  {
    Label Lloop, Lentry;
    Register Rlimit = Rcurrent_monitor;

    // Set up search loop - start with topmost monitor.
    __ add(Rcurrent_obj_addr, BasicObjectLock::obj_offset_in_bytes(), R26_monitor);

    __ ld(Rlimit, 0, R1_SP);
    __ addi(Rlimit, Rlimit, - (frame::ijava_state_size + frame::interpreter_frame_monitor_size_in_bytes() - BasicObjectLock::obj_offset_in_bytes())); // Monitor base

    // Check if any slot is present => shortcut to allocation if not.
    __ cmpld(reached_limit, Rcurrent_obj_addr, Rlimit);
    __ bgt(reached_limit, Lallocate_new);

    // Pre-load topmost slot.
    __ ld(Rcurrent_obj, 0, Rcurrent_obj_addr);
    __ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, frame::interpreter_frame_monitor_size() * wordSize);
    // The search loop.
    __ bind(Lloop);
    // Found free slot?
    __ cmpdi(found_free_slot, Rcurrent_obj, 0);
    // Is this entry for the same obj? If so, stop the search and take the found
    // free slot or allocate a new one to enable recursive locking.
    __ cmpd(found_same_obj, Rcurrent_obj, Robj_to_lock);
    __ cmpld(reached_limit, Rcurrent_obj_addr, Rlimit);
    __ beq(found_free_slot, Lexit);
    __ beq(found_same_obj, Lallocate_new);
    __ bgt(reached_limit, Lallocate_new);
    // Check if last allocated BasicObjectLock reached.
    __ ld(Rcurrent_obj, 0, Rcurrent_obj_addr);
    __ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, frame::interpreter_frame_monitor_size() * wordSize);
    // Next iteration if unchecked BasicObjectLocks exist on the stack.
    __ b(Lloop);
  }

  // ------------------------------------------------------------------------------
  // Check if we found a free slot.
  __ bind(Lexit);

  __ addi(Rcurrent_monitor, Rcurrent_obj_addr, -(frame::interpreter_frame_monitor_size() * wordSize) - BasicObjectLock::obj_offset_in_bytes());
  __ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, - frame::interpreter_frame_monitor_size() * wordSize);
  __ b(Lfound);

  // We didn't find a free BasicObjectLock => allocate one.
  __ align(32, 12);
  __ bind(Lallocate_new);
  __ add_monitor_to_stack(false, Rscratch1, Rscratch2);
  __ mr(Rcurrent_monitor, R26_monitor);
  __ addi(Rcurrent_obj_addr, R26_monitor, BasicObjectLock::obj_offset_in_bytes());

  // ------------------------------------------------------------------------------
  // We now have a slot to lock.
  __ bind(Lfound);

  // Increment bcp to point to the next bytecode, so exception handling for async. exceptions works correctly.
  // The object has already been popped from the stack, so the expression stack looks correct.
  __ addi(R14_bcp, R14_bcp, 1);

  __ std(Robj_to_lock, 0, Rcurrent_obj_addr);
  __ lock_object(Rcurrent_monitor, Robj_to_lock);

  // Check if there's enough space on the stack for the monitors after locking.
  Label Lskip_stack_check;
  // Optimization: If the monitors stack section is less than a standard page size (4K), don't run
  // the stack check. There should be enough shadow pages to fit that in.
  __ ld(Rscratch3, 0, R1_SP);
  __ sub(Rscratch3, Rscratch3, R26_monitor);
  __ cmpdi(CCR0, Rscratch3, 4*K);
  __ blt(CCR0, Lskip_stack_check);

  DEBUG_ONLY(__ untested("stack overflow check during monitor enter");)
  __ li(Rscratch1, 0);
  __ generate_stack_overflow_check_with_compare_and_throw(Rscratch1, Rscratch2);

  __ align(32, 12);
  __ bind(Lskip_stack_check);

  // The bcp has already been incremented. Just need to dispatch to next instruction.
  __ dispatch_next(vtos);
}

void TemplateTable::monitorexit() {
  transition(atos, vtos);
  __ verify_oop(R17_tos);

  Register Rcurrent_monitor = R11_scratch1,
           Rcurrent_obj = R12_scratch2,
           Robj_to_lock = R17_tos,
           Rcurrent_obj_addr = R3_ARG1,
           Rlimit = R4_ARG2;
  Label Lfound, Lillegal_monitor_state;

  // Check corner case: unbalanced monitorenter / monitorexit.
  __ ld(Rlimit, 0, R1_SP);
  __ addi(Rlimit, Rlimit, - (frame::ijava_state_size + frame::interpreter_frame_monitor_size_in_bytes())); // Monitor base

  // Null pointer check.
  __ null_check_throw(Robj_to_lock, -1, R11_scratch1);

  __ cmpld(CCR0, R26_monitor, Rlimit);
  __ bgt(CCR0, Lillegal_monitor_state);

  // Find the corresponding slot in the monitors stack section.
  {
    Label Lloop;

    // Start with topmost monitor.
    __ addi(Rcurrent_obj_addr, R26_monitor, BasicObjectLock::obj_offset_in_bytes());
    __ addi(Rlimit, Rlimit, BasicObjectLock::obj_offset_in_bytes());
    __ ld(Rcurrent_obj, 0, Rcurrent_obj_addr);
    __ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, frame::interpreter_frame_monitor_size() * wordSize);

    __ bind(Lloop);
    // Is this entry for the same obj?
    __ cmpd(CCR0, Rcurrent_obj, Robj_to_lock);
    __ beq(CCR0, Lfound);

    // Check if last allocated BasicObjectLock reached.
    __ ld(Rcurrent_obj, 0, Rcurrent_obj_addr);
    __ cmpld(CCR0, Rcurrent_obj_addr, Rlimit);
    __ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, frame::interpreter_frame_monitor_size() * wordSize);

    // Next iteration if unchecked BasicObjectLocks exist on the stack.
    __ ble(CCR0, Lloop);
  }

  // Fell through without finding the BasicObjectLock => throw up!
  __ bind(Lillegal_monitor_state);
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
  __ should_not_reach_here();

  __ align(32, 12);
  __ bind(Lfound);
  __ addi(Rcurrent_monitor, Rcurrent_obj_addr,
          -(frame::interpreter_frame_monitor_size() * wordSize) - BasicObjectLock::obj_offset_in_bytes());
  __ unlock_object(Rcurrent_monitor);
}

// ============================================================================
// Wide bytecodes

// Wide instructions. Simply redirects to the wide entry point for that instruction.
void TemplateTable::wide() {
  transition(vtos, vtos);

  const Register Rtable = R11_scratch1,
                 Rindex = R12_scratch2,
                 Rtmp = R0;

  __ lbz(Rindex, 1, R14_bcp); // Load the bytecode following the wide prefix.

  __ load_dispatch_table(Rtable, Interpreter::_wentry_point);

  __ slwi(Rindex, Rindex, LogBytesPerWord);
  __ ldx(Rtmp, Rtable, Rindex);
  __ mtctr(Rtmp);
  __ bctr();
  // Note: the bcp increment step is part of the individual wide bytecode implementations.
}
#endif // !CC_INTERP