/*
 * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2012, 2015 SAP AG. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */


#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "interp_masm_ppc_64.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/sharedRuntime.hpp"

#ifdef PRODUCT
#define BLOCK_COMMENT(str) // nothing
#else
#define BLOCK_COMMENT(str) block_comment(str)
#endif

void InterpreterMacroAssembler::null_check_throw(Register a, int offset, Register temp_reg) {
#ifdef CC_INTERP
  address exception_entry = StubRoutines::throw_NullPointerException_at_call_entry();
#else
  address exception_entry = Interpreter::throw_NullPointerException_entry();
#endif
  MacroAssembler::null_check_throw(a, offset, temp_reg, exception_entry);
}

void InterpreterMacroAssembler::branch_to_entry(address entry, Register Rscratch) {
  assert(entry, "Entry must have been generated by now");
  if (is_within_range_of_b(entry, pc())) {
    b(entry);
  } else {
    load_const_optimized(Rscratch, entry, R0);
    mtctr(Rscratch);
    bctr();
  }
}

#ifndef CC_INTERP

void InterpreterMacroAssembler::dispatch_next(TosState state, int bcp_incr) {
  Register bytecode = R12_scratch2;
  if (bcp_incr != 0) {
    lbzu(bytecode, bcp_incr, R14_bcp);
  } else {
    lbz(bytecode, 0, R14_bcp);
  }

  dispatch_Lbyte_code(state, bytecode, Interpreter::dispatch_table(state));
}
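// A rough sketch of what dispatch_next implements (illustration only; the
// C-style names below are descriptive, not HotSpot API):
//
//   unsigned char bc = *(bcp += bcp_incr);  // lbzu: load bytecode, update R14_bcp
//   address handler  = table[bc];           // one dispatch table per TosState
//   goto *handler;                          // mtctr/bctr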
void InterpreterMacroAssembler::dispatch_via(TosState state, address* table) {
  // Load current bytecode.
  Register bytecode = R12_scratch2;
  lbz(bytecode, 0, R14_bcp);
  dispatch_Lbyte_code(state, bytecode, table);
}

// Dispatch code executed in the prolog of a bytecode which does not do its
// own dispatch. The dispatch address is computed and placed in R24_dispatch_addr.
void InterpreterMacroAssembler::dispatch_prolog(TosState state, int bcp_incr) {
  Register bytecode = R12_scratch2;
  lbz(bytecode, bcp_incr, R14_bcp);

  load_dispatch_table(R24_dispatch_addr, Interpreter::dispatch_table(state));

  sldi(bytecode, bytecode, LogBytesPerWord);
  ldx(R24_dispatch_addr, R24_dispatch_addr, bytecode);
}

// Dispatch code executed in the epilog of a bytecode which does not do its
// own dispatch. The dispatch address in R24_dispatch_addr is used for the
// dispatch.
void InterpreterMacroAssembler::dispatch_epilog(TosState state, int bcp_incr) {
  mtctr(R24_dispatch_addr);
  addi(R14_bcp, R14_bcp, bcp_incr);
  bctr();
}

void InterpreterMacroAssembler::check_and_handle_popframe(Register scratch_reg) {
  assert(scratch_reg != R0, "can't use R0 as scratch_reg here");
  if (JvmtiExport::can_pop_frame()) {
    Label L;

    // Check the "pending popframe condition" flag in the current thread.
    lwz(scratch_reg, in_bytes(JavaThread::popframe_condition_offset()), R16_thread);

    // Initiate popframe handling only if it is not already being
    // processed. If the flag has the popframe_processing bit set, it
    // means that this code is called *during* popframe handling - we
    // don't want to reenter.
    andi_(R0, scratch_reg, JavaThread::popframe_pending_bit);
    beq(CCR0, L);

    andi_(R0, scratch_reg, JavaThread::popframe_processing_bit);
    bne(CCR0, L);

    // Call the Interpreter::remove_activation_preserving_args_entry()
    // func to get the address of the same-named entrypoint in the
    // generated interpreter code.
#if defined(ABI_ELFv2)
    call_c(CAST_FROM_FN_PTR(address,
                            Interpreter::remove_activation_preserving_args_entry),
           relocInfo::none);
#else
    call_c(CAST_FROM_FN_PTR(FunctionDescriptor*,
                            Interpreter::remove_activation_preserving_args_entry),
           relocInfo::none);
#endif

    // Jump to Interpreter::_remove_activation_preserving_args_entry.
    mtctr(R3_RET);
    bctr();

    align(32, 12);
    bind(L);
  }
}
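// The popframe check above is, in pseudo code:
//
//   int cond = thread->popframe_condition();
//   if ((cond & popframe_pending_bit) != 0 &&
//       (cond & popframe_processing_bit) == 0) {
//     goto *Interpreter::remove_activation_preserving_args_entry();
//   }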
void InterpreterMacroAssembler::check_and_handle_earlyret(Register scratch_reg) {
  const Register Rthr_state_addr = scratch_reg;
  if (JvmtiExport::can_force_early_return()) {
    Label Lno_early_ret;
    ld(Rthr_state_addr, in_bytes(JavaThread::jvmti_thread_state_offset()), R16_thread);
    cmpdi(CCR0, Rthr_state_addr, 0);
    beq(CCR0, Lno_early_ret);

    lwz(R0, in_bytes(JvmtiThreadState::earlyret_state_offset()), Rthr_state_addr);
    cmpwi(CCR0, R0, JvmtiThreadState::earlyret_pending);
    bne(CCR0, Lno_early_ret);

    // Jump to Interpreter::_earlyret_entry.
    lwz(R3_ARG1, in_bytes(JvmtiThreadState::earlyret_tos_offset()), Rthr_state_addr);
    call_VM_leaf(CAST_FROM_FN_PTR(address, Interpreter::remove_activation_early_entry));
    mtlr(R3_RET);
    blr();

    align(32, 12);
    bind(Lno_early_ret);
  }
}

void InterpreterMacroAssembler::load_earlyret_value(TosState state, Register Rscratch1) {
  const Register RjvmtiState = Rscratch1;
  const Register Rscratch2   = R0;

  ld(RjvmtiState, in_bytes(JavaThread::jvmti_thread_state_offset()), R16_thread);
  li(Rscratch2, 0);

  switch (state) {
    case atos: ld(R17_tos, in_bytes(JvmtiThreadState::earlyret_oop_offset()), RjvmtiState);
               std(Rscratch2, in_bytes(JvmtiThreadState::earlyret_oop_offset()), RjvmtiState);
               break;
    case ltos: ld(R17_tos, in_bytes(JvmtiThreadState::earlyret_value_offset()), RjvmtiState);
               break;
    case btos: // fall through
    case ctos: // fall through
    case stos: // fall through
    case itos: lwz(R17_tos, in_bytes(JvmtiThreadState::earlyret_value_offset()), RjvmtiState);
               break;
    case ftos: lfs(F15_ftos, in_bytes(JvmtiThreadState::earlyret_value_offset()), RjvmtiState);
               break;
    case dtos: lfd(F15_ftos, in_bytes(JvmtiThreadState::earlyret_value_offset()), RjvmtiState);
               break;
    case vtos: break;
    default:   ShouldNotReachHere();
  }

  // Clean up tos value in the jvmti thread state.
  std(Rscratch2, in_bytes(JvmtiThreadState::earlyret_value_offset()), RjvmtiState);
  // Set tos state field to illegal value.
  li(Rscratch2, ilgl);
  stw(Rscratch2, in_bytes(JvmtiThreadState::earlyret_tos_offset()), RjvmtiState);
}
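// load_earlyret_value in pseudo code (sketch; the accessor names are
// descriptive, not the exact runtime API). The JVMTI early-return value is
// consumed exactly once:
//
//   JvmtiThreadState* s = thread->jvmti_thread_state();
//   tos_value = s->earlyret_value();   // register depends on TosState
//   s->clear_earlyret_value();         // zero the value (and oop) slot
//   s->set_earlyret_tos(ilgl);         // invalidate the tos state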
// Common code to dispatch and dispatch_only.
// Dispatch value in Lbyte_code and increment Lbcp.

void InterpreterMacroAssembler::load_dispatch_table(Register dst, address* table) {
  address table_base = (address)Interpreter::dispatch_table((TosState)0);
  intptr_t table_offs = (intptr_t)table - (intptr_t)table_base;
  if (is_simm16(table_offs)) {
    addi(dst, R25_templateTableBase, (int)table_offs);
  } else {
    load_const_optimized(dst, table, R0);
  }
}
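// The per-TosState dispatch tables are laid out contiguously, and
// R25_templateTableBase holds the address of the table for TosState 0.
// A particular table is therefore usually reachable via a 16-bit
// immediate offset:
//
//   dst = R25_templateTableBase + (table - dispatch_table((TosState)0))
//
// Only when that offset does not fit into a simm16 is the full 64-bit
// address materialized.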
void InterpreterMacroAssembler::dispatch_Lbyte_code(TosState state, Register bytecode, address* table, bool verify) {
  if (verify) {
    unimplemented("dispatch_Lbyte_code: verify"); // See Sparc Implementation to implement this
  }

#ifdef FAST_DISPATCH
  unimplemented("dispatch_Lbyte_code FAST_DISPATCH");
#else
  assert_different_registers(bytecode, R11_scratch1);

  // Calc dispatch table address.
  load_dispatch_table(R11_scratch1, table);

  sldi(R12_scratch2, bytecode, LogBytesPerWord);
  ldx(R11_scratch1, R11_scratch1, R12_scratch2);

  // Jump off!
  mtctr(R11_scratch1);
  bctr();
#endif
}

void InterpreterMacroAssembler::load_receiver(Register Rparam_count, Register Rrecv_dst) {
  sldi(Rrecv_dst, Rparam_count, Interpreter::logStackElementSize);
  ldx(Rrecv_dst, Rrecv_dst, R15_esp);
}

// Helpers for expression stack.
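// Expression stack layout (the stack grows towards lower addresses):
// R15_esp points to the first free slot, i.e. the top-of-stack value lives
// at R15_esp + stackElementSize. A push stores relative to the current
// R15_esp and then decrements it; category-2 values (long, double) occupy
// two slots. push_l, for example, behaves like:
//
//   *(esp - slot_size) = value;   // the value ends up in the lower slot
//   esp -= 2 * slot_size;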
void InterpreterMacroAssembler::pop_i(Register r) {
  lwzu(r, Interpreter::stackElementSize, R15_esp);
}

void InterpreterMacroAssembler::pop_ptr(Register r) {
  ldu(r, Interpreter::stackElementSize, R15_esp);
}

void InterpreterMacroAssembler::pop_l(Register r) {
  ld(r, Interpreter::stackElementSize, R15_esp);
  addi(R15_esp, R15_esp, 2 * Interpreter::stackElementSize);
}

void InterpreterMacroAssembler::pop_f(FloatRegister f) {
  lfsu(f, Interpreter::stackElementSize, R15_esp);
}

void InterpreterMacroAssembler::pop_d(FloatRegister f) {
  lfd(f, Interpreter::stackElementSize, R15_esp);
  addi(R15_esp, R15_esp, 2 * Interpreter::stackElementSize);
}

void InterpreterMacroAssembler::push_i(Register r) {
  stw(r, 0, R15_esp);
  addi(R15_esp, R15_esp, -Interpreter::stackElementSize);
}

void InterpreterMacroAssembler::push_ptr(Register r) {
  std(r, 0, R15_esp);
  addi(R15_esp, R15_esp, -Interpreter::stackElementSize);
}

void InterpreterMacroAssembler::push_l(Register r) {
  std(r, -Interpreter::stackElementSize, R15_esp);
  addi(R15_esp, R15_esp, -2 * Interpreter::stackElementSize);
}

void InterpreterMacroAssembler::push_f(FloatRegister f) {
  stfs(f, 0, R15_esp);
  addi(R15_esp, R15_esp, -Interpreter::stackElementSize);
}

void InterpreterMacroAssembler::push_d(FloatRegister f) {
  stfd(f, -Interpreter::stackElementSize, R15_esp);
  addi(R15_esp, R15_esp, -2 * Interpreter::stackElementSize);
}

void InterpreterMacroAssembler::push_2ptrs(Register first, Register second) {
  std(first, 0, R15_esp);
  std(second, -Interpreter::stackElementSize, R15_esp);
  addi(R15_esp, R15_esp, -2 * Interpreter::stackElementSize);
}

void InterpreterMacroAssembler::push_l_pop_d(Register l, FloatRegister d) {
  std(l, 0, R15_esp);
  lfd(d, 0, R15_esp);
}

void InterpreterMacroAssembler::push_d_pop_l(FloatRegister d, Register l) {
  stfd(d, 0, R15_esp);
  ld(l, 0, R15_esp);
}

void InterpreterMacroAssembler::push(TosState state) {
  switch (state) {
    case atos: push_ptr();         break;
    case btos:
    case ctos:
    case stos:
    case itos: push_i();           break;
    case ltos: push_l();           break;
    case ftos: push_f();           break;
    case dtos: push_d();           break;
    case vtos: /* nothing to do */ break;
    default:   ShouldNotReachHere();
  }
}

void InterpreterMacroAssembler::pop(TosState state) {
  switch (state) {
    case atos: pop_ptr();          break;
    case btos:
    case ctos:
    case stos:
    case itos: pop_i();            break;
    case ltos: pop_l();            break;
    case ftos: pop_f();            break;
    case dtos: pop_d();            break;
    case vtos: /* nothing to do */ break;
    default:   ShouldNotReachHere();
  }
  verify_oop(R17_tos, state);
}

void InterpreterMacroAssembler::empty_expression_stack() {
  addi(R15_esp, R26_monitor, -Interpreter::stackElementSize);
}

void InterpreterMacroAssembler::get_2_byte_integer_at_bcp(int         bcp_offset,
                                                          Register    Rdst,
                                                          signedOrNot is_signed) {
#if defined(VM_LITTLE_ENDIAN)
  if (bcp_offset) {
    load_const_optimized(Rdst, bcp_offset);
    lhbrx(Rdst, R14_bcp, Rdst);
  } else {
    lhbrx(Rdst, R14_bcp);
  }
  if (is_signed == Signed) {
    extsh(Rdst, Rdst);
  }
#else
  // Read Java big endian format.
  if (is_signed == Signed) {
    lha(Rdst, bcp_offset, R14_bcp);
  } else {
    lhz(Rdst, bcp_offset, R14_bcp);
  }
#endif
}

void InterpreterMacroAssembler::get_4_byte_integer_at_bcp(int         bcp_offset,
                                                          Register    Rdst,
                                                          signedOrNot is_signed) {
#if defined(VM_LITTLE_ENDIAN)
  if (bcp_offset) {
    load_const_optimized(Rdst, bcp_offset);
    lwbrx(Rdst, R14_bcp, Rdst);
  } else {
    lwbrx(Rdst, R14_bcp);
  }
  if (is_signed == Signed) {
    extsw(Rdst, Rdst);
  }
#else
  // Read Java big endian format.
  if (bcp_offset & 3) { // Offset unaligned?
    load_const_optimized(Rdst, bcp_offset);
    if (is_signed == Signed) {
      lwax(Rdst, R14_bcp, Rdst);
    } else {
      lwzx(Rdst, R14_bcp, Rdst);
    }
  } else {
    if (is_signed == Signed) {
      lwa(Rdst, bcp_offset, R14_bcp);
    } else {
      lwz(Rdst, bcp_offset, R14_bcp);
    }
  }
#endif
}
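// Operands in the bytecode stream are stored in Java (big-endian) order.
// On little-endian PPC64 the loaders above therefore use the
// byte-reversing loads lhbrx/lwbrx and sign-extend afterwards when needed;
// on big-endian a plain lha/lhz/lwa/lwz suffices. For a 2-byte operand the
// little-endian path computes, in effect:
//
//   Rdst = ((u1)bcp[off] << 8) | (u1)bcp[off + 1];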
// Load the constant pool cache index from the bytecode stream.
//
// Kills / writes:
// - Rdst, Rscratch
void InterpreterMacroAssembler::get_cache_index_at_bcp(Register Rdst, int bcp_offset, size_t index_size) {
  assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
  // Cache index is always in the native format, courtesy of Rewriter.
  if (index_size == sizeof(u2)) {
    lhz(Rdst, bcp_offset, R14_bcp);
  } else if (index_size == sizeof(u4)) {
    if (bcp_offset & 3) {
      load_const_optimized(Rdst, bcp_offset);
      lwax(Rdst, R14_bcp, Rdst);
    } else {
      lwa(Rdst, bcp_offset, R14_bcp);
    }
    assert(ConstantPool::decode_invokedynamic_index(~123) == 123, "else change next line");
    nand(Rdst, Rdst, Rdst); // convert to plain index
  } else if (index_size == sizeof(u1)) {
    lbz(Rdst, bcp_offset, R14_bcp);
  } else {
    ShouldNotReachHere();
  }
  // Rdst now contains cp cache index.
}

void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache, int bcp_offset, size_t index_size) {
  get_cache_index_at_bcp(cache, bcp_offset, index_size);
  sldi(cache, cache, exact_log2(in_words(ConstantPoolCacheEntry::size()) * BytesPerWord));
  add(cache, R27_constPoolCache, cache);
}

// Load 4-byte signed or unsigned integer in Java format (that is, big-endian format)
// from (Rsrc)+offset.
void InterpreterMacroAssembler::get_u4(Register Rdst, Register Rsrc, int offset,
                                       signedOrNot is_signed) {
#if defined(VM_LITTLE_ENDIAN)
  if (offset) {
    load_const_optimized(Rdst, offset);
    lwbrx(Rdst, Rdst, Rsrc);
  } else {
    lwbrx(Rdst, Rsrc);
  }
  if (is_signed == Signed) {
    extsw(Rdst, Rdst);
  }
#else
  if (is_signed == Signed) {
    lwa(Rdst, offset, Rsrc);
  } else {
    lwz(Rdst, offset, Rsrc);
  }
#endif
}

// Load object from cpool->resolved_references(index).
void InterpreterMacroAssembler::load_resolved_reference_at_index(Register result, Register index) {
  assert_different_registers(result, index);
  get_constant_pool(result);

  // Convert from field index to resolved_references() index and from
  // word index to byte offset. Since this is a java object, it can be compressed.
  Register tmp = index;  // reuse
  sldi(tmp, index, LogBytesPerHeapOop);
  // Load pointer for resolved_references[] objArray.
  ld(result, ConstantPool::resolved_references_offset_in_bytes(), result);
  // JNIHandles::resolve(result)
  ld(result, 0, result);
#ifdef ASSERT
  Label index_ok;
  lwa(R0, arrayOopDesc::length_offset_in_bytes(), result);
  sldi(R0, R0, LogBytesPerHeapOop);
  cmpd(CCR0, tmp, R0);
  blt(CCR0, index_ok);
  stop("resolved reference index out of bounds", 0x09256);
  bind(index_ok);
#endif
  // Add in the index.
  add(result, tmp, result);
  load_heap_oop(result, arrayOopDesc::base_offset_in_bytes(T_OBJECT), result);
}
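// load_resolved_reference_at_index in pseudo code (sketch; the accessor
// names are descriptive, not the exact runtime API):
//
//   jobject handle   = method->constants()->resolved_references();
//   objArrayOop refs = (objArrayOop)*handle;   // JNIHandles::resolve
//   result           = refs->obj_at(index);    // may be a compressed oop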
// Generate a subtype check: branch to ok_is_subtype if sub_klass is
// a subtype of super_klass. Blows registers Rsub_klass, tmp1, tmp2.
void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass, Register Rsuper_klass, Register Rtmp1,
                                                  Register Rtmp2, Register Rtmp3, Label &ok_is_subtype) {
  // Profile the not-null value's klass.
  profile_typecheck(Rsub_klass, Rtmp1, Rtmp2);
  check_klass_subtype(Rsub_klass, Rsuper_klass, Rtmp1, Rtmp2, ok_is_subtype);
  profile_typecheck_failed(Rtmp1, Rtmp2);
}

void InterpreterMacroAssembler::generate_stack_overflow_check_with_compare_and_throw(Register Rmem_frame_size, Register Rscratch1) {
  Label done;
  sub(Rmem_frame_size, R1_SP, Rmem_frame_size);
  ld(Rscratch1, thread_(stack_overflow_limit));
  cmpld(CCR0/*is_stack_overflow*/, Rmem_frame_size, Rscratch1);
  bgt(CCR0/*is_stack_overflow*/, done);

  // Load target address of the runtime stub.
  assert(StubRoutines::throw_StackOverflowError_entry() != NULL, "generated in wrong order");
  load_const_optimized(Rscratch1, (StubRoutines::throw_StackOverflowError_entry()), R0);
  mtctr(Rscratch1);
  // Restore caller_sp.
#ifdef ASSERT
  ld(Rscratch1, 0, R1_SP);
  ld(R0, 0, R21_sender_SP);
  cmpd(CCR0, R0, Rscratch1);
  asm_assert_eq("backlink", 0x547);
#endif // ASSERT
  mr(R1_SP, R21_sender_SP);
  bctr();

  align(32, 12);
  bind(done);
}
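// The check above is, in effect:
//
//   if ((uintptr_t)(SP - frame_size) <= thread->stack_overflow_limit()) {
//     SP = sender_SP;                        // pop the frame being built
//     goto throw_StackOverflowError_stub;
//   }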
// Separate these two to allow for delay slot in middle.
// These are used to do a test and full jump to exception-throwing code.

// Check that index is in range for array, then shift index by index_shift,
// and put arrayOop + shifted_index into res.
// Note: res is still shy of address by array offset into object.

void InterpreterMacroAssembler::index_check_without_pop(Register Rarray, Register Rindex, int index_shift, Register Rtmp, Register Rres) {
  // Check that index is in range for array, then shift index by index_shift,
  // and put arrayOop + shifted_index into res.
  // Note: res is still shy of address by array offset into object.
  // Kills:
  //   - Rindex
  // Writes:
  //   - Rres: Address that corresponds to the array index if check was successful.
  verify_oop(Rarray);
  const Register Rlength   = R0;
  const Register RsxtIndex = Rtmp;
  Label LisNull, LnotOOR;

  // Array nullcheck
  if (!ImplicitNullChecks) {
    cmpdi(CCR0, Rarray, 0);
    beq(CCR0, LisNull);
  } else {
    null_check_throw(Rarray, arrayOopDesc::length_offset_in_bytes(), /*temp*/RsxtIndex);
  }

  // Rindex might contain garbage in upper bits (remember that we don't sign extend
  // during integer arithmetic operations). So kill them and put value into same register
  // where ArrayIndexOutOfBounds would expect the index in.
  rldicl(RsxtIndex, Rindex, 0, 32); // zero extend 32 bit -> 64 bit

  // Index check
  lwz(Rlength, arrayOopDesc::length_offset_in_bytes(), Rarray);
  cmplw(CCR0, Rindex, Rlength);
  sldi(RsxtIndex, RsxtIndex, index_shift);
  blt(CCR0, LnotOOR);
  // Index should be in R17_tos, array should be in R4_ARG2.
  mr(R17_tos, Rindex);
  mr(R4_ARG2, Rarray);
  load_dispatch_table(Rtmp, (address*)Interpreter::_throw_ArrayIndexOutOfBoundsException_entry);
  mtctr(Rtmp);
  bctr();

  if (!ImplicitNullChecks) {
    bind(LisNull);
    load_dispatch_table(Rtmp, (address*)Interpreter::_throw_NullPointerException_entry);
    mtctr(Rtmp);
    bctr();
  }

  align(32, 16);
  bind(LnotOOR);

  // Calc address
  add(Rres, RsxtIndex, Rarray);
}

void InterpreterMacroAssembler::index_check(Register array, Register index, int index_shift, Register tmp, Register res) {
  // pop array
  pop_ptr(array);

  // check array
  index_check_without_pop(array, index, index_shift, tmp, res);
}
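// index_check in pseudo code (note the unsigned compare, which also
// rejects negative indices):
//
//   if (array == NULL) goto throw_NullPointerException;
//   if ((u4)index >= (u4)array->length()) goto throw_ArrayIndexOutOfBoundsException;
//   res = (intptr_t)array + ((u8)(u4)index << index_shift);  // data offset still missing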
void InterpreterMacroAssembler::get_const(Register Rdst) {
  ld(Rdst, in_bytes(Method::const_offset()), R19_method);
}

void InterpreterMacroAssembler::get_constant_pool(Register Rdst) {
  get_const(Rdst);
  ld(Rdst, in_bytes(ConstMethod::constants_offset()), Rdst);
}

void InterpreterMacroAssembler::get_constant_pool_cache(Register Rdst) {
  get_constant_pool(Rdst);
  ld(Rdst, ConstantPool::cache_offset_in_bytes(), Rdst);
}

void InterpreterMacroAssembler::get_cpool_and_tags(Register Rcpool, Register Rtags) {
  get_constant_pool(Rcpool);
  ld(Rtags, ConstantPool::tags_offset_in_bytes(), Rcpool);
}

// Unlock if synchronized method.
//
// Unlock the receiver if this is a synchronized method.
// Unlock any Java monitors from synchronized blocks.
//
// If there are locked Java monitors
//   If throw_monitor_exception
//     throws IllegalMonitorStateException
//   Else if install_monitor_exception
//     installs IllegalMonitorStateException
//   Else
//     no error processing
void InterpreterMacroAssembler::unlock_if_synchronized_method(TosState state,
                                                              bool throw_monitor_exception,
                                                              bool install_monitor_exception) {
  Label Lunlocked, Lno_unlock;
  {
    Register Rdo_not_unlock_flag = R11_scratch1;
    Register Raccess_flags       = R12_scratch2;

    // Check if synchronized method or unlocking prevented by
    // JavaThread::do_not_unlock_if_synchronized flag.
    lbz(Rdo_not_unlock_flag, in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()), R16_thread);
    lwz(Raccess_flags, in_bytes(Method::access_flags_offset()), R19_method);
    li(R0, 0);
    stb(R0, in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()), R16_thread); // reset flag

    push(state);

    // Skip if we don't have to unlock.
    rldicl_(R0, Raccess_flags, 64-JVM_ACC_SYNCHRONIZED_BIT, 63); // Extract bit and compare to 0.
    beq(CCR0, Lunlocked);

    cmpwi(CCR0, Rdo_not_unlock_flag, 0);
    bne(CCR0, Lno_unlock);
  }

  // Unlock
  {
    Register Rmonitor_base = R11_scratch1;

    Label Lunlock;
    // If it's still locked, everything is ok, unlock it.
    ld(Rmonitor_base, 0, R1_SP);
    addi(Rmonitor_base, Rmonitor_base,
         -(frame::ijava_state_size + frame::interpreter_frame_monitor_size_in_bytes())); // Monitor base

    ld(R0, BasicObjectLock::obj_offset_in_bytes(), Rmonitor_base);
    cmpdi(CCR0, R0, 0);
    bne(CCR0, Lunlock);

    // If it's already unlocked, throw exception.
    if (throw_monitor_exception) {
      call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
      should_not_reach_here();
    } else {
      if (install_monitor_exception) {
        call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::new_illegal_monitor_state_exception));
        b(Lunlocked);
      }
    }

    bind(Lunlock);
    unlock_object(Rmonitor_base);
  }

  // Check that all other monitors are unlocked. Throw IllegalMonitorState exception if not.
  bind(Lunlocked);
  {
    Label Lexception, Lrestart;
    Register Rcurrent_obj_addr = R11_scratch1;
    const int delta = frame::interpreter_frame_monitor_size_in_bytes();
    assert((delta & LongAlignmentMask) == 0, "sizeof BasicObjectLock must be even number of doublewords");

    bind(Lrestart);
    // Set up search loop: Calc num of iterations.
    {
      Register Riterations   = R12_scratch2;
      Register Rmonitor_base = Rcurrent_obj_addr;
      ld(Rmonitor_base, 0, R1_SP);
      addi(Rmonitor_base, Rmonitor_base, -frame::ijava_state_size); // Monitor base

      subf_(Riterations, R26_monitor, Rmonitor_base);
      ble(CCR0, Lno_unlock);

      addi(Rcurrent_obj_addr, Rmonitor_base,
           BasicObjectLock::obj_offset_in_bytes() - frame::interpreter_frame_monitor_size_in_bytes());
      // Check if any monitor is on stack, bail out if not
      srdi(Riterations, Riterations, exact_log2(delta));
      mtctr(Riterations);
    }

    // The search loop: Look for locked monitors.
    {
      const Register Rcurrent_obj = R0;
      Label Lloop;

      ld(Rcurrent_obj, 0, Rcurrent_obj_addr);
      addi(Rcurrent_obj_addr, Rcurrent_obj_addr, -delta);
      bind(Lloop);

      // Check if current entry is used.
      cmpdi(CCR0, Rcurrent_obj, 0);
      bne(CCR0, Lexception);
      // Preload next iteration's compare value.
      ld(Rcurrent_obj, 0, Rcurrent_obj_addr);
      addi(Rcurrent_obj_addr, Rcurrent_obj_addr, -delta);
      bdnz(Lloop);
    }
    // Fell through: Everything's unlocked => finish.
    b(Lno_unlock);

    // An object is still locked => need to throw exception.
    bind(Lexception);
    if (throw_monitor_exception) {
      call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
      should_not_reach_here();
    } else {
      // Stack unrolling. Unlock object and if requested, install illegal_monitor_exception.
      // Unlock does not block, so don't have to worry about the frame.
      Register Rmonitor_addr = R11_scratch1;
      addi(Rmonitor_addr, Rcurrent_obj_addr, -BasicObjectLock::obj_offset_in_bytes() + delta);
      unlock_object(Rmonitor_addr);
      if (install_monitor_exception) {
        call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::new_illegal_monitor_state_exception));
      }
      b(Lrestart);
    }
  }

  align(32, 12);
  bind(Lno_unlock);
  pop(state);
}
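// The scan above walks every monitor slot of the current frame, from the
// slot nearest the ijava state down to R26_monitor, roughly:
//
//   for (BasicObjectLock* m = monitor_base - 1; m >= (BasicObjectLock*)R26_monitor; m--) {
//     if (m->obj() != NULL) {
//       // Still locked: throw IllegalMonitorStateException, or unlock it,
//       // optionally install the exception, and restart the scan.
//     }
//   }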
// Support function for remove_activation & Co.
void InterpreterMacroAssembler::merge_frames(Register Rsender_sp, Register return_pc, Register Rscratch1, Register Rscratch2) {
  // Pop interpreter frame.
  ld(Rscratch1, 0, R1_SP);                                 // *SP
  ld(Rsender_sp, _ijava_state_neg(sender_sp), Rscratch1);  // top_frame_sp
  ld(Rscratch2, 0, Rscratch1);                             // **SP
#ifdef ASSERT
  {
    Label Lok;
    ld(R0, _ijava_state_neg(ijava_reserved), Rscratch1);
    cmpdi(CCR0, R0, 0x5afe);
    beq(CCR0, Lok);
    stop("frame corrupted (remove activation)", 0x5afe);
    bind(Lok);
  }
#endif
  if (return_pc != noreg) {
    ld(return_pc, _abi(lr), Rscratch1); // LR
  }

  // Merge top frames.
  subf(Rscratch1, R1_SP, Rsender_sp); // top_frame_sp - SP
  stdux(Rscratch2, R1_SP, Rscratch1); // atomically set *(SP = top_frame_sp) = **SP
}
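// merge_frames in pseudo code (sketch): the interpreter frame and the top
// frame above it are collapsed into the caller's frame:
//
//   fp        = *(intptr_t**)SP;            // our backlink
//   sender_sp = fp[ijava_state.sender_sp];  // caller's SP
//   backlink  = *(intptr_t**)fp;            // caller's backlink
//   return_pc = fp[abi.lr];                 // optional
//   SP = sender_sp; *(intptr_t**)SP = backlink;  // both done by one stdux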
// Remove activation.
//
// Unlock the receiver if this is a synchronized method.
// Unlock any Java monitors from synchronized blocks.
// Remove the activation from the stack.
//
// If there are locked Java monitors
//   If throw_monitor_exception
//     throws IllegalMonitorStateException
//   Else if install_monitor_exception
//     installs IllegalMonitorStateException
//   Else
//     no error processing
void InterpreterMacroAssembler::remove_activation(TosState state,
                                                  bool throw_monitor_exception,
                                                  bool install_monitor_exception) {
  unlock_if_synchronized_method(state, throw_monitor_exception, install_monitor_exception);

  // Save result (push state before jvmti call and pop it afterwards) and notify jvmti.
  notify_method_exit(false, state, NotifyJVMTI, true);

  verify_oop(R17_tos, state);
  verify_thread();

  merge_frames(/*top_frame_sp*/ R21_sender_SP, /*return_pc*/ R0, R11_scratch1, R12_scratch2);
  mtlr(R0);
}

#endif // !CC_INTERP

// Lock object
//
// Registers alive
//   monitor - Address of the BasicObjectLock to be used for locking,
//             which must be initialized with the object to lock.
//   object  - Address of the object to be locked.
//
void InterpreterMacroAssembler::lock_object(Register monitor, Register object) {
  if (UseHeavyMonitors) {
    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
            monitor, /*check_for_exceptions=*/true CC_INTERP_ONLY(&& false));
  } else {
    // template code:
    //
    // markOop displaced_header = obj->mark().set_unlocked();
    // monitor->lock()->set_displaced_header(displaced_header);
    // if (Atomic::cmpxchg_ptr(/*ex=*/monitor, /*addr*/obj->mark_addr(), /*cmp*/displaced_header) == displaced_header) {
    //   // We stored the monitor address into the object's mark word.
    // } else if (THREAD->is_lock_owned((address)displaced_header)) {
    //   // Simple recursive case.
    //   monitor->lock()->set_displaced_header(NULL);
    // } else {
    //   // Slow path.
    //   InterpreterRuntime::monitorenter(THREAD, monitor);
    // }

    const Register displaced_header = R7_ARG5;
    const Register object_mark_addr = R8_ARG6;
    const Register current_header   = R9_ARG7;
    const Register tmp              = R10_ARG8;

    Label done;
    Label cas_failed, slow_case;

    assert_different_registers(displaced_header, object_mark_addr, current_header, tmp);

    // markOop displaced_header = obj->mark().set_unlocked();

    // Load markOop from object into displaced_header.
    ld(displaced_header, oopDesc::mark_offset_in_bytes(), object);

    if (UseBiasedLocking) {
      biased_locking_enter(CCR0, object, displaced_header, tmp, current_header, done, &slow_case);
    }

    // Set displaced_header to be (markOop of object | UNLOCK_VALUE).
    ori(displaced_header, displaced_header, markOopDesc::unlocked_value);

    // monitor->lock()->set_displaced_header(displaced_header);

    // Initialize the box (Must happen before we update the object mark!).
    std(displaced_header, BasicObjectLock::lock_offset_in_bytes() +
        BasicLock::displaced_header_offset_in_bytes(), monitor);

    // if (Atomic::cmpxchg_ptr(/*ex=*/monitor, /*addr*/obj->mark_addr(), /*cmp*/displaced_header) == displaced_header) {

    // Store stack address of the BasicObjectLock (this is monitor) into object.
    addi(object_mark_addr, object, oopDesc::mark_offset_in_bytes());

    // Must fence, otherwise, preceding store(s) may float below cmpxchg.
    // CmpxchgX sets CCR0 to cmpX(current, displaced).
    fence(); // TODO: replace by MacroAssembler::MemBarRel | MacroAssembler::MemBarAcq ?
    cmpxchgd(/*flag=*/CCR0,
             /*current_value=*/current_header,
             /*compare_value=*/displaced_header, /*exchange_value=*/monitor,
             /*where=*/object_mark_addr,
             MacroAssembler::MemBarRel | MacroAssembler::MemBarAcq,
             MacroAssembler::cmpxchgx_hint_acquire_lock(),
             noreg,
             &cas_failed);

    // If the compare-and-exchange succeeded, then we found an unlocked
    // object and we have now locked it.
    b(done);
    bind(cas_failed);

    // } else if (THREAD->is_lock_owned((address)displaced_header)) {
    //   // Simple recursive case.
    //   monitor->lock()->set_displaced_header(NULL);

    // We did not see an unlocked object so try the fast recursive case.

    // Check if owner is self by comparing the value in the markOop of object
    // (current_header) with the stack pointer.
    sub(current_header, current_header, R1_SP);

    assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");
    load_const_optimized(tmp,
                         (address) (~(os::vm_page_size()-1) |
                                    markOopDesc::lock_mask_in_place));

    and_(R0/*==0?*/, current_header, tmp);
    // If condition is true we are done and hence we can store 0 in the displaced
    // header indicating it is a recursive lock.
    bne(CCR0, slow_case);
    release();
    std(R0/*==0!*/, BasicObjectLock::lock_offset_in_bytes() +
        BasicLock::displaced_header_offset_in_bytes(), monitor);
    b(done);
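    // The recursive-lock test above exploits the stack layout: if this
    // thread already owns the lock, the mark word (current_header) points
    // into our own stack, at most one page above SP. In pseudo code:
    //
    //   if (((uintptr_t)(mark - SP) & (~(page_size - 1) | lock_mask_in_place)) == 0) {
    //     monitor->lock()->set_displaced_header(NULL);  // recursive lock
    //   } else {
    //     goto slow_case;
    //   }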
    // } else {
    //   // Slow path.
    //   InterpreterRuntime::monitorenter(THREAD, monitor);

    // None of the above fast optimizations worked so we have to get into the
    // slow case of monitor enter.
    bind(slow_case);
    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
            monitor, /*check_for_exceptions=*/true CC_INTERP_ONLY(&& false));
    // }
    align(32, 12);
    bind(done);
  }
}

// Unlocks an object. Used in monitorexit bytecode and remove_activation.
//
// Registers alive
//   monitor - Address of the BasicObjectLock to be used for locking,
//             which must be initialized with the object to lock.
//
// Throw IllegalMonitorStateException if object is not locked by current thread.
void InterpreterMacroAssembler::unlock_object(Register monitor, bool check_for_exceptions) {
  if (UseHeavyMonitors) {
    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit),
            monitor, check_for_exceptions CC_INTERP_ONLY(&& false));
  } else {

    // template code:
    //
    // if ((displaced_header = monitor->displaced_header()) == NULL) {
    //   // Recursive unlock. Mark the monitor unlocked by setting the object field to NULL.
    //   monitor->set_obj(NULL);
    // } else if (Atomic::cmpxchg_ptr(displaced_header, obj->mark_addr(), monitor) == monitor) {
    //   // We swapped the unlocked mark in displaced_header into the object's mark word.
    //   monitor->set_obj(NULL);
    // } else {
    //   // Slow path.
    //   InterpreterRuntime::monitorexit(THREAD, monitor);
    // }

    const Register object           = R7_ARG5;
    const Register displaced_header = R8_ARG6;
    const Register object_mark_addr = R9_ARG7;
    const Register current_header   = R10_ARG8;

    Label free_slot;
    Label slow_case;

    assert_different_registers(object, displaced_header, object_mark_addr, current_header);

    if (UseBiasedLocking) {
      // The object address from the monitor is in object.
      ld(object, BasicObjectLock::obj_offset_in_bytes(), monitor);
      assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
      biased_locking_exit(CCR0, object, displaced_header, free_slot);
    }

    // Test first if we are in the fast recursive case.
    ld(displaced_header, BasicObjectLock::lock_offset_in_bytes() +
       BasicLock::displaced_header_offset_in_bytes(), monitor);

    // If the displaced header is zero, we have a recursive unlock.
    cmpdi(CCR0, displaced_header, 0);
    beq(CCR0, free_slot); // recursive unlock

    // } else if (Atomic::cmpxchg_ptr(displaced_header, obj->mark_addr(), monitor) == monitor) {
    //   // We swapped the unlocked mark in displaced_header into the object's mark word.
    //   monitor->set_obj(NULL);

    // If we still have a lightweight lock, unlock the object and be done.

    // The object address from the monitor is in object.
    if (!UseBiasedLocking) { ld(object, BasicObjectLock::obj_offset_in_bytes(), monitor); }
    addi(object_mark_addr, object, oopDesc::mark_offset_in_bytes());

    // We have the displaced header in displaced_header. If the lock is still
    // lightweight, it will contain the monitor address and we'll store the
    // displaced header back into the object's mark word.
    // CmpxchgX sets CCR0 to cmpX(current, monitor).
    cmpxchgd(/*flag=*/CCR0,
             /*current_value=*/current_header,
             /*compare_value=*/monitor, /*exchange_value=*/displaced_header,
             /*where=*/object_mark_addr,
             MacroAssembler::MemBarRel,
             MacroAssembler::cmpxchgx_hint_release_lock(),
             noreg,
             &slow_case);
    b(free_slot);

    // } else {
    //   // Slow path.
    //   InterpreterRuntime::monitorexit(THREAD, monitor);

    // The lock has been converted into a heavy lock and hence
    // we need to get into the slow case.
    bind(slow_case);
    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit),
            monitor, check_for_exceptions CC_INTERP_ONLY(&& false));
    // }

    Label done;
    b(done); // Monitor register may be overwritten! Runtime has already freed the slot.

    // Exchange worked, do monitor->set_obj(NULL);
    align(32, 12);
    bind(free_slot);
    li(R0, 0);
    std(R0, BasicObjectLock::obj_offset_in_bytes(), monitor);
    bind(done);
  }
}
1002// 1003// Input: 1004// - Rtarget_method: method to call 1005// - Rret_addr: return address 1006// - 2 scratch regs 1007// 1008void InterpreterMacroAssembler::call_from_interpreter(Register Rtarget_method, Register Rret_addr, Register Rscratch1, Register Rscratch2) { 1009 assert_different_registers(Rscratch1, Rscratch2, Rtarget_method, Rret_addr); 1010 // Assume we want to go compiled if available. 1011 const Register Rtarget_addr = Rscratch1; 1012 const Register Rinterp_only = Rscratch2; 1013 1014 ld(Rtarget_addr, in_bytes(Method::from_interpreted_offset()), Rtarget_method); 1015 1016 if (JvmtiExport::can_post_interpreter_events()) { 1017 lwz(Rinterp_only, in_bytes(JavaThread::interp_only_mode_offset()), R16_thread); 1018 1019 // JVMTI events, such as single-stepping, are implemented partly by avoiding running 1020 // compiled code in threads for which the event is enabled. Check here for 1021 // interp_only_mode if these events CAN be enabled. 1022 Label done; 1023 verify_thread(); 1024 cmpwi(CCR0, Rinterp_only, 0); 1025 beq(CCR0, done); 1026 ld(Rtarget_addr, in_bytes(Method::interpreter_entry_offset()), Rtarget_method); 1027 align(32, 12); 1028 bind(done); 1029 } 1030 1031#ifdef ASSERT 1032 { 1033 Label Lok; 1034 cmpdi(CCR0, Rtarget_addr, 0); 1035 bne(CCR0, Lok); 1036 stop("null entry point"); 1037 bind(Lok); 1038 } 1039#endif // ASSERT 1040 1041 mr(R21_sender_SP, R1_SP); 1042 1043 // Calc a precise SP for the call. The SP value we calculated in 1044 // generate_fixed_frame() is based on the max_stack() value, so we would waste stack space 1045 // if esp is not max. Also, the i2c adapter extends the stack space without restoring 1046 // our pre-calced value, so repeating calls via i2c would result in stack overflow. 1047 // Since esp already points to an empty slot, we just have to sub 1 additional slot 1048 // to meet the abi scratch requirements. 1049 // The max_stack pointer will get restored by means of the GR_Lmax_stack local in 1050 // the return entry of the interpreter. 1051 addi(Rscratch2, R15_esp, Interpreter::stackElementSize - frame::abi_reg_args_size); 1052 clrrdi(Rscratch2, Rscratch2, exact_log2(frame::alignment_in_bytes)); // round towards smaller address 1053 resize_frame_absolute(Rscratch2, Rscratch2, R0); 1054 1055 mr_if_needed(R19_method, Rtarget_method); 1056 mtctr(Rtarget_addr); 1057 mtlr(Rret_addr); 1058 1059 save_interpreter_state(Rscratch2); 1060#ifdef ASSERT 1061 ld(Rscratch1, _ijava_state_neg(top_frame_sp), Rscratch2); // Rscratch2 contains fp 1062 cmpd(CCR0, R21_sender_SP, Rscratch1); 1063 asm_assert_eq("top_frame_sp incorrect", 0x951); 1064#endif 1065 1066 bctr(); 1067} 1068 1069// Set the method data pointer for the current bcp. 1070void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() { 1071 assert(ProfileInterpreter, "must be profiling interpreter"); 1072 Label get_continue; 1073 ld(R28_mdx, in_bytes(Method::method_data_offset()), R19_method); 1074 test_method_data_pointer(get_continue); 1075 call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::bcp_to_di), R19_method, R14_bcp); 1076 1077 addi(R28_mdx, R28_mdx, in_bytes(MethodData::data_offset())); 1078 add(R28_mdx, R28_mdx, R3_RET); 1079 bind(get_continue); 1080} 1081 1082// Test ImethodDataPtr. If it is null, continue at the specified label. 
// Set the method data pointer for the current bcp.
void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() {
  assert(ProfileInterpreter, "must be profiling interpreter");
  Label get_continue;
  ld(R28_mdx, in_bytes(Method::method_data_offset()), R19_method);
  test_method_data_pointer(get_continue);
  call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::bcp_to_di), R19_method, R14_bcp);

  addi(R28_mdx, R28_mdx, in_bytes(MethodData::data_offset()));
  add(R28_mdx, R28_mdx, R3_RET);
  bind(get_continue);
}

// Test ImethodDataPtr. If it is null, continue at the specified label.
void InterpreterMacroAssembler::test_method_data_pointer(Label& zero_continue) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  cmpdi(CCR0, R28_mdx, 0);
  beq(CCR0, zero_continue);
}

void InterpreterMacroAssembler::verify_method_data_pointer() {
  assert(ProfileInterpreter, "must be profiling interpreter");
#ifdef ASSERT
  Label verify_continue;
  test_method_data_pointer(verify_continue);

  // If the mdp is valid, it will point to a DataLayout header which is
  // consistent with the bcp. The converse is highly probable also.
  lhz(R11_scratch1, in_bytes(DataLayout::bci_offset()), R28_mdx);
  ld(R12_scratch2, in_bytes(Method::const_offset()), R19_method);
  addi(R11_scratch1, R11_scratch1, in_bytes(ConstMethod::codes_offset()));
  add(R11_scratch1, R12_scratch2, R11_scratch1);
  cmpd(CCR0, R11_scratch1, R14_bcp);
  beq(CCR0, verify_continue);

  call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::verify_mdp), R19_method, R14_bcp, R28_mdx);

  bind(verify_continue);
#endif
}

void InterpreterMacroAssembler::test_invocation_counter_for_mdp(Register invocation_count,
                                                                Register Rscratch,
                                                                Label &profile_continue) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  // Control will flow to "profile_continue" if the counter is less than the
  // limit or if we call profile_method().
  Label done;

  // If no method data exists, and the counter is high enough, make one.
  int ipl_offs = load_const_optimized(Rscratch, &InvocationCounter::InterpreterProfileLimit, R0, true);
  lwz(Rscratch, ipl_offs, Rscratch);

  cmpdi(CCR0, R28_mdx, 0);
  // Test to see if we should create a method data oop.
  cmpd(CCR1, Rscratch /* InterpreterProfileLimit */, invocation_count);
  bne(CCR0, done);
  bge(CCR1, profile_continue);

  // Build it now.
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
  set_method_data_pointer_for_bcp();
  b(profile_continue);

  align(32, 12);
  bind(done);
}
void InterpreterMacroAssembler::test_backedge_count_for_osr(Register backedge_count, Register branch_bcp, Register Rtmp) {
  assert_different_registers(backedge_count, Rtmp, branch_bcp);
  assert(UseOnStackReplacement, "Must UseOnStackReplacement to test_backedge_count_for_osr");

  Label did_not_overflow;
  Label overflow_with_error;

  int ibbl_offs = load_const_optimized(Rtmp, &InvocationCounter::InterpreterBackwardBranchLimit, R0, true);
  lwz(Rtmp, ibbl_offs, Rtmp);
  cmpw(CCR0, backedge_count, Rtmp);

  blt(CCR0, did_not_overflow);

  // When ProfileInterpreter is on, the backedge_count comes from the
  // methodDataOop, whose value does not get reset on the call to
  // frequency_counter_overflow(). To avoid excessive calls to the overflow
  // routine while the method is being compiled, add a second test to make sure
  // the overflow function is called only once every overflow_frequency.
  if (ProfileInterpreter) {
    const int overflow_frequency = 1024;
    li(Rtmp, overflow_frequency-1);
    andr(Rtmp, Rtmp, backedge_count);
    cmpwi(CCR0, Rtmp, 0);
    bne(CCR0, did_not_overflow);
  }

  // Overflow in loop, pass branch bytecode.
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), branch_bcp, true);

  // Was an OSR adapter generated?
  // R3_RET = osr nmethod
  cmpdi(CCR0, R3_RET, 0);
  beq(CCR0, overflow_with_error);

  // Has the nmethod been invalidated already?
  lbz(Rtmp, nmethod::state_offset(), R3_RET);
  cmpwi(CCR0, Rtmp, nmethod::in_use);
  bne(CCR0, overflow_with_error);

  // Migrate the interpreter frame off of the stack.
  // We can use all registers because we will not return to interpreter from this point.

  // Save nmethod.
  const Register osr_nmethod = R31;
  mr(osr_nmethod, R3_RET);
  set_top_ijava_frame_at_SP_as_last_Java_frame(R1_SP, R11_scratch1);
  call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin), R16_thread);
  reset_last_Java_frame();
  // OSR buffer is in ARG1.

  // Remove the interpreter frame.
  merge_frames(/*top_frame_sp*/ R21_sender_SP, /*return_pc*/ R0, R11_scratch1, R12_scratch2);

  // Jump to the osr code.
  ld(R11_scratch1, nmethod::osr_entry_point_offset(), osr_nmethod);
  mtlr(R0);
  mtctr(R11_scratch1);
  bctr();

  align(32, 12);
  bind(overflow_with_error);
  bind(did_not_overflow);
}
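// The OSR hand-off above, in pseudo code (sketch):
//
//   nmethod* osr = InterpreterRuntime::frequency_counter_overflow(branch_bcp);
//   if (osr == NULL || !osr->is_in_use()) goto keep_interpreting;
//   buf = SharedRuntime::OSR_migration_begin(thread);  // locals/monitors off stack
//   pop the interpreter frame (merge_frames);
//   jump to osr->osr_entry_point() with buf in R3_ARG1;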
// Store a value at some constant offset from the method data pointer.
void InterpreterMacroAssembler::set_mdp_data_at(int constant, Register value) {
  assert(ProfileInterpreter, "must be profiling interpreter");

  std(value, constant, R28_mdx);
}

// Increment the value at some constant offset from the method data pointer.
void InterpreterMacroAssembler::increment_mdp_data_at(int constant,
                                                      Register counter_addr,
                                                      Register Rbumped_count,
                                                      bool decrement) {
  // Locate the counter at a fixed offset from the mdp:
  addi(counter_addr, R28_mdx, constant);
  increment_mdp_data_at(counter_addr, Rbumped_count, decrement);
}

// Increment the value at some non-fixed (reg + constant) offset from
// the method data pointer.
void InterpreterMacroAssembler::increment_mdp_data_at(Register reg,
                                                      int constant,
                                                      Register scratch,
                                                      Register Rbumped_count,
                                                      bool decrement) {
  // Add the constant to reg to get the offset.
  add(scratch, R28_mdx, reg);
  // Then calculate the counter address.
  addi(scratch, scratch, constant);
  increment_mdp_data_at(scratch, Rbumped_count, decrement);
}

void InterpreterMacroAssembler::increment_mdp_data_at(Register counter_addr,
                                                      Register Rbumped_count,
                                                      bool decrement) {
  assert(ProfileInterpreter, "must be profiling interpreter");

  // Load the counter.
  ld(Rbumped_count, 0, counter_addr);

  if (decrement) {
    // Decrement the register. Set condition codes.
    addi(Rbumped_count, Rbumped_count, -DataLayout::counter_increment);
    // Store the decremented counter, if it is still negative.
    std(Rbumped_count, 0, counter_addr);
    // Note: add/sub overflow checks are not ported, since 64 bit
    // calculation should never overflow.
  } else {
    // Increment the register. Set carry flag.
    addi(Rbumped_count, Rbumped_count, DataLayout::counter_increment);
    // Store the incremented counter.
    std(Rbumped_count, 0, counter_addr);
  }
}

// Set a flag value at the current method data pointer position.
void InterpreterMacroAssembler::set_mdp_flag_at(int flag_constant,
                                                Register scratch) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  // Load the data header.
  lbz(scratch, in_bytes(DataLayout::flags_offset()), R28_mdx);
  // Set the flag.
  ori(scratch, scratch, flag_constant);
  // Store the modified header.
  stb(scratch, in_bytes(DataLayout::flags_offset()), R28_mdx);
}

// Test the location at some offset from the method data pointer.
// If it is not equal to value, branch to the not_equal_continue Label.
void InterpreterMacroAssembler::test_mdp_data_at(int offset,
                                                 Register value,
                                                 Label& not_equal_continue,
                                                 Register test_out) {
  assert(ProfileInterpreter, "must be profiling interpreter");

  ld(test_out, offset, R28_mdx);
  cmpd(CCR0, value, test_out);
  bne(CCR0, not_equal_continue);
}

// Update the method data pointer by the displacement located at some fixed
// offset from the method data pointer.
void InterpreterMacroAssembler::update_mdp_by_offset(int offset_of_disp,
                                                     Register scratch) {
  assert(ProfileInterpreter, "must be profiling interpreter");

  ld(scratch, offset_of_disp, R28_mdx);
  add(R28_mdx, scratch, R28_mdx);
}

// Update the method data pointer by the displacement located at the
// offset (reg + offset_of_disp).
void InterpreterMacroAssembler::update_mdp_by_offset(Register reg,
                                                     int offset_of_disp,
                                                     Register scratch) {
  assert(ProfileInterpreter, "must be profiling interpreter");

  add(scratch, reg, R28_mdx);
  ld(scratch, offset_of_disp, scratch);
  add(R28_mdx, scratch, R28_mdx);
}

// Update the method data pointer by a simple constant displacement.
void InterpreterMacroAssembler::update_mdp_by_constant(int constant) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  addi(R28_mdx, R28_mdx, constant);
}

// Update the method data pointer for a _ret bytecode whose target
// was not among our cached targets.
void InterpreterMacroAssembler::update_mdp_for_ret(TosState state,
                                                   Register return_bci) {
  assert(ProfileInterpreter, "must be profiling interpreter");

  push(state);
  assert(return_bci->is_nonvolatile(), "need to protect return_bci");
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::update_mdp_for_ret), return_bci);
  pop(state);
}
// Update the method data pointer for a _ret bytecode whose target
// was not among our cached targets.
void InterpreterMacroAssembler::update_mdp_for_ret(TosState state,
                                                   Register return_bci) {
  assert(ProfileInterpreter, "must be profiling interpreter");

  push(state);
  assert(return_bci->is_nonvolatile(), "need to protect return_bci");
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::update_mdp_for_ret), return_bci);
  pop(state);
}

// Increments the backedge counter.
// Returns backedge counter + invocation counter in Rdst.
void InterpreterMacroAssembler::increment_backedge_counter(const Register Rcounters, const Register Rdst,
                                                           const Register Rtmp1, Register Rscratch) {
  assert(UseCompiler, "incrementing must be useful");
  assert_different_registers(Rdst, Rtmp1);
  const Register invocation_counter = Rtmp1;
  const Register counter = Rdst;
  // TODO: PPC port: assert(4 == InvocationCounter::sz_counter(), "unexpected field size");

  // Load the backedge counter.
  lwz(counter, in_bytes(MethodCounters::backedge_counter_offset()) +
               in_bytes(InvocationCounter::counter_offset()), Rcounters);
  // Load the invocation counter.
  lwz(invocation_counter, in_bytes(MethodCounters::invocation_counter_offset()) +
                          in_bytes(InvocationCounter::counter_offset()), Rcounters);

  // Add the delta to the backedge counter.
  addi(counter, counter, InvocationCounter::count_increment);

  // Mask the invocation counter.
  li(Rscratch, InvocationCounter::count_mask_value);
  andr(invocation_counter, invocation_counter, Rscratch);

  // Store the new counter value.
  stw(counter, in_bytes(MethodCounters::backedge_counter_offset()) +
               in_bytes(InvocationCounter::counter_offset()), Rcounters);
  // Return invocation counter + backedge counter.
  add(counter, counter, invocation_counter);
}

// Count a taken branch in the bytecodes.
void InterpreterMacroAssembler::profile_taken_branch(Register scratch, Register bumped_count) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(profile_continue);

    // We are taking a branch. Increment the taken count.
    increment_mdp_data_at(in_bytes(JumpData::taken_offset()), scratch, bumped_count);

    // The method data pointer needs to be updated to reflect the new target.
    update_mdp_by_offset(in_bytes(JumpData::displacement_offset()), scratch);
    bind(profile_continue);
  }
}

// Count a not-taken branch in the bytecodes.
void InterpreterMacroAssembler::profile_not_taken_branch(Register scratch1, Register scratch2) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(profile_continue);

    // We are not taking a branch. Increment the not-taken count.
    increment_mdp_data_at(in_bytes(BranchData::not_taken_offset()), scratch1, scratch2);

    // The method data pointer needs to be updated to correspond to the
    // next bytecode.
    update_mdp_by_constant(in_bytes(BranchData::branch_data_size()));
    bind(profile_continue);
  }
}

// Count a non-virtual call in the bytecodes.
void InterpreterMacroAssembler::profile_call(Register scratch1, Register scratch2) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(profile_continue);

    // We are making a call. Increment the count.
    increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch1, scratch2);

    // The method data pointer needs to be updated to reflect the new target.
    update_mdp_by_constant(in_bytes(CounterData::counter_data_size()));
    bind(profile_continue);
  }
}

// Count a final call in the bytecodes.
void InterpreterMacroAssembler::profile_final_call(Register scratch1, Register scratch2) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(profile_continue);

    // We are making a call. Increment the count.
    increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch1, scratch2);

    // The method data pointer needs to be updated to reflect the new target.
    update_mdp_by_constant(in_bytes(VirtualCallData::virtual_call_data_size()));
    bind(profile_continue);
  }
}
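
// Layout sketch of the receiver-type rows updated by the call profiling
// below (illustrative; see ReceiverTypeData/VirtualCallData in methodData.hpp):
//   [ count | receiver[0] | count[0] | ... | receiver[row_limit-1] | count[row_limit-1] ]
// Each row caches one observed receiver klass together with its hit count.
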
// Count a virtual call in the bytecodes.
void InterpreterMacroAssembler::profile_virtual_call(Register Rreceiver,
                                                     Register Rscratch1,
                                                     Register Rscratch2,
                                                     bool receiver_can_be_null) {
  if (!ProfileInterpreter) { return; }
  Label profile_continue;

  // If no method data exists, go to profile_continue.
  test_method_data_pointer(profile_continue);

  Label skip_receiver_profile;
  if (receiver_can_be_null) {
    Label not_null;
    cmpdi(CCR0, Rreceiver, 0);
    bne(CCR0, not_null);
    // We are making a call. Increment the count for the null receiver.
    increment_mdp_data_at(in_bytes(CounterData::count_offset()), Rscratch1, Rscratch2);
    b(skip_receiver_profile);
    bind(not_null);
  }

  // Record the receiver type.
  record_klass_in_profile(Rreceiver, Rscratch1, Rscratch2, true);
  bind(skip_receiver_profile);

  // The method data pointer needs to be updated to reflect the new target.
  update_mdp_by_constant(in_bytes(VirtualCallData::virtual_call_data_size()));
  bind(profile_continue);
}

void InterpreterMacroAssembler::profile_typecheck(Register Rklass, Register Rscratch1, Register Rscratch2) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(profile_continue);

    int mdp_delta = in_bytes(BitData::bit_data_size());
    if (TypeProfileCasts) {
      mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size());

      // Record the object type.
      record_klass_in_profile(Rklass, Rscratch1, Rscratch2, false);
    }

    // The method data pointer needs to be updated.
    update_mdp_by_constant(mdp_delta);

    bind(profile_continue);
  }
}

void InterpreterMacroAssembler::profile_typecheck_failed(Register Rscratch1, Register Rscratch2) {
  if (ProfileInterpreter && TypeProfileCasts) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(profile_continue);

    int count_offset = in_bytes(CounterData::count_offset());
    // Back up the address, since we have already bumped the mdp.
    count_offset -= in_bytes(VirtualCallData::virtual_call_data_size());

    // *Decrement* the counter. We expect to see zero or small negatives.
    increment_mdp_data_at(count_offset, Rscratch1, Rscratch2, true);

    bind(profile_continue);
  }
}

// Count a ret in the bytecodes.
void InterpreterMacroAssembler::profile_ret(TosState state, Register return_bci, Register scratch1, Register scratch2) {
  if (ProfileInterpreter) {
    Label profile_continue;
    uint row;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(profile_continue);

    // Update the total ret count.
    increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch1, scratch2);

    for (row = 0; row < RetData::row_limit(); row++) {
      Label next_test;

      // See if return_bci is equal to bci[n]:
      test_mdp_data_at(in_bytes(RetData::bci_offset(row)), return_bci, next_test, scratch1);

      // return_bci is equal to bci[n]. Increment the count.
      increment_mdp_data_at(in_bytes(RetData::bci_count_offset(row)), scratch1, scratch2);

      // The method data pointer needs to be updated to reflect the new target.
      update_mdp_by_offset(in_bytes(RetData::bci_displacement_offset(row)), scratch1);
      b(profile_continue);
      bind(next_test);
    }

    update_mdp_for_ret(state, return_bci);

    bind(profile_continue);
  }
}
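
// MultiBranchData layout sketch for the switch profiling below (illustrative):
//   [ default_count | default_displacement | count[0] | displacement[0] | ... ]
// Each case row holds a hit count and an mdp displacement to the profile
// data of the case's target.
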
// Count the default case of a switch construct.
void InterpreterMacroAssembler::profile_switch_default(Register scratch1, Register scratch2) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(profile_continue);

    // Update the default case count.
    increment_mdp_data_at(in_bytes(MultiBranchData::default_count_offset()),
                          scratch1, scratch2);

    // The method data pointer needs to be updated.
    update_mdp_by_offset(in_bytes(MultiBranchData::default_displacement_offset()),
                         scratch1);

    bind(profile_continue);
  }
}

// Count the index'th case of a switch construct.
void InterpreterMacroAssembler::profile_switch_case(Register index,
                                                    Register scratch1,
                                                    Register scratch2,
                                                    Register scratch3) {
  if (ProfileInterpreter) {
    assert_different_registers(index, scratch1, scratch2, scratch3);
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(profile_continue);

    // Build the base (index * per_case_size_in_bytes()) + case_array_offset_in_bytes().
    li(scratch3, in_bytes(MultiBranchData::case_array_offset()));

    assert(in_bytes(MultiBranchData::per_case_size()) == 16, "so that sldi can be used");
    sldi(scratch1, index, exact_log2(in_bytes(MultiBranchData::per_case_size())));
    add(scratch1, scratch1, scratch3);

    // Update the case count.
    increment_mdp_data_at(scratch1, in_bytes(MultiBranchData::relative_count_offset()), scratch2, scratch3);

    // The method data pointer needs to be updated.
    update_mdp_by_offset(scratch1, in_bytes(MultiBranchData::relative_displacement_offset()), scratch2);

    bind(profile_continue);
  }
}

void InterpreterMacroAssembler::profile_null_seen(Register Rscratch1, Register Rscratch2) {
  if (ProfileInterpreter) {
    assert_different_registers(Rscratch1, Rscratch2);
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(profile_continue);

    set_mdp_flag_at(BitData::null_seen_byte_constant(), Rscratch1);

    // The method data pointer needs to be updated.
    int mdp_delta = in_bytes(BitData::bit_data_size());
    if (TypeProfileCasts) {
      mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size());
    }
    update_mdp_by_constant(mdp_delta);

    bind(profile_continue);
  }
}

void InterpreterMacroAssembler::record_klass_in_profile(Register Rreceiver,
                                                        Register Rscratch1, Register Rscratch2,
                                                        bool is_virtual_call) {
  assert(ProfileInterpreter, "must be profiling");
  assert_different_registers(Rreceiver, Rscratch1, Rscratch2);

  Label done;
  record_klass_in_profile_helper(Rreceiver, Rscratch1, Rscratch2, 0, done, is_virtual_call);
  bind(done);
}
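
// Pseudo-code sketch of the unrolled search that the helper below generates
// (illustrative only):
//   if (receiver == receiver[start_row]) { count[start_row] += increment; goto done; }
//   if (receiver[start_row] == NULL) {
//     // Remember the empty row, keep matching the remaining rows, and
//     // claim receiver[start_row] for this receiver if no row matches.
//   } else {
//     // Row occupied by a different klass: retry from start_row + 1
//     // (the recursive call below).
//   }
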
void InterpreterMacroAssembler::record_klass_in_profile_helper(
                                        Register receiver, Register scratch1, Register scratch2,
                                        int start_row, Label& done, bool is_virtual_call) {
  if (TypeProfileWidth == 0) {
    if (is_virtual_call) {
      increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch1, scratch2);
    }
    return;
  }

  int last_row = VirtualCallData::row_limit() - 1;
  assert(start_row <= last_row, "must be work left to do");
  // Test this row for both the receiver and for null.
  // Take any of three different outcomes:
  //   1. found receiver => increment count and goto done
  //   2. found null => keep looking for case 1, maybe allocate this cell
  //   3. found something else => keep looking for cases 1 and 2
  // Case 3 is handled by a recursive call.
  for (int row = start_row; row <= last_row; row++) {
    Label next_test;
    bool test_for_null_also = (row == start_row);

    // See if the receiver is receiver[n].
    int recvr_offset = in_bytes(VirtualCallData::receiver_offset(row));
    test_mdp_data_at(recvr_offset, receiver, next_test, scratch1);

    // The receiver is receiver[n]. Increment count[n].
    int count_offset = in_bytes(VirtualCallData::receiver_count_offset(row));
    increment_mdp_data_at(count_offset, scratch1, scratch2);
    b(done);
    bind(next_test);

    if (test_for_null_also) {
      Label found_null;
      // Failed the equality check on receiver[n]... Test for null.
      if (start_row == last_row) {
        // The only thing left to do is handle the null case.
        if (is_virtual_call) {
          // Scratch1 contains test_out from test_mdp_data_at.
          cmpdi(CCR0, scratch1, 0);
          beq(CCR0, found_null);
          // Receiver did not match any saved receiver and there is no empty row for it.
          // Increment the total counter to indicate the polymorphic case.
          increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch1, scratch2);
          b(done);
          bind(found_null);
        } else {
          cmpdi(CCR0, scratch1, 0);
          bne(CCR0, done);
        }
        break;
      }
      // Since null is rare, make it be the branch-taken case.
      cmpdi(CCR0, scratch1, 0);
      beq(CCR0, found_null);

      // Put all the "Case 3" tests here.
      record_klass_in_profile_helper(receiver, scratch1, scratch2, start_row + 1, done, is_virtual_call);

      // Found a null. Keep searching for a matching receiver,
      // but remember that this is an empty (unused) slot.
      bind(found_null);
    }
  }

  // In the fall-through case, we found no matching receiver, but we
  // observed that receiver[start_row] is NULL.

  // Fill in the receiver field and increment the count.
  int recvr_offset = in_bytes(VirtualCallData::receiver_offset(start_row));
  set_mdp_data_at(recvr_offset, receiver);
  int count_offset = in_bytes(VirtualCallData::receiver_count_offset(start_row));
  li(scratch1, DataLayout::counter_increment);
  set_mdp_data_at(count_offset, scratch1);
  if (start_row > 0) {
    b(done);
  }
}
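
// TypeEntries encoding used by profile_obj_type below (illustrative sketch;
// see TypeEntries in methodData.hpp): a profile cell holds a Klass* with
// flag bits folded into the low-order bits:
//   [ klass address (type_klass_mask) | type_unknown | null_seen ]
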
// Argument and return type profiling.
// kills: tmp, tmp2, R0, CR0, CR1
void InterpreterMacroAssembler::profile_obj_type(Register obj, Register mdo_addr_base,
                                                 RegisterOrConstant mdo_addr_offs, Register tmp, Register tmp2) {
  Label do_nothing, do_update;

  // tmp2 == obj is allowed.
  assert_different_registers(obj, mdo_addr_base, tmp, R0);
  assert_different_registers(tmp2, mdo_addr_base, tmp, R0);
  const Register klass = tmp2;

  verify_oop(obj);

  ld(tmp, mdo_addr_offs, mdo_addr_base);

  // Set null_seen if obj is 0.
  cmpdi(CCR0, obj, 0);
  ori(R0, tmp, TypeEntries::null_seen);
  beq(CCR0, do_update);

  load_klass(klass, obj);

  clrrdi(R0, tmp, exact_log2(-TypeEntries::type_klass_mask));
  // Basically the same as andi(R0, tmp, TypeEntries::type_klass_mask).
  cmpd(CCR1, R0, klass);
  // Klass seen before, nothing to do (regardless of the unknown bit).
  //beq(CCR1, do_nothing);

  andi_(R0, klass, TypeEntries::type_unknown);
  // Already unknown. Nothing to do anymore.
  //bne(CCR0, do_nothing);
  crorc(CCR0, Assembler::equal, CCR1, Assembler::equal); // cr0 eq = cr1 eq or cr0 ne
  beq(CCR0, do_nothing);

  clrrdi_(R0, tmp, exact_log2(-TypeEntries::type_mask));
  orr(R0, klass, tmp); // Combine klass and null_seen bit (only used if (tmp & type_mask) == 0).
  beq(CCR0, do_update); // First time here. Set profile type.

  // Different than before. Cannot keep an accurate profile.
  ori(R0, tmp, TypeEntries::type_unknown);

  bind(do_update);
  // Update the profile.
  std(R0, mdo_addr_offs, mdo_addr_base);

  align(32, 12);
  bind(do_nothing);
}
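
// Worked example for the stack-slot arithmetic used below (illustrative,
// with assumed numbers): for a callee with n = 3 parameter slots, an
// argument recorded at zero-based stack slot o = 1 lives n - o = 2 slots
// above R15_esp, i.e. at R15_esp + 2 * Interpreter::stackElementSize.
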
void InterpreterMacroAssembler::profile_arguments_type(Register callee, Register tmp1, Register tmp2, bool is_virtual) {
  if (!ProfileInterpreter) {
    return;
  }

  assert_different_registers(callee, tmp1, tmp2, R28_mdx);

  if (MethodData::profile_arguments() || MethodData::profile_return()) {
    Label profile_continue;

    test_method_data_pointer(profile_continue);

    int off_to_start = is_virtual ? in_bytes(VirtualCallData::virtual_call_data_size()) : in_bytes(CounterData::counter_data_size());

    lbz(tmp1, in_bytes(DataLayout::tag_offset()) - off_to_start, R28_mdx);
    cmpwi(CCR0, tmp1, is_virtual ? DataLayout::virtual_call_type_data_tag : DataLayout::call_type_data_tag);
    bne(CCR0, profile_continue);

    if (MethodData::profile_arguments()) {
      Label done;
      int off_to_args = in_bytes(TypeEntriesAtCall::args_data_offset());
      add(R28_mdx, off_to_args, R28_mdx);

      for (int i = 0; i < TypeProfileArgsLimit; i++) {
        if (i > 0 || MethodData::profile_return()) {
          // If the return value type is profiled we may have no argument to profile.
          ld(tmp1, in_bytes(TypeEntriesAtCall::cell_count_offset())-off_to_args, R28_mdx);
          cmpdi(CCR0, tmp1, (i+1)*TypeStackSlotEntries::per_arg_count());
          addi(tmp1, tmp1, -i*TypeStackSlotEntries::per_arg_count());
          blt(CCR0, done);
        }
        ld(tmp1, in_bytes(Method::const_offset()), callee);
        lhz(tmp1, in_bytes(ConstMethod::size_of_parameters_offset()), tmp1);
        // Stack offset o (zero based) from the start of the argument
        // list, for n arguments translates into offset n - o - 1 from
        // the end of the argument list. But there is an extra slot at
        // the top of the stack. So the offset is n - o from Lesp.
        ld(tmp2, in_bytes(TypeEntriesAtCall::stack_slot_offset(i))-off_to_args, R28_mdx);
        subf(tmp1, tmp2, tmp1);

        sldi(tmp1, tmp1, Interpreter::logStackElementSize);
        ldx(tmp1, tmp1, R15_esp);

        profile_obj_type(tmp1, R28_mdx, in_bytes(TypeEntriesAtCall::argument_type_offset(i))-off_to_args, tmp2, tmp1);

        int to_add = in_bytes(TypeStackSlotEntries::per_arg_size());
        addi(R28_mdx, R28_mdx, to_add);
        off_to_args += to_add;
      }

      if (MethodData::profile_return()) {
        ld(tmp1, in_bytes(TypeEntriesAtCall::cell_count_offset())-off_to_args, R28_mdx);
        addi(tmp1, tmp1, -TypeProfileArgsLimit*TypeStackSlotEntries::per_arg_count());
      }

      bind(done);

      if (MethodData::profile_return()) {
        // We're right after the type profile for the last
        // argument. tmp1 is the number of cells left in the
        // CallTypeData/VirtualCallTypeData to reach its end. Non-zero
        // if there's a return to profile.
        assert(ReturnTypeEntry::static_cell_count() < TypeStackSlotEntries::per_arg_count(), "can't move past ret type");
        sldi(tmp1, tmp1, exact_log2(DataLayout::cell_size));
        add(R28_mdx, tmp1, R28_mdx);
      }
    } else {
      assert(MethodData::profile_return(), "either profile call args or call ret");
      update_mdp_by_constant(in_bytes(TypeEntriesAtCall::return_only_size()));
    }

    // Mdp points right after the end of the
    // CallTypeData/VirtualCallTypeData, right after the cells for the
    // return value type if there is one.
    align(32, 12);
    bind(profile_continue);
  }
}

void InterpreterMacroAssembler::profile_return_type(Register ret, Register tmp1, Register tmp2) {
  assert_different_registers(ret, tmp1, tmp2);
  if (ProfileInterpreter && MethodData::profile_return()) {
    Label profile_continue;

    test_method_data_pointer(profile_continue);

    if (MethodData::profile_return_jsr292_only()) {
      // If we don't profile all invoke bytecodes we must make sure
      // it's a bytecode we indeed profile. We can't go back to the
      // beginning of the ProfileData we intend to update to check its
      // type because we're right after it and we don't know its
      // length.
      lbz(tmp1, 0, R14_bcp);
      lbz(tmp2, Method::intrinsic_id_offset_in_bytes(), R19_method);
      cmpwi(CCR0, tmp1, Bytecodes::_invokedynamic);
      cmpwi(CCR1, tmp1, Bytecodes::_invokehandle);
      cror(CCR0, Assembler::equal, CCR1, Assembler::equal);
      cmpwi(CCR1, tmp2, vmIntrinsics::_compiledLambdaForm);
      cror(CCR0, Assembler::equal, CCR1, Assembler::equal);
      bne(CCR0, profile_continue);
    }

    profile_obj_type(ret, R28_mdx, -in_bytes(ReturnTypeEntry::size()), tmp1, tmp2);

    align(32, 12);
    bind(profile_continue);
  }
}

void InterpreterMacroAssembler::profile_parameters_type(Register tmp1, Register tmp2, Register tmp3, Register tmp4) {
  if (ProfileInterpreter && MethodData::profile_parameters()) {
    Label profile_continue, done;

    test_method_data_pointer(profile_continue);

    // Load the offset of the area within the MDO used for
    // parameters. If it's negative we're not profiling any parameters.
    lwz(tmp1, in_bytes(MethodData::parameters_type_data_di_offset()) - in_bytes(MethodData::data_offset()), R28_mdx);
    cmpwi(CCR0, tmp1, 0);
    blt(CCR0, profile_continue);

    // Compute a pointer to the area for parameters from the offset
    // and move the pointer to the slot for the last
    // parameter. Collect profiling from the last parameter down.
    // mdo start + parameters offset + array length - 1

    // Pointer to the parameter area in the MDO.
    const Register mdp = tmp1;
    add(mdp, tmp1, R28_mdx);

    // Offset of the current profile entry to update.
    const Register entry_offset = tmp2;
    // entry_offset = array len in number of cells
    ld(entry_offset, in_bytes(ArrayData::array_len_offset()), mdp);

    int off_base = in_bytes(ParametersTypeData::stack_slot_offset(0));
    assert(off_base % DataLayout::cell_size == 0, "should be a number of cells");

    // entry_offset (number of cells) = array len - size of 1 entry + offset of the stack slot field
    addi(entry_offset, entry_offset, -TypeStackSlotEntries::per_arg_count() + (off_base / DataLayout::cell_size));
    // entry_offset in bytes
    sldi(entry_offset, entry_offset, exact_log2(DataLayout::cell_size));

    Label loop;
    align(32, 12);
    bind(loop);

    // Load the offset on the stack from the slot for this parameter.
    ld(tmp3, entry_offset, mdp);
    sldi(tmp3, tmp3, Interpreter::logStackElementSize);
    neg(tmp3, tmp3);
    // Read the parameter from the local area.
    ldx(tmp3, tmp3, R18_locals);

    // Make entry_offset now point to the type field for this parameter.
    int type_base = in_bytes(ParametersTypeData::type_offset(0));
    assert(type_base > off_base, "unexpected");
    addi(entry_offset, entry_offset, type_base - off_base);

    // Profile the parameter.
    profile_obj_type(tmp3, mdp, entry_offset, tmp4, tmp3);

    // Go to the next parameter.
    int delta = TypeStackSlotEntries::per_arg_count() * DataLayout::cell_size + (type_base - off_base);
    cmpdi(CCR0, entry_offset, off_base + delta);
    addi(entry_offset, entry_offset, -delta);
    bge(CCR0, loop);

    align(32, 12);
    bind(profile_continue);
  }
}

// Add an InterpMonitorElem to the stack (see frame_ppc.hpp).
void InterpreterMacroAssembler::add_monitor_to_stack(bool stack_is_empty, Register Rtemp1, Register Rtemp2) {

  // Very-local scratch registers.
  const Register esp  = Rtemp1;
  const Register slot = Rtemp2;

  // Extracted monitor_size.
  int monitor_size = frame::interpreter_frame_monitor_size_in_bytes();
  assert(Assembler::is_aligned((unsigned int)monitor_size,
                               (unsigned int)frame::alignment_in_bytes),
         "size of a monitor must respect alignment of SP");

  resize_frame(-monitor_size, /*temp*/esp); // Allocate space for the new monitor.
  std(R1_SP, _ijava_state_neg(top_frame_sp), esp); // esp contains fp

  // Shuffle the expression stack down. Recall that stack_base points
  // just above the new expression stack bottom. Old_tos and new_tos
  // are used to scan through the old and new expression stacks.
  if (!stack_is_empty) {
    Label copy_slot, copy_slot_finished;
    const Register n_slots = slot;

    addi(esp, R15_esp, Interpreter::stackElementSize); // Point to first element (pre-pushed stack).
    subf(n_slots, esp, R26_monitor);
    srdi_(n_slots, n_slots, LogBytesPerWord);          // Compute the number of slots to copy.
    assert(LogBytesPerWord == 3, "conflicts assembler instructions");
    beq(CCR0, copy_slot_finished);                     // Nothing to copy.

    mtctr(n_slots);
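
    // The loop below moves each expression-stack slot down by monitor_size
    // bytes: CTR is preloaded with the slot count, and bdnz decrements it
    // and branches back until it reaches zero.
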
    bind(copy_slot);
    ld(slot, 0, esp);              // Move the expression stack down.
    std(slot, -monitor_size, esp); // distance = monitor_size
    addi(esp, esp, BytesPerWord);
    bdnz(copy_slot);

    bind(copy_slot_finished);
  }

  addi(R15_esp, R15_esp, -monitor_size);
  addi(R26_monitor, R26_monitor, -monitor_size);

  // Restart the interpreter.
}

// ============================================================================
// Java locals access

// Load a local variable at index in Rindex into register Rdst_value.
// Also puts the address of the local into Rdst_address as a service.
// Kills:
//   - Rdst_value
//   - Rdst_address
void InterpreterMacroAssembler::load_local_int(Register Rdst_value, Register Rdst_address, Register Rindex) {
  sldi(Rdst_address, Rindex, Interpreter::logStackElementSize);
  subf(Rdst_address, Rdst_address, R18_locals);
  lwz(Rdst_value, 0, Rdst_address);
}

// Load a local variable at index in Rindex into register Rdst_value.
// Also puts the address of the local into Rdst_address as a service.
// Kills:
//   - Rdst_value
//   - Rdst_address
void InterpreterMacroAssembler::load_local_long(Register Rdst_value, Register Rdst_address, Register Rindex) {
  sldi(Rdst_address, Rindex, Interpreter::logStackElementSize);
  subf(Rdst_address, Rdst_address, R18_locals);
  ld(Rdst_value, -8, Rdst_address);
}

// Load a local variable at index in Rindex into register Rdst_value.
// Also puts the address of the local into Rdst_address as a service.
// Input:
//   - Rindex: slot number of the local variable
// Kills:
//   - Rdst_value
//   - Rdst_address
void InterpreterMacroAssembler::load_local_ptr(Register Rdst_value, Register Rdst_address, Register Rindex) {
  sldi(Rdst_address, Rindex, Interpreter::logStackElementSize);
  subf(Rdst_address, Rdst_address, R18_locals);
  ld(Rdst_value, 0, Rdst_address);
}

// Load a local variable at index in Rindex into register Rdst_value.
// Also puts the address of the local into Rdst_address as a service.
// Kills:
//   - Rdst_value
//   - Rdst_address
void InterpreterMacroAssembler::load_local_float(FloatRegister Rdst_value, Register Rdst_address, Register Rindex) {
  sldi(Rdst_address, Rindex, Interpreter::logStackElementSize);
  subf(Rdst_address, Rdst_address, R18_locals);
  lfs(Rdst_value, 0, Rdst_address);
}

// Load a local variable at index in Rindex into register Rdst_value.
// Also puts the address of the local into Rdst_address as a service.
// Kills:
//   - Rdst_value
//   - Rdst_address
void InterpreterMacroAssembler::load_local_double(FloatRegister Rdst_value, Register Rdst_address, Register Rindex) {
  sldi(Rdst_address, Rindex, Interpreter::logStackElementSize);
  subf(Rdst_address, Rdst_address, R18_locals);
  lfd(Rdst_value, -8, Rdst_address);
}

// Store an int value at local variable slot Rindex.
// Kills:
//   - Rindex
void InterpreterMacroAssembler::store_local_int(Register Rvalue, Register Rindex) {
  sldi(Rindex, Rindex, Interpreter::logStackElementSize);
  subf(Rindex, Rindex, R18_locals);
  stw(Rvalue, 0, Rindex);
}
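
// Note on the addressing used above and below: locals grow towards lower
// addresses, so slot i resolves to R18_locals - i * Interpreter::stackElementSize;
// longs and doubles occupy two slots and are accessed at displacement -8
// from the first slot's address.
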
// Store a long value at local variable slot Rindex.
// Kills:
//   - Rindex
void InterpreterMacroAssembler::store_local_long(Register Rvalue, Register Rindex) {
  sldi(Rindex, Rindex, Interpreter::logStackElementSize);
  subf(Rindex, Rindex, R18_locals);
  std(Rvalue, -8, Rindex);
}

// Store an oop value at local variable slot Rindex.
// Kills:
//   - Rindex
void InterpreterMacroAssembler::store_local_ptr(Register Rvalue, Register Rindex) {
  sldi(Rindex, Rindex, Interpreter::logStackElementSize);
  subf(Rindex, Rindex, R18_locals);
  std(Rvalue, 0, Rindex);
}

// Store a float value at local variable slot Rindex.
// Kills:
//   - Rindex
void InterpreterMacroAssembler::store_local_float(FloatRegister Rvalue, Register Rindex) {
  sldi(Rindex, Rindex, Interpreter::logStackElementSize);
  subf(Rindex, Rindex, R18_locals);
  stfs(Rvalue, 0, Rindex);
}

// Store a double value at local variable slot Rindex.
// Kills:
//   - Rindex
void InterpreterMacroAssembler::store_local_double(FloatRegister Rvalue, Register Rindex) {
  sldi(Rindex, Rindex, Interpreter::logStackElementSize);
  subf(Rindex, Rindex, R18_locals);
  stfd(Rvalue, -8, Rindex);
}

// Read the pending exception from the thread, and jump to the interpreter's
// rethrow-exception entry if one is pending. Fall through otherwise.
void InterpreterMacroAssembler::check_and_forward_exception(Register Rscratch1, Register Rscratch2) {
  assert_different_registers(Rscratch1, Rscratch2, R3);
  Register Rexception = Rscratch1;
  Register Rtmp       = Rscratch2;
  Label Ldone;
  // Get the pending exception oop.
  ld(Rexception, thread_(pending_exception));
  cmpdi(CCR0, Rexception, 0);
  beq(CCR0, Ldone);
  li(Rtmp, 0);
  mr_if_needed(R3, Rexception);
  std(Rtmp, thread_(pending_exception)); // Clear the exception in the thread.
  if (Interpreter::rethrow_exception_entry() != NULL) {
    // Already got the entry address.
    load_dispatch_table(Rtmp, (address*)Interpreter::rethrow_exception_entry());
  } else {
    // Dynamically load the entry address.
    int simm16_rest = load_const_optimized(Rtmp, &Interpreter::_rethrow_exception_entry, R0, true);
    ld(Rtmp, simm16_rest, Rtmp);
  }
  mtctr(Rtmp);
  save_interpreter_state(Rtmp);
  bctr();

  align(32, 12);
  bind(Ldone);
}

void InterpreterMacroAssembler::call_VM(Register oop_result, address entry_point, bool check_exceptions) {
  save_interpreter_state(R11_scratch1);

  MacroAssembler::call_VM(oop_result, entry_point, false);

  restore_interpreter_state(R11_scratch1, /*bcp_and_mdx_only*/ true);

  check_and_handle_popframe(R11_scratch1);
  check_and_handle_earlyret(R11_scratch1);
  // Now check exceptions manually.
  if (check_exceptions) {
    check_and_forward_exception(R11_scratch1, R12_scratch2);
  }
}

void InterpreterMacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, bool check_exceptions) {
  // ARG1 is reserved for the thread.
  mr_if_needed(R4_ARG2, arg_1);
  call_VM(oop_result, entry_point, check_exceptions);
}
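
// The call_VM overloads below shuffle Java arguments into the PPC ABI
// argument registers (R4_ARG2..R6_ARG4; R3_ARG1 carries the thread). The
// asserts guard against a later move clobbering an earlier argument, e.g.
// in call_VM(noreg, ep, Ra, Rb) the value Rb must not live in R4_ARG2,
// because Ra is moved into R4_ARG2 first.
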
void InterpreterMacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, bool check_exceptions) {
  // ARG1 is reserved for the thread.
  mr_if_needed(R4_ARG2, arg_1);
  assert(arg_2 != R4_ARG2, "smashed argument");
  mr_if_needed(R5_ARG3, arg_2);
  call_VM(oop_result, entry_point, check_exceptions);
}

void InterpreterMacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions) {
  // ARG1 is reserved for the thread.
  mr_if_needed(R4_ARG2, arg_1);
  assert(arg_2 != R4_ARG2, "smashed argument");
  mr_if_needed(R5_ARG3, arg_2);
  assert(arg_3 != R4_ARG2 && arg_3 != R5_ARG3, "smashed argument");
  mr_if_needed(R6_ARG4, arg_3);
  call_VM(oop_result, entry_point, check_exceptions);
}

void InterpreterMacroAssembler::save_interpreter_state(Register scratch) {
  ld(scratch, 0, R1_SP);
  std(R15_esp, _ijava_state_neg(esp), scratch);
  std(R14_bcp, _ijava_state_neg(bcp), scratch);
  std(R26_monitor, _ijava_state_neg(monitors), scratch);
  if (ProfileInterpreter) { std(R28_mdx, _ijava_state_neg(mdx), scratch); }
  // Other entries should be unchanged.
}

void InterpreterMacroAssembler::restore_interpreter_state(Register scratch, bool bcp_and_mdx_only) {
  ld(scratch, 0, R1_SP);
  ld(R14_bcp, _ijava_state_neg(bcp), scratch); // Changed by VM code (exception).
  if (ProfileInterpreter) { ld(R28_mdx, _ijava_state_neg(mdx), scratch); } // Changed by VM code.
  if (!bcp_and_mdx_only) {
    // The following entries are Metadata.
    ld(R19_method, _ijava_state_neg(method), scratch);
    ld(R27_constPoolCache, _ijava_state_neg(cpoolCache), scratch);
    // The following entries are stack addresses and don't require a reload.
    ld(R15_esp, _ijava_state_neg(esp), scratch);
    ld(R18_locals, _ijava_state_neg(locals), scratch);
    ld(R26_monitor, _ijava_state_neg(monitors), scratch);
  }
#ifdef ASSERT
  {
    Label Lok;
    subf(R0, R1_SP, scratch);
    cmpdi(CCR0, R0, frame::abi_reg_args_size + frame::ijava_state_size);
    bge(CCR0, Lok);
    stop("frame too small (restore istate)", 0x5432);
    bind(Lok);
  }
  {
    Label Lok;
    ld(R0, _ijava_state_neg(ijava_reserved), scratch);
    cmpdi(CCR0, R0, 0x5afe);
    beq(CCR0, Lok);
    stop("frame corrupted (restore istate)", 0x5afe);
    bind(Lok);
  }
#endif
}

#endif // !CC_INTERP

void InterpreterMacroAssembler::get_method_counters(Register method,
                                                    Register Rcounters,
                                                    Label& skip) {
  BLOCK_COMMENT("Load and eventually allocate counter object {");
  Label has_counters;
  ld(Rcounters, in_bytes(Method::method_counters_offset()), method);
  cmpdi(CCR0, Rcounters, 0);
  bne(CCR0, has_counters);
  call_VM(noreg, CAST_FROM_FN_PTR(address,
                                  InterpreterRuntime::build_method_counters), method, false);
  ld(Rcounters, in_bytes(Method::method_counters_offset()), method);
  cmpdi(CCR0, Rcounters, 0);
  beq(CCR0, skip); // No MethodCounters, OutOfMemory.
  BLOCK_COMMENT("} Load and eventually allocate counter object");

  bind(has_counters);
}
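
// C-level sketch of increment_invocation_counter below (illustrative only):
//   uint32_t be = *be_addr & InvocationCounter::count_mask_value; // strip status bits
//   uint32_t iv = *iv_addr + InvocationCounter::count_increment;
//   *iv_addr = iv;
//   iv_be_count = iv + be;
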
void InterpreterMacroAssembler::increment_invocation_counter(Register Rcounters, Register iv_be_count, Register Rtmp_r0) {
  assert(UseCompiler, "incrementing must be useful");
  Register invocation_count = iv_be_count;
  Register backedge_count   = Rtmp_r0;
  int delta = InvocationCounter::count_increment;

  // Load each counter in a register.
  int inv_counter_offset = in_bytes(MethodCounters::invocation_counter_offset() +
                                    InvocationCounter::counter_offset());
  int be_counter_offset  = in_bytes(MethodCounters::backedge_counter_offset() +
                                    InvocationCounter::counter_offset());

  BLOCK_COMMENT("Increment profiling counters {");

  // Load the backedge counter.
  lwz(backedge_count, be_counter_offset, Rcounters); // is unsigned int
  // Mask the backedge counter.
  Register tmp = invocation_count;
  li(tmp, InvocationCounter::count_mask_value);
  andr(backedge_count, tmp, backedge_count); // Cannot use andi, need sign extension of count_mask_value.

  // Load the invocation counter.
  lwz(invocation_count, inv_counter_offset, Rcounters); // is unsigned int
  // Add the delta to the invocation counter and store the result.
  addi(invocation_count, invocation_count, delta);
  // Store the value.
  stw(invocation_count, inv_counter_offset, Rcounters);

  // Add invocation counter + backedge counter.
  add(iv_be_count, backedge_count, invocation_count);

  // Note that this macro must leave backedge_count + invocation_count in
  // register iv_be_count!
  BLOCK_COMMENT("} Increment profiling counters");
}

void InterpreterMacroAssembler::verify_oop(Register reg, TosState state) {
  if (state == atos) { MacroAssembler::verify_oop(reg); }
}

#ifndef CC_INTERP
// Local helper function for the verify_oop_or_return_address macro.
static bool verify_return_address(Method* m, int bci) {
#ifndef PRODUCT
  address pc = (address)(m->constMethod()) + in_bytes(ConstMethod::codes_offset()) + bci;
  // Assume it is a valid return address if it is inside m and is preceded by a jsr.
  if (!m->contains(pc)) return false;
  address jsr_pc;
  jsr_pc = pc - Bytecodes::length_for(Bytecodes::_jsr);
  if (jsr_pc >= m->code_base() && *jsr_pc == Bytecodes::_jsr) return true;
  jsr_pc = pc - Bytecodes::length_for(Bytecodes::_jsr_w);
  if (jsr_pc >= m->code_base() && *jsr_pc == Bytecodes::_jsr_w) return true;
#endif // PRODUCT
  return false;
}

void InterpreterMacroAssembler::verify_FPU(int stack_depth, TosState state) {
  if (VerifyFPU) {
    unimplemented("verifyFPU");
  }
}
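
// verify_oop_or_return_address below distinguishes the two cases with a
// cheap heuristic: a value whose bits above 2^16 are all zero is treated
// as a bytecode offset and checked out-of-line by verify_return_address();
// anything else is verified as an oop.
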
void InterpreterMacroAssembler::verify_oop_or_return_address(Register reg, Register Rtmp) {
  if (!VerifyOops) return;

  // The VM documentation for the astore[_wide] bytecode allows
  // the TOS to be not only an oop but also a return address.
  Label test;
  Label skip;
  // See if it is an address (in the current method):

  const int log2_bytecode_size_limit = 16;
  srdi_(Rtmp, reg, log2_bytecode_size_limit);
  bne(CCR0, test);

  // Perform a more elaborate out-of-line call to check the return address.
  address fd = CAST_FROM_FN_PTR(address, verify_return_address);
  const int nbytes_save = 11*8; // volatile gprs except R0
  save_volatile_gprs(R1_SP, -nbytes_save); // except R0
  save_LR_CR(Rtmp); // Save in old frame.
  push_frame_reg_args(nbytes_save, Rtmp);

  load_const_optimized(Rtmp, fd, R0);
  mr_if_needed(R4_ARG2, reg);
  mr(R3_ARG1, R19_method);
  call_c(Rtmp); // call C

  pop_frame();
  restore_LR_CR(Rtmp);
  restore_volatile_gprs(R1_SP, -nbytes_save); // except R0
  b(skip);

  // Not an address; verify it as an oop:
  bind(test);
  verify_oop(reg);
  bind(skip);
}
#endif // !CC_INTERP

// Inline assembly for:
//
// if (thread is in interp_only_mode) {
//   InterpreterRuntime::post_method_entry();
// }
// if (*jvmpi::event_flags_array_at_addr(JVMPI_EVENT_METHOD_ENTRY ) ||
//     *jvmpi::event_flags_array_at_addr(JVMPI_EVENT_METHOD_ENTRY2)   ) {
//   SharedRuntime::jvmpi_method_entry(method, receiver);
// }
void InterpreterMacroAssembler::notify_method_entry() {
  // JVMTI
  // Whenever JVMTI puts a thread in interp_only_mode, method
  // entry/exit events are sent for that thread to track stack
  // depth. If it is possible to enter interp_only_mode we add
  // the code to check if the event should be sent.
  if (JvmtiExport::can_post_interpreter_events()) {
    Label jvmti_post_done;

    lwz(R0, in_bytes(JavaThread::interp_only_mode_offset()), R16_thread);
    cmpwi(CCR0, R0, 0);
    beq(CCR0, jvmti_post_done);
    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_entry),
            /*check_exceptions=*/true CC_INTERP_ONLY(&& false));

    bind(jvmti_post_done);
  }
}

// Inline assembly for:
//
// if (thread is in interp_only_mode) {
//   // save result
//   InterpreterRuntime::post_method_exit();
//   // restore result
// }
// if (*jvmpi::event_flags_array_at_addr(JVMPI_EVENT_METHOD_EXIT)) {
//   // save result
//   SharedRuntime::jvmpi_method_exit();
//   // restore result
// }
//
// Native methods have their result stored in d_tmp and l_tmp.
// Java methods have their result stored in the expression stack.
void InterpreterMacroAssembler::notify_method_exit(bool is_native_method, TosState state,
                                                   NotifyMethodExitMode mode, bool check_exceptions) {
  // JVMTI
  // Whenever JVMTI puts a thread in interp_only_mode, method
  // entry/exit events are sent for that thread to track stack
  // depth. If it is possible to enter interp_only_mode we add
  // the code to check if the event should be sent.
  if (mode == NotifyJVMTI && JvmtiExport::can_post_interpreter_events()) {
    Label jvmti_post_done;

    lwz(R0, in_bytes(JavaThread::interp_only_mode_offset()), R16_thread);
    cmpwi(CCR0, R0, 0);
    beq(CCR0, jvmti_post_done);
    CC_INTERP_ONLY(assert(is_native_method && !check_exceptions, "must not push state"));
    if (!is_native_method) push(state); // Expose tos to GC.
    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_exit),
            /*check_exceptions=*/check_exceptions);
    if (!is_native_method) pop(state);

    align(32, 12);
    bind(jvmti_post_done);
  }

  // Dtrace support not implemented.
}

#ifdef CC_INTERP
// Convert the current TOP_IJAVA_FRAME into a PARENT_IJAVA_FRAME
// (using parent_frame_resize) and push a new interpreter
// TOP_IJAVA_FRAME (using frame_size).
void InterpreterMacroAssembler::push_interpreter_frame(Register top_frame_size, Register parent_frame_resize,
                                                       Register tmp1, Register tmp2, Register tmp3,
                                                       Register tmp4, Register pc) {
  assert_different_registers(top_frame_size, parent_frame_resize, tmp1, tmp2, tmp3, tmp4);
  ld(tmp1, _top_ijava_frame_abi(frame_manager_lr), R1_SP);
  mr(tmp2/*top_frame_sp*/, R1_SP);
  // Move initial_caller_sp.
  ld(tmp4, _top_ijava_frame_abi(initial_caller_sp), R1_SP);
  neg(parent_frame_resize, parent_frame_resize);
  resize_frame(parent_frame_resize/*-parent_frame_resize*/, tmp3);

  // Set LR in the new parent frame.
  std(tmp1, _abi(lr), R1_SP);
  // Set the top_frame_sp info for the new parent frame.
  std(tmp2, _parent_ijava_frame_abi(top_frame_sp), R1_SP);
  std(tmp4, _parent_ijava_frame_abi(initial_caller_sp), R1_SP);

  // Push the new TOP_IJAVA_FRAME.
  push_frame(top_frame_size, tmp2);

  get_PC_trash_LR(tmp3);
  std(tmp3, _top_ijava_frame_abi(frame_manager_lr), R1_SP);
  // Used for non-initial callers by unextended_sp().
  std(R1_SP, _top_ijava_frame_abi(initial_caller_sp), R1_SP);
}

// Pop the topmost TOP_IJAVA_FRAME and convert the previous
// PARENT_IJAVA_FRAME back into a TOP_IJAVA_FRAME.
void InterpreterMacroAssembler::pop_interpreter_frame(Register tmp1, Register tmp2, Register tmp3, Register tmp4) {
  assert_different_registers(tmp1, tmp2, tmp3, tmp4);

  ld(tmp1/*caller's sp*/, _abi(callers_sp), R1_SP);
  ld(tmp3, _abi(lr), tmp1);

  ld(tmp4, _parent_ijava_frame_abi(initial_caller_sp), tmp1);

  ld(tmp2/*caller's caller's sp*/, _abi(callers_sp), tmp1);
  // Merge the top frame.
  std(tmp2, _abi(callers_sp), R1_SP);

  ld(tmp2, _parent_ijava_frame_abi(top_frame_sp), tmp1);

  // Update the C stack pointer to the caller's top_abi.
  resize_frame_absolute(tmp2/*addr*/, tmp1/*tmp*/, tmp2/*tmp*/);

  // Update LR in the top_frame.
  std(tmp3, _top_ijava_frame_abi(frame_manager_lr), R1_SP);

  std(tmp4, _top_ijava_frame_abi(initial_caller_sp), R1_SP);

  // Store the top-frame stack-pointer for c2i adapters.
  std(R1_SP, _top_ijava_frame_abi(top_frame_sp), R1_SP);
}

// Turn state's interpreter frame into the current TOP_IJAVA_FRAME.
void InterpreterMacroAssembler::pop_interpreter_frame_to_state(Register state, Register tmp1, Register tmp2, Register tmp3) {
  assert_different_registers(R14_state, R15_prev_state, tmp1, tmp2, tmp3);

  if (state == R14_state) {
    ld(tmp1/*state's fp*/, state_(_last_Java_fp));
    ld(tmp2/*state's sp*/, state_(_last_Java_sp));
  } else if (state == R15_prev_state) {
    ld(tmp1/*state's fp*/, prev_state_(_last_Java_fp));
    ld(tmp2/*state's sp*/, prev_state_(_last_Java_sp));
  } else {
    ShouldNotReachHere();
  }

  // Merge the top frames.
  std(tmp1, _abi(callers_sp), R1_SP);

  // Tmp2 is the new SP.
  // Tmp1 is the parent's SP.
  resize_frame_absolute(tmp2/*addr*/, tmp1/*tmp*/, tmp2/*tmp*/);

  // Update LR in the top_frame.
  // Must be an interpreter frame.
  get_PC_trash_LR(tmp3);
  std(tmp3, _top_ijava_frame_abi(frame_manager_lr), R1_SP);
  // Used for non-initial callers by unextended_sp().
  std(R1_SP, _top_ijava_frame_abi(initial_caller_sp), R1_SP);
}

// Set SP to the initial caller's sp, but fix the back chain first.
void InterpreterMacroAssembler::resize_frame_to_initial_caller(Register tmp1, Register tmp2) {
  ld(tmp1, _parent_ijava_frame_abi(initial_caller_sp), R1_SP);
  ld(tmp2, _parent_ijava_frame_abi(callers_sp), R1_SP);
  std(tmp2, _parent_ijava_frame_abi(callers_sp), tmp1); // Fix back chain ...
  mr(R1_SP, tmp1);                                      // ... and resize to the initial caller.
}

// Pop the current interpreter state (without popping the corresponding
// frame) and restore R14_state and R15_prev_state accordingly.
// Use prev_state_may_be_0 to indicate whether prev_state may be 0
// in order to generate an extra check before retrieving prev_state_(_prev_link).
void InterpreterMacroAssembler::pop_interpreter_state(bool prev_state_may_be_0) {
  // Move prev_state to state and restore prev_state from state_(_prev_link).
  Label prev_state_is_0;
  mr(R14_state, R15_prev_state);

  // Don't retrieve /*state==*/prev_state_(_prev_link)
  // if /*state==*/prev_state is 0.
  if (prev_state_may_be_0) {
    cmpdi(CCR0, R15_prev_state, 0);
    beq(CCR0, prev_state_is_0);
  }

  ld(R15_prev_state, /*state==*/prev_state_(_prev_link));
  bind(prev_state_is_0);
}

void InterpreterMacroAssembler::restore_prev_state() {
  // _prev_link is private, but cInterpreter is a friend.
  ld(R15_prev_state, state_(_prev_link));
}
#endif // CC_INTERP