// interp_masm_sparc.cpp revision 3602:da91efe96a93
/*
 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "interp_masm_sparc.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "oops/arrayOop.hpp"
#include "oops/markOop.hpp"
#include "oops/methodData.hpp"
#include "oops/method.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiRedefineClassesTrace.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/basicLock.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/sharedRuntime.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "thread_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "thread_solaris.inline.hpp"
#endif

#ifndef CC_INTERP
#ifndef FAST_DISPATCH
#define FAST_DISPATCH 1
#endif
#undef FAST_DISPATCH

// Implementation of InterpreterMacroAssembler

// This file specializes the assembler with interpreter-specific macros

const Address InterpreterMacroAssembler::l_tmp(FP, (frame::interpreter_frame_l_scratch_fp_offset * wordSize) + STACK_BIAS);
const Address InterpreterMacroAssembler::d_tmp(FP, (frame::interpreter_frame_d_scratch_fp_offset * wordSize) + STACK_BIAS);

#else // CC_INTERP
#ifndef STATE
#define STATE(field_name) Lstate, in_bytes(byte_offset_of(BytecodeInterpreter, field_name))
#endif // STATE

#endif // CC_INTERP

void InterpreterMacroAssembler::compute_extra_locals_size_in_bytes(Register args_size, Register locals_size, Register delta) {
  // Note: this algorithm is also used by C1's OSR entry sequence.
  // Any changes should also be applied to CodeEmitter::emit_osr_entry().
  assert_different_registers(args_size, locals_size);
  // max_locals*2 for TAGS.  Assumes that args_size has already been adjusted.
  subcc(locals_size, args_size, delta);  // extra space for non-arguments locals in words
  // Use br/mov combination because it works on both V8 and V9 and is
  // faster.
  Label skip_move;
  br(Assembler::negative, true, Assembler::pt, skip_move);
  delayed()->mov(G0, delta);
  bind(skip_move);
  round_to(delta, WordsPerLong);       // make multiple of 2 (SP must be 2-word aligned)
  sll(delta, LogBytesPerWord, delta);  // extra space for locals in bytes
}
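// Worked example (illustration only): with locals_size == 7 words and
// args_size == 3 words, subcc leaves delta == 4 words; the result is not
// negative, so the clamp to 0 is skipped, round_to keeps it at 4 (already a
// multiple of WordsPerLong == 2), and the shift yields 32 bytes on LP64
// (16 bytes on a 32-bit VM). If args_size >= locals_size, delta is clamped
// to 0 before rounding.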

#ifndef CC_INTERP

// Dispatch code executed in the prolog of a bytecode which does not do its
// own dispatch. The dispatch address is computed and placed in IdispatchAddress.
void InterpreterMacroAssembler::dispatch_prolog(TosState state, int bcp_incr) {
  assert_not_delayed();
#ifdef FAST_DISPATCH
  // FAST_DISPATCH and ProfileInterpreter are mutually exclusive since
  // they both use I2.
  assert(!ProfileInterpreter, "FAST_DISPATCH and +ProfileInterpreter are mutually exclusive");
  ldub(Lbcp, bcp_incr, Lbyte_code);                       // load next bytecode
  add(Lbyte_code, Interpreter::distance_from_dispatch_table(state), Lbyte_code);
                                                          // add offset to correct dispatch table
  sll(Lbyte_code, LogBytesPerWord, Lbyte_code);           // multiply by wordSize
  ld_ptr(IdispatchTables, Lbyte_code, IdispatchAddress);  // get entry addr
#else
  ldub( Lbcp, bcp_incr, Lbyte_code);                      // load next bytecode
  // dispatch table to use
  AddressLiteral tbl(Interpreter::dispatch_table(state));
  sll(Lbyte_code, LogBytesPerWord, Lbyte_code);           // multiply by wordSize
  set(tbl, G3_scratch);                                   // compute addr of table
  ld_ptr(G3_scratch, Lbyte_code, IdispatchAddress);       // get entry addr
#endif
}


// Dispatch code executed in the epilog of a bytecode which does not do its
// own dispatch. The dispatch address in IdispatchAddress is used for the
// dispatch.
void InterpreterMacroAssembler::dispatch_epilog(TosState state, int bcp_incr) {
  assert_not_delayed();
  verify_FPU(1, state);
  interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
  jmp( IdispatchAddress, 0 );
  if (bcp_incr != 0)  delayed()->inc(Lbcp, bcp_incr);
  else                delayed()->nop();
}


void InterpreterMacroAssembler::dispatch_next(TosState state, int bcp_incr) {
  // %%%% consider branching to a single shared dispatch stub (for each bcp_incr)
  assert_not_delayed();
  ldub( Lbcp, bcp_incr, Lbyte_code);  // load next bytecode
  dispatch_Lbyte_code(state, Interpreter::dispatch_table(state), bcp_incr);
}


void InterpreterMacroAssembler::dispatch_next_noverify_oop(TosState state, int bcp_incr) {
  // %%%% consider branching to a single shared dispatch stub (for each bcp_incr)
  assert_not_delayed();
  ldub( Lbcp, bcp_incr, Lbyte_code);  // load next bytecode
  dispatch_Lbyte_code(state, Interpreter::dispatch_table(state), bcp_incr, false);
}


void InterpreterMacroAssembler::dispatch_via(TosState state, address* table) {
  // load current bytecode
  assert_not_delayed();
  ldub( Lbcp, 0, Lbyte_code);  // load next bytecode
  dispatch_base(state, table);
}
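// All of the dispatch sequences above reduce to one indexed load followed by
// a jump; in rough pseudocode (not generated code):
//   entry = dispatch_table(state)[bytecode];   // table_base + bytecode * wordSize
//   goto *entry;
// The state-specific tables let a bytecode start executing with the
// top-of-stack value already cached in the right register.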

void InterpreterMacroAssembler::call_VM_leaf_base(
  Register java_thread,
  address  entry_point,
  int      number_of_arguments
) {
  if (!java_thread->is_valid())
    java_thread = L7_thread_cache;
  // super call
  MacroAssembler::call_VM_leaf_base(java_thread, entry_point, number_of_arguments);
}


void InterpreterMacroAssembler::call_VM_base(
  Register oop_result,
  Register java_thread,
  Register last_java_sp,
  address  entry_point,
  int      number_of_arguments,
  bool     check_exception
) {
  if (!java_thread->is_valid())
    java_thread = L7_thread_cache;
  // See class ThreadInVMfromInterpreter, which assumes that the interpreter
  // takes responsibility for setting its own thread-state on call-out.
  // However, ThreadInVMfromInterpreter resets the state to "in_Java".

  //save_bcp();                                  // save bcp
  MacroAssembler::call_VM_base(oop_result, java_thread, last_java_sp, entry_point, number_of_arguments, check_exception);
  //restore_bcp();                               // restore bcp
  //restore_locals();                            // restore locals pointer
}


void InterpreterMacroAssembler::check_and_handle_popframe(Register scratch_reg) {
  if (JvmtiExport::can_pop_frame()) {
    Label L;

    // Check the "pending popframe condition" flag in the current thread
    ld(G2_thread, JavaThread::popframe_condition_offset(), scratch_reg);

    // Initiate popframe handling only if it is not already being processed. If the flag
    // has the popframe_processing bit set, it means that this code is called *during* popframe
    // handling - we don't want to reenter.
    btst(JavaThread::popframe_pending_bit, scratch_reg);
    br(zero, false, pt, L);
    delayed()->nop();
    btst(JavaThread::popframe_processing_bit, scratch_reg);
    br(notZero, false, pt, L);
    delayed()->nop();

    // Call Interpreter::remove_activation_preserving_args_entry() to get the
    // address of the same-named entrypoint in the generated interpreter code.
    call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, Interpreter::remove_activation_preserving_args_entry));

    // Jump to Interpreter::_remove_activation_preserving_args_entry
    jmpl(O0, G0, G0);
    delayed()->nop();
    bind(L);
  }
}


void InterpreterMacroAssembler::load_earlyret_value(TosState state) {
  Register thr_state = G4_scratch;
  ld_ptr(G2_thread, JavaThread::jvmti_thread_state_offset(), thr_state);
  const Address tos_addr(thr_state, JvmtiThreadState::earlyret_tos_offset());
  const Address oop_addr(thr_state, JvmtiThreadState::earlyret_oop_offset());
  const Address val_addr(thr_state, JvmtiThreadState::earlyret_value_offset());
  switch (state) {
  case ltos: ld_long(val_addr, Otos_l);                    break;
  case atos: ld_ptr(oop_addr, Otos_l);
             st_ptr(G0, oop_addr);                         break;
  case btos:                                       // fall through
  case ctos:                                       // fall through
  case stos:                                       // fall through
  case itos: ld(val_addr, Otos_l1);                        break;
  case ftos: ldf(FloatRegisterImpl::S, val_addr, Ftos_f);  break;
  case dtos: ldf(FloatRegisterImpl::D, val_addr, Ftos_d);  break;
  case vtos: /* nothing to do */                           break;
  default  : ShouldNotReachHere();
  }
  // Clean up tos value in the jvmti thread state
  or3(G0, ilgl, G3_scratch);
  stw(G3_scratch, tos_addr);
  st_long(G0, val_addr);
  interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
}


void InterpreterMacroAssembler::check_and_handle_earlyret(Register scratch_reg) {
  if (JvmtiExport::can_force_early_return()) {
    Label L;
    Register thr_state = G3_scratch;
    ld_ptr(G2_thread, JavaThread::jvmti_thread_state_offset(), thr_state);
    br_null_short(thr_state, pt, L);  // if (thread->jvmti_thread_state() == NULL) exit;

    // Initiate earlyret handling only if it is not already being processed.
    // If the flag has the earlyret_processing bit set, it means that this code
    // is called *during* earlyret handling - we don't want to reenter.
    ld(thr_state, JvmtiThreadState::earlyret_state_offset(), G4_scratch);
    cmp_and_br_short(G4_scratch, JvmtiThreadState::earlyret_pending, Assembler::notEqual, pt, L);

    // Call Interpreter::remove_activation_early_entry() to get the address of the
    // same-named entrypoint in the generated interpreter code
    ld(thr_state, JvmtiThreadState::earlyret_tos_offset(), Otos_l1);
    call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, Interpreter::remove_activation_early_entry), Otos_l1);

    // Jump to Interpreter::_remove_activation_early_entry
    jmpl(O0, G0, G0);
    delayed()->nop();
    bind(L);
  }
}


void InterpreterMacroAssembler::super_call_VM_leaf(Register thread_cache, address entry_point, Register arg_1, Register arg_2) {
  mov(arg_1, O0);
  mov(arg_2, O1);
  MacroAssembler::call_VM_leaf_base(thread_cache, entry_point, 2);
}
#endif /* CC_INTERP */


#ifndef CC_INTERP

void InterpreterMacroAssembler::dispatch_base(TosState state, address* table) {
  assert_not_delayed();
  dispatch_Lbyte_code(state, table);
}


void InterpreterMacroAssembler::dispatch_normal(TosState state) {
  dispatch_base(state, Interpreter::normal_table(state));
}


void InterpreterMacroAssembler::dispatch_only(TosState state) {
  dispatch_base(state, Interpreter::dispatch_table(state));
}


// common code to dispatch and dispatch_only
// dispatch value in Lbyte_code and increment Lbcp

void InterpreterMacroAssembler::dispatch_Lbyte_code(TosState state, address* table, int bcp_incr, bool verify) {
  verify_FPU(1, state);
  // %%%%% maybe implement +VerifyActivationFrameSize here
  //verify_thread();  //too slow; we will just verify on method entry & exit
  if (verify) interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
#ifdef FAST_DISPATCH
  if (table == Interpreter::dispatch_table(state)) {
    // use IdispatchTables
    add(Lbyte_code, Interpreter::distance_from_dispatch_table(state), Lbyte_code);
                                                      // add offset to correct dispatch table
    sll(Lbyte_code, LogBytesPerWord, Lbyte_code);     // multiply by wordSize
    ld_ptr(IdispatchTables, Lbyte_code, G3_scratch);  // get entry addr
  } else {
#endif
    // dispatch table to use
    AddressLiteral tbl(table);
    sll(Lbyte_code, LogBytesPerWord, Lbyte_code);     // multiply by wordSize
    set(tbl, G3_scratch);                             // compute addr of table
    ld_ptr(G3_scratch, Lbyte_code, G3_scratch);       // get entry addr
#ifdef FAST_DISPATCH
  }
#endif
  jmp( G3_scratch, 0 );
  if (bcp_incr != 0)  delayed()->inc(Lbcp, bcp_incr);
  else                delayed()->nop();
}


// Helpers for expression stack

// Longs and doubles are Category 2 computational types in the
// JVM specification (section 3.11.1) and take 2 expression stack or
// local slots.
// Aligning them on 32 bit with tagged stacks is hard because the code generated
// for the dup* bytecodes depends on what types are already on the stack.
// If the types are split into the two stack/local slots, that is much easier
// (and we can use 0 for non-reference tags).
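// Concretely: on a 32-bit VM a long or double occupies two adjacent one-word
// slots and is loaded/stored as two 32-bit halves (see the unaligned
// load/store helpers below); on LP64 a single 64-bit access suffices, and the
// second slot exists only to preserve JVM-spec slot accounting.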

// Known good alignment in _LP64 but unknown otherwise
void InterpreterMacroAssembler::load_unaligned_double(Register r1, int offset, FloatRegister d) {
  assert_not_delayed();

#ifdef _LP64
  ldf(FloatRegisterImpl::D, r1, offset, d);
#else
  ldf(FloatRegisterImpl::S, r1, offset, d);
  ldf(FloatRegisterImpl::S, r1, offset + Interpreter::stackElementSize, d->successor());
#endif
}

// Known good alignment in _LP64 but unknown otherwise
void InterpreterMacroAssembler::store_unaligned_double(FloatRegister d, Register r1, int offset) {
  assert_not_delayed();

#ifdef _LP64
  stf(FloatRegisterImpl::D, d, r1, offset);
  // store something more useful here
  debug_only(stx(G0, r1, offset+Interpreter::stackElementSize);)
#else
  stf(FloatRegisterImpl::S, d, r1, offset);
  stf(FloatRegisterImpl::S, d->successor(), r1, offset + Interpreter::stackElementSize);
#endif
}


// Known good alignment in _LP64 but unknown otherwise
void InterpreterMacroAssembler::load_unaligned_long(Register r1, int offset, Register rd) {
  assert_not_delayed();
#ifdef _LP64
  ldx(r1, offset, rd);
#else
  ld(r1, offset, rd);
  ld(r1, offset + Interpreter::stackElementSize, rd->successor());
#endif
}

// Known good alignment in _LP64 but unknown otherwise
void InterpreterMacroAssembler::store_unaligned_long(Register l, Register r1, int offset) {
  assert_not_delayed();

#ifdef _LP64
  stx(l, r1, offset);
  // store something more useful here
  debug_only(stx(G0, r1, offset+Interpreter::stackElementSize);)
#else
  st(l, r1, offset);
  st(l->successor(), r1, offset + Interpreter::stackElementSize);
#endif
}

void InterpreterMacroAssembler::pop_i(Register r) {
  assert_not_delayed();
  ld(Lesp, Interpreter::expr_offset_in_bytes(0), r);
  inc(Lesp, Interpreter::stackElementSize);
  debug_only(verify_esp(Lesp));
}

void InterpreterMacroAssembler::pop_ptr(Register r, Register scratch) {
  assert_not_delayed();
  ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(0), r);
  inc(Lesp, Interpreter::stackElementSize);
  debug_only(verify_esp(Lesp));
}

void InterpreterMacroAssembler::pop_l(Register r) {
  assert_not_delayed();
  load_unaligned_long(Lesp, Interpreter::expr_offset_in_bytes(0), r);
  inc(Lesp, 2*Interpreter::stackElementSize);
  debug_only(verify_esp(Lesp));
}


void InterpreterMacroAssembler::pop_f(FloatRegister f, Register scratch) {
  assert_not_delayed();
  ldf(FloatRegisterImpl::S, Lesp, Interpreter::expr_offset_in_bytes(0), f);
  inc(Lesp, Interpreter::stackElementSize);
  debug_only(verify_esp(Lesp));
}


void InterpreterMacroAssembler::pop_d(FloatRegister f, Register scratch) {
  assert_not_delayed();
  load_unaligned_double(Lesp, Interpreter::expr_offset_in_bytes(0), f);
  inc(Lesp, 2*Interpreter::stackElementSize);
  debug_only(verify_esp(Lesp));
}


void InterpreterMacroAssembler::push_i(Register r) {
  assert_not_delayed();
  debug_only(verify_esp(Lesp));
  st(r, Lesp, 0);
  dec(Lesp, Interpreter::stackElementSize);
}

void InterpreterMacroAssembler::push_ptr(Register r) {
  assert_not_delayed();
  st_ptr(r, Lesp, 0);
  dec(Lesp, Interpreter::stackElementSize);
}
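// Expression-stack discipline, as the helpers above implement it: the stack
// grows downward, and Lesp points just below the top-of-stack slot. A push
// stores at Lesp[0] and then decrements Lesp by one slot; a pop reads the
// value back through expr_offset_in_bytes(0) before re-incrementing.
// Category-2 values (long/double) move Lesp by two slots.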

// remember: our convention for longs in SPARC is:
// O0 (Otos_l1) has high-order part in first word,
// O1 (Otos_l2) has low-order part in second word

void InterpreterMacroAssembler::push_l(Register r) {
  assert_not_delayed();
  debug_only(verify_esp(Lesp));
  // Longs are stored in memory-correct order, even if unaligned.
  int offset = -Interpreter::stackElementSize;
  store_unaligned_long(r, Lesp, offset);
  dec(Lesp, 2 * Interpreter::stackElementSize);
}


void InterpreterMacroAssembler::push_f(FloatRegister f) {
  assert_not_delayed();
  debug_only(verify_esp(Lesp));
  stf(FloatRegisterImpl::S, f, Lesp, 0);
  dec(Lesp, Interpreter::stackElementSize);
}


void InterpreterMacroAssembler::push_d(FloatRegister d) {
  assert_not_delayed();
  debug_only(verify_esp(Lesp));
  // Doubles are stored in memory-correct order, even if unaligned.
  int offset = -Interpreter::stackElementSize;
  store_unaligned_double(d, Lesp, offset);
  dec(Lesp, 2 * Interpreter::stackElementSize);
}


void InterpreterMacroAssembler::push(TosState state) {
  interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
  switch (state) {
  case atos: push_ptr();          break;
  case btos: push_i();            break;
  case ctos:
  case stos: push_i();            break;
  case itos: push_i();            break;
  case ltos: push_l();            break;
  case ftos: push_f();            break;
  case dtos: push_d();            break;
  case vtos: /* nothing to do */  break;
  default  : ShouldNotReachHere();
  }
}


void InterpreterMacroAssembler::pop(TosState state) {
  switch (state) {
  case atos: pop_ptr();           break;
  case btos: pop_i();             break;
  case ctos:
  case stos: pop_i();             break;
  case itos: pop_i();             break;
  case ltos: pop_l();             break;
  case ftos: pop_f();             break;
  case dtos: pop_d();             break;
  case vtos: /* nothing to do */  break;
  default  : ShouldNotReachHere();
  }
  interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
}


// Helpers for swap and dup
void InterpreterMacroAssembler::load_ptr(int n, Register val) {
  ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(n), val);
}
void InterpreterMacroAssembler::store_ptr(int n, Register val) {
  st_ptr(val, Lesp, Interpreter::expr_offset_in_bytes(n));
}


void InterpreterMacroAssembler::load_receiver(Register param_count,
                                              Register recv) {
  sll(param_count, Interpreter::logStackElementSize, param_count);
  ld_ptr(Lesp, param_count, recv);  // gets receiver oop
}
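// For example (illustrative numbers): for a call with param_count == 2
// parameter slots (receiver plus one one-slot argument), the shift-and-load
// above amounts to recv = *(Lesp + 2 * stackElementSize), i.e. the deepest of
// the parameter slots, which is where the receiver sits under its arguments.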

void InterpreterMacroAssembler::empty_expression_stack() {
  // Reset Lesp.
  sub( Lmonitors, wordSize, Lesp );

  // Reset SP by subtracting more space from Lesp.
  Label done;
  assert(G4_scratch != Gframe_size, "Only you can prevent register aliasing!");

  // A native does not need to do this, since its callee does not change SP.
  ld(Lmethod, Method::access_flags_offset(), Gframe_size);  // Load access flags.
  btst(JVM_ACC_NATIVE, Gframe_size);
  br(Assembler::notZero, false, Assembler::pt, done);
  delayed()->nop();

  // Compute max expression stack+register save area
  lduh(Lmethod, in_bytes(Method::max_stack_offset()), Gframe_size);  // Load max stack.
  add( Gframe_size, frame::memory_parameter_word_sp_offset, Gframe_size );

  //
  // now set up a stack frame with the size computed above
  //
  //round_to( Gframe_size, WordsPerLong ); // -- moved down to the "and" below
  sll( Gframe_size, LogBytesPerWord, Gframe_size );
  sub( Lesp, Gframe_size, Gframe_size );
  and3( Gframe_size, -(2 * wordSize), Gframe_size );  // align SP (downwards) to an 8/16-byte boundary
  debug_only(verify_sp(Gframe_size, G4_scratch));
#ifdef _LP64
  sub(Gframe_size, STACK_BIAS, Gframe_size );
#endif
  mov(Gframe_size, SP);

  bind(done);
}


#ifdef ASSERT
void InterpreterMacroAssembler::verify_sp(Register Rsp, Register Rtemp) {
  Label Bad, OK;

  // Saved SP must be aligned.
#ifdef _LP64
  btst(2*BytesPerWord-1, Rsp);
#else
  btst(LongAlignmentMask, Rsp);
#endif
  br(Assembler::notZero, false, Assembler::pn, Bad);
  delayed()->nop();

  // Saved SP, plus register window size, must not be above FP.
  add(Rsp, frame::register_save_words * wordSize, Rtemp);
#ifdef _LP64
  sub(Rtemp, STACK_BIAS, Rtemp);  // Bias Rtemp before cmp to FP
#endif
  cmp_and_brx_short(Rtemp, FP, Assembler::greaterUnsigned, Assembler::pn, Bad);

  // Saved SP must not be ridiculously below current SP.
  size_t maxstack = MAX2(JavaThread::stack_size_at_create(), (size_t) 4*K*K);
  set(maxstack, Rtemp);
  sub(SP, Rtemp, Rtemp);
#ifdef _LP64
  add(Rtemp, STACK_BIAS, Rtemp);  // Unbias Rtemp before cmp to Rsp
#endif
  cmp_and_brx_short(Rsp, Rtemp, Assembler::lessUnsigned, Assembler::pn, Bad);

  ba_short(OK);

  bind(Bad);
  stop("on return to interpreted call, restored SP is corrupted");

  bind(OK);
}


void InterpreterMacroAssembler::verify_esp(Register Resp) {
  // about to read or write Resp[0]
  // make sure it is not in the monitors or the register save area
  Label OK1, OK2;

  cmp(Resp, Lmonitors);
  brx(Assembler::lessUnsigned, true, Assembler::pt, OK1);
  delayed()->sub(Resp, frame::memory_parameter_word_sp_offset * wordSize, Resp);
  stop("too many pops: Lesp points into monitor area");
  bind(OK1);
#ifdef _LP64
  sub(Resp, STACK_BIAS, Resp);
#endif
  cmp(Resp, SP);
  brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, OK2);
  delayed()->add(Resp, STACK_BIAS + frame::memory_parameter_word_sp_offset * wordSize, Resp);
  stop("too many pushes: Lesp points into register window");
  bind(OK2);
}
#endif // ASSERT
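// Taken together, the two checks in verify_esp pin Lesp inside the
// expression-stack region of the interpreter frame: strictly below the
// monitor area (Lmonitors) and above the memory-parameter/register-save
// words that sit just over SP.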

// Load compiled (i2c) or interpreter entry when calling from interpreted and
// do the call. Centralized so that all interpreter calls will do the same actions.
// If jvmti single stepping is on for a thread we must not call compiled code.
void InterpreterMacroAssembler::call_from_interpreter(Register target, Register scratch, Register Rret) {

  // Assume we want to go compiled if available

  ld_ptr(G5_method, in_bytes(Method::from_interpreted_offset()), target);

  if (JvmtiExport::can_post_interpreter_events()) {
    // JVMTI events, such as single-stepping, are implemented partly by avoiding running
    // compiled code in threads for which the event is enabled.  Check here for
    // interp_only_mode if these events CAN be enabled.
    verify_thread();
    Label skip_compiled_code;

    const Address interp_only(G2_thread, JavaThread::interp_only_mode_offset());
    ld(interp_only, scratch);
    cmp_zero_and_br(Assembler::notZero, scratch, skip_compiled_code, true, Assembler::pn);
    delayed()->ld_ptr(G5_method, in_bytes(Method::interpreter_entry_offset()), target);
    bind(skip_compiled_code);
  }

  // the i2c_adapters need Method* in G5_method (right? %%%)
  // do the call
#ifdef ASSERT
  {
    Label ok;
    br_notnull_short(target, Assembler::pt, ok);
    stop("null entry point");
    bind(ok);
  }
#endif // ASSERT

  // Adjust Rret first so Llast_SP can be same as Rret
  add(Rret, -frame::pc_return_offset, O7);
  add(Lesp, BytesPerWord, Gargs);  // setup parameter pointer
  // Record SP so we can remove any stack space allocated by adapter transition
  jmp(target, 0);
  delayed()->mov(SP, Llast_SP);
}

void InterpreterMacroAssembler::if_cmp(Condition cc, bool ptr_compare) {
  assert_not_delayed();

  Label not_taken;
  if (ptr_compare) brx(cc, false, Assembler::pn, not_taken);
  else             br (cc, false, Assembler::pn, not_taken);
  delayed()->nop();

  TemplateTable::branch(false, false);

  bind(not_taken);

  profile_not_taken_branch(G3_scratch);
}


void InterpreterMacroAssembler::get_2_byte_integer_at_bcp(
                                  int         bcp_offset,
                                  Register    Rtmp,
                                  Register    Rdst,
                                  signedOrNot is_signed,
                                  setCCOrNot  should_set_CC ) {
  assert(Rtmp != Rdst, "need separate temp register");
  assert_not_delayed();
  switch (is_signed) {
   default: ShouldNotReachHere();

   case   Signed: ldsb( Lbcp, bcp_offset, Rdst );  break;  // high byte
   case Unsigned: ldub( Lbcp, bcp_offset, Rdst );  break;  // high byte
  }
  ldub( Lbcp, bcp_offset + 1, Rtmp );  // low byte
  sll( Rdst, BitsPerByte, Rdst);
  switch (should_set_CC ) {
   default: ShouldNotReachHere();

   case      set_CC: orcc( Rdst, Rtmp, Rdst );  break;
   case dont_set_CC: or3(  Rdst, Rtmp, Rdst );  break;
  }
}


void InterpreterMacroAssembler::get_4_byte_integer_at_bcp(
                                  int        bcp_offset,
                                  Register   Rtmp,
                                  Register   Rdst,
                                  setCCOrNot should_set_CC ) {
  assert(Rtmp != Rdst, "need separate temp register");
  assert_not_delayed();
  add( Lbcp, bcp_offset, Rtmp);
  andcc( Rtmp, 3, G0);
  Label aligned;
  switch (should_set_CC ) {
   default: ShouldNotReachHere();

   case      set_CC: break;
   case dont_set_CC: break;
  }

  br(Assembler::zero, true, Assembler::pn, aligned);
#ifdef _LP64
  delayed()->ldsw(Rtmp, 0, Rdst);
#else
  delayed()->ld(Rtmp, 0, Rdst);
#endif

  ldub(Lbcp, bcp_offset + 3, Rdst);
  ldub(Lbcp, bcp_offset + 2, Rtmp);  sll(Rtmp,  8, Rtmp);  or3(Rtmp, Rdst, Rdst);
  ldub(Lbcp, bcp_offset + 1, Rtmp);  sll(Rtmp, 16, Rtmp);  or3(Rtmp, Rdst, Rdst);
#ifdef _LP64
  ldsb(Lbcp, bcp_offset + 0, Rtmp);  sll(Rtmp, 24, Rtmp);
#else
  // Unsigned load is faster than signed on some implementations
  ldub(Lbcp, bcp_offset + 0, Rtmp);  sll(Rtmp, 24, Rtmp);
#endif
  or3(Rtmp, Rdst, Rdst );

  bind(aligned);
  if (should_set_CC == set_CC) tst(Rdst);
}
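// Bytecode operands are big-endian. For example, given operand bytes
// 0x12 0x34 at bcp+offset, the two-byte loader above produces
// (0x12 << 8) | 0x34 == 0x1234. The four-byte loader assembles the word
// byte-by-byte only when bcp+offset is not 4-byte aligned; when it is
// aligned, the single load in the branch delay slot suffices.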

void InterpreterMacroAssembler::get_cache_index_at_bcp(Register temp, Register index,
                                                       int bcp_offset, size_t index_size) {
  assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
  if (index_size == sizeof(u2)) {
    get_2_byte_integer_at_bcp(bcp_offset, temp, index, Unsigned);
  } else if (index_size == sizeof(u4)) {
    assert(EnableInvokeDynamic, "giant index used only for JSR 292");
    get_4_byte_integer_at_bcp(bcp_offset, temp, index);
    assert(ConstantPool::decode_invokedynamic_index(~123) == 123, "else change next line");
    xor3(index, -1, index);  // convert to plain index
  } else if (index_size == sizeof(u1)) {
    ldub(Lbcp, bcp_offset, index);
  } else {
    ShouldNotReachHere();
  }
}


void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache, Register tmp,
                                                           int bcp_offset, size_t index_size) {
  assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
  assert_different_registers(cache, tmp);
  assert_not_delayed();
  get_cache_index_at_bcp(cache, tmp, bcp_offset, index_size);
  // convert from field index to ConstantPoolCacheEntry index and from
  // word index to byte offset
  sll(tmp, exact_log2(in_words(ConstantPoolCacheEntry::size()) * BytesPerWord), tmp);
  add(LcpoolCache, tmp, cache);
}


void InterpreterMacroAssembler::get_cache_and_index_and_bytecode_at_bcp(Register cache,
                                                                        Register temp,
                                                                        Register bytecode,
                                                                        int byte_no,
                                                                        int bcp_offset,
                                                                        size_t index_size) {
  get_cache_and_index_at_bcp(cache, temp, bcp_offset, index_size);
  ld_ptr(cache, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset(), bytecode);
  const int shift_count = (1 + byte_no) * BitsPerByte;
  assert((byte_no == TemplateTable::f1_byte && shift_count == ConstantPoolCacheEntry::bytecode_1_shift) ||
         (byte_no == TemplateTable::f2_byte && shift_count == ConstantPoolCacheEntry::bytecode_2_shift),
         "correct shift count");
  srl(bytecode, shift_count, bytecode);
  assert(ConstantPoolCacheEntry::bytecode_1_mask == ConstantPoolCacheEntry::bytecode_2_mask, "common mask");
  and3(bytecode, ConstantPoolCacheEntry::bytecode_1_mask, bytecode);
}


void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache, Register tmp,
                                                               int bcp_offset, size_t index_size) {
  assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
  assert_different_registers(cache, tmp);
  assert_not_delayed();
  if (index_size == sizeof(u2)) {
    get_2_byte_integer_at_bcp(bcp_offset, cache, tmp, Unsigned);
  } else {
    ShouldNotReachHere();  // other sizes not supported here
  }
  // convert from field index to ConstantPoolCacheEntry index
  // and from word index to byte offset
  sll(tmp, exact_log2(in_words(ConstantPoolCacheEntry::size()) * BytesPerWord), tmp);
  // skip past the header
  add(tmp, in_bytes(ConstantPoolCache::base_offset()), tmp);
  // construct pointer to cache entry
  add(LcpoolCache, tmp, cache);
}
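// Note on the u4 case above: invokedynamic indices are stored in the bytecode
// stream bitwise-complemented, so xor3(index, -1, index) (i.e. ~index)
// recovers the plain index; the assert documents this with ~123 decoding
// back to 123.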

// Load object from cpool->resolved_references(index)
void InterpreterMacroAssembler::load_resolved_reference_at_index(
                                           Register result, Register index) {
  assert_different_registers(result, index);
  assert_not_delayed();
  // convert from field index to resolved_references() index and from
  // word index to byte offset. Since this is a java object, it can be compressed
  Register tmp = index;  // reuse
  sll(index, LogBytesPerHeapOop, tmp);
  get_constant_pool(result);
  // load pointer for resolved_references[] objArray
  ld_ptr(result, ConstantPool::resolved_references_offset_in_bytes(), result);
  // JNIHandles::resolve(result)
  ld_ptr(result, 0, result);
  // Add in the index
  add(result, tmp, result);
  load_heap_oop(result, arrayOopDesc::base_offset_in_bytes(T_OBJECT), result);
}


// Generate a subtype check: branch to ok_is_subtype if sub_klass is
// a subtype of super_klass.  Blows registers Rsuper_klass, Rsub_klass, tmp1, tmp2.
void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass,
                                                  Register Rsuper_klass,
                                                  Register Rtmp1,
                                                  Register Rtmp2,
                                                  Register Rtmp3,
                                                  Label &ok_is_subtype ) {
  Label not_subtype;

  // Profile the not-null value's klass.
  profile_typecheck(Rsub_klass, Rtmp1);

  check_klass_subtype_fast_path(Rsub_klass, Rsuper_klass,
                                Rtmp1, Rtmp2,
                                &ok_is_subtype, &not_subtype, NULL);

  check_klass_subtype_slow_path(Rsub_klass, Rsuper_klass,
                                Rtmp1, Rtmp2, Rtmp3, /*hack:*/ noreg,
                                &ok_is_subtype, NULL);

  bind(not_subtype);
  profile_typecheck_failed(Rtmp1);
}

// Separate these two to allow for delay slot in middle
// These are used to do a test and full jump to exception-throwing code.

// %%%%% Could possibly reoptimize this by testing to see if we could use
// a single conditional branch (i.e. if the span is small enough).
// If you go that route, then get rid of the split and give up
// on the delay-slot hack.

void InterpreterMacroAssembler::throw_if_not_1_icc( Condition ok_condition,
                                                    Label&    ok ) {
  assert_not_delayed();
  br(ok_condition, true, pt, ok);
  // DELAY SLOT
}

void InterpreterMacroAssembler::throw_if_not_1_xcc( Condition ok_condition,
                                                    Label&    ok ) {
  assert_not_delayed();
  bp( ok_condition, true, Assembler::xcc, pt, ok);
  // DELAY SLOT
}

void InterpreterMacroAssembler::throw_if_not_1_x( Condition ok_condition,
                                                  Label&    ok ) {
  assert_not_delayed();
  brx(ok_condition, true, pt, ok);
  // DELAY SLOT
}

void InterpreterMacroAssembler::throw_if_not_2( address  throw_entry_point,
                                                Register Rscratch,
                                                Label&   ok ) {
  assert(throw_entry_point != NULL, "entry point must be generated by now");
  AddressLiteral dest(throw_entry_point);
  jump_to(dest, Rscratch);
  delayed()->nop();
  bind(ok);
}


// And if you cannot use the delay slot, here is a shorthand:

void InterpreterMacroAssembler::throw_if_not_icc( Condition ok_condition,
                                                  address   throw_entry_point,
                                                  Register  Rscratch ) {
  Label ok;
  if (ok_condition != never) {
    throw_if_not_1_icc( ok_condition, ok);
    delayed()->nop();
  }
  throw_if_not_2( throw_entry_point, Rscratch, ok);
}
void InterpreterMacroAssembler::throw_if_not_xcc( Condition ok_condition,
                                                  address   throw_entry_point,
                                                  Register  Rscratch ) {
  Label ok;
  if (ok_condition != never) {
    throw_if_not_1_xcc( ok_condition, ok);
    delayed()->nop();
  }
  throw_if_not_2( throw_entry_point, Rscratch, ok);
}
void InterpreterMacroAssembler::throw_if_not_x( Condition ok_condition,
                                                address   throw_entry_point,
                                                Register  Rscratch ) {
  Label ok;
  if (ok_condition != never) {
    throw_if_not_1_x( ok_condition, ok);
    delayed()->nop();
  }
  throw_if_not_2( throw_entry_point, Rscratch, ok);
}

// Check that index is in range for array, then shift index by index_shift,
// and put arrayOop + shifted_index into res.
// Note: res is still shy of address by array offset into object.

void InterpreterMacroAssembler::index_check_without_pop(Register array, Register index, int index_shift, Register tmp, Register res) {
  assert_not_delayed();

  verify_oop(array);
#ifdef _LP64
  // sign extend since tos (index) can be a 32bit value
  sra(index, G0, index);
#endif // _LP64

  // check array
  Label ptr_ok;
  tst(array);
  throw_if_not_1_x( notZero, ptr_ok );
  delayed()->ld( array, arrayOopDesc::length_offset_in_bytes(), tmp );  // check index
  throw_if_not_2( Interpreter::_throw_NullPointerException_entry, G3_scratch, ptr_ok);

  Label index_ok;
  cmp(index, tmp);
  throw_if_not_1_icc( lessUnsigned, index_ok );
  if (index_shift > 0)  delayed()->sll(index, index_shift, index);
  else                  delayed()->add(array, index, res);  // addr - const offset in index
  // convention: move aberrant index into G3_scratch for exception message
  mov(index, G3_scratch);
  throw_if_not_2( Interpreter::_throw_ArrayIndexOutOfBoundsException_entry, G4_scratch, index_ok);

  // add offset if didn't do it in delay slot
  if (index_shift > 0)  add(array, index, res);  // addr - const offset in index
}


void InterpreterMacroAssembler::index_check(Register array, Register index, int index_shift, Register tmp, Register res) {
  assert_not_delayed();

  // pop array
  pop_ptr(array);

  // check array
  index_check_without_pop(array, index, index_shift, tmp, res);
}


void InterpreterMacroAssembler::get_const(Register Rdst) {
  ld_ptr(Lmethod, in_bytes(Method::const_offset()), Rdst);
}


void InterpreterMacroAssembler::get_constant_pool(Register Rdst) {
  get_const(Rdst);
  ld_ptr(Rdst, in_bytes(ConstMethod::constants_offset()), Rdst);
}


void InterpreterMacroAssembler::get_constant_pool_cache(Register Rdst) {
  get_constant_pool(Rdst);
  ld_ptr(Rdst, ConstantPool::cache_offset_in_bytes(), Rdst);
}


void InterpreterMacroAssembler::get_cpool_and_tags(Register Rcpool, Register Rtags) {
  get_constant_pool(Rcpool);
  ld_ptr(Rcpool, ConstantPool::tags_offset_in_bytes(), Rtags);
}
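// The accessors above walk the method's metadata chain one ld_ptr at a time;
// in rough C++ terms (accessor names shown for illustration only):
//   get_const:               Rdst  = Lmethod->constmethod();
//   get_constant_pool:       Rdst  = Rdst->constants();
//   get_constant_pool_cache: Rdst  = Rdst->cache();
//   get_cpool_and_tags:      Rtags = Rcpool->tags();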

// unlock if synchronized method
//
// Unlock the receiver if this is a synchronized method.
// Unlock any Java monitors from synchronized blocks.
//
// If there are locked Java monitors
//    If throw_monitor_exception
//       throws IllegalMonitorStateException
//    Else if install_monitor_exception
//       installs IllegalMonitorStateException
//    Else
//       no error processing
void InterpreterMacroAssembler::unlock_if_synchronized_method(TosState state,
                                                              bool throw_monitor_exception,
                                                              bool install_monitor_exception) {
  Label unlocked, unlock, no_unlock;

  // get the value of _do_not_unlock_if_synchronized into G1_scratch
  const Address do_not_unlock_if_synchronized(G2_thread,
    JavaThread::do_not_unlock_if_synchronized_offset());
  ldbool(do_not_unlock_if_synchronized, G1_scratch);
  stbool(G0, do_not_unlock_if_synchronized);  // reset the flag

  // check if synchronized method
  const Address access_flags(Lmethod, Method::access_flags_offset());
  interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
  push(state);  // save tos
  ld(access_flags, G3_scratch);  // Load access flags.
  btst(JVM_ACC_SYNCHRONIZED, G3_scratch);
  br(zero, false, pt, unlocked);
  delayed()->nop();

  // Don't unlock anything if the _do_not_unlock_if_synchronized flag
  // is set.
  cmp_zero_and_br(Assembler::notZero, G1_scratch, no_unlock);
  delayed()->nop();

  // BasicObjectLock will be first in list, since this is a synchronized method. However, need
  // to check that the object has not been unlocked by an explicit monitorexit bytecode.

  //Intel: if (throw_monitor_exception) ... else ...
  // Entry already unlocked, need to throw exception
  //...

  // pass top-most monitor elem
  add( top_most_monitor(), O1 );

  ld_ptr(O1, BasicObjectLock::obj_offset_in_bytes(), G3_scratch);
  br_notnull_short(G3_scratch, pt, unlock);

  if (throw_monitor_exception) {
    // Entry already unlocked need to throw an exception
    MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
    should_not_reach_here();
  } else {
    // Monitor already unlocked during a stack unroll.
    // If requested, install an illegal_monitor_state_exception.
    // Continue with stack unrolling.
    if (install_monitor_exception) {
      MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::new_illegal_monitor_state_exception));
    }
    ba_short(unlocked);
  }

  bind(unlock);

  unlock_object(O1);

  bind(unlocked);

  // I0, I1: Might contain return value

  // Check that all monitors are unlocked
  { Label loop, exception, entry, restart;

    Register Rmptr   = O0;
    Register Rtemp   = O1;
    Register Rlimit  = Lmonitors;
    const jint delta = frame::interpreter_frame_monitor_size() * wordSize;
    assert( (delta & LongAlignmentMask) == 0,
            "sizeof BasicObjectLock must be even number of doublewords");

 #ifdef ASSERT
    add(top_most_monitor(), Rmptr, delta);
    { Label L;
      // ensure that Rmptr starts out above (or at) Rlimit
      cmp_and_brx_short(Rmptr, Rlimit, Assembler::greaterEqualUnsigned, pn, L);
      stop("monitor stack has negative size");
      bind(L);
    }
 #endif
    bind(restart);
    ba(entry);
    delayed()->
    add(top_most_monitor(), Rmptr, delta);  // points to current entry, starting with bottom-most entry

    // Entry is still locked, need to throw exception
    bind(exception);
    if (throw_monitor_exception) {
      MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
      should_not_reach_here();
    } else {
      // Stack unrolling. Unlock object and if requested, install illegal_monitor_exception.
      // Unlock does not block, so don't have to worry about the frame
      unlock_object(Rmptr);
      if (install_monitor_exception) {
        MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::new_illegal_monitor_state_exception));
      }
      ba_short(restart);
    }

    bind(loop);
    cmp(Rtemp, G0);                            // check if current entry is used
    brx(Assembler::notEqual, false, pn, exception);
    delayed()->
    dec(Rmptr, delta);                         // otherwise advance to next entry
 #ifdef ASSERT
    { Label L;
      // ensure that Rmptr has not somehow stepped below Rlimit
      cmp_and_brx_short(Rmptr, Rlimit, Assembler::greaterEqualUnsigned, pn, L);
      stop("ran off the end of the monitor stack");
      bind(L);
    }
 #endif
    bind(entry);
    cmp(Rmptr, Rlimit);                        // check if bottom reached
    brx(Assembler::notEqual, true, pn, loop);  // if not at bottom then check this entry
    delayed()->
    ld_ptr(Rmptr, BasicObjectLock::obj_offset_in_bytes() - delta, Rtemp);
  }

  bind(no_unlock);
  pop(state);
  interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
}


// remove activation
//
// Unlock the receiver if this is a synchronized method.
// Unlock any Java monitors from synchronized blocks.
// Remove the activation from the stack.
//
// If there are locked Java monitors
//    If throw_monitor_exception
//       throws IllegalMonitorStateException
//    Else if install_monitor_exception
//       installs IllegalMonitorStateException
//    Else
//       no error processing
void InterpreterMacroAssembler::remove_activation(TosState state,
                                                  bool throw_monitor_exception,
                                                  bool install_monitor_exception) {

  unlock_if_synchronized_method(state, throw_monitor_exception, install_monitor_exception);

  // save result (push state before jvmti call and pop it afterwards) and notify jvmti
  notify_method_exit(false, state, NotifyJVMTI);

  interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
  verify_thread();

  // return tos
  assert(Otos_l1 == Otos_i, "adjust code below");
  switch (state) {
#ifdef _LP64
  case ltos: mov(Otos_l, Otos_l->after_save());    break;  // O0 -> I0
#else
  case ltos: mov(Otos_l2, Otos_l2->after_save());  // fall through  // O1 -> I1
#endif
  case btos:                                       // fall through
  case ctos:
  case stos:                                       // fall through
  case atos:                                       // fall through
  case itos: mov(Otos_l1, Otos_l1->after_save());  break;  // O0 -> I0
  case ftos:                                       // fall through
  case dtos:                                       // fall through
  case vtos: /* nothing to do */                   break;
  default  : ShouldNotReachHere();
  }

#if defined(COMPILER2) && !defined(_LP64)
  if (state == ltos) {
    // C2 expects long results in G1 we can't tell if we're returning to interpreted
    // or compiled so just be safe use G1 and O0/O1

    // Shift bits into high (msb) of G1
    sllx(Otos_l1->after_save(), 32, G1);
    // Zero extend low bits
    srl (Otos_l2->after_save(), 0, Otos_l2->after_save());
    or3 (Otos_l2->after_save(), G1, G1);
  }
#endif /* COMPILER2 */

}
#endif /* CC_INTERP */


// Lock object
//
// Argument - lock_reg points to the BasicObjectLock to be used for locking,
//            it must be initialized with the object to lock
void InterpreterMacroAssembler::lock_object(Register lock_reg, Register Object) {
  if (UseHeavyMonitors) {
    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter), lock_reg);
  }
  else {
    Register obj_reg = Object;
    Register mark_reg = G4_scratch;
    Register temp_reg = G1_scratch;
    Address  lock_addr(lock_reg, BasicObjectLock::lock_offset_in_bytes());
    Address  mark_addr(obj_reg, oopDesc::mark_offset_in_bytes());
    Label    done;

    Label slow_case;

    assert_different_registers(lock_reg, obj_reg, mark_reg, temp_reg);

    // load markOop from object into mark_reg
    ld_ptr(mark_addr, mark_reg);

    if (UseBiasedLocking) {
      biased_locking_enter(obj_reg, mark_reg, temp_reg, done, &slow_case);
    }

    // get the address of basicLock on stack that will be stored in the object
    // we need a temporary register here as we do not want to clobber lock_reg
    // (cas clobbers the destination register)
    mov(lock_reg, temp_reg);
    // set mark reg to be (markOop of object | UNLOCK_VALUE)
    or3(mark_reg, markOopDesc::unlocked_value, mark_reg);
    // initialize the box  (Must happen before we update the object mark!)
    st_ptr(mark_reg, lock_addr, BasicLock::displaced_header_offset_in_bytes());
    // compare and exchange object_addr, markOop | 1, stack address of basicLock
    assert(mark_addr.disp() == 0, "cas must take a zero displacement");
    casx_under_lock(mark_addr.base(), mark_reg, temp_reg,
      (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());

    // if the compare and exchange succeeded we are done (we saw an unlocked object)
    cmp_and_brx_short(mark_reg, temp_reg, Assembler::equal, Assembler::pt, done);

    // We did not see an unlocked object so try the fast recursive case

    // Check if owner is self by comparing the value in the markOop of object
    // with the stack pointer
    sub(temp_reg, SP, temp_reg);
#ifdef _LP64
    sub(temp_reg, STACK_BIAS, temp_reg);
#endif
    assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");

    // Composite "andcc" test:
    // (a) %sp -vs- markword proximity check, and,
    // (b) verify mark word LSBs == 0 (Stack-locked).
    //
    // FFFFF003/FFFFFFFFFFFF003 is (markOopDesc::lock_mask_in_place | -os::vm_page_size())
    // Note that the page size used for %sp proximity testing is arbitrary and is
    // unrelated to the actual MMU page size.  We use a 'logical' page size of
    // 4096 bytes.   F..FFF003 is designed to fit conveniently in the SIMM13 immediate
    // field of the andcc instruction.
    andcc (temp_reg, 0xFFFFF003, G0) ;

    // if condition is true we are done and hence we can store 0 in the displaced
    // header indicating it is a recursive lock and be done
    brx(Assembler::zero, true, Assembler::pt, done);
    delayed()->st_ptr(G0, lock_addr, BasicLock::displaced_header_offset_in_bytes());

    // none of the above fast optimizations worked so we have to get into the
    // slow case of monitor enter
    bind(slow_case);
    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter), lock_reg);

    bind(done);
  }
}
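// In rough pseudocode, the stack-locking fast path above is:
//   box->displaced_header = mark | unlocked_value;
//   if (CAS(&obj->mark, mark | unlocked_value, box) succeeded)
//     done;                          // we stack-locked the object
//   else if (((current_mark - SP) & 0xFFFFF003) == 0)
//     box->displaced_header = NULL;  // already stack-locked by this thread: recursive
//   else
//     InterpreterRuntime::monitorenter(...);  // contended or inflated: slow path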

// Unlocks an object. Used in monitorexit bytecode and remove_activation.
//
// Argument - lock_reg points to the BasicObjectLock for lock
// Throw IllegalMonitorStateException if object is not locked by current thread
void InterpreterMacroAssembler::unlock_object(Register lock_reg) {
  if (UseHeavyMonitors) {
    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), lock_reg);
  } else {
    Register obj_reg = G3_scratch;
    Register mark_reg = G4_scratch;
    Register displaced_header_reg = G1_scratch;
    Address  lockobj_addr(lock_reg, BasicObjectLock::obj_offset_in_bytes());
    Address  mark_addr(obj_reg, oopDesc::mark_offset_in_bytes());
    Label    done;

    if (UseBiasedLocking) {
      // load the object out of the BasicObjectLock
      ld_ptr(lockobj_addr, obj_reg);
      biased_locking_exit(mark_addr, mark_reg, done, true);
      st_ptr(G0, lockobj_addr);  // free entry
    }

    // Test first if we are in the fast recursive case
    Address lock_addr(lock_reg, BasicObjectLock::lock_offset_in_bytes() + BasicLock::displaced_header_offset_in_bytes());
    ld_ptr(lock_addr, displaced_header_reg);
    br_null(displaced_header_reg, true, Assembler::pn, done);
    delayed()->st_ptr(G0, lockobj_addr);  // free entry

    // See if it is still a light weight lock, if so we just unlock
    // the object and we are done

    if (!UseBiasedLocking) {
      // load the object out of the BasicObjectLock
      ld_ptr(lockobj_addr, obj_reg);
    }

    // we have the displaced header in displaced_header_reg
    // we expect to see the stack address of the basicLock in case the
    // lock is still a light weight lock (lock_reg)
    assert(mark_addr.disp() == 0, "cas must take a zero displacement");
    casx_under_lock(mark_addr.base(), lock_reg, displaced_header_reg,
      (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
    cmp(lock_reg, displaced_header_reg);
    brx(Assembler::equal, true, Assembler::pn, done);
    delayed()->st_ptr(G0, lockobj_addr);  // free entry

    // The lock has been converted into a heavy lock and hence
    // we need to get into the slow case

    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), lock_reg);

    bind(done);
  }
}

#ifndef CC_INTERP

// Get the method data pointer from the Method* and set the
// specified register to its value.

void InterpreterMacroAssembler::set_method_data_pointer() {
  assert(ProfileInterpreter, "must be profiling interpreter");
  Label get_continue;

  ld_ptr(Lmethod, in_bytes(Method::method_data_offset()), ImethodDataPtr);
  test_method_data_pointer(get_continue);
  add(ImethodDataPtr, in_bytes(MethodData::data_offset()), ImethodDataPtr);
  bind(get_continue);
}

// Set the method data pointer for the current bcp.

void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() {
  assert(ProfileInterpreter, "must be profiling interpreter");
  Label zero_continue;

  // Test MDO to avoid the call if it is NULL.
  ld_ptr(Lmethod, in_bytes(Method::method_data_offset()), ImethodDataPtr);
  test_method_data_pointer(zero_continue);
  call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::bcp_to_di), Lmethod, Lbcp);
  add(ImethodDataPtr, in_bytes(MethodData::data_offset()), ImethodDataPtr);
  add(ImethodDataPtr, O0, ImethodDataPtr);
  bind(zero_continue);
}
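// After the sequence above, ImethodDataPtr = MethodData* + data_offset() + di,
// where di is the data index returned in O0 by InterpreterRuntime::bcp_to_di
// for the current Lmethod/Lbcp pair.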

// Test ImethodDataPtr. If it is null, continue at the specified label.

void InterpreterMacroAssembler::test_method_data_pointer(Label& zero_continue) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  br_null_short(ImethodDataPtr, Assembler::pn, zero_continue);
}

void InterpreterMacroAssembler::verify_method_data_pointer() {
  assert(ProfileInterpreter, "must be profiling interpreter");
#ifdef ASSERT
  Label verify_continue;
  test_method_data_pointer(verify_continue);

  // If the mdp is valid, it will point to a DataLayout header which is
  // consistent with the bcp.  The converse is highly probable also.
  lduh(ImethodDataPtr, in_bytes(DataLayout::bci_offset()), G3_scratch);
  ld_ptr(Lmethod, Method::const_offset(), O5);
  add(G3_scratch, in_bytes(ConstMethod::codes_offset()), G3_scratch);
  add(G3_scratch, O5, G3_scratch);
  cmp(Lbcp, G3_scratch);
  brx(Assembler::equal, false, Assembler::pt, verify_continue);

  Register temp_reg = O5;
  delayed()->mov(ImethodDataPtr, temp_reg);
  // %%% should use call_VM_leaf here?
  //call_VM_leaf(noreg, ..., Lmethod, Lbcp, ImethodDataPtr);
  save_frame_and_mov(sizeof(jdouble) / wordSize, Lmethod, O0, Lbcp, O1);
  Address d_save(FP, -sizeof(jdouble) + STACK_BIAS);
  stf(FloatRegisterImpl::D, Ftos_d, d_save);
  mov(temp_reg->after_save(), O2);
  save_thread(L7_thread_cache);
  call(CAST_FROM_FN_PTR(address, InterpreterRuntime::verify_mdp), relocInfo::none);
  delayed()->nop();
  restore_thread(L7_thread_cache);
  ldf(FloatRegisterImpl::D, d_save, Ftos_d);
  restore();
  bind(verify_continue);
#endif // ASSERT
}

void InterpreterMacroAssembler::test_invocation_counter_for_mdp(Register invocation_count,
                                                                Register Rtmp,
                                                                Label &profile_continue) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  // Control will flow to "profile_continue" if the counter is less than the
  // limit or if we call profile_method()

  Label done;

  // if no method data exists, and the counter is high enough, make one
  br_notnull_short(ImethodDataPtr, Assembler::pn, done);

  // Test to see if we should create a method data oop
  AddressLiteral profile_limit((address) &InvocationCounter::InterpreterProfileLimit);
  sethi(profile_limit, Rtmp);
  ld(Rtmp, profile_limit.low10(), Rtmp);
  cmp_and_br_short(invocation_count, Rtmp, Assembler::lessUnsigned, Assembler::pn, profile_continue);

  // Build it now.
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
  set_method_data_pointer_for_bcp();
  ba_short(profile_continue);
  bind(done);
}

// Store a value at some constant offset from the method data pointer.

void InterpreterMacroAssembler::set_mdp_data_at(int constant, Register value) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  st_ptr(value, ImethodDataPtr, constant);
}
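// Note on the counter bump that follows: DataLayout::counter_increment is 1,
// so on increment the addcc/subc pair saturates at the maximum value rather
// than wrapping to zero (the carry produced by a wrap-around is immediately
// subtracted back out).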

void InterpreterMacroAssembler::increment_mdp_data_at(Address counter,
                                                      Register bumped_count,
                                                      bool decrement) {
  assert(ProfileInterpreter, "must be profiling interpreter");

  // Load the counter.
  ld_ptr(counter, bumped_count);

  if (decrement) {
    // Decrement the register.  Set condition codes.
    subcc(bumped_count, DataLayout::counter_increment, bumped_count);

    // If the decrement causes the counter to overflow, stay negative
    Label L;
    brx(Assembler::negative, true, Assembler::pn, L);

    // Store the decremented counter, if it is still negative.
    delayed()->st_ptr(bumped_count, counter);
    bind(L);
  } else {
    // Increment the register.  Set carry flag.
    addcc(bumped_count, DataLayout::counter_increment, bumped_count);

    // If the increment causes the counter to overflow, pull back by 1.
    assert(DataLayout::counter_increment == 1, "subc works");
    subc(bumped_count, G0, bumped_count);

    // Store the incremented counter.
    st_ptr(bumped_count, counter);
  }
}

// Increment the value at some constant offset from the method data pointer.

void InterpreterMacroAssembler::increment_mdp_data_at(int constant,
                                                      Register bumped_count,
                                                      bool decrement) {
  // Locate the counter at a fixed offset from the mdp:
  Address counter(ImethodDataPtr, constant);
  increment_mdp_data_at(counter, bumped_count, decrement);
}

// Increment the value at some non-fixed (reg + constant) offset from
// the method data pointer.

void InterpreterMacroAssembler::increment_mdp_data_at(Register reg,
                                                      int constant,
                                                      Register bumped_count,
                                                      Register scratch2,
                                                      bool decrement) {
  // Add the constant to reg to get the offset.
  add(ImethodDataPtr, reg, scratch2);
  Address counter(scratch2, constant);
  increment_mdp_data_at(counter, bumped_count, decrement);
}

// Set a flag value at the current method data pointer position.
// Updates a single byte of the header, to avoid races with other header bits.

void InterpreterMacroAssembler::set_mdp_flag_at(int flag_constant,
                                                Register scratch) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  // Load the data header
  ldub(ImethodDataPtr, in_bytes(DataLayout::flags_offset()), scratch);

  // Set the flag
  or3(scratch, flag_constant, scratch);

  // Store the modified header.
  stb(scratch, ImethodDataPtr, in_bytes(DataLayout::flags_offset()));
}

// Test the location at some offset from the method data pointer.
// If it is not equal to value, branch to the not_equal_continue Label.
// Set condition codes to match the nullness of the loaded value.

void InterpreterMacroAssembler::test_mdp_data_at(int offset,
                                                 Register value,
                                                 Label& not_equal_continue,
                                                 Register scratch) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  ld_ptr(ImethodDataPtr, offset, scratch);
  cmp(value, scratch);
  brx(Assembler::notEqual, false, Assembler::pn, not_equal_continue);
  delayed()->tst(scratch);
}

// Update the method data pointer by the displacement located at some fixed
// offset from the method data pointer.

void InterpreterMacroAssembler::update_mdp_by_offset(int offset_of_disp,
                                                     Register scratch) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  ld_ptr(ImethodDataPtr, offset_of_disp, scratch);
  add(ImethodDataPtr, scratch, ImethodDataPtr);
}

// Update the method data pointer by the displacement located at the
// offset (reg + offset_of_disp).

void InterpreterMacroAssembler::update_mdp_by_offset(Register reg,
                                                     int offset_of_disp,
                                                     Register scratch) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  add(reg, offset_of_disp, scratch);
  ld_ptr(ImethodDataPtr, scratch, scratch);
  add(ImethodDataPtr, scratch, ImethodDataPtr);
}

// Update the method data pointer by a simple constant displacement.

void InterpreterMacroAssembler::update_mdp_by_constant(int constant) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  add(ImethodDataPtr, constant, ImethodDataPtr);
}

// Update the method data pointer for a _ret bytecode whose target
// was not among our cached targets.

void InterpreterMacroAssembler::update_mdp_for_ret(TosState state,
                                                   Register return_bci) {
  assert(ProfileInterpreter, "must be profiling interpreter");
  push(state);
  st_ptr(return_bci, l_tmp);  // protect return_bci, in case it is volatile
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::update_mdp_for_ret), return_bci);
  ld_ptr(l_tmp, return_bci);
  pop(state);
}

// Count a taken branch in the bytecodes.

void InterpreterMacroAssembler::profile_taken_branch(Register scratch, Register bumped_count) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(profile_continue);

    // We are taking a branch.  Increment the taken count.
    increment_mdp_data_at(in_bytes(JumpData::taken_offset()), bumped_count);

    // The method data pointer needs to be updated to reflect the new target.
    update_mdp_by_offset(in_bytes(JumpData::displacement_offset()), scratch);
    bind (profile_continue);
  }
}


// Count a not-taken branch in the bytecodes.

void InterpreterMacroAssembler::profile_not_taken_branch(Register scratch) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(profile_continue);

    // We are taking a branch.  Increment the not taken count.
    increment_mdp_data_at(in_bytes(BranchData::not_taken_offset()), scratch);

    // The method data pointer needs to be updated to correspond to the
    // next bytecode.
    update_mdp_by_constant(in_bytes(BranchData::branch_data_size()));
    bind (profile_continue);
  }
}


// Count a non-virtual call in the bytecodes.

void InterpreterMacroAssembler::profile_call(Register scratch) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(profile_continue);

    // We are making a call.  Increment the count.
    increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);

    // The method data pointer needs to be updated to reflect the new target.
    update_mdp_by_constant(in_bytes(CounterData::counter_data_size()));
    bind (profile_continue);
  }
}
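// Each profile_* helper leaves ImethodDataPtr pointing at the profile cell
// for the *next* bytecode, either by a fixed delta (the size of this
// bytecode's data cell, e.g. CounterData::counter_data_size()) or, for
// control-transfer bytecodes, by a displacement stored in the cell itself.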

// Count a taken branch in the bytecodes.

void InterpreterMacroAssembler::profile_taken_branch(Register scratch, Register bumped_count) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(profile_continue);

    // We are taking a branch. Increment the taken count.
    increment_mdp_data_at(in_bytes(JumpData::taken_offset()), bumped_count);

    // The method data pointer needs to be updated to reflect the new target.
    update_mdp_by_offset(in_bytes(JumpData::displacement_offset()), scratch);
    bind (profile_continue);
  }
}


// Count a not-taken branch in the bytecodes.

void InterpreterMacroAssembler::profile_not_taken_branch(Register scratch) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(profile_continue);

    // We are not taking a branch. Increment the not taken count.
    increment_mdp_data_at(in_bytes(BranchData::not_taken_offset()), scratch);

    // The method data pointer needs to be updated to correspond to the
    // next bytecode.
    update_mdp_by_constant(in_bytes(BranchData::branch_data_size()));
    bind (profile_continue);
  }
}


// Count a non-virtual call in the bytecodes.

void InterpreterMacroAssembler::profile_call(Register scratch) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(profile_continue);

    // We are making a call. Increment the count.
    increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);

    // The method data pointer needs to be updated to reflect the new target.
    update_mdp_by_constant(in_bytes(CounterData::counter_data_size()));
    bind (profile_continue);
  }
}


// Count a final call in the bytecodes.

void InterpreterMacroAssembler::profile_final_call(Register scratch) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(profile_continue);

    // We are making a call. Increment the count.
    increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);

    // The method data pointer needs to be updated to reflect the new target.
    update_mdp_by_constant(in_bytes(VirtualCallData::virtual_call_data_size()));
    bind (profile_continue);
  }
}


// Count a virtual call in the bytecodes.

void InterpreterMacroAssembler::profile_virtual_call(Register receiver,
                                                     Register scratch,
                                                     bool receiver_can_be_null) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(profile_continue);

    Label skip_receiver_profile;
    if (receiver_can_be_null) {
      Label not_null;
      br_notnull_short(receiver, Assembler::pt, not_null);
      // We are making a call. Increment the count for null receiver.
      increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);
      ba_short(skip_receiver_profile);
      bind(not_null);
    }

    // Record the receiver type.
    record_klass_in_profile(receiver, scratch, true);
    bind(skip_receiver_profile);

    // The method data pointer needs to be updated to reflect the new target.
    update_mdp_by_constant(in_bytes(VirtualCallData::virtual_call_data_size()));
    bind (profile_continue);
  }
}
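
// record_klass_in_profile_helper below unrolls the receiver-row scan into
// straight-line code.  The net effect, sketched in C (illustrative only):
//
//   for (row = start_row; row <= last_row; row++) {
//     if (receiver_cell[row] == receiver) { count_cell[row]++; goto done; }
//   }
//   if (/* an empty (null) cell was seen during the scan */) {
//     receiver_cell[row] = receiver; count_cell[row] = 1; goto done;
//   }
//   if (is_virtual_call) total_count++;  // no match and no room: polymorphic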
void InterpreterMacroAssembler::record_klass_in_profile_helper(
                                        Register receiver, Register scratch,
                                        int start_row, Label& done, bool is_virtual_call) {
  if (TypeProfileWidth == 0) {
    if (is_virtual_call) {
      increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);
    }
    return;
  }

  int last_row = VirtualCallData::row_limit() - 1;
  assert(start_row <= last_row, "must be work left to do");
  // Test this row for both the receiver and for null.
  // Take any of three different outcomes:
  //   1. found receiver => increment count and goto done
  //   2. found null => keep looking for case 1, maybe allocate this cell
  //   3. found something else => keep looking for cases 1 and 2
  // Case 3 is handled by a recursive call.
  for (int row = start_row; row <= last_row; row++) {
    Label next_test;
    bool test_for_null_also = (row == start_row);

    // See if the receiver is receiver[n].
    int recvr_offset = in_bytes(VirtualCallData::receiver_offset(row));
    test_mdp_data_at(recvr_offset, receiver, next_test, scratch);
    // delayed()->tst(scratch);

    // The receiver is receiver[n]. Increment count[n].
    int count_offset = in_bytes(VirtualCallData::receiver_count_offset(row));
    increment_mdp_data_at(count_offset, scratch);
    ba_short(done);
    bind(next_test);

    if (test_for_null_also) {
      Label found_null;
      // Failed the equality check on receiver[n]... Test for null.
      if (start_row == last_row) {
        // The only thing left to do is handle the null case.
        if (is_virtual_call) {
          brx(Assembler::zero, false, Assembler::pn, found_null);
          delayed()->nop();
          // Receiver did not match any saved receiver and there is no empty row for it.
          // Increment total counter to indicate polymorphic case.
          increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);
          ba_short(done);
          bind(found_null);
        } else {
          brx(Assembler::notZero, false, Assembler::pt, done);
          delayed()->nop();
        }
        break;
      }
      // Since null is rare, make it be the branch-taken case.
      brx(Assembler::zero, false, Assembler::pn, found_null);
      delayed()->nop();

      // Put all the "Case 3" tests here.
      record_klass_in_profile_helper(receiver, scratch, start_row + 1, done, is_virtual_call);

      // Found a null. Keep searching for a matching receiver,
      // but remember that this is an empty (unused) slot.
      bind(found_null);
    }
  }

  // In the fall-through case, we found no matching receiver, but we
  // observed that receiver[start_row] is NULL.

  // Fill in the receiver field and increment the count.
  int recvr_offset = in_bytes(VirtualCallData::receiver_offset(start_row));
  set_mdp_data_at(recvr_offset, receiver);
  int count_offset = in_bytes(VirtualCallData::receiver_count_offset(start_row));
  mov(DataLayout::counter_increment, scratch);
  set_mdp_data_at(count_offset, scratch);
  if (start_row > 0) {
    ba_short(done);
  }
}

void InterpreterMacroAssembler::record_klass_in_profile(Register receiver,
                                                        Register scratch, bool is_virtual_call) {
  assert(ProfileInterpreter, "must be profiling");
  Label done;

  record_klass_in_profile_helper(receiver, scratch, 0, done, is_virtual_call);

  bind (done);
}


// Count a ret in the bytecodes.

void InterpreterMacroAssembler::profile_ret(TosState state,
                                            Register return_bci,
                                            Register scratch) {
  if (ProfileInterpreter) {
    Label profile_continue;
    uint row;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(profile_continue);

    // Update the total ret count.
    increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);

    for (row = 0; row < RetData::row_limit(); row++) {
      Label next_test;

      // See if return_bci is equal to bci[n]:
      test_mdp_data_at(in_bytes(RetData::bci_offset(row)),
                       return_bci, next_test, scratch);

      // return_bci is equal to bci[n]. Increment the count.
      increment_mdp_data_at(in_bytes(RetData::bci_count_offset(row)), scratch);

      // The method data pointer needs to be updated to reflect the new target.
      update_mdp_by_offset(in_bytes(RetData::bci_displacement_offset(row)), scratch);
      ba_short(profile_continue);
      bind(next_test);
    }

    update_mdp_for_ret(state, return_bci);

    bind (profile_continue);
  }
}
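
// profile_ret above treats the RetData rows as a small cache of known ret
// targets.  In rough C (illustrative only):
//
//   total_count++;
//   for (row = 0; row < RetData::row_limit(); row++) {
//     if (bci_cell[row] == return_bci) {
//       bci_count[row]++;
//       mdp += bci_displacement[row];   // step straight to the target's record
//       goto profile_continue;
//     }
//   }
//   update_mdp_for_ret(return_bci);     // cache miss: out-of-line VM call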

// Profile an unexpected null in the bytecodes.
void InterpreterMacroAssembler::profile_null_seen(Register scratch) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(profile_continue);

    set_mdp_flag_at(BitData::null_seen_byte_constant(), scratch);

    // The method data pointer needs to be updated.
    int mdp_delta = in_bytes(BitData::bit_data_size());
    if (TypeProfileCasts) {
      mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size());
    }
    update_mdp_by_constant(mdp_delta);

    bind (profile_continue);
  }
}

void InterpreterMacroAssembler::profile_typecheck(Register klass,
                                                  Register scratch) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(profile_continue);

    int mdp_delta = in_bytes(BitData::bit_data_size());
    if (TypeProfileCasts) {
      mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size());

      // Record the object type.
      record_klass_in_profile(klass, scratch, false);
    }

    // The method data pointer needs to be updated.
    update_mdp_by_constant(mdp_delta);

    bind (profile_continue);
  }
}

void InterpreterMacroAssembler::profile_typecheck_failed(Register scratch) {
  if (ProfileInterpreter && TypeProfileCasts) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(profile_continue);

    int count_offset = in_bytes(CounterData::count_offset());
    // Back up the address, since we have already bumped the mdp.
    count_offset -= in_bytes(VirtualCallData::virtual_call_data_size());

    // *Decrement* the counter. We expect to see zero or small negatives.
    increment_mdp_data_at(count_offset, scratch, true);

    bind (profile_continue);
  }
}

// Count the default case of a switch construct.

void InterpreterMacroAssembler::profile_switch_default(Register scratch) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(profile_continue);

    // Update the default case count
    increment_mdp_data_at(in_bytes(MultiBranchData::default_count_offset()),
                          scratch);

    // The method data pointer needs to be updated.
    update_mdp_by_offset(
        in_bytes(MultiBranchData::default_displacement_offset()),
        scratch);

    bind (profile_continue);
  }
}

// Count the index'th case of a switch construct.

void InterpreterMacroAssembler::profile_switch_case(Register index,
                                                    Register scratch,
                                                    Register scratch2,
                                                    Register scratch3) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(profile_continue);

    // Build the base (index * per_case_size_in_bytes()) + case_array_offset_in_bytes()
    set(in_bytes(MultiBranchData::per_case_size()), scratch);
    smul(index, scratch, scratch);
    add(scratch, in_bytes(MultiBranchData::case_array_offset()), scratch);

    // Update the case count
    increment_mdp_data_at(scratch,
                          in_bytes(MultiBranchData::relative_count_offset()),
                          scratch2,
                          scratch3);

    // The method data pointer needs to be updated.
    update_mdp_by_offset(scratch,
                         in_bytes(MultiBranchData::relative_displacement_offset()),
                         scratch2);

    bind (profile_continue);
  }
}
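
// profile_switch_case above first computes the byte offset of this case's
// row inside the MultiBranchData, then works relative to that row.  As C
// (illustrative only):
//
//   off  = index * MultiBranchData::per_case_size()
//        + MultiBranchData::case_array_offset();
//   *(intptr_t*)(mdp + off + relative_count_offset) += 1;
//   mdp += *(intptr_t*)(mdp + off + relative_displacement_offset);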

// Add an InterpMonitorElem to stack (see frame_sparc.hpp).

void InterpreterMacroAssembler::add_monitor_to_stack( bool stack_is_empty,
                                                      Register Rtemp,
                                                      Register Rtemp2 ) {

  Register Rlimit = Lmonitors;
  const jint delta = frame::interpreter_frame_monitor_size() * wordSize;
  assert( (delta & LongAlignmentMask) == 0,
          "sizeof BasicObjectLock must be even number of doublewords");

  sub( SP,        delta, SP);
  sub( Lesp,      delta, Lesp);
  sub( Lmonitors, delta, Lmonitors);

  if (!stack_is_empty) {

    // must copy stack contents down

    Label start_copying, next;

    // untested("monitor stack expansion");
    compute_stack_base(Rtemp);
    ba(start_copying);
    delayed()->cmp(Rtemp, Rlimit); // done? duplicated below

    // note: must copy from low memory upwards
    // On entry to loop,
    // Rtemp points to new base of stack, Lesp points to new end of stack (1 past TOS)
    // Loop mutates Rtemp

    bind( next);

    st_ptr(Rtemp2, Rtemp, 0);
    inc(Rtemp, wordSize);
    cmp(Rtemp, Rlimit); // are we done? (duplicated above)

    bind( start_copying );

    brx( notEqual, true, pn, next );
    delayed()->ld_ptr( Rtemp, delta, Rtemp2 );

    // done copying stack
  }
}

// Locals
void InterpreterMacroAssembler::access_local_ptr( Register index, Register dst ) {
  assert_not_delayed();
  sll(index, Interpreter::logStackElementSize, index);
  sub(Llocals, index, index);
  ld_ptr(index, 0, dst);
  // Note: index must hold the effective address--the iinc template uses it
}

// Just like access_local_ptr but the tag is a returnAddress
void InterpreterMacroAssembler::access_local_returnAddress(Register index,
                                                           Register dst ) {
  assert_not_delayed();
  sll(index, Interpreter::logStackElementSize, index);
  sub(Llocals, index, index);
  ld_ptr(index, 0, dst);
}

void InterpreterMacroAssembler::access_local_int( Register index, Register dst ) {
  assert_not_delayed();
  sll(index, Interpreter::logStackElementSize, index);
  sub(Llocals, index, index);
  ld(index, 0, dst);
  // Note: index must hold the effective address--the iinc template uses it
}


void InterpreterMacroAssembler::access_local_long( Register index, Register dst ) {
  assert_not_delayed();
  sll(index, Interpreter::logStackElementSize, index);
  sub(Llocals, index, index);
  // First half stored at index n+1 (which grows down from Llocals[n])
  load_unaligned_long(index, Interpreter::local_offset_in_bytes(1), dst);
}


void InterpreterMacroAssembler::access_local_float( Register index, FloatRegister dst ) {
  assert_not_delayed();
  sll(index, Interpreter::logStackElementSize, index);
  sub(Llocals, index, index);
  ldf(FloatRegisterImpl::S, index, 0, dst);
}


void InterpreterMacroAssembler::access_local_double( Register index, FloatRegister dst ) {
  assert_not_delayed();
  sll(index, Interpreter::logStackElementSize, index);
  sub(Llocals, index, index);
  load_unaligned_double(index, Interpreter::local_offset_in_bytes(1), dst);
}
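
// The access_local_* helpers above all form the same effective address:
// locals are indexed downward from Llocals.  In C terms (illustrative only):
//
//   addr = Llocals - (index << Interpreter::logStackElementSize);
//
// Two-word values (long/double) are accessed at local_offset_in_bytes(1),
// i.e. relative to slot n+1, because the second slot grows toward lower
// addresses.  The store_local_* helpers below use the identical addressing.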

#ifdef ASSERT
void InterpreterMacroAssembler::check_for_regarea_stomp(Register Rindex, int offset, Register Rlimit,
                                                        Register Rscratch, Register Rscratch1) {
  Label L;

  assert(Rindex != Rscratch, "Registers cannot be same");
  assert(Rindex != Rscratch1, "Registers cannot be same");
  assert(Rlimit != Rscratch, "Registers cannot be same");
  assert(Rlimit != Rscratch1, "Registers cannot be same");
  assert(Rscratch1 != Rscratch, "Registers cannot be same");

  // untested("reg area corruption");
  add(Rindex, offset, Rscratch);
  add(Rlimit, 64 + STACK_BIAS, Rscratch1);
  cmp_and_brx_short(Rscratch, Rscratch1, Assembler::greaterEqualUnsigned, pn, L);
  stop("regsave area is being clobbered");
  bind(L);
}
#endif // ASSERT


void InterpreterMacroAssembler::store_local_int( Register index, Register src ) {
  assert_not_delayed();
  sll(index, Interpreter::logStackElementSize, index);
  sub(Llocals, index, index);
  debug_only(check_for_regarea_stomp(index, 0, FP, G1_scratch, G4_scratch);)
  st(src, index, 0);
}

void InterpreterMacroAssembler::store_local_ptr( Register index, Register src ) {
  assert_not_delayed();
  sll(index, Interpreter::logStackElementSize, index);
  sub(Llocals, index, index);
#ifdef ASSERT
  check_for_regarea_stomp(index, 0, FP, G1_scratch, G4_scratch);
#endif
  st_ptr(src, index, 0);
}


void InterpreterMacroAssembler::store_local_ptr( int n, Register src ) {
  st_ptr(src, Llocals, Interpreter::local_offset_in_bytes(n));
}

void InterpreterMacroAssembler::store_local_long( Register index, Register src ) {
  assert_not_delayed();
  sll(index, Interpreter::logStackElementSize, index);
  sub(Llocals, index, index);
#ifdef ASSERT
  check_for_regarea_stomp(index, Interpreter::local_offset_in_bytes(1), FP, G1_scratch, G4_scratch);
#endif
  store_unaligned_long(src, index, Interpreter::local_offset_in_bytes(1)); // which is n+1
}


void InterpreterMacroAssembler::store_local_float( Register index, FloatRegister src ) {
  assert_not_delayed();
  sll(index, Interpreter::logStackElementSize, index);
  sub(Llocals, index, index);
#ifdef ASSERT
  check_for_regarea_stomp(index, 0, FP, G1_scratch, G4_scratch);
#endif
  stf(FloatRegisterImpl::S, src, index, 0);
}


void InterpreterMacroAssembler::store_local_double( Register index, FloatRegister src ) {
  assert_not_delayed();
  sll(index, Interpreter::logStackElementSize, index);
  sub(Llocals, index, index);
#ifdef ASSERT
  check_for_regarea_stomp(index, Interpreter::local_offset_in_bytes(1), FP, G1_scratch, G4_scratch);
#endif
  store_unaligned_double(src, index, Interpreter::local_offset_in_bytes(1));
}


int InterpreterMacroAssembler::top_most_monitor_byte_offset() {
  const jint delta = frame::interpreter_frame_monitor_size() * wordSize;
  int rounded_vm_local_words = ::round_to(frame::interpreter_frame_vm_local_words, WordsPerLong);
  return ((-rounded_vm_local_words * wordSize) - delta) + STACK_BIAS;
}


Address InterpreterMacroAssembler::top_most_monitor() {
  return Address(FP, top_most_monitor_byte_offset());
}


void InterpreterMacroAssembler::compute_stack_base( Register Rdest ) {
  add( Lesp, wordSize, Rdest );
}

#endif /* CC_INTERP */
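
// increment_invocation_counter below maintains the combined "hotness" sum
// that the compilation threshold checks consume.  Roughly, in C
// (illustrative only):
//
//   inv = *inv_counter + InvocationCounter::count_increment;
//   *inv_counter = inv;                  // store the bumped invocation counter
//   Rtmp = inv + (*be_counter & InvocationCounter::count_mask_value);
//   // callers rely on Rtmp == invocation_count + backedge_count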

void InterpreterMacroAssembler::increment_invocation_counter( Register Rtmp, Register Rtmp2 ) {
  assert(UseCompiler, "incrementing must be useful");
#ifdef CC_INTERP
  Address inv_counter(G5_method, Method::invocation_counter_offset() +
                                 InvocationCounter::counter_offset());
  Address be_counter (G5_method, Method::backedge_counter_offset() +
                                 InvocationCounter::counter_offset());
#else
  Address inv_counter(Lmethod, Method::invocation_counter_offset() +
                               InvocationCounter::counter_offset());
  Address be_counter (Lmethod, Method::backedge_counter_offset() +
                               InvocationCounter::counter_offset());
#endif /* CC_INTERP */
  int delta = InvocationCounter::count_increment;

  // Load each counter in a register
  ld( inv_counter, Rtmp  );
  ld( be_counter,  Rtmp2 );

  assert( is_simm13( delta ), "delta too large.");

  // Add the delta to the invocation counter and store the result
  add( Rtmp, delta, Rtmp );

  // Mask the backedge counter
  and3( Rtmp2, InvocationCounter::count_mask_value, Rtmp2 );

  // Store value
  st( Rtmp, inv_counter);

  // Add invocation counter + backedge counter
  add( Rtmp, Rtmp2, Rtmp);

  // Note that this macro must leave the backedge_count + invocation_count in Rtmp!
}

void InterpreterMacroAssembler::increment_backedge_counter( Register Rtmp, Register Rtmp2 ) {
  assert(UseCompiler, "incrementing must be useful");
#ifdef CC_INTERP
  Address be_counter (G5_method, Method::backedge_counter_offset() +
                                 InvocationCounter::counter_offset());
  Address inv_counter(G5_method, Method::invocation_counter_offset() +
                                 InvocationCounter::counter_offset());
#else
  Address be_counter (Lmethod, Method::backedge_counter_offset() +
                               InvocationCounter::counter_offset());
  Address inv_counter(Lmethod, Method::invocation_counter_offset() +
                               InvocationCounter::counter_offset());
#endif /* CC_INTERP */
  int delta = InvocationCounter::count_increment;
  // Load each counter in a register
  ld( be_counter,  Rtmp  );
  ld( inv_counter, Rtmp2 );

  // Add the delta to the backedge counter
  add( Rtmp, delta, Rtmp );

  // Mask the invocation counter, add to backedge counter
  and3( Rtmp2, InvocationCounter::count_mask_value, Rtmp2 );

  // and store the result to memory
  st( Rtmp, be_counter );

  // Add backedge + invocation counter
  add( Rtmp, Rtmp2, Rtmp );

  // Note that this macro must leave backedge_count + invocation_count in Rtmp!
}
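
// test_backedge_count_for_osr below gates the expensive overflow call.  The
// test it emits is, in rough C (illustrative only):
//
//   if (backedge_count < InterpreterBackwardBranchLimit) keep_interpreting;
//   if (ProfileInterpreter && (backedge_count & (1024 - 1)) != 0) keep_interpreting;
//   // otherwise call frequency_counter_overflow() and maybe enter OSR code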

#ifndef CC_INTERP
void InterpreterMacroAssembler::test_backedge_count_for_osr( Register backedge_count,
                                                             Register branch_bcp,
                                                             Register Rtmp ) {
  Label did_not_overflow;
  Label overflow_with_error;
  assert_different_registers(backedge_count, Rtmp, branch_bcp);
  assert(UseOnStackReplacement, "Must UseOnStackReplacement to test_backedge_count_for_osr");

  AddressLiteral limit(&InvocationCounter::InterpreterBackwardBranchLimit);
  load_contents(limit, Rtmp);
  cmp_and_br_short(backedge_count, Rtmp, Assembler::lessUnsigned, Assembler::pt, did_not_overflow);

  // When ProfileInterpreter is on, the backedge_count comes from the
  // MethodData*, whose value does not get reset on the call to
  // frequency_counter_overflow(). To avoid excessive calls to the overflow
  // routine while the method is being compiled, add a second test to make sure
  // the overflow function is called only once every overflow_frequency.
  if (ProfileInterpreter) {
    const int overflow_frequency = 1024;
    andcc(backedge_count, overflow_frequency - 1, Rtmp);
    brx(Assembler::notZero, false, Assembler::pt, did_not_overflow);
    delayed()->nop();
  }

  // overflow in loop, pass branch bytecode
  set(6, Rtmp);
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), branch_bcp, Rtmp);

  // Was an OSR adapter generated?
  // O0 = osr nmethod
  br_null_short(O0, Assembler::pn, overflow_with_error);

  // Has the nmethod been invalidated already?
  ld(O0, nmethod::entry_bci_offset(), O2);
  cmp_and_br_short(O2, InvalidOSREntryBci, Assembler::equal, Assembler::pn, overflow_with_error);

  // migrate the interpreter frame off of the stack

  mov(G2_thread, L7);
  // save nmethod
  mov(O0, L6);
  set_last_Java_frame(SP, noreg);
  call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin), L7);
  reset_last_Java_frame();
  mov(L7, G2_thread);

  // move OSR nmethod to I1
  mov(L6, I1);

  // OSR buffer to I0
  mov(O0, I0);

  // remove the interpreter frame
  restore(I5_savedSP, 0, SP);

  // Jump to the osr code.
  ld_ptr(O1, nmethod::osr_entry_point_offset(), O2);
  jmp(O2, G0);
  delayed()->nop();

  bind(overflow_with_error);

  bind(did_not_overflow);
}
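
// The OSR hand-off above, as an outline (illustrative only):
//
//   nm = InterpreterRuntime::frequency_counter_overflow(branch_bcp);
//   if (nm == NULL || nm->entry_bci == InvalidOSREntryBci) keep interpreting;
//   buf = SharedRuntime::OSR_migration_begin(thread);  // locals/monitors off-stack
//   pop the interpreter frame;                         // restore(I5_savedSP, 0, SP)
//   jump to the nmethod's osr_entry_point with the buffer in O0;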


void InterpreterMacroAssembler::interp_verify_oop(Register reg, TosState state, const char * file, int line) {
  if (state == atos) { MacroAssembler::_verify_oop(reg, "broken oop ", file, line); }
}


// local helper function for the verify_oop_or_return_address macro
static bool verify_return_address(Method* m, int bci) {
#ifndef PRODUCT
  address pc = (address)(m->constMethod()) +
               in_bytes(ConstMethod::codes_offset()) + bci;
  // assume it is a valid return address if it is inside m and is preceded by a jsr
  if (!m->contains(pc))                                         return false;
  address jsr_pc;
  jsr_pc = pc - Bytecodes::length_for(Bytecodes::_jsr);
  if (*jsr_pc == Bytecodes::_jsr && jsr_pc >= m->code_base())   return true;
  jsr_pc = pc - Bytecodes::length_for(Bytecodes::_jsr_w);
  if (*jsr_pc == Bytecodes::_jsr_w && jsr_pc >= m->code_base()) return true;
#endif // PRODUCT
  return false;
}


void InterpreterMacroAssembler::verify_oop_or_return_address(Register reg, Register Rtmp) {
  if (!VerifyOops) return;
  // the VM documentation for the astore[_wide] bytecode allows
  // the TOS to be not only an oop but also a return address
  Label test;
  Label skip;
  // See if it is an address (in the current method):

  mov(reg, Rtmp);
  const int log2_bytecode_size_limit = 16;
  srl(Rtmp, log2_bytecode_size_limit, Rtmp);
  br_notnull_short( Rtmp, pt, test );

  // %%% should use call_VM_leaf here?
  save_frame_and_mov(0, Lmethod, O0, reg, O1);
  save_thread(L7_thread_cache);
  call(CAST_FROM_FN_PTR(address, verify_return_address), relocInfo::none);
  delayed()->nop();
  restore_thread(L7_thread_cache);
  br_notnull( O0, false, pt, skip );
  delayed()->restore();

  // Perform a more elaborate out-of-line call
  // Not an address; verify it:
  bind(test);
  verify_oop(reg);
  bind(skip);
}


void InterpreterMacroAssembler::verify_FPU(int stack_depth, TosState state) {
  if (state == ftos || state == dtos) MacroAssembler::verify_FPU(stack_depth);
}
#endif /* CC_INTERP */

// Inline assembly for:
//
// if (thread is in interp_only_mode) {
//   InterpreterRuntime::post_method_entry();
// }
// if (DTraceMethodProbes) {
//   SharedRuntime::dtrace_method_entry(method, receiver);
// }
// if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) {
//   SharedRuntime::rc_trace_method_entry(method, receiver);
// }

void InterpreterMacroAssembler::notify_method_entry() {

  // C++ interpreter only uses this for native methods.

  // Whenever JVMTI puts a thread in interp_only_mode, method
  // entry/exit events are sent for that thread to track stack
  // depth. If it is possible to enter interp_only_mode we add
  // the code to check if the event should be sent.
  if (JvmtiExport::can_post_interpreter_events()) {
    Label L;
    Register temp_reg = O5;
    const Address interp_only(G2_thread, JavaThread::interp_only_mode_offset());
    ld(interp_only, temp_reg);
    cmp_and_br_short(temp_reg, 0, equal, pt, L);
    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_entry));
    bind(L);
  }

  {
    Register temp_reg = O5;
    SkipIfEqual skip_if(this, temp_reg, &DTraceMethodProbes, zero);
    call_VM_leaf(noreg,
                 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
                 G2_thread, Lmethod);
  }

  // RedefineClasses() tracing support for obsolete method entry
  if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) {
    call_VM_leaf(noreg,
                 CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
                 G2_thread, Lmethod);
  }
}
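
// notify_method_exit below must keep the method result alive across the
// runtime calls.  save_return_value/restore_return_value (defined after it)
// park the result as follows -- a rough sketch (illustrative only):
//
//   if (is_native_call) { l_tmp = O0; d_tmp = F0; }  // stx/std + stf into the frame
//   else                { push(state); }             // Java results ride the
//                                                    // expression stack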

// Inline assembly for:
//
// if (thread is in interp_only_mode) {
//   // save result
//   InterpreterRuntime::post_method_exit();
//   // restore result
// }
// if (DTraceMethodProbes) {
//   SharedRuntime::dtrace_method_exit(thread, method);
// }
//
// Native methods have their result stored in d_tmp and l_tmp
// Java methods have their result stored in the expression stack

void InterpreterMacroAssembler::notify_method_exit(bool is_native_method,
                                                   TosState state,
                                                   NotifyMethodExitMode mode) {
  // C++ interpreter only uses this for native methods.

  // Whenever JVMTI puts a thread in interp_only_mode, method
  // entry/exit events are sent for that thread to track stack
  // depth. If it is possible to enter interp_only_mode we add
  // the code to check if the event should be sent.
  if (mode == NotifyJVMTI && JvmtiExport::can_post_interpreter_events()) {
    Label L;
    Register temp_reg = O5;
    const Address interp_only(G2_thread, JavaThread::interp_only_mode_offset());
    ld(interp_only, temp_reg);
    cmp_and_br_short(temp_reg, 0, equal, pt, L);

    // Note: frame::interpreter_frame_result has a dependency on how the
    // method result is saved across the call to post_method_exit. For
    // native methods it assumes the result registers are saved to
    // l_scratch and d_scratch. If this changes then the interpreter_frame_result
    // implementation will need to be updated too.

    save_return_value(state, is_native_method);
    call_VM(noreg,
            CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_exit));
    restore_return_value(state, is_native_method);
    bind(L);
  }

  {
    Register temp_reg = O5;
    // Dtrace notification
    SkipIfEqual skip_if(this, temp_reg, &DTraceMethodProbes, zero);
    save_return_value(state, is_native_method);
    call_VM_leaf(
      noreg,
      CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
      G2_thread, Lmethod);
    restore_return_value(state, is_native_method);
  }
}

void InterpreterMacroAssembler::save_return_value(TosState state, bool is_native_call) {
#ifdef CC_INTERP
  // result potentially in O0/O1: save it across calls
  stf(FloatRegisterImpl::D, F0, STATE(_native_fresult));
#ifdef _LP64
  stx(O0, STATE(_native_lresult));
#else
  std(O0, STATE(_native_lresult));
#endif
#else // CC_INTERP
  if (is_native_call) {
    stf(FloatRegisterImpl::D, F0, d_tmp);
#ifdef _LP64
    stx(O0, l_tmp);
#else
    std(O0, l_tmp);
#endif
  } else {
    push(state);
  }
#endif // CC_INTERP
}

void InterpreterMacroAssembler::restore_return_value( TosState state, bool is_native_call) {
#ifdef CC_INTERP
  ldf(FloatRegisterImpl::D, STATE(_native_fresult), F0);
#ifdef _LP64
  ldx(STATE(_native_lresult), O0);
#else
  ldd(STATE(_native_lresult), O0);
#endif
#else // CC_INTERP
  if (is_native_call) {
    ldf(FloatRegisterImpl::D, d_tmp, F0);
#ifdef _LP64
    ldx(l_tmp, O0);
#else
    ldd(l_tmp, O0);
#endif
  } else {
    pop(state);
  }
#endif // CC_INTERP
}

// Jump if ((*counter_addr += increment) & mask) satisfies the condition.
void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr,
                                                        int increment, int mask,
                                                        Register scratch1, Register scratch2,
                                                        Condition cond, Label *where) {
  ld(counter_addr, scratch1);
  add(scratch1, increment, scratch1);
  if (is_simm13(mask)) {
    andcc(scratch1, mask, G0);
  } else {
    set(mask, scratch2);
    andcc(scratch1, scratch2, G0);
  }
  br(cond, false, Assembler::pn, *where);
  delayed()->st(scratch1, counter_addr);
}
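
// increment_mask_and_jump above is exactly its headline comment in C form
// (illustrative only).  The store sits in a non-annulled delay slot, so the
// bumped value is written back whether or not the branch is taken:
//
//   int c = *counter_addr + increment;
//   *counter_addr = c;
//   if (/* (c & mask) satisfies cond against zero */) goto *where;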