templateInterpreterGenerator_arm.cpp revision 12993:a8503d22944f
/*
 * Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/templateInterpreterGenerator.hpp"
#include "interpreter/templateTable.hpp"
#include "oops/arrayOop.hpp"
#include "oops/methodData.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/arguments.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/timer.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/debug.hpp"
#include "utilities/macros.hpp"

// Size of interpreter code. Increase if too small. Interpreter will
// fail with a guarantee ("not enough space for interpreter generation")
// if too small.
// Run with +PrintInterpreter to get the VM to print out the size.
// Max size with JVMTI
int TemplateInterpreter::InterpreterCodeSize = 180 * 1024;

#define __ _masm->

//------------------------------------------------------------------------------------------------------------------------

address TemplateInterpreterGenerator::generate_slow_signature_handler() {
  address entry = __ pc();

  // callee-save register for saving LR, shared with generate_native_entry
  const Register Rsaved_ret_addr = AARCH64_ONLY(R21) NOT_AARCH64(Rtmp_save0);

  __ mov(Rsaved_ret_addr, LR);

  __ mov(R1, Rmethod);
  __ mov(R2, Rlocals);
  __ mov(R3, SP);

#ifdef AARCH64
  // expand expr. stack and extended SP to avoid cutting SP in call_VM
  __ mov(Rstack_top, SP);
  __ str(Rstack_top, Address(FP, frame::interpreter_frame_extended_sp_offset * wordSize));
  __ check_stack_top();

  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::slow_signature_handler), R1, R2, R3, false);

  __ ldp(ZR,      c_rarg1, Address(SP, 2*wordSize, post_indexed));
  __ ldp(c_rarg2, c_rarg3, Address(SP, 2*wordSize, post_indexed));
  __ ldp(c_rarg4, c_rarg5, Address(SP, 2*wordSize, post_indexed));
  __ ldp(c_rarg6, c_rarg7, Address(SP, 2*wordSize, post_indexed));

  __ ldp_d(V0, V1, Address(SP, 2*wordSize, post_indexed));
  __ ldp_d(V2, V3, Address(SP, 2*wordSize, post_indexed));
  __ ldp_d(V4, V5, Address(SP, 2*wordSize, post_indexed));
  __ ldp_d(V6, V7, Address(SP, 2*wordSize, post_indexed));
#else

  // Safer to save R9 (when scratched) since callers may have been
  // written assuming R9 survives. This is suboptimal but
  // probably not important for this slow case call site.
  // Note for R9 saving: slow_signature_handler may copy register
  // arguments above the current SP (passed as R3). It is safe for
  // call_VM to use push and pop to protect additional values on the
  // stack if needed.
  __ call_VM(CAST_FROM_FN_PTR(address, InterpreterRuntime::slow_signature_handler), true /* save R9 if needed*/);
  __ add(SP, SP, wordSize);     // Skip R0
  __ pop(RegisterSet(R1, R3));  // Load arguments passed in registers
#ifdef __ABI_HARD__
  // Few alternatives to an always-load-FP-registers approach:
  // - parse method signature to detect FP arguments
  // - keep a counter/flag on a stack indicating number of FP arguments in the method.
  // The latter was originally implemented and tested but a conditional path could
  // eliminate any gain imposed by avoiding 8 double word loads.
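  // Load D0..D7 in one go from the stack area filled in by
  // slow_signature_handler (the always-load approach described above).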
  __ fldmiad(SP, FloatRegisterSet(D0, 8), writeback);
#endif // __ABI_HARD__
#endif // AARCH64

  __ ret(Rsaved_ret_addr);

  return entry;
}


//
// Various method entries (that c++ and asm interpreter agree upon)
//------------------------------------------------------------------------------------------------------------------------
//
//

// Abstract method entry
// Attempt to execute abstract method. Throw exception
address TemplateInterpreterGenerator::generate_abstract_entry(void) {
  address entry_point = __ pc();

#ifdef AARCH64
  __ restore_sp_after_call(Rtemp);
  __ restore_stack_top();
#endif

  __ empty_expression_stack();

  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));

  DEBUG_ONLY(STOP("generate_abstract_entry");) // Should not reach here
  return entry_point;
}

address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKind kind) {
  if (!InlineIntrinsics) return NULL; // Generate a vanilla entry

  // TODO: ARM
  return NULL;

  address entry_point = __ pc();
  STOP("generate_math_entry");
  return entry_point;
}

address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
  address entry = __ pc();

  // Note: There should be a minimal interpreter frame set up when stack
  // overflow occurs since we check explicitly for it now.
159202870Sjoerg // 160202870Sjoerg#ifdef ASSERT 161202870Sjoerg { Label L; 162202870Sjoerg __ sub(Rtemp, FP, - frame::interpreter_frame_monitor_block_top_offset * wordSize); 163202870Sjoerg __ cmp(SP, Rtemp); // Rtemp = maximal SP for current FP, 164202870Sjoerg // (stack grows negative) 165202870Sjoerg __ b(L, ls); // check if frame is complete 166202870Sjoerg __ stop ("interpreter frame not set up"); 167202870Sjoerg __ bind(L); 168202870Sjoerg } 169202870Sjoerg#endif // ASSERT 170150525Sphk 171202870Sjoerg // Restore bcp under the assumption that the current frame is still 172202870Sjoerg // interpreted 173150525Sphk __ restore_bcp(); 174202870Sjoerg 175150525Sphk // expression stack must be empty before entering the VM if an exception 176202870Sjoerg // happened 177230134Suqs __ empty_expression_stack(); 178202870Sjoerg 179202870Sjoerg // throw exception 180202870Sjoerg __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_StackOverflowError)); 181141121Sphk 182202870Sjoerg __ should_not_reach_here(); 183150525Sphk 184150525Sphk return entry; 185150525Sphk} 186202870Sjoerg 187150525Sphkaddress TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler(const char* name) { 188202870Sjoerg address entry = __ pc(); 189202870Sjoerg 190141121Sphk // index is in R4_ArrayIndexOutOfBounds_index 191141121Sphk 192150525Sphk InlinedString Lname(name); 193202870Sjoerg 194202870Sjoerg // expression stack must be empty before entering the VM if an exception happened 195150525Sphk __ empty_expression_stack(); 196150525Sphk 197202870Sjoerg // setup parameters 198202870Sjoerg __ ldr_literal(R1, Lname); 199202870Sjoerg __ mov(R2, R4_ArrayIndexOutOfBounds_index); 200150525Sphk 201141121Sphk __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ArrayIndexOutOfBoundsException), R1, R2); 202141121Sphk 203141121Sphk __ nop(); // to avoid filling CPU pipeline with invalid instructions 204141121Sphk __ nop(); 205141121Sphk __ should_not_reach_here(); 206141121Sphk __ bind_literal(Lname); 207141121Sphk 208202870Sjoerg return entry; 209141121Sphk} 210141121Sphk 211150525Sphkaddress TemplateInterpreterGenerator::generate_ClassCastException_handler() { 212141121Sphk address entry = __ pc(); 213141121Sphk 214141121Sphk // object is in R2_ClassCastException_obj 215141121Sphk 216141121Sphk // expression stack must be empty before entering the VM if an exception 217141121Sphk // happened 218141121Sphk __ empty_expression_stack(); 219202870Sjoerg 220202870Sjoerg __ mov(R1, R2_ClassCastException_obj); 221202870Sjoerg __ call_VM(noreg, 222202870Sjoerg CAST_FROM_FN_PTR(address, 223202870Sjoerg InterpreterRuntime::throw_ClassCastException), 224150525Sphk R1); 225150525Sphk 226150525Sphk __ should_not_reach_here(); 227150525Sphk 228150525Sphk return entry; 229166914Simp} 230150525Sphk 231150525Sphkaddress TemplateInterpreterGenerator::generate_exception_handler_common(const char* name, const char* message, bool pass_oop) { 232150525Sphk assert(!pass_oop || message == NULL, "either oop or message but not both"); 233150525Sphk address entry = __ pc(); 234150525Sphk 235150525Sphk InlinedString Lname(name); 236141121Sphk InlinedString Lmessage(message); 237202870Sjoerg 238202898Sjoerg if (pass_oop) { 239141121Sphk // object is at TOS 240202870Sjoerg __ pop_ptr(R2); 241203360Sjoerg } 242150525Sphk 243150525Sphk // expression stack must be empty before entering the VM if an exception happened 244150525Sphk __ empty_expression_stack(); 245150525Sphk 246150525Sphk // setup parameters 
  __ ldr_literal(R1, Lname);

  if (pass_oop) {
    __ call_VM(Rexception_obj, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_klass_exception), R1, R2);
  } else {
    if (message != NULL) {
      __ ldr_literal(R2, Lmessage);
    } else {
      __ mov(R2, 0);
    }
    __ call_VM(Rexception_obj, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception), R1, R2);
  }

  // throw exception
  __ b(Interpreter::throw_exception_entry());

  __ nop(); // to avoid filling CPU pipeline with invalid instructions
  __ nop();
  __ bind_literal(Lname);
  if (!pass_oop && (message != NULL)) {
    __ bind_literal(Lmessage);
  }

  return entry;
}

address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
  address entry = __ pc();

  __ interp_verify_oop(R0_tos, state, __FILE__, __LINE__);

#ifdef AARCH64
  __ restore_sp_after_call(Rtemp); // Restore SP to extended SP
  __ restore_stack_top();
#else
  // Restore stack bottom in case i2c adjusted stack
  __ ldr(SP, Address(FP, frame::interpreter_frame_last_sp_offset * wordSize));
  // and NULL it as marker that SP is now tos until next java call
  __ mov(Rtemp, (int)NULL_WORD);
  __ str(Rtemp, Address(FP, frame::interpreter_frame_last_sp_offset * wordSize));
#endif // AARCH64

  __ restore_method();
  __ restore_bcp();
  __ restore_dispatch();
  __ restore_locals();

  const Register Rcache = R2_tmp;
  const Register Rindex = R3_tmp;
  __ get_cache_and_index_at_bcp(Rcache, Rindex, 1, index_size);

  __ add(Rtemp, Rcache, AsmOperand(Rindex, lsl, LogBytesPerWord));
  __ ldrb(Rtemp, Address(Rtemp, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
  __ check_stack_top();
  __ add(Rstack_top, Rstack_top, AsmOperand(Rtemp, lsl, Interpreter::logStackElementSize));

#ifndef AARCH64
  __ convert_retval_to_tos(state);
#endif // !AARCH64

  __ check_and_handle_popframe();
  __ check_and_handle_earlyret();

  __ dispatch_next(state, step);

  return entry;
}


address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, int step) {
  address entry = __ pc();

  __ interp_verify_oop(R0_tos, state, __FILE__, __LINE__);

#ifdef AARCH64
  __ restore_sp_after_call(Rtemp); // Restore SP to extended SP
  __ restore_stack_top();
#else
  // The stack is not extended by deopt but we must NULL last_sp as this
  // entry is like a "return".
  __ mov(Rtemp, 0);
  __ str(Rtemp, Address(FP, frame::interpreter_frame_last_sp_offset * wordSize));
#endif // AARCH64

  __ restore_method();
  __ restore_bcp();
  __ restore_dispatch();
  __ restore_locals();

  // handle exceptions
  { Label L;
    __ ldr(Rtemp, Address(Rthread, Thread::pending_exception_offset()));
    __ cbz(Rtemp, L);
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }

  __ dispatch_next(state, step);

  return entry;
}

address TemplateInterpreterGenerator::generate_result_handler_for(BasicType type) {
#ifdef AARCH64
  address entry = __ pc();
  switch (type) {
  case T_BOOLEAN:
    __ tst(R0, 0xff);
    __ cset(R0, ne);
    break;
  case T_CHAR   : __ zero_extend(R0, R0, 16);  break;
  case T_BYTE   : __ sign_extend(R0, R0,  8);  break;
  case T_SHORT  : __ sign_extend(R0, R0, 16);  break;
  case T_INT    : // fall through
  case T_LONG   : // fall through
  case T_VOID   : // fall through
  case T_FLOAT  : // fall through
  case T_DOUBLE : /* nothing to do */          break;
  case T_OBJECT :
    // retrieve result from frame
    __ ldr(R0, Address(FP, frame::interpreter_frame_oop_temp_offset * wordSize));
    // and verify it
    __ verify_oop(R0);
    break;
  default       : ShouldNotReachHere();
  }
  __ ret();
  return entry;
#else
  // Result handlers are not used on 32-bit ARM
  // since the returned value is already in appropriate format.
  __ should_not_reach_here();  // to avoid empty code block

  // A non-zero result handler indicates that an object is returned;
  // this is used in the native entry code.
  return type == T_OBJECT ? (address)(-1) : NULL;
#endif // AARCH64
}

address TemplateInterpreterGenerator::generate_safept_entry_for(TosState state, address runtime_entry) {
  address entry = __ pc();
  __ push(state);
  __ call_VM(noreg, runtime_entry);

  // load current bytecode
  __ ldrb(R3_bytecode, Address(Rbcp));
  __ dispatch_only_normal(vtos);
  return entry;
}


// Helpers for commoning out cases in the various type of method entries.
//

// increment invocation count & check for overflow
//
// Note: checking for negative value instead of overflow
//       so we have a 'sticky' overflow test
//
// In: Rmethod.
//
// Uses R0, R1, Rtemp.
//
void TemplateInterpreterGenerator::generate_counter_incr(Label* overflow,
                                                         Label* profile_method,
                                                         Label* profile_method_continue) {
  Label done;
  const Register Rcounters = Rtemp;
  const Address invocation_counter(Rcounters,
                                   MethodCounters::invocation_counter_offset() +
                                   InvocationCounter::counter_offset());

  // Note: In tiered we increment either counters in MethodCounters* or
  // in MDO depending if we're profiling or not.
  if (TieredCompilation) {
    int increment = InvocationCounter::count_increment;
    Label no_mdo;
    if (ProfileInterpreter) {
      // Are we profiling?
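      // R1_tmp = Rmethod->method_data(); NULL means no MDO has been
      // allocated yet, so fall back to the MethodCounters* path below.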
      __ ldr(R1_tmp, Address(Rmethod, Method::method_data_offset()));
      __ cbz(R1_tmp, no_mdo);
      // Increment counter in the MDO
      const Address mdo_invocation_counter(R1_tmp,
                    in_bytes(MethodData::invocation_counter_offset()) +
                    in_bytes(InvocationCounter::counter_offset()));
      const Address mask(R1_tmp, in_bytes(MethodData::invoke_mask_offset()));
      __ increment_mask_and_jump(mdo_invocation_counter, increment, mask, R0_tmp, Rtemp, eq, overflow);
      __ b(done);
    }
    __ bind(no_mdo);
    __ get_method_counters(Rmethod, Rcounters, done);
    const Address mask(Rcounters, in_bytes(MethodCounters::invoke_mask_offset()));
    __ increment_mask_and_jump(invocation_counter, increment, mask, R0_tmp, R1_tmp, eq, overflow);
    __ bind(done);
  } else { // not TieredCompilation
    const Address backedge_counter(Rcounters,
                  MethodCounters::backedge_counter_offset() +
                  InvocationCounter::counter_offset());

    const Register Ricnt = R0_tmp;  // invocation counter
    const Register Rbcnt = R1_tmp;  // backedge counter

    __ get_method_counters(Rmethod, Rcounters, done);

    if (ProfileInterpreter) {
      const Register Riic = R1_tmp;
      __ ldr_s32(Riic, Address(Rcounters, MethodCounters::interpreter_invocation_counter_offset()));
      __ add(Riic, Riic, 1);
      __ str_32(Riic, Address(Rcounters, MethodCounters::interpreter_invocation_counter_offset()));
    }

    // Update standard invocation counters

    __ ldr_u32(Ricnt, invocation_counter);
    __ ldr_u32(Rbcnt, backedge_counter);

    __ add(Ricnt, Ricnt, InvocationCounter::count_increment);

#ifdef AARCH64
    __ andr(Rbcnt, Rbcnt, (unsigned int)InvocationCounter::count_mask_value); // mask out the status bits
#else
    __ bic(Rbcnt, Rbcnt, ~InvocationCounter::count_mask_value); // mask out the status bits
#endif // AARCH64

    __ str_32(Ricnt, invocation_counter);  // save invocation count
    __ add(Ricnt, Ricnt, Rbcnt);           // add both counters

    // profile_method is non-null only for interpreted method so
    // profile_method != NULL == !native_call
    // BytecodeInterpreter only calls for native so code is elided.

    if (ProfileInterpreter && profile_method != NULL) {
      assert(profile_method_continue != NULL, "should be non-null");

      // Test to see if we should create a method data oop
      // Reuse R1_tmp as we don't need backedge counters anymore.
      Address profile_limit(Rcounters, in_bytes(MethodCounters::interpreter_profile_limit_offset()));
      __ ldr_s32(R1_tmp, profile_limit);
      __ cmp_32(Ricnt, R1_tmp);
      __ b(*profile_method_continue, lt);

      // if no method data exists, go to profile_method
      __ test_method_data_pointer(R1_tmp, *profile_method);
    }

    Address invoke_limit(Rcounters, in_bytes(MethodCounters::interpreter_invocation_limit_offset()));
    __ ldr_s32(R1_tmp, invoke_limit);
    __ cmp_32(Ricnt, R1_tmp);
    __ b(*overflow, hs);
    __ bind(done);
  }
}

void TemplateInterpreterGenerator::generate_counter_overflow(Label& do_continue) {
  // InterpreterRuntime::frequency_counter_overflow takes one argument
  // indicating if the counter overflow occurs at a backwards branch (non-NULL bcp).
  // The call returns the address of the verified entry point for the method or NULL
  // if the compilation did not complete (either went background or bailed out).
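  // In effect the generated code performs
  //   InterpreterRuntime::frequency_counter_overflow(thread, NULL);
  // R1 = NULL/false below, i.e. the overflow is not at a backwards branch.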
  __ mov(R1, (int)false);
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), R1);

  // jump to the interpreted entry.
  __ b(do_continue);
}

void TemplateInterpreterGenerator::generate_stack_overflow_check(void) {
  // Check if we've got enough room on the stack for
  //  - overhead;
  //  - locals;
  //  - expression stack.
  //
  // Registers on entry:
  //
  // R3 = number of additional locals
  // R11 = max expression stack slots (AArch64 only)
  // Rthread
  // Rmethod
  // Registers used: R0, R1, R2, Rtemp.

  const Register Radditional_locals = R3;
  const Register RmaxStack = AARCH64_ONLY(R11) NOT_AARCH64(R2);

  // monitor entry size
  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;

  // total overhead size: entry_size + (saved registers, thru expr stack bottom).
  // be sure to change this if you add/subtract anything to/from the overhead area
  const int overhead_size = (frame::sender_sp_offset - frame::interpreter_frame_initial_sp_offset)*wordSize + entry_size;

  // Pages reserved for VM runtime calls and subsequent Java calls.
  const int reserved_pages = JavaThread::stack_shadow_zone_size();

  // Thread::stack_size() includes guard pages, and they should not be touched.
  const int guard_pages = JavaThread::stack_guard_zone_size();

  __ ldr(R0, Address(Rthread, Thread::stack_base_offset()));
  __ ldr(R1, Address(Rthread, Thread::stack_size_offset()));
#ifndef AARCH64
  __ ldr(Rtemp, Address(Rmethod, Method::const_offset()));
  __ ldrh(RmaxStack, Address(Rtemp, ConstMethod::max_stack_offset()));
#endif // !AARCH64
  __ sub_slow(Rtemp, SP, overhead_size + reserved_pages + guard_pages + Method::extra_stack_words());

  // reserve space for additional locals
  __ sub(Rtemp, Rtemp, AsmOperand(Radditional_locals, lsl, Interpreter::logStackElementSize));

  // stack size
  __ sub(R0, R0, R1);

  // reserve space for expression stack
  __ sub(Rtemp, Rtemp, AsmOperand(RmaxStack, lsl, Interpreter::logStackElementSize));

  __ cmp(Rtemp, R0);

#ifdef AARCH64
  Label L;
  __ b(L, hi);
  __ mov(SP, Rsender_sp);  // restore SP
  __ b(StubRoutines::throw_StackOverflowError_entry());
  __ bind(L);
#else
  __ mov(SP, Rsender_sp, ls);  // restore SP
  __ b(StubRoutines::throw_StackOverflowError_entry(), ls);
#endif // AARCH64
}


// Allocate monitor and lock method (asm interpreter)
//
void TemplateInterpreterGenerator::lock_method() {
  // synchronize method

  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
  assert ((entry_size % StackAlignmentInBytes) == 0, "should keep stack alignment");

  #ifdef ASSERT
  { Label L;
    __ ldr_u32(Rtemp, Address(Rmethod, Method::access_flags_offset()));
    __ tbnz(Rtemp, JVM_ACC_SYNCHRONIZED_BIT, L);
    __ stop("method doesn't need synchronization");
    __ bind(L);
  }
  #endif // ASSERT

  // get synchronization object
  { Label done;
    __ ldr_u32(Rtemp, Address(Rmethod, Method::access_flags_offset()));
#ifdef AARCH64
    __ ldr(R0, Address(Rlocals, Interpreter::local_offset_in_bytes(0))); // get receiver (assume this is frequent case)
    __ tbz(Rtemp, JVM_ACC_STATIC_BIT, done);
#else
    __ tst(Rtemp, JVM_ACC_STATIC);
    __ ldr(R0, Address(Rlocals, Interpreter::local_offset_in_bytes(0)), eq); // get receiver (assume this is frequent case)
    __ b(done, eq);
#endif // AARCH64
    __ load_mirror(R0, Rmethod, Rtemp);
    __ bind(done);
  }

  // add space for monitor & lock

#ifdef AARCH64
  __ check_extended_sp(Rtemp);
  __ sub(SP, SP, entry_size);           // adjust extended SP
  __ mov(Rtemp, SP);
  __ str(Rtemp, Address(FP, frame::interpreter_frame_extended_sp_offset * wordSize));
#endif // AARCH64

  __ sub(Rstack_top, Rstack_top, entry_size);
  __ check_stack_top_on_expansion();
                                        // add space for a monitor entry
  __ str(Rstack_top, Address(FP, frame::interpreter_frame_monitor_block_top_offset * wordSize));
                                        // set new monitor block top
  __ str(R0, Address(Rstack_top, BasicObjectLock::obj_offset_in_bytes()));
                                        // store object
  __ mov(R1, Rstack_top);               // monitor entry address
  __ lock_object(R1);
}

#ifdef AARCH64

//
// Generate a fixed interpreter frame. This is identical setup for interpreted methods
// and for native methods hence the shared code.
//
// On entry:
//   R10 = ConstMethod
//   R11 = max expr. stack (in slots), if !native_call
//
// On exit:
//   Rbcp, Rstack_top are initialized, SP is extended
//
void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
  // Incoming registers
  const Register RconstMethod = R10;
  const Register RmaxStack    = R11;
  // Temporary registers
  const Register RextendedSP  = R0;
  const Register Rcache       = R1;
  const Register Rmdp         = ProfileInterpreter ? R2 : ZR;

  // Generates the following stack layout (stack grows up in this picture):
  //
  // [ expr. stack bottom ]
  // [ saved Rbcp         ]
  // [ current Rlocals    ]
  // [ cache              ]
  // [ mdx                ]
  // [ mirror             ]
  // [ Method*            ]
  // [ extended SP        ]
  // [ expr. stack top    ]
  // [ sender_sp          ]
  // [ saved FP           ] <--- FP
  // [ saved LR           ]

  // initialize fixed part of activation frame
  __ stp(FP, LR, Address(SP, -2*wordSize, pre_indexed));
  __ mov(FP, SP);                                     // establish new FP

  // setup Rbcp
  if (native_call) {
    __ mov(Rbcp, ZR);                                 // bcp = 0 for native calls
  } else {
    __ add(Rbcp, RconstMethod, in_bytes(ConstMethod::codes_offset())); // get codebase
  }

  // Rstack_top & RextendedSP
  __ sub(Rstack_top, SP, 10*wordSize);
  if (native_call) {
    __ sub(RextendedSP, Rstack_top, round_to(wordSize, StackAlignmentInBytes)); // reserve 1 slot for exception handling
  } else {
    __ sub(RextendedSP, Rstack_top, AsmOperand(RmaxStack, lsl, Interpreter::logStackElementSize));
    __ align_reg(RextendedSP, RextendedSP, StackAlignmentInBytes);
  }
  __ mov(SP, RextendedSP);
  __ check_stack_top();

  // Load Rmdp
  if (ProfileInterpreter) {
    __ ldr(Rtemp, Address(Rmethod, Method::method_data_offset()));
    __ tst(Rtemp, Rtemp);
    __ add(Rtemp, Rtemp, in_bytes(MethodData::data_offset()));
    __ csel(Rmdp, ZR, Rtemp, eq);
  }

  // Load Rcache
  __ ldr(Rtemp, Address(RconstMethod, ConstMethod::constants_offset()));
  __ ldr(Rcache, Address(Rtemp, ConstantPool::cache_offset_in_bytes()));
  // Get mirror and store it in the frame as GC root for this Method*
  __ load_mirror(Rtemp, Rmethod, Rtemp);

  // Build fixed frame
  __ stp(Rstack_top, Rbcp,     Address(FP, -10*wordSize));
  __ stp(Rlocals, Rcache,      Address(FP,  -8*wordSize));
  __ stp(Rmdp, Rtemp,          Address(FP,  -6*wordSize));
  __ stp(Rmethod, RextendedSP, Address(FP,  -4*wordSize));
  __ stp(ZR, Rsender_sp,       Address(FP,  -2*wordSize));
  assert(frame::interpreter_frame_initial_sp_offset == -10, "interpreter frame broken");
  assert(frame::interpreter_frame_stack_top_offset  ==  -2, "stack top broken");
}

#else // AARCH64

//
// Generate a fixed interpreter frame. This is identical setup for interpreted methods
// and for native methods hence the shared code.

void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
  // Generates the following stack layout:
  //
  // [ expr. stack bottom ]
  // [ saved Rbcp         ]
  // [ current Rlocals    ]
  // [ cache              ]
  // [ mdx                ]
  // [ Method*            ]
  // [ last_sp            ]
  // [ sender_sp          ]
  // [ saved FP           ] <--- FP
  // [ saved LR           ]

  // initialize fixed part of activation frame
  __ push(LR);                                        // save return address
  __ push(FP);                                        // save FP
  __ mov(FP, SP);                                     // establish new FP

  __ push(Rsender_sp);

  __ mov(R0, 0);
  __ push(R0);                                        // leave last_sp as null

  // setup Rbcp
  if (native_call) {
    __ mov(Rbcp, 0);                                  // bcp = 0 for native calls
  } else {
    __ ldr(Rtemp, Address(Rmethod, Method::const_offset())); // get ConstMethod*
    __ add(Rbcp, Rtemp, ConstMethod::codes_offset()); // get codebase
  }

  __ push(Rmethod);                                   // save Method*
  // Get mirror and store it in the frame as GC root for this Method*
  __ load_mirror(Rtemp, Rmethod, Rtemp);
  __ push(Rtemp);

  if (ProfileInterpreter) {
    __ ldr(Rtemp, Address(Rmethod, Method::method_data_offset()));
    __ tst(Rtemp, Rtemp);
    __ add(Rtemp, Rtemp, in_bytes(MethodData::data_offset()), ne);
    __ push(Rtemp);                                   // set the mdp (method data pointer)
  } else {
    __ push(R0);
  }

  __ ldr(Rtemp, Address(Rmethod, Method::const_offset()));
  __ ldr(Rtemp, Address(Rtemp, ConstMethod::constants_offset()));
  __ ldr(Rtemp, Address(Rtemp, ConstantPool::cache_offset_in_bytes()));
  __ push(Rtemp);                                     // set constant pool cache
  __ push(Rlocals);                                   // set locals pointer
  __ push(Rbcp);                                      // set bcp
  __ push(R0);                                        // reserve word for pointer to expression stack bottom
  __ str(SP, Address(SP, 0));                         // set expression stack bottom
}

#endif // AARCH64

// End of helpers

//------------------------------------------------------------------------------------------------------------------------
// Entry points
//
// Here we generate the various kind of entries into the interpreter.
// The two main entry types are generic bytecode methods and native call methods.
// These both come in synchronized and non-synchronized versions but the
// frame layout they create is very similar. The other method entry
// types are really just special purpose entries that are really entry
// and interpretation all in one. These are for trivial methods like
// accessor, empty, or special math methods.
//
// When control flow reaches any of the entry types for the interpreter
// the following holds ->
//
// Arguments:
//
//   Rmethod: Method*
//   Rthread: thread
//   Rsender_sp: sender sp
//   Rparams (SP on 32-bit ARM): pointer to method parameters
//
//   LR: return address
//
// Stack layout immediately at entry
//
// [ optional padding(*) ] <--- SP (AArch64)
// [ parameter n         ] <--- Rparams (SP on 32-bit ARM)
//   ...
// [ parameter 1         ]
// [ expression stack    ] (caller's java expression stack)

// Assuming that we don't go to one of the trivial specialized
// entries the stack will look like below when we are ready to execute
// the first bytecode (or call the native routine). The register usage
// will be as the template based interpreter expects.
//
// local variables follow incoming parameters immediately; i.e.
// the return address is saved at the end of the locals.
//
// [ reserved stack (*)  ] <--- SP (AArch64)
// [ expr. stack         ] <--- Rstack_top (SP on 32-bit ARM)
// [ monitor entry       ]
//   ...
// [ monitor entry       ]
// [ expr. stack bottom  ]
// [ saved Rbcp          ]
// [ current Rlocals     ]
// [ cache               ]
// [ mdx                 ]
// [ mirror              ]
// [ Method*             ]
//
// 32-bit ARM:
// [ last_sp             ]
//
// AArch64:
// [ extended SP (*)     ]
// [ stack top (*)       ]
//
// [ sender_sp           ]
// [ saved FP            ] <--- FP
// [ saved LR            ]
// [ optional padding(*) ]
// [ local variable m    ]
//   ...
// [ local variable 1    ]
// [ parameter n         ]
//   ...
// [ parameter 1         ] <--- Rlocals
//
// (*) - AArch64 only
//

address TemplateInterpreterGenerator::generate_Reference_get_entry(void) {
#if INCLUDE_ALL_GCS
  if (UseG1GC) {
    // Code: _aload_0, _getfield, _areturn
    // parameter size = 1
    //
    // The code that gets generated by this routine is split into 2 parts:
    //    1. The "intrinsified" code for G1 (or any SATB based GC),
    //    2. The slow path - which is an expansion of the regular method entry.
    //
    // Notes:-
    // * In the G1 code we do not check whether we need to block for
    //   a safepoint. If G1 is enabled then we must execute the specialized
    //   code for Reference.get (except when the Reference object is null)
    //   so that we can log the value in the referent field with an SATB
    //   update buffer.
    //   If the code for the getfield template is modified so that the
    //   G1 pre-barrier code is executed when the current method is
    //   Reference.get() then going through the normal method entry
    //   will be fine.
    // * The G1 code can, however, check the receiver object (the instance
    //   of java.lang.Reference) and jump to the slow path if null. If the
    //   Reference object is null then we obviously cannot fetch the referent
    //   and so we don't need to call the G1 pre-barrier. Thus we can use the
    //   regular method entry code to generate the NPE.
    //
    // This code is based on generate_accessor_entry.
    //
    // Rmethod: Method*
    // Rthread: thread
    // Rsender_sp: sender sp, must be preserved for slow path, set SP to it on fast path
    // Rparams: parameters

    address entry = __ pc();
    Label slow_path;
    const Register Rthis = R0;
    const Register Rret_addr = Rtmp_save1;
    assert_different_registers(Rthis, Rret_addr, Rsender_sp);

    const int referent_offset = java_lang_ref_Reference::referent_offset;
    guarantee(referent_offset > 0, "referent offset not initialized");

    // Check if local 0 != NULL
    // If the receiver is null then it is OK to jump to the slow path.
    __ ldr(Rthis, Address(Rparams));
    __ cbz(Rthis, slow_path);

    // Generate the G1 pre-barrier code to log the value of
    // the referent field in an SATB buffer.

    // Load the value of the referent field.
    __ load_heap_oop(R0, Address(Rthis, referent_offset));

    // Preserve LR
    __ mov(Rret_addr, LR);

    __ g1_write_barrier_pre(noreg,   // store_addr
                            noreg,   // new_val
                            R0,      // pre_val
                            Rtemp,   // tmp1
                            R1_tmp); // tmp2

    // _areturn
    __ mov(SP, Rsender_sp);
    __ ret(Rret_addr);

    // generate a vanilla interpreter entry as the slow path
    __ bind(slow_path);
    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals));
    return entry;
  }
#endif // INCLUDE_ALL_GCS

  // If G1 is not enabled then attempt to go through the normal entry point
  return NULL;
}

// Not supported
address TemplateInterpreterGenerator::generate_CRC32_update_entry() { return NULL; }
address TemplateInterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) { return NULL; }
address TemplateInterpreterGenerator::generate_CRC32C_updateBytes_entry(AbstractInterpreter::MethodKind kind) { return NULL; }

//
// Interpreter stub for calling a native method. (asm interpreter)
// This sets up a somewhat different looking stack for calling the native method
// than the typical interpreter frame setup.
//

address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
  // determine code generation flags
  bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods;

  // Incoming registers:
  //
  // Rmethod: Method*
  // Rthread: thread
  // Rsender_sp: sender sp
  // Rparams: parameters

  address entry_point = __ pc();

  // Register allocation
  const Register Rsize_of_params = AARCH64_ONLY(R20) NOT_AARCH64(R6);
  const Register Rsig_handler    = AARCH64_ONLY(R21) NOT_AARCH64(Rtmp_save0 /* R4 */);
  const Register Rnative_code    = AARCH64_ONLY(R22) NOT_AARCH64(Rtmp_save1 /* R5 */);
  const Register Rresult_handler = AARCH64_ONLY(Rsig_handler) NOT_AARCH64(R6);

#ifdef AARCH64
  const Register RconstMethod = R10; // also used in generate_fixed_frame (should match)
  const Register Rsaved_result = Rnative_code;
  const FloatRegister Dsaved_result = V8;
#else
  const Register Rsaved_result_lo = Rtmp_save0;  // R4
  const Register Rsaved_result_hi = Rtmp_save1;  // R5
  FloatRegister saved_result_fp;
#endif // AARCH64


#ifdef AARCH64
  __ ldr(RconstMethod, Address(Rmethod, Method::const_offset()));
  __ ldrh(Rsize_of_params, Address(RconstMethod, ConstMethod::size_of_parameters_offset()));
#else
  __ ldr(Rsize_of_params, Address(Rmethod, Method::const_offset()));
  __ ldrh(Rsize_of_params, Address(Rsize_of_params, ConstMethod::size_of_parameters_offset()));
#endif // AARCH64

  // native calls don't need the stack size check since they have no expression stack
  // and the arguments are already on the stack and we only add a handful of words
  // to the stack

  // compute beginning of parameters (Rlocals)
  __ sub(Rlocals, Rparams, wordSize);
  __ add(Rlocals, Rlocals, AsmOperand(Rsize_of_params, lsl, Interpreter::logStackElementSize));

#ifdef AARCH64
  int extra_stack_reserve = 2*wordSize; // extra space for oop_temp
  if (__ can_post_interpreter_events()) {
    // extra space for saved results
    extra_stack_reserve += 2*wordSize;
  }
  // reserve extra stack space and nullify oop_temp slot
  __ stp(ZR, ZR, Address(SP, -extra_stack_reserve, pre_indexed));
#else
  // reserve stack space for oop_temp
  __ mov(R0, 0);
  __ push(R0);
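  // (the oop_temp slot is used further below to hold the mirror for static
  // methods and to keep an oop result visible to GC across the safepoint checks)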
#endif // AARCH64

  generate_fixed_frame(true); // Note: R9 is now saved in the frame

  // make sure method is native & not abstract
#ifdef ASSERT
  __ ldr_u32(Rtemp, Address(Rmethod, Method::access_flags_offset()));
  {
    Label L;
    __ tbnz(Rtemp, JVM_ACC_NATIVE_BIT, L);
    __ stop("tried to execute non-native method as native");
    __ bind(L);
  }
  { Label L;
    __ tbz(Rtemp, JVM_ACC_ABSTRACT_BIT, L);
    __ stop("tried to execute abstract method in interpreter");
    __ bind(L);
  }
#endif

  // increment invocation count & check for overflow
  Label invocation_counter_overflow;
  if (inc_counter) {
    if (synchronized) {
      // Avoid unlocking method's monitor in case of exception, as it has not
      // been locked yet.
      __ set_do_not_unlock_if_synchronized(true, Rtemp);
    }
    generate_counter_incr(&invocation_counter_overflow, NULL, NULL);
  }

  Label continue_after_compile;
  __ bind(continue_after_compile);

  if (inc_counter && synchronized) {
    __ set_do_not_unlock_if_synchronized(false, Rtemp);
  }

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so method is not locked if overflows.
  //
  if (synchronized) {
    lock_method();
  } else {
    // no synchronization necessary
#ifdef ASSERT
    { Label L;
      __ ldr_u32(Rtemp, Address(Rmethod, Method::access_flags_offset()));
      __ tbz(Rtemp, JVM_ACC_SYNCHRONIZED_BIT, L);
      __ stop("method needs synchronization");
      __ bind(L);
    }
#endif
  }

  // start execution
#ifdef ASSERT
  { Label L;
    __ ldr(Rtemp, Address(FP, frame::interpreter_frame_monitor_block_top_offset * wordSize));
    __ cmp(Rtemp, Rstack_top);
    __ b(L, eq);
    __ stop("broken stack frame setup in interpreter");
    __ bind(L);
  }
#endif
  __ check_extended_sp(Rtemp);

  // jvmti/dtrace support
  __ notify_method_entry();
#if R9_IS_SCRATCHED
  __ restore_method();
#endif

  {
    Label L;
    __ ldr(Rsig_handler, Address(Rmethod, Method::signature_handler_offset()));
    __ cbnz(Rsig_handler, L);
    __ mov(R1, Rmethod);
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), R1, true);
    __ ldr(Rsig_handler, Address(Rmethod, Method::signature_handler_offset()));
    __ bind(L);
  }

  {
    Label L;
    __ ldr(Rnative_code, Address(Rmethod, Method::native_function_offset()));
    __ cbnz(Rnative_code, L);
    __ mov(R1, Rmethod);
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), R1);
    __ ldr(Rnative_code, Address(Rmethod, Method::native_function_offset()));
    __ bind(L);
  }

  // Allocate stack space for arguments

#ifdef AARCH64
  __ sub(Rtemp, SP, Rsize_of_params, ex_uxtw, LogBytesPerWord);
  __ align_reg(SP, Rtemp, StackAlignmentInBytes);

  // Allocate more stack space to accommodate all arguments passed on GP and FP registers:
  // 8 * wordSize for GPRs
  // 8 * wordSize for FPRs
  int reg_arguments = round_to(8*wordSize + 8*wordSize, StackAlignmentInBytes);
#else

  // C functions need aligned stack
  __ bic(SP, SP, StackAlignmentInBytes - 1);
  // Multiply by BytesPerLong instead of BytesPerWord, because calling convention
  // may require empty slots due to long alignment, e.g. func(int, jlong, int, jlong)
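  // (the ARM EABI aligns 64-bit arguments to 8 bytes both in registers and on
  // the stack, so each parameter slot is conservatively sized as a long here)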
  __ sub(SP, SP, AsmOperand(Rsize_of_params, lsl, LogBytesPerLong));

#ifdef __ABI_HARD__
  // Allocate more stack space to accommodate all GP as well as FP registers:
  // 4 * wordSize
  // 8 * BytesPerLong
  int reg_arguments = round_to((4*wordSize) + (8*BytesPerLong), StackAlignmentInBytes);
#else
  // Reserve at least 4 words on the stack for loading
  // of parameters passed on registers (R0-R3).
  // See generate_slow_signature_handler().
  // It is also used for JNIEnv & class additional parameters.
  int reg_arguments = 4 * wordSize;
#endif // __ABI_HARD__
#endif // AARCH64

  __ sub(SP, SP, reg_arguments);


  // Note: signature handler blows R4 (32-bit ARM) or R21 (AArch64) besides all scratch registers.
  // See AbstractInterpreterGenerator::generate_slow_signature_handler().
  __ call(Rsig_handler);
#if R9_IS_SCRATCHED
  __ restore_method();
#endif
  __ mov(Rresult_handler, R0);

  // Pass JNIEnv and mirror for static methods
  {
    Label L;
    __ ldr_u32(Rtemp, Address(Rmethod, Method::access_flags_offset()));
    __ add(R0, Rthread, in_bytes(JavaThread::jni_environment_offset()));
    __ tbz(Rtemp, JVM_ACC_STATIC_BIT, L);
    __ load_mirror(Rtemp, Rmethod, Rtemp);
    __ add(R1, FP, frame::interpreter_frame_oop_temp_offset * wordSize);
    __ str(Rtemp, Address(R1, 0));
    __ bind(L);
  }

  __ set_last_Java_frame(SP, FP, true, Rtemp);

  // Changing state to _thread_in_native must be the last thing to do
  // before the jump to native code. At this moment stack must be
  // safepoint-safe and completely prepared for stack walking.
#ifdef ASSERT
  {
    Label L;
    __ ldr_u32(Rtemp, Address(Rthread, JavaThread::thread_state_offset()));
    __ cmp_32(Rtemp, _thread_in_Java);
    __ b(L, eq);
    __ stop("invalid thread state");
    __ bind(L);
  }
#endif

#ifdef AARCH64
  __ mov(Rtemp, _thread_in_native);
  __ add(Rtemp2, Rthread, in_bytes(JavaThread::thread_state_offset()));
  // STLR is used to force all preceding writes to be observed prior to thread state change
  __ stlr_w(Rtemp, Rtemp2);
#else
  // Force all preceding writes to be observed prior to thread state change
  __ membar(MacroAssembler::StoreStore, Rtemp);

  __ mov(Rtemp, _thread_in_native);
  __ str(Rtemp, Address(Rthread, JavaThread::thread_state_offset()));
#endif // AARCH64

  __ call(Rnative_code);
#if R9_IS_SCRATCHED
  __ restore_method();
#endif

  // Set FPSCR/FPCR to a known state
  if (AlwaysRestoreFPU) {
    __ restore_default_fp_mode();
  }

  // Do safepoint check
  __ mov(Rtemp, _thread_in_native_trans);
  __ str_32(Rtemp, Address(Rthread, JavaThread::thread_state_offset()));

  // Force this write out before the read below
  __ membar(MacroAssembler::StoreLoad, Rtemp);

  __ ldr_global_s32(Rtemp, SafepointSynchronize::address_of_state());

  // Protect the return value in the interleaved code: save it to callee-save registers.
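  // (the check_special_condition_for_native_trans call below follows the C
  // calling convention and may clobber R0/R1 and the FP result registers)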
#ifdef AARCH64
  __ mov(Rsaved_result, R0);
  __ fmov_d(Dsaved_result, D0);
#else
  __ mov(Rsaved_result_lo, R0);
  __ mov(Rsaved_result_hi, R1);
#ifdef __ABI_HARD__
  // preserve native FP result in a callee-saved register
  saved_result_fp = D8;
  __ fcpyd(saved_result_fp, D0);
#else
  saved_result_fp = fnoreg;
#endif // __ABI_HARD__
#endif // AARCH64

  {
    __ ldr_u32(R3, Address(Rthread, JavaThread::suspend_flags_offset()));
    __ cmp(Rtemp, SafepointSynchronize::_not_synchronized);
    __ cond_cmp(R3, 0, eq);

#ifdef AARCH64
    Label L;
    __ b(L, eq);
    __ mov(R0, Rthread);
    __ call(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans), relocInfo::none);
    __ bind(L);
#else
    __ mov(R0, Rthread, ne);
    __ call(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans), relocInfo::none, ne);
#if R9_IS_SCRATCHED
    __ restore_method();
#endif
#endif // AARCH64
  }

  // Perform Native->Java thread transition
  __ mov(Rtemp, _thread_in_Java);
  __ str_32(Rtemp, Address(Rthread, JavaThread::thread_state_offset()));

  // Zero handles and last_java_sp
  __ reset_last_Java_frame(Rtemp);
  __ ldr(R3, Address(Rthread, JavaThread::active_handles_offset()));
  __ str_32(__ zero_register(Rtemp), Address(R3, JNIHandleBlock::top_offset_in_bytes()));
  if (CheckJNICalls) {
    __ str(__ zero_register(Rtemp), Address(Rthread, JavaThread::pending_jni_exception_check_fn_offset()));
  }

  // Unbox oop result, e.g. JNIHandles::resolve result if it's an oop.
  {
    Label Lnot_oop;
#ifdef AARCH64
    __ mov_slow(Rtemp, AbstractInterpreter::result_handler(T_OBJECT));
    __ cmp(Rresult_handler, Rtemp);
    __ b(Lnot_oop, ne);
#else // !AARCH64
    // For ARM32, Rresult_handler is -1 for oop result, 0 otherwise.
    __ cbz(Rresult_handler, Lnot_oop);
#endif // !AARCH64
    Register value = AARCH64_ONLY(Rsaved_result) NOT_AARCH64(Rsaved_result_lo);
    __ resolve_jobject(value,   // value
                       Rtemp,   // tmp1
                       R1_tmp); // tmp2
    // Store resolved result in frame for GC visibility.
    __ str(value, Address(FP, frame::interpreter_frame_oop_temp_offset * wordSize));
    __ bind(Lnot_oop);
  }

#ifdef AARCH64
  // Restore SP (drop native parameters area), to keep SP in sync with extended_sp in frame
  __ restore_sp_after_call(Rtemp);
  __ check_stack_top();
#endif // AARCH64

  // reguard stack if StackOverflow exception happened while in native.
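  // (if the yellow guard zone was disabled while the exception was thrown,
  // SharedRuntime::reguard_yellow_pages re-protects it before returning to Java)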
  {
    __ ldr_u32(Rtemp, Address(Rthread, JavaThread::stack_guard_state_offset()));
    __ cmp_32(Rtemp, JavaThread::stack_guard_yellow_reserved_disabled);
#ifdef AARCH64
    Label L;
    __ b(L, ne);
    __ call(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages), relocInfo::none);
    __ bind(L);
#else
    __ call(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages), relocInfo::none, eq);
#if R9_IS_SCRATCHED
    __ restore_method();
#endif
#endif // AARCH64
  }

  // check pending exceptions
  {
    __ ldr(Rtemp, Address(Rthread, Thread::pending_exception_offset()));
#ifdef AARCH64
    Label L;
    __ cbz(Rtemp, L);
    __ mov_pc_to(Rexception_pc);
    __ b(StubRoutines::forward_exception_entry());
    __ bind(L);
#else
    __ cmp(Rtemp, 0);
    __ mov(Rexception_pc, PC, ne);
    __ b(StubRoutines::forward_exception_entry(), ne);
#endif // AARCH64
  }

  if (synchronized) {
    // address of first monitor
    __ sub(R1, FP, - (frame::interpreter_frame_monitor_block_bottom_offset - frame::interpreter_frame_monitor_size()) * wordSize);
    __ unlock_object(R1);
  }

  // jvmti/dtrace support
  // Note: This must happen _after_ handling/throwing any exceptions since
  //       the exception handler code notifies the runtime of method exits
  //       too. If this happens before, method entry/exit notifications are
  //       not properly paired (was bug - gri 11/22/99).
#ifdef AARCH64
  __ notify_method_exit(vtos, InterpreterMacroAssembler::NotifyJVMTI, true, Rsaved_result, noreg, Dsaved_result);
#else
  __ notify_method_exit(vtos, InterpreterMacroAssembler::NotifyJVMTI, true, Rsaved_result_lo, Rsaved_result_hi, saved_result_fp);
#endif // AARCH64

  // Restore the result. Oop result is restored from the stack.
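  // (non-oop results are still live in the callee-saved registers; an oop
  // result is reloaded from the oop_temp slot where it was kept GC-visible)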
#ifdef AARCH64
  __ mov(R0, Rsaved_result);
  __ fmov_d(D0, Dsaved_result);

  __ blr(Rresult_handler);
#else
  __ cmp(Rresult_handler, 0);
  __ ldr(R0, Address(FP, frame::interpreter_frame_oop_temp_offset * wordSize), ne);
  __ mov(R0, Rsaved_result_lo, eq);
  __ mov(R1, Rsaved_result_hi);

#ifdef __ABI_HARD__
  // reload native FP result
  __ fcpyd(D0, D8);
#endif // __ABI_HARD__

#ifdef ASSERT
  if (VerifyOops) {
    Label L;
    __ cmp(Rresult_handler, 0);
    __ b(L, eq);
    __ verify_oop(R0);
    __ bind(L);
  }
#endif // ASSERT
#endif // AARCH64

  // Restore FP/LR, sender_sp and return
#ifdef AARCH64
  __ ldr(Rtemp, Address(FP, frame::interpreter_frame_sender_sp_offset * wordSize));
  __ ldp(FP, LR, Address(FP));
  __ mov(SP, Rtemp);
#else
  __ mov(Rtemp, FP);
  __ ldmia(FP, RegisterSet(FP) | RegisterSet(LR));
  __ ldr(SP, Address(Rtemp, frame::interpreter_frame_sender_sp_offset * wordSize));
#endif // AARCH64

  __ ret();

  if (inc_counter) {
    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(continue_after_compile);
  }

  return entry_point;
}

//
// Generic interpreted method entry to (asm) interpreter
//
address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
  // determine code generation flags
  bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods;

  // Rmethod: Method*
  // Rthread: thread
  // Rsender_sp: sender sp (could differ from SP if we were called via c2i)
  // Rparams: pointer to the last parameter in the stack

  address entry_point = __ pc();

  const Register RconstMethod = AARCH64_ONLY(R10) NOT_AARCH64(R3);

#ifdef AARCH64
  const Register RmaxStack = R11;
  const Register RlocalsBase = R12;
#endif // AARCH64

  __ ldr(RconstMethod, Address(Rmethod, Method::const_offset()));

  __ ldrh(R2, Address(RconstMethod, ConstMethod::size_of_parameters_offset()));
  __ ldrh(R3, Address(RconstMethod, ConstMethod::size_of_locals_offset()));

  // setup Rlocals
  __ sub(Rlocals, Rparams, wordSize);
  __ add(Rlocals, Rlocals, AsmOperand(R2, lsl, Interpreter::logStackElementSize));

  __ sub(R3, R3, R2); // number of additional locals

#ifdef AARCH64
  // setup RmaxStack
  __ ldrh(RmaxStack, Address(RconstMethod, ConstMethod::max_stack_offset()));
  // We have to add extra reserved slots to max_stack. There are 3 users of the extra slots,
  // none of which are at the same time, so we just need to make sure there is enough room
  // for the biggest user:
  //   -reserved slot for exception handler
  //   -reserved slots for JSR292. Method::extra_stack_entries() is the size.
  //   -3 reserved slots so get_method_counters() can save some registers before call_VM().
  __ add(RmaxStack, RmaxStack, MAX2(3, Method::extra_stack_entries()));
#endif // AARCH64

  // see if we've got enough room on the stack for locals plus overhead.
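  // (generate_stack_overflow_check, defined above, verifies that
  //  SP - overhead - shadow/guard zones - additional locals - max expression
  //  stack stays within the thread's stack; on failure it restores SP to
  //  Rsender_sp and jumps to the StackOverflowError stub)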
  generate_stack_overflow_check();

#ifdef AARCH64

  // allocate space for locals
  {
    __ sub(RlocalsBase, Rparams, AsmOperand(R3, lsl, Interpreter::logStackElementSize));
    __ align_reg(SP, RlocalsBase, StackAlignmentInBytes);
  }

  // explicitly initialize locals
  {
    Label zero_loop, done;
    __ cbz(R3, done);

    __ tbz(R3, 0, zero_loop);
    __ subs(R3, R3, 1);
    __ str(ZR, Address(RlocalsBase, wordSize, post_indexed));
    __ b(done, eq);

    __ bind(zero_loop);
    __ subs(R3, R3, 2);
    __ stp(ZR, ZR, Address(RlocalsBase, 2*wordSize, post_indexed));
    __ b(zero_loop, ne);

    __ bind(done);
  }

#else
  // allocate space for locals
  // explicitly initialize locals

  // Loop is unrolled 4 times
  Label loop;
  __ mov(R0, 0);
  __ bind(loop);

  // #1
  __ subs(R3, R3, 1);
  __ push(R0, ge);

  // #2
  __ subs(R3, R3, 1, ge);
  __ push(R0, ge);

  // #3
  __ subs(R3, R3, 1, ge);
  __ push(R0, ge);

  // #4
  __ subs(R3, R3, 1, ge);
  __ push(R0, ge);

  __ b(loop, gt);
#endif // AARCH64

  // initialize fixed part of activation frame
  generate_fixed_frame(false);

  __ restore_dispatch();

  // make sure method is not native & not abstract
#ifdef ASSERT
  __ ldr_u32(Rtemp, Address(Rmethod, Method::access_flags_offset()));
  {
    Label L;
    __ tbz(Rtemp, JVM_ACC_NATIVE_BIT, L);
    __ stop("tried to execute native method as non-native");
    __ bind(L);
  }
  { Label L;
    __ tbz(Rtemp, JVM_ACC_ABSTRACT_BIT, L);
    __ stop("tried to execute abstract method in interpreter");
    __ bind(L);
  }
#endif

  // increment invocation count & check for overflow
  Label invocation_counter_overflow;
  Label profile_method;
  Label profile_method_continue;
  if (inc_counter) {
    if (synchronized) {
      // Avoid unlocking method's monitor in case of exception, as it has not
      // been locked yet.
      __ set_do_not_unlock_if_synchronized(true, Rtemp);
    }
    generate_counter_incr(&invocation_counter_overflow, &profile_method, &profile_method_continue);
    if (ProfileInterpreter) {
      __ bind(profile_method_continue);
    }
  }
  Label continue_after_compile;
  __ bind(continue_after_compile);

  if (inc_counter && synchronized) {
    __ set_do_not_unlock_if_synchronized(false, Rtemp);
  }
#if R9_IS_SCRATCHED
  __ restore_method();
#endif

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so method is not locked if overflows.
  //
  if (synchronized) {
    // Allocate monitor and lock method
    lock_method();
  } else {
    // no synchronization necessary
#ifdef ASSERT
    { Label L;
      __ ldr_u32(Rtemp, Address(Rmethod, Method::access_flags_offset()));
      __ tbz(Rtemp, JVM_ACC_SYNCHRONIZED_BIT, L);
      __ stop("method needs synchronization");
      __ bind(L);
    }
#endif
  }

  // start execution
#ifdef ASSERT
  { Label L;
    __ ldr(Rtemp, Address(FP, frame::interpreter_frame_monitor_block_top_offset * wordSize));
    __ cmp(Rtemp, Rstack_top);
    __ b(L, eq);
    __ stop("broken stack frame setup in interpreter");
    __ bind(L);
  }
#endif
  __ check_extended_sp(Rtemp);

  // jvmti support
  __ notify_method_entry();
#if R9_IS_SCRATCHED
  __ restore_method();
#endif

  __ dispatch_next(vtos);

  // invocation counter overflow
  if (inc_counter) {
    if (ProfileInterpreter) {
      // We have decided to profile this method in the interpreter
      __ bind(profile_method);

      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
      __ set_method_data_pointer_for_bcp();

      __ b(profile_method_continue);
    }

    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(continue_after_compile);
  }

  return entry_point;
}

//------------------------------------------------------------------------------------------------------------------------
// Exceptions

void TemplateInterpreterGenerator::generate_throw_exception() {
  // Entry point in previous activation (i.e., if the caller was interpreted)
  Interpreter::_rethrow_exception_entry = __ pc();
  // Rexception_obj: exception

#ifndef AARCH64
  // Clear interpreter_frame_last_sp.
  __ mov(Rtemp, 0);
  __ str(Rtemp, Address(FP, frame::interpreter_frame_last_sp_offset * wordSize));
#endif // !AARCH64

#if R9_IS_SCRATCHED
  __ restore_method();
#endif
  __ restore_bcp();
  __ restore_dispatch();
  __ restore_locals();

#ifdef AARCH64
  __ restore_sp_after_call(Rtemp);
#endif // AARCH64

  // Entry point for exceptions thrown within interpreter code
  Interpreter::_throw_exception_entry = __ pc();

  // expression stack is undefined here
  // Rexception_obj: exception
  // Rbcp: exception bcp
  __ verify_oop(Rexception_obj);

  // expression stack must be empty before entering the VM in case of an exception
  __ empty_expression_stack();
  // find exception handler address and preserve exception oop
  __ mov(R1, Rexception_obj);
  __ call_VM(Rexception_obj, CAST_FROM_FN_PTR(address, InterpreterRuntime::exception_handler_for_exception), R1);
  // R0: exception handler entry point
  // Rexception_obj: preserved exception oop
  // Rbcp: bcp for exception handler
  __ push_ptr(Rexception_obj); // push exception which is now the only value on the stack
  __ jump(R0);                 // jump to exception handler (may be _remove_activation_entry!)

  // If the exception is not handled in the current frame the frame is removed and
  // the exception is rethrown (i.e. exception continuation is _rethrow_exception).
  //
  // Note: At this point the bci is still the bci for the instruction which caused
  //       the exception and the expression stack is empty. Thus, for any VM calls
  // In current activation
  // tos: exception
  // Rbcp: exception bcp

  //
  // JVMTI PopFrame support
  //
  Interpreter::_remove_activation_preserving_args_entry = __ pc();

#ifdef AARCH64
  __ restore_sp_after_call(Rtemp); // restore SP to extended SP
#endif // AARCH64

  __ empty_expression_stack();

  // Set the popframe_processing bit in _popframe_condition indicating that we are
  // currently handling popframe, so that call_VMs that may happen later do not trigger new
  // popframe handling cycles.

  __ ldr_s32(Rtemp, Address(Rthread, JavaThread::popframe_condition_offset()));
  __ orr(Rtemp, Rtemp, (unsigned)JavaThread::popframe_processing_bit);
  __ str_32(Rtemp, Address(Rthread, JavaThread::popframe_condition_offset()));

  {
    // Check to see whether we are returning to a deoptimized frame.
    // (The PopFrame call ensures that the caller of the popped frame is
    // either interpreted or compiled and deoptimizes it if compiled.)
    // In this case, we can't call dispatch_next() after the frame is
    // popped, but instead must save the incoming arguments and restore
    // them after deoptimization has occurred.
    //
    // Note that we don't compare the return PC against the
    // deoptimization blob's unpack entry because of the presence of
    // adapter frames in C2.
    Label caller_not_deoptimized;
    __ ldr(R0, Address(FP, frame::return_addr_offset * wordSize));
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::interpreter_contains), R0);
    __ cbnz_32(R0, caller_not_deoptimized);
#ifdef AARCH64
    __ NOT_TESTED();
#endif

    // Compute size of arguments for saving when returning to deoptimized caller
    __ restore_method();
    __ ldr(R0, Address(Rmethod, Method::const_offset()));
    __ ldrh(R0, Address(R0, ConstMethod::size_of_parameters_offset()));

    __ logical_shift_left(R1, R0, Interpreter::logStackElementSize);
    // Save these arguments
    __ restore_locals();
    __ sub(R2, Rlocals, R1);
    __ add(R2, R2, wordSize);
    __ mov(R0, Rthread);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::popframe_preserve_args), R0, R1, R2);

    __ remove_activation(vtos, LR,
                         /* throw_monitor_exception */ false,
                         /* install_monitor_exception */ false,
                         /* notify_jvmdi */ false);

    // Inform deoptimization that it is responsible for restoring these arguments
    __ mov(Rtemp, JavaThread::popframe_force_deopt_reexecution_bit);
    __ str_32(Rtemp, Address(Rthread, JavaThread::popframe_condition_offset()));

    // Continue in deoptimization handler
    __ ret();

    __ bind(caller_not_deoptimized);
  }
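  // Operand layout for the Deoptimization::popframe_preserve_args call above
  // (illustrative; Rlocals points at local0, the highest-addressed slot):
  //
  //   R1 = size_of_parameters << logStackElementSize  // byte size of the argument area
  //   R2 = Rlocals - R1 + wordSize                    // lowest address of that area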
  __ remove_activation(vtos, R4,
                       /* throw_monitor_exception */ false,
                       /* install_monitor_exception */ false,
                       /* notify_jvmdi */ false);

#ifndef AARCH64
  // Finish with popframe handling
  // A previous I2C followed by a deoptimization might have moved the
  // outgoing arguments further up the stack. PopFrame expects the
  // mutations to those outgoing arguments to be preserved and other
  // constraints basically require this frame to look exactly as
  // though it had previously invoked an interpreted activation with
  // no space between the top of the expression stack (current
  // last_sp) and the top of stack. Rather than force deopt to
  // maintain this kind of invariant all the time, we call a small
  // fixup routine to move the mutated arguments onto the top of our
  // expression stack if necessary.
  __ mov(R1, SP);
  __ ldr(R2, Address(FP, frame::interpreter_frame_last_sp_offset * wordSize));
  // PC must point into interpreter here
  __ set_last_Java_frame(SP, FP, true, Rtemp);
  __ mov(R0, Rthread);
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::popframe_move_outgoing_args), R0, R1, R2);
  __ reset_last_Java_frame(Rtemp);
#endif // !AARCH64

#ifdef AARCH64
  __ restore_sp_after_call(Rtemp);
  __ restore_stack_top();
#else
  // Restore the last_sp and null it out
  __ ldr(SP, Address(FP, frame::interpreter_frame_last_sp_offset * wordSize));
  __ mov(Rtemp, (int)NULL_WORD);
  __ str(Rtemp, Address(FP, frame::interpreter_frame_last_sp_offset * wordSize));
#endif // AARCH64

  __ restore_bcp();
  __ restore_dispatch();
  __ restore_locals();
  __ restore_method();

  // The method data pointer was incremented already during
  // call profiling. We have to restore the mdp for the current bcp.
  if (ProfileInterpreter) {
    __ set_method_data_pointer_for_bcp();
  }

  // Clear the popframe condition flag
  assert(JavaThread::popframe_inactive == 0, "adjust this code");
  __ str_32(__ zero_register(Rtemp), Address(Rthread, JavaThread::popframe_condition_offset()));

#if INCLUDE_JVMTI
  {
    Label L_done;

    __ ldrb(Rtemp, Address(Rbcp, 0));
    __ cmp(Rtemp, Bytecodes::_invokestatic);
    __ b(L_done, ne);

    // The member name argument must be restored if _invokestatic is re-executed after a PopFrame call.
    // Detect such a case in the InterpreterRuntime function and return the member name argument, or NULL.
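    // Rough shape of the call below (illustrative only): local0 is passed as the
    // candidate member name argument; the runtime either returns the oop that
    // must be re-installed at the top of the expression stack, or NULL.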
    // get local0
    __ ldr(R1, Address(Rlocals, 0));
    __ mov(R2, Rmethod);
    __ mov(R3, Rbcp);
    __ call_VM(R0, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), R1, R2, R3);

    __ cbz(R0, L_done);

    __ str(R0, Address(Rstack_top));
    __ bind(L_done);
  }
#endif // INCLUDE_JVMTI

  __ dispatch_next(vtos);
  // end of PopFrame support

  Interpreter::_remove_activation_entry = __ pc();

  // preserve exception over this code sequence
  __ pop_ptr(R0_tos);
  __ str(R0_tos, Address(Rthread, JavaThread::vm_result_offset()));
  // remove the activation (without doing throws on illegalMonitorExceptions)
  __ remove_activation(vtos, Rexception_pc, false, true, false);
  // restore exception
  __ get_vm_result(Rexception_obj, Rtemp);

  // In between activations - previous activation type is not yet known.
  // Compute the continuation point; it expects the following registers
  // to be set up:
  //
  // Rexception_obj: exception
  // Rexception_pc: return address/pc that threw exception
  // SP: expression stack of caller
  // FP: frame pointer of caller
  __ mov(c_rarg0, Rthread);
  __ mov(c_rarg1, Rexception_pc);
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), c_rarg0, c_rarg1);
  // Note that an "issuing PC" is actually the next PC after the call

  __ jump(R0);  // jump to exception handler of caller
}


//
// JVMTI ForceEarlyReturn support
//
address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {
  address entry = __ pc();

#ifdef AARCH64
  __ restore_sp_after_call(Rtemp); // restore SP to extended SP
#endif // AARCH64

  __ restore_bcp();
  __ restore_dispatch();
  __ restore_locals();

  __ empty_expression_stack();

  __ load_earlyret_value(state);

  // Clear the earlyret state
  __ ldr(Rtemp, Address(Rthread, JavaThread::jvmti_thread_state_offset()));

  assert(JvmtiThreadState::earlyret_inactive == 0, "adjust this code");
  __ str_32(__ zero_register(R2), Address(Rtemp, JvmtiThreadState::earlyret_state_offset()));

  __ remove_activation(state, LR,
                       false, /* throw_monitor_exception */
                       false, /* install_monitor_exception */
                       true); /* notify_jvmdi */

#ifndef AARCH64
  // According to interpreter calling conventions, the result is returned in R0/R1,
  // so ftos (S0) and dtos (D0) are moved to R0/R1.
  // This conversion should be done after remove_activation, as it uses
  // push(state) & pop(state) to preserve the return value.
  __ convert_tos_to_retval(state);
#endif // !AARCH64

  __ ret();

  return entry;
} // end of ForceEarlyReturn support


//------------------------------------------------------------------------------------------------------------------------
// Helper for vtos entry point generation

void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t, address& bep, address& cep, address& sep, address& aep, address& iep, address& lep, address& fep, address& dep, address& vep) {
  assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
  Label L;

#ifdef __SOFTFP__
  dep = __ pc();                // fall through
#else
  fep = __ pc(); __ push(ftos); __ b(L);
  dep = __ pc(); __ push(dtos); __ b(L);
#endif // __SOFTFP__

  lep = __ pc(); __ push(ltos); __ b(L);

  if (AARCH64_ONLY(true) NOT_AARCH64(VerifyOops)) {  // can't share atos entry with itos on AArch64 or if VerifyOops
    aep = __ pc(); __ push(atos); __ b(L);
  } else {
    aep = __ pc();              // fall through
  }

#ifdef __SOFTFP__
  fep = __ pc();                // fall through
#endif // __SOFTFP__

  bep = cep = sep =             // fall through
  iep = __ pc(); __ push(itos); // fall through
  vep = __ pc(); __ bind(L);    // fall through
  generate_and_dispatch(t);
}

//------------------------------------------------------------------------------------------------------------------------

// Non-product code
#ifndef PRODUCT
address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
  address entry = __ pc();

  // prepare expression stack
  __ push(state);       // save tosca

  // pass tosca registers as arguments
  __ mov(R2, R0_tos);
#ifdef AARCH64
  __ mov(R3, ZR);
#else
  __ mov(R3, R1_tos_hi);
#endif // AARCH64
  __ mov(R1, LR);       // save return address

  // call tracer
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::trace_bytecode), R1, R2, R3);

  __ mov(LR, R0);       // restore return address
  __ pop(state);        // restore tosca

  // return
  __ ret();

  return entry;
}


void TemplateInterpreterGenerator::count_bytecode() {
  __ inc_global_counter((address) &BytecodeCounter::_counter_value, 0, Rtemp, R2_tmp, true);
}


void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
  __ inc_global_counter((address)&BytecodeHistogram::_counters[0], sizeof(BytecodeHistogram::_counters[0]) * t->bytecode(), Rtemp, R2_tmp, true);
}
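
// Conceptually, the pair histogram below packs (previous, current) bytecodes
// into a single counter index (illustrative C, not generated code):
//
//   index = (_index >> log2_number_of_codes)            // previous bytecode
//         | (current << log2_number_of_codes);          // current in the high part
//   _index = index;
//   if (_counters[index] + 1 >= 0) _counters[index]++;  // saturate instead of overflowing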

void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
  const Register Rindex_addr = R2_tmp;
  Label Lcontinue;
  InlinedAddress Lcounters((address)BytecodePairHistogram::_counters);
  InlinedAddress Lindex((address)&BytecodePairHistogram::_index);
  const Register Rcounters_addr = R2_tmp;
  const Register Rindex = R4_tmp;

  // calculate new index for counter:
  // index = (_index >> log2_number_of_codes) | (bytecode << log2_number_of_codes),
  // where (_index >> log2_number_of_codes) is the previous bytecode

  __ ldr_literal(Rindex_addr, Lindex);
  __ ldr_s32(Rindex, Address(Rindex_addr));
  __ mov_slow(Rtemp, ((int)t->bytecode()) << BytecodePairHistogram::log2_number_of_codes);
  __ orr(Rindex, Rtemp, AsmOperand(Rindex, lsr, BytecodePairHistogram::log2_number_of_codes));
  __ str_32(Rindex, Address(Rindex_addr));

  // Rindex (R4) contains index of counter

  __ ldr_literal(Rcounters_addr, Lcounters);
  __ ldr_s32(Rtemp, Address::indexed_32(Rcounters_addr, Rindex));
  __ adds_32(Rtemp, Rtemp, 1);
  __ b(Lcontinue, mi);                           // avoid overflow
  __ str_32(Rtemp, Address::indexed_32(Rcounters_addr, Rindex));

  __ b(Lcontinue);

  __ bind_literal(Lindex);
  __ bind_literal(Lcounters);

  __ bind(Lcontinue);
}


void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
  // Call a little run-time stub to avoid blow-up for each bytecode.
  // The run-time stub saves the right registers, depending on
  // the tosca in-state for the given template.
  assert(Interpreter::trace_code(t->tos_in()) != NULL,
         "entry must have been generated");
  address trace_entry = Interpreter::trace_code(t->tos_in());
  __ call(trace_entry, relocInfo::none);
}


void TemplateInterpreterGenerator::stop_interpreter_at() {
  Label Lcontinue;
  const Register stop_at = R2_tmp;

  __ ldr_global_s32(Rtemp, (address) &BytecodeCounter::_counter_value);
  __ mov_slow(stop_at, StopInterpreterAt);

  // test bytecode counter
  __ cmp(Rtemp, stop_at);
  __ b(Lcontinue, ne);

  __ trace_state("stop_interpreter_at");
  __ breakpoint();

  __ bind(Lcontinue);
}
#endif // !PRODUCT
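
// Usage note (illustrative; assuming the usual develop-mode flags): the
// counters above back the -XX:+CountBytecodes / -XX:+PrintBytecodeHistogram /
// -XX:+PrintBytecodePairHistogram diagnostics, and stop_interpreter_at()
// fires a breakpoint once BytecodeCounter::_counter_value reaches the value
// given by -XX:StopInterpreterAt=<n>.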