// methodHandles_sparc.cpp revision 3602:da91efe96a93
/*
 * Copyright (c) 2008, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/allocation.inline.hpp"
#include "prims/methodHandles.hpp"

#define __ _masm->

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#define STOP(error) stop(error)
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#define STOP(error) block_comment(error); __ stop(error)
#endif

#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")

// Workaround for C++ overloading nastiness on '0' for RegisterOrConstant.
static RegisterOrConstant constant(int value) {
  return RegisterOrConstant(value);
}

void MethodHandles::load_klass_from_Class(MacroAssembler* _masm, Register klass_reg, Register temp_reg, Register temp2_reg) {
  if (VerifyMethodHandles)
    verify_klass(_masm, klass_reg, SystemDictionary::WK_KLASS_ENUM_NAME(java_lang_Class), temp_reg, temp2_reg,
                 "MH argument is a Class");
  __ ld_ptr(Address(klass_reg, java_lang_Class::klass_offset_in_bytes()), klass_reg);
}

#ifdef ASSERT
static int check_nonzero(const char* xname, int x) {
  assert(x != 0, err_msg("%s should be nonzero", xname));
  return x;
}
#define NONZERO(x) check_nonzero(#x, x)
#else //ASSERT
#define NONZERO(x) (x)
#endif //ASSERT

#ifdef ASSERT
void MethodHandles::verify_klass(MacroAssembler* _masm,
                                 Register obj_reg, SystemDictionary::WKID klass_id,
                                 Register temp_reg, Register temp2_reg,
                                 const char* error_message) {
  Klass** klass_addr = SystemDictionary::well_known_klass_addr(klass_id);
  KlassHandle klass = SystemDictionary::well_known_klass(klass_id);
  bool did_save = false;
  if (temp_reg == noreg || temp2_reg == noreg) {
    temp_reg  = L1;
    temp2_reg = L2;
    __ save_frame_and_mov(0, obj_reg, L0);
    obj_reg = L0;
    did_save = true;
  }
  Label L_ok, L_bad;
  BLOCK_COMMENT("verify_klass {");
  __ verify_oop(obj_reg);
  __ br_null_short(obj_reg, Assembler::pn, L_bad);
  __ load_klass(obj_reg, temp_reg);
  __ set(ExternalAddress((Metadata**)klass_addr), temp2_reg);
  __ ld_ptr(Address(temp2_reg, 0), temp2_reg);
  __ cmp_and_brx_short(temp_reg, temp2_reg, Assembler::equal, Assembler::pt, L_ok);
  intptr_t super_check_offset = klass->super_check_offset();
  __ ld_ptr(Address(temp_reg, super_check_offset), temp_reg);
  __ set(ExternalAddress((Metadata**)klass_addr), temp2_reg);
  __ ld_ptr(Address(temp2_reg, 0), temp2_reg);
  __ cmp_and_brx_short(temp_reg, temp2_reg, Assembler::equal, Assembler::pt, L_ok);
  __ BIND(L_bad);
  if (did_save)  __ restore();
  __ STOP(error_message);
  __ BIND(L_ok);
  if (did_save)  __ restore();
  BLOCK_COMMENT("} verify_klass");
}

void MethodHandles::verify_ref_kind(MacroAssembler* _masm, int ref_kind, Register member_reg, Register temp) {
  Label L;
  BLOCK_COMMENT("verify_ref_kind {");
  __ lduw(Address(member_reg, NONZERO(java_lang_invoke_MemberName::flags_offset_in_bytes())), temp);
  __ srl( temp, java_lang_invoke_MemberName::MN_REFERENCE_KIND_SHIFT, temp);
  __ and3(temp, java_lang_invoke_MemberName::MN_REFERENCE_KIND_MASK,  temp);
  __ cmp_and_br_short(temp, ref_kind, Assembler::equal, Assembler::pt, L);
  { char* buf = NEW_C_HEAP_ARRAY(char, 100, mtInternal);
    jio_snprintf(buf, 100, "verify_ref_kind expected %x", ref_kind);
    if (ref_kind == JVM_REF_invokeVirtual ||
        ref_kind == JVM_REF_invokeSpecial)
      // could do this for all ref_kinds, but would explode assembly code size
      trace_method_handle(_masm, buf);
    __ STOP(buf);
  }
  BLOCK_COMMENT("} verify_ref_kind");
  __ bind(L);
}

#endif // ASSERT

void MethodHandles::jump_from_method_handle(MacroAssembler* _masm, Register method, Register target, Register temp,
                                            bool for_compiler_entry) {
  assert(method == G5_method, "interpreter calling convention");

  if (!for_compiler_entry && JvmtiExport::can_post_interpreter_events()) {
    Label run_compiled_code;
    // JVMTI events, such as single-stepping, are implemented partly by avoiding running
    // compiled code in threads for which the event is enabled.  Check here for
    // interp_only_mode if these events CAN be enabled.
    __ verify_thread();
    const Address interp_only(G2_thread, JavaThread::interp_only_mode_offset());
    __ ld(interp_only, temp);
    __ cmp_and_br_short(temp, 0, Assembler::zero, Assembler::pt, run_compiled_code);
    __ ld_ptr(G5_method, in_bytes(Method::interpreter_entry_offset()), target);
    __ jmp(target, 0);
    __ delayed()->nop();
    __ BIND(run_compiled_code);
    // Note: we could fill some delay slots here, but
    // it doesn't matter, since this is interpreter code.
  }

  const ByteSize entry_offset = for_compiler_entry ? Method::from_compiled_offset() :
                                                     Method::from_interpreted_offset();
  __ ld_ptr(G5_method, in_bytes(entry_offset), target);
  __ jmp(target, 0);
  __ delayed()->nop();
}

void MethodHandles::jump_to_lambda_form(MacroAssembler* _masm,
                                        Register recv, Register method_temp,
                                        Register temp2, Register temp3,
                                        bool for_compiler_entry) {
  BLOCK_COMMENT("jump_to_lambda_form {");
  // This is the initial entry point of a lazy method handle.
  // After type checking, it picks up the invoker from the LambdaForm.
  assert_different_registers(recv, method_temp, temp2, temp3);
  assert(method_temp == G5_method, "required register for loading method");

  //NOT_PRODUCT({ FlagSetting fs(TraceMethodHandles, true); trace_method_handle(_masm, "LZMH"); });

  // Load the invoker, as MH -> MH.form -> LF.vmentry
  __ verify_oop(recv);
  __ load_heap_oop(Address(recv,        NONZERO(java_lang_invoke_MethodHandle::form_offset_in_bytes())),   method_temp);
  __ verify_oop(method_temp);
  __ load_heap_oop(Address(method_temp, NONZERO(java_lang_invoke_LambdaForm::vmentry_offset_in_bytes())),  method_temp);
  __ verify_oop(method_temp);
  // the following assumes that a Method* is normally compressed in the vmtarget field:
  __ ld_ptr(       Address(method_temp, NONZERO(java_lang_invoke_MemberName::vmtarget_offset_in_bytes())), method_temp);

  if (VerifyMethodHandles && !for_compiler_entry) {
    // make sure recv is already on stack
    __ load_sized_value(Address(method_temp, Method::size_of_parameters_offset()),
                        temp2,
                        sizeof(u2), /*is_signed*/ false);
    // assert(sizeof(u2) == sizeof(Method::_size_of_parameters), "");
    Label L;
    __ ld_ptr(__ argument_address(temp2, temp2, -1), temp2);
    __ cmp_and_br_short(temp2, recv, Assembler::equal, Assembler::pt, L);
    __ STOP("receiver not on stack");
    __ BIND(L);
  }

  jump_from_method_handle(_masm, method_temp, temp2, temp3, for_compiler_entry);
  BLOCK_COMMENT("} jump_to_lambda_form");
}


// Code generation
address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler* _masm,
                                                                vmIntrinsics::ID iid) {
  const bool not_for_compiler_entry = false;  // this is the interpreter entry
  assert(is_signature_polymorphic(iid), "expected invoke iid");
  if (iid == vmIntrinsics::_invokeGeneric ||
      iid == vmIntrinsics::_compiledLambdaForm) {
    // Perhaps surprisingly, the symbolic references visible to Java are not directly used.
    // They are linked to Java-generated adapters via MethodHandleNatives.linkMethod.
    // They all allow an appendix argument.
    __ should_not_reach_here();           // empty stubs make SG sick
    return NULL;
  }

  // I5_savedSP/O5_savedSP: sender SP (must preserve; see prepare_to_jump_from_interpreted)
  // G5_method:  Method*
  // G4 (Gargs): incoming argument list (must preserve)
  // O0: used as temp to hold mh or receiver
  // O1, O4: garbage temps, blown away
  Register O1_scratch    = O1;
  Register O4_param_size = O4;   // size of parameters

  address code_start = __ pc();

  // here's where control starts out:
  __ align(CodeEntryAlignment);
  address entry_point = __ pc();

  if (VerifyMethodHandles) {
    Label L;
    BLOCK_COMMENT("verify_intrinsic_id {");
    __ ldub(Address(G5_method, Method::intrinsic_id_offset_in_bytes()), O1_scratch);
    __ cmp_and_br_short(O1_scratch, (int) iid, Assembler::equal, Assembler::pt, L);
    if (iid == vmIntrinsics::_linkToVirtual ||
        iid == vmIntrinsics::_linkToSpecial) {
      // could do this for all kinds, but would explode assembly code size
      trace_method_handle(_masm, "bad Method*::intrinsic_id");
    }
    __ STOP("bad Method*::intrinsic_id");
    __ bind(L);
    BLOCK_COMMENT("} verify_intrinsic_id");
  }

  // First task:  Find out how big the argument list is.
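  // (The interpreter passes arguments through Gargs; argument slots are indexed
  //  from the trailing argument at slot 0, so the leading receiver/MethodHandle
  //  sits at slot size_of_parameters - 1, which argument_address(..., -1) yields.)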
  Address O4_first_arg_addr;
  int ref_kind = signature_polymorphic_intrinsic_ref_kind(iid);
  assert(ref_kind != 0 || iid == vmIntrinsics::_invokeBasic, "must be _invokeBasic or a linkTo intrinsic");
  if (ref_kind == 0 || MethodHandles::ref_kind_has_receiver(ref_kind)) {
    __ load_sized_value(Address(G5_method, Method::size_of_parameters_offset()),
                        O4_param_size,
                        sizeof(u2), /*is_signed*/ false);
    // assert(sizeof(u2) == sizeof(Method::_size_of_parameters), "");
    O4_first_arg_addr = __ argument_address(O4_param_size, O4_param_size, -1);
  } else {
    DEBUG_ONLY(O4_param_size = noreg);
  }

  Register O0_mh = noreg;
  if (!is_signature_polymorphic_static(iid)) {
    __ ld_ptr(O4_first_arg_addr, O0_mh = O0);
    DEBUG_ONLY(O4_param_size = noreg);
  }

  // O4_first_arg_addr is live!

  if (TraceMethodHandles) {
    const char* name = vmIntrinsics::name_at(iid);
    if (*name == '_')  name += 1;
    const size_t len = strlen(name) + 50;
    char* qname = NEW_C_HEAP_ARRAY(char, len, mtInternal);
    const char* suffix = "";
    if (vmIntrinsics::method_for(iid) == NULL ||
        !vmIntrinsics::method_for(iid)->access_flags().is_public()) {
      if (is_signature_polymorphic_static(iid))
        suffix = "/static";
      else
        suffix = "/private";
    }
    jio_snprintf(qname, len, "MethodHandle::interpreter_entry::%s%s", name, suffix);
    if (O0_mh != noreg)
      __ mov(O0_mh, G3_method_handle);  // make stub happy
    trace_method_handle(_masm, qname);
  }

  if (iid == vmIntrinsics::_invokeBasic) {
    generate_method_handle_dispatch(_masm, iid, O0_mh, noreg, not_for_compiler_entry);

  } else {
    // Adjust argument list by popping the trailing MemberName argument.
    Register O0_recv = noreg;
    if (MethodHandles::ref_kind_has_receiver(ref_kind)) {
      // Load the receiver (not the MH; the actual MemberName's receiver) up from the interpreter stack.
      __ ld_ptr(O4_first_arg_addr, O0_recv = O0);
      DEBUG_ONLY(O4_param_size = noreg);
    }
    Register G5_member = G5_method;  // MemberName ptr; incoming method ptr is dead now
    __ ld_ptr(__ argument_address(constant(0)), G5_member);
    __ add(Gargs, Interpreter::stackElementSize, Gargs);
    generate_method_handle_dispatch(_masm, iid, O0_recv, G5_member, not_for_compiler_entry);
  }

  if (PrintMethodHandleStubs) {
    address code_end = __ pc();
    tty->print_cr("--------");
    tty->print_cr("method handle interpreter entry for %s", vmIntrinsics::name_at(iid));
    Disassembler::decode(code_start, code_end);
    tty->cr();
  }

  return entry_point;
}

void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
                                                    vmIntrinsics::ID iid,
                                                    Register receiver_reg,
                                                    Register member_reg,
                                                    bool for_compiler_entry) {
  assert(is_signature_polymorphic(iid), "expected invoke iid");
  // temps used in this code are not used in *either* compiled or interpreted calling sequences
  Register temp1 = (for_compiler_entry ? G1_scratch : O1);
  Register temp2 = (for_compiler_entry ? G4_scratch : O4);
  Register temp3 = G3_scratch;
  Register temp4 = (for_compiler_entry ? noreg      : O2);
  if (for_compiler_entry) {
    assert(receiver_reg == (iid == vmIntrinsics::_linkToStatic ? noreg : O0), "only valid assignment");
    assert_different_registers(temp1, O0, O1, O2, O3, O4, O5);
    assert_different_registers(temp2, O0, O1, O2, O3, O4, O5);
    assert_different_registers(temp3, O0, O1, O2, O3, O4, O5);
    assert_different_registers(temp4, O0, O1, O2, O3, O4, O5);
  }
  if (receiver_reg != noreg)  assert_different_registers(temp1, temp2, temp3, temp4, receiver_reg);
  if (member_reg   != noreg)  assert_different_registers(temp1, temp2, temp3, temp4, member_reg);
  if (!for_compiler_entry)    assert_different_registers(temp1, temp2, temp3, temp4, O5_savedSP);  // don't trash lastSP

  if (iid == vmIntrinsics::_invokeBasic) {
    // indirect through MH.form.vmentry.vmtarget
    jump_to_lambda_form(_masm, receiver_reg, G5_method, temp2, temp3, for_compiler_entry);

  } else {
    // The method is a member invoker used by direct method handles.
    if (VerifyMethodHandles) {
      // make sure the trailing argument really is a MemberName (caller responsibility)
      verify_klass(_masm, member_reg, SystemDictionary::WK_KLASS_ENUM_NAME(MemberName_klass),
                   temp1, temp2,
                   "MemberName required for invokeVirtual etc.");
    }

    Address member_clazz(    member_reg, NONZERO(java_lang_invoke_MemberName::clazz_offset_in_bytes()));
    Address member_vmindex(  member_reg, NONZERO(java_lang_invoke_MemberName::vmindex_offset_in_bytes()));
    Address member_vmtarget( member_reg, NONZERO(java_lang_invoke_MemberName::vmtarget_offset_in_bytes()));

    Register temp1_recv_klass = temp1;
    if (iid != vmIntrinsics::_linkToStatic) {
      __ verify_oop(receiver_reg);
      if (iid == vmIntrinsics::_linkToSpecial) {
        // Don't actually load the klass; just null-check the receiver.
        __ null_check(receiver_reg);
      } else {
        // load receiver klass itself
        __ null_check(receiver_reg, oopDesc::klass_offset_in_bytes());
        __ load_klass(receiver_reg, temp1_recv_klass);
        __ verify_oop(temp1_recv_klass);
      }
      BLOCK_COMMENT("check_receiver {");
      // The receiver for the MemberName must be in receiver_reg.
      // Check the receiver against the MemberName.clazz
      if (VerifyMethodHandles && iid == vmIntrinsics::_linkToSpecial) {
        // Did not load it above...
        __ load_klass(receiver_reg, temp1_recv_klass);
        __ verify_oop(temp1_recv_klass);
      }
      if (VerifyMethodHandles && iid != vmIntrinsics::_linkToInterface) {
        Label L_ok;
        Register temp2_defc = temp2;
        __ load_heap_oop(member_clazz, temp2_defc);
        load_klass_from_Class(_masm, temp2_defc, temp3, temp4);
        __ verify_oop(temp2_defc);
        __ check_klass_subtype(temp1_recv_klass, temp2_defc, temp3, temp4, L_ok);
        // If we get here, the type check failed!
        __ STOP("receiver class disagrees with MemberName.clazz");
        __ bind(L_ok);
      }
      BLOCK_COMMENT("} check_receiver");
    }
    if (iid == vmIntrinsics::_linkToSpecial ||
        iid == vmIntrinsics::_linkToStatic) {
      DEBUG_ONLY(temp1_recv_klass = noreg);  // these guys didn't load the recv_klass
    }

    // Live registers at this point:
    //  member_reg - MemberName that was the trailing argument
    //  temp1_recv_klass - klass of stacked receiver, if needed
    //  O5_savedSP - interpreter linkage (if interpreted)
    //  O0..O7,G1,G4 - compiler arguments (if compiled)

    bool method_is_live = false;
    switch (iid) {
    case vmIntrinsics::_linkToSpecial:
      if (VerifyMethodHandles) {
        verify_ref_kind(_masm, JVM_REF_invokeSpecial, member_reg, temp3);
      }
      __ ld_ptr(member_vmtarget, G5_method);
      method_is_live = true;
      break;

    case vmIntrinsics::_linkToStatic:
      if (VerifyMethodHandles) {
        verify_ref_kind(_masm, JVM_REF_invokeStatic, member_reg, temp3);
      }
      __ ld_ptr(member_vmtarget, G5_method);
      method_is_live = true;
      break;

    case vmIntrinsics::_linkToVirtual:
    {
      // same as TemplateTable::invokevirtual,
      // minus the CP setup and profiling:

      if (VerifyMethodHandles) {
        verify_ref_kind(_masm, JVM_REF_invokeVirtual, member_reg, temp3);
      }

      // pick out the vtable index from the MemberName, and then we can discard it:
      Register temp2_index = temp2;
      __ ld_ptr(member_vmindex, temp2_index);

      if (VerifyMethodHandles) {
        Label L_index_ok;
        __ cmp_and_br_short(temp2_index, (int) 0, Assembler::greaterEqual, Assembler::pn, L_index_ok);
        __ STOP("no virtual index");
        __ BIND(L_index_ok);
      }

      // Note:  The verifier invariants allow us to ignore MemberName.clazz and vmtarget
      // at this point.  And VerifyMethodHandles has already checked clazz, if needed.
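      // (Dispatching through the receiver klass's vtable, rather than jumping to
      //  MemberName.vmtarget directly, picks up any override in the receiver's
      //  dynamic type.)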

      // get target Method* & entry point
      __ lookup_virtual_method(temp1_recv_klass, temp2_index, G5_method);
      method_is_live = true;
      break;
    }

    case vmIntrinsics::_linkToInterface:
    {
      // same as TemplateTable::invokeinterface
      // (minus the CP setup and profiling, with different argument motion)
      if (VerifyMethodHandles) {
        verify_ref_kind(_masm, JVM_REF_invokeInterface, member_reg, temp3);
      }

      Register temp3_intf = temp3;
      __ load_heap_oop(member_clazz, temp3_intf);
      load_klass_from_Class(_masm, temp3_intf, temp2, temp4);
      __ verify_oop(temp3_intf);

      Register G5_index = G5_method;
      __ ld_ptr(member_vmindex, G5_index);
      if (VerifyMethodHandles) {
        Label L;
        __ cmp_and_br_short(G5_index, 0, Assembler::greaterEqual, Assembler::pt, L);
        __ STOP("invalid vtable index for MH.invokeInterface");
        __ bind(L);
      }

      // given intf, index, and recv klass, dispatch to the implementation method
      Label L_no_such_interface;
      Register no_sethi_temp = noreg;
      __ lookup_interface_method(temp1_recv_klass, temp3_intf,
                                 // note: next two args must be the same:
                                 G5_index, G5_method,
                                 temp2, no_sethi_temp,
                                 L_no_such_interface);

      __ verify_oop(G5_method);
      jump_from_method_handle(_masm, G5_method, temp2, temp3, for_compiler_entry);

      __ bind(L_no_such_interface);
      AddressLiteral icce(StubRoutines::throw_IncompatibleClassChangeError_entry());
      __ jump_to(icce, temp3);
      __ delayed()->nop();
      break;
    }

    default:
      fatal(err_msg("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid)));
      break;
    }

    if (method_is_live) {
      // live at this point:  G5_method, O5_savedSP (if interpreted)

      // After figuring out which concrete method to call, jump into it.
      // Note that this works in the interpreter with no data motion.
      // But the compiled version will require that rcx_recv be shifted out.
      __ verify_oop(G5_method);
      jump_from_method_handle(_masm, G5_method, temp1, temp3, for_compiler_entry);
    }
  }
}

#ifndef PRODUCT
void trace_method_handle_stub(const char* adaptername,
                              oopDesc* mh,
                              intptr_t* saved_sp,
                              intptr_t* args,
                              intptr_t* tracing_fp) {
  bool has_mh = (strstr(adaptername, "/static") == NULL &&
                 strstr(adaptername, "linkTo") == NULL);  // static linkers don't have MH
  const char* mh_reg_name = has_mh ? "G3_mh" : "G3";
  tty->print_cr("MH %s %s="INTPTR_FORMAT " saved_sp=" INTPTR_FORMAT " args=" INTPTR_FORMAT,
                adaptername, mh_reg_name,
                (intptr_t) mh, saved_sp, args);

  if (Verbose) {
    // dumping last frame with frame::describe

    JavaThread* p = JavaThread::active();

    ResourceMark rm;
    PRESERVE_EXCEPTION_MARK; // may not be needed, but safer and inexpensive here
    FrameValues values;

    // Note: We want to allow trace_method_handle from any call site.
    // While trace_method_handle creates a frame, it may be entered
    // without a valid return PC in O7 (e.g. not just after a call).
    // Walking that frame could lead to failures due to that invalid PC.
    // => carefully detect that frame when doing the stack walking

    // walk up to the right frame using the "tracing_fp" argument
    intptr_t* cur_sp = StubRoutines::Sparc::flush_callers_register_windows_func()();
    frame cur_frame(cur_sp, frame::unpatchable, NULL);

    while (cur_frame.fp() != (intptr_t *)(STACK_BIAS+(uintptr_t)tracing_fp)) {
      cur_frame = os::get_sender_for_C_frame(&cur_frame);
    }

    // safely create a frame and call frame::describe
    intptr_t *dump_sp = cur_frame.sender_sp();
    intptr_t *dump_fp = cur_frame.link();

    bool walkable = has_mh; // whether the traced frame should be walkable

    // the sender for cur_frame is the caller of trace_method_handle
    if (walkable) {
      // The previous definition of walkable may have to be refined
      // if new call sites cause the next frame constructor to start
      // failing. Alternatively, frame constructors could be
      // modified to support the current or future non walkable
      // frames (but this is more intrusive and is not considered as
      // part of this RFE, which will instead use a simpler output).
      frame dump_frame = frame(dump_sp,
                               cur_frame.sp(), // younger_sp
                               false); // no adaptation
      dump_frame.describe(values, 1);
    } else {
      // Robust dump for frames which cannot be constructed from sp/younger_sp
      // Add descriptions without building a Java frame to avoid issues
      values.describe(-1, dump_fp, "fp for #1 <not parsed, cannot trust pc>");
      values.describe(-1, dump_sp, "sp");
    }

    bool has_args = has_mh; // whether Gargs is meaningful

    // mark args, if seems valid (may not be valid for some adapters)
    if (has_args) {
      if ((args >= dump_sp) && (args < dump_fp)) {
        values.describe(-1, args, "*G4_args");
      }
    }

    // mark saved_sp, if seems valid (may not be valid for some adapters)
    intptr_t *unbiased_sp = (intptr_t *)(STACK_BIAS+(uintptr_t)saved_sp);
    const int ARG_LIMIT = 255, SLOP = 45, UNREASONABLE_STACK_MOVE = (ARG_LIMIT + SLOP);
    if ((unbiased_sp >= dump_sp - UNREASONABLE_STACK_MOVE) && (unbiased_sp < dump_fp)) {
      values.describe(-1, unbiased_sp, "*saved_sp+STACK_BIAS");
    }

    // Note: the unextended_sp may not be correct
    tty->print_cr("  stack layout:");
    values.print(p);
    if (has_mh && mh->is_oop()) {
      mh->print();
      if (java_lang_invoke_MethodHandle::is_instance(mh)) {
        if (java_lang_invoke_MethodHandle::form_offset_in_bytes() != 0)
          java_lang_invoke_MethodHandle::form(mh)->print();
      }
    }
  }
}

void MethodHandles::trace_method_handle(MacroAssembler* _masm, const char* adaptername) {
  if (!TraceMethodHandles)  return;
  BLOCK_COMMENT("trace_method_handle {");
  // save: Gargs, O5_savedSP
  __ save_frame(16); // need space for saving required FPU state

  __ set((intptr_t) adaptername, O0);
  __ mov(G3_method_handle, O1);
  __ mov(I5_savedSP, O2);
  __ mov(Gargs, O3);
  __ mov(I6, O4); // frame identifier for safe stack walking

  // Save scratched registers that might be needed. Robustness is more
  // important than optimizing the saves for this debug only code.

  // save FP result, valid at some call sites (adapter_opt_return_float, ...)
  Address d_save(FP, -sizeof(jdouble) + STACK_BIAS);
  __ stf(FloatRegisterImpl::D, Ftos_d, d_save);
  // Safely save all globals but G2 (handled by call_VM_leaf) and G7
  // (OS reserved).
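  // Park them in locals of the register window created by save_frame above;
  // they are restored right after the call returns.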
  __ mov(G3_method_handle, L3);
  __ mov(Gargs, L4);
  __ mov(G5_method_type, L5);
  __ mov(G6, L6);
  __ mov(G1, L1);

  __ call_VM_leaf(L2 /* for G2 */, CAST_FROM_FN_PTR(address, trace_method_handle_stub));

  __ mov(L3, G3_method_handle);
  __ mov(L4, Gargs);
  __ mov(L5, G5_method_type);
  __ mov(L6, G6);
  __ mov(L1, G1);
  __ ldf(FloatRegisterImpl::D, d_save, Ftos_d);

  __ restore();
  BLOCK_COMMENT("} trace_method_handle");
}
#endif // PRODUCT