/*
 * Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interp_masm.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "prims/methodHandles.hpp"

#define __ _masm->

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#define STOP(error) stop(error)
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#define STOP(error) block_comment(error); __ stop(error)
#endif

#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")

// Workaround for C++ overloading nastiness on '0' for RegisterOrConstant.
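// (A literal 0 is ambiguous here: Register is a pointer typedef, so 0
// converts to both Register and an integral constant. Wrapping it as
// constant(0) forces the integral RegisterOrConstant overload, e.g. in
// the call site __ ld_ptr(__ argument_address(constant(0)), G5_member)
// further down.)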
static RegisterOrConstant constant(int value) {
  return RegisterOrConstant(value);
}

void MethodHandles::load_klass_from_Class(MacroAssembler* _masm, Register klass_reg, Register temp_reg, Register temp2_reg) {
  if (VerifyMethodHandles)
    verify_klass(_masm, klass_reg, SystemDictionary::WK_KLASS_ENUM_NAME(java_lang_Class), temp_reg, temp2_reg,
                 "MH argument is a Class");
  __ ld_ptr(Address(klass_reg, java_lang_Class::klass_offset_in_bytes()), klass_reg);
}

#ifdef ASSERT
static int check_nonzero(const char* xname, int x) {
  assert(x != 0, "%s should be nonzero", xname);
  return x;
}
#define NONZERO(x) check_nonzero(#x, x)
#else //ASSERT
#define NONZERO(x) (x)
#endif //ASSERT

#ifdef ASSERT
void MethodHandles::verify_klass(MacroAssembler* _masm,
                                 Register obj_reg, SystemDictionary::WKID klass_id,
                                 Register temp_reg, Register temp2_reg,
                                 const char* error_message) {
  InstanceKlass** klass_addr = SystemDictionary::well_known_klass_addr(klass_id);
  KlassHandle klass = SystemDictionary::well_known_klass(klass_id);
  bool did_save = false;
  if (temp_reg == noreg || temp2_reg == noreg) {
    temp_reg = L1;
    temp2_reg = L2;
    __ save_frame_and_mov(0, obj_reg, L0);
    obj_reg = L0;
    did_save = true;
  }
  Label L_ok, L_bad;
  BLOCK_COMMENT("verify_klass {");
  __ verify_oop(obj_reg);
  __ br_null_short(obj_reg, Assembler::pn, L_bad);
  __ load_klass(obj_reg, temp_reg);
  __ set(ExternalAddress((Metadata**)klass_addr), temp2_reg);
  __ ld_ptr(Address(temp2_reg, 0), temp2_reg);
  __ cmp_and_brx_short(temp_reg, temp2_reg, Assembler::equal, Assembler::pt, L_ok);
  intptr_t super_check_offset = klass->super_check_offset();
  __ ld_ptr(Address(temp_reg, super_check_offset), temp_reg);
  __ set(ExternalAddress((Metadata**)klass_addr), temp2_reg);
  __ ld_ptr(Address(temp2_reg, 0), temp2_reg);
  __ cmp_and_brx_short(temp_reg, temp2_reg, Assembler::equal, Assembler::pt, L_ok);
  __ BIND(L_bad);
  if (did_save)  __ restore();
  __ STOP(error_message);
  __ BIND(L_ok);
  if (did_save)  __ restore();
  BLOCK_COMMENT("} verify_klass");
}

void MethodHandles::verify_ref_kind(MacroAssembler* _masm, int ref_kind, Register member_reg, Register temp) {
  Label L;
  BLOCK_COMMENT("verify_ref_kind {");
  __ lduw(Address(member_reg, NONZERO(java_lang_invoke_MemberName::flags_offset_in_bytes())), temp);
  __ srl( temp, java_lang_invoke_MemberName::MN_REFERENCE_KIND_SHIFT, temp);
  __ and3(temp, java_lang_invoke_MemberName::MN_REFERENCE_KIND_MASK,  temp);
  __ cmp_and_br_short(temp, ref_kind, Assembler::equal, Assembler::pt, L);
  { char* buf = NEW_C_HEAP_ARRAY(char, 100, mtInternal);
    jio_snprintf(buf, 100, "verify_ref_kind expected %x", ref_kind);
    if (ref_kind == JVM_REF_invokeVirtual ||
        ref_kind == JVM_REF_invokeSpecial)
      // could do this for all ref_kinds, but would explode assembly code size
      trace_method_handle(_masm, buf);
    __ STOP(buf);
  }
  BLOCK_COMMENT("} verify_ref_kind");
  __ bind(L);
}

#endif // ASSERT

void MethodHandles::jump_from_method_handle(MacroAssembler* _masm, Register method, Register target, Register temp,
                                            bool for_compiler_entry) {
  Label L_no_such_method;
  assert(method == G5_method, "interpreter calling convention");
  assert_different_registers(method, target, temp);

  if (!for_compiler_entry && JvmtiExport::can_post_interpreter_events()) {
    Label run_compiled_code;
    // JVMTI events, such as single-stepping, are implemented partly by avoiding running
    // compiled code in threads for which the event is enabled. Check here for
    // interp_only_mode if these events CAN be enabled.
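    // (interp_only_mode is a per-thread counter; it is nonzero whenever any
    // such JVMTI event is enabled for this thread, which forces dispatch
    // through the interpreter entry so the events can actually be posted.)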
    __ verify_thread();
    const Address interp_only(G2_thread, JavaThread::interp_only_mode_offset());
    __ ld(interp_only, temp);
    __ cmp_and_br_short(temp, 0, Assembler::zero, Assembler::pt, run_compiled_code);
    // The null method test is replicated below in the compiled case;
    // it might be hoisted across the verify_thread().
    __ br_null_short(G5_method, Assembler::pn, L_no_such_method);
    __ ld_ptr(G5_method, in_bytes(Method::interpreter_entry_offset()), target);
    __ jmp(target, 0);
    __ delayed()->nop();
    __ BIND(run_compiled_code);
    // Note: we could fill some delay slots here, but
    // it doesn't matter, since this is interpreter code.
  }

  // Compiled case, either static or fall-through from runtime conditional
  __ br_null_short(G5_method, Assembler::pn, L_no_such_method);

  const ByteSize entry_offset = for_compiler_entry ? Method::from_compiled_offset() :
                                                     Method::from_interpreted_offset();
  __ ld_ptr(G5_method, in_bytes(entry_offset), target);
  __ jmp(target, 0);
  __ delayed()->nop();

  __ bind(L_no_such_method);
  AddressLiteral ame(StubRoutines::throw_AbstractMethodError_entry());
  __ jump_to(ame, temp);
  __ delayed()->nop();
}

void MethodHandles::jump_to_lambda_form(MacroAssembler* _masm,
                                        Register recv, Register method_temp,
                                        Register temp2, Register temp3,
                                        bool for_compiler_entry) {
  BLOCK_COMMENT("jump_to_lambda_form {");
  // This is the initial entry point of a lazy method handle.
  // After type checking, it picks up the invoker from the LambdaForm.
  assert_different_registers(recv, method_temp, temp2);  // temp3 is only passed on
  assert(method_temp == G5_method, "required register for loading method");

  //NOT_PRODUCT({ FlagSetting fs(TraceMethodHandles, true); trace_method_handle(_masm, "LZMH"); });

  // Load the invoker, as MH -> MH.form -> LF.vmentry
  __ verify_oop(recv);
  __ load_heap_oop(Address(recv, NONZERO(java_lang_invoke_MethodHandle::form_offset_in_bytes())), method_temp);
  __ verify_oop(method_temp);
  __ load_heap_oop(Address(method_temp, NONZERO(java_lang_invoke_LambdaForm::vmentry_offset_in_bytes())), method_temp);
  __ verify_oop(method_temp);
  // the following assumes that a Method* is normally compressed in the vmtarget field:
  __ ld_ptr(Address(method_temp, NONZERO(java_lang_invoke_MemberName::vmtarget_offset_in_bytes())), method_temp);

  if (VerifyMethodHandles && !for_compiler_entry) {
    // make sure recv is already on stack
    __ ld_ptr(method_temp, in_bytes(Method::const_offset()), temp2);
    __ load_sized_value(Address(temp2, ConstMethod::size_of_parameters_offset()),
                        temp2,
                        sizeof(u2), /*is_signed*/ false);
    // assert(sizeof(u2) == sizeof(Method::_size_of_parameters), "");
    Label L;
    __ ld_ptr(__ argument_address(temp2, temp2, -1), temp2);
    __ cmp_and_br_short(temp2, recv, Assembler::equal, Assembler::pt, L);
    __ STOP("receiver not on stack");
    __ BIND(L);
  }

  jump_from_method_handle(_masm, method_temp, temp2, temp3, for_compiler_entry);
  BLOCK_COMMENT("} jump_to_lambda_form");
}


// Code generation
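// Each signature-polymorphic intrinsic gets exactly one interpreter entry
// stub, generated below. For the linkTo* intrinsics the up-call has already
// been rewritten by MethodHandleNatives to carry a trailing MemberName, so
// (illustratively, not generated code) an interpreted call looks like
//   MethodHandle.linkToVirtual(recv, arg0, ..., argN, memberName)
// and the stub's job is to pop that MemberName and jump to the method it
// names.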
address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler* _masm,
                                                                vmIntrinsics::ID iid) {
  const bool not_for_compiler_entry = false;  // this is the interpreter entry
  assert(is_signature_polymorphic(iid), "expected invoke iid");
  if (iid == vmIntrinsics::_invokeGeneric ||
      iid == vmIntrinsics::_compiledLambdaForm) {
    // Perhaps surprisingly, the symbolic references visible to Java are not directly used.
    // They are linked to Java-generated adapters via MethodHandleNatives.linkMethod.
    // They all allow an appendix argument.
    __ should_not_reach_here();           // empty stubs make SG (the stub generator) sick
    return NULL;
  }

  // I5_savedSP/O5_savedSP: sender SP (must preserve; see prepare_to_jump_from_interpreted)
  // G5_method:  Method*
  // G4 (Gargs): incoming argument list (must preserve)
  // O0: used as temp to hold mh or receiver
  // O1, O4: garbage temps, blown away
  Register O1_scratch    = O1;
  Register O4_param_size = O4;   // size of parameters

  // here's where control starts out:
  __ align(CodeEntryAlignment);
  address entry_point = __ pc();

  if (VerifyMethodHandles) {
    assert(Method::intrinsic_id_size_in_bytes() == 2, "assuming Method::_intrinsic_id is u2");

    Label L;
    BLOCK_COMMENT("verify_intrinsic_id {");
    __ lduh(Address(G5_method, Method::intrinsic_id_offset_in_bytes()), O1_scratch);
    __ cmp_and_br_short(O1_scratch, (int) iid, Assembler::equal, Assembler::pt, L);
    if (iid == vmIntrinsics::_linkToVirtual ||
        iid == vmIntrinsics::_linkToSpecial) {
      // could do this for all kinds, but would explode assembly code size
      trace_method_handle(_masm, "bad Method*::intrinsic_id");
    }
    __ STOP("bad Method*::intrinsic_id");
    __ bind(L);
    BLOCK_COMMENT("} verify_intrinsic_id");
  }

  // First task: Find out how big the argument list is.
  Address O4_first_arg_addr;
  int ref_kind = signature_polymorphic_intrinsic_ref_kind(iid);
  assert(ref_kind != 0 || iid == vmIntrinsics::_invokeBasic, "must be _invokeBasic or a linkTo intrinsic");
  if (ref_kind == 0 || MethodHandles::ref_kind_has_receiver(ref_kind)) {
    __ ld_ptr(G5_method, in_bytes(Method::const_offset()), O4_param_size);
    __ load_sized_value(Address(O4_param_size, ConstMethod::size_of_parameters_offset()),
                        O4_param_size,
                        sizeof(u2), /*is_signed*/ false);
    // assert(sizeof(u2) == sizeof(Method::_size_of_parameters), "");
    O4_first_arg_addr = __ argument_address(O4_param_size, O4_param_size, -1);
  } else {
    DEBUG_ONLY(O4_param_size = noreg);
  }

  Register O0_mh = noreg;
  if (!is_signature_polymorphic_static(iid)) {
    __ ld_ptr(O4_first_arg_addr, O0_mh = O0);
    DEBUG_ONLY(O4_param_size = noreg);
  }

  // O4_first_arg_addr is live!

  if (TraceMethodHandles) {
    if (O0_mh != noreg)
      __ mov(O0_mh, G3_method_handle);  // make stub happy
    trace_method_handle_interpreter_entry(_masm, iid);
  }
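  // Two shapes of entry follow (illustrative summary, not generated code):
  //  - _invokeBasic: the MethodHandle itself is the leading argument;
  //    dispatch goes through MH.form.vmentry with the argument list left
  //    untouched.
  //  - linkTo{Virtual,Special,Static,Interface}: the JVM appended a trailing
  //    MemberName argument; it is popped off Gargs and dispatch goes through
  //    the member it names.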
  if (iid == vmIntrinsics::_invokeBasic) {
    generate_method_handle_dispatch(_masm, iid, O0_mh, noreg, not_for_compiler_entry);

  } else {
    // Adjust argument list by popping the trailing MemberName argument.
    Register O0_recv = noreg;
    if (MethodHandles::ref_kind_has_receiver(ref_kind)) {
      // Load the receiver (not the MH; the actual MemberName's receiver) up from the interpreter stack.
      __ ld_ptr(O4_first_arg_addr, O0_recv = O0);
      DEBUG_ONLY(O4_param_size = noreg);
    }
    Register G5_member = G5_method;  // MemberName ptr; incoming method ptr is dead now
    __ ld_ptr(__ argument_address(constant(0)), G5_member);
    __ add(Gargs, Interpreter::stackElementSize, Gargs);
    generate_method_handle_dispatch(_masm, iid, O0_recv, G5_member, not_for_compiler_entry);
  }

  return entry_point;
}

void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
                                                    vmIntrinsics::ID iid,
                                                    Register receiver_reg,
                                                    Register member_reg,
                                                    bool for_compiler_entry) {
  assert(is_signature_polymorphic(iid), "expected invoke iid");
  Register temp1 = (for_compiler_entry ? G1_scratch : O1);
  Register temp2 = (for_compiler_entry ? G3_scratch : O2);
  Register temp3 = (for_compiler_entry ? G4_scratch : O3);
  Register temp4 = (for_compiler_entry ? noreg      : O4);
  if (for_compiler_entry) {
    assert(receiver_reg == (iid == vmIntrinsics::_linkToStatic ? noreg : O0), "only valid assignment");
    assert_different_registers(temp1, O0, O1, O2, O3, O4, O5);
    assert_different_registers(temp2, O0, O1, O2, O3, O4, O5);
    assert_different_registers(temp3, O0, O1, O2, O3, O4, O5);
    assert_different_registers(temp4, O0, O1, O2, O3, O4, O5);
  } else {
    assert_different_registers(temp1, temp2, temp3, temp4, O5_savedSP);  // don't trash lastSP
  }
  if (receiver_reg != noreg)  assert_different_registers(temp1, temp2, temp3, temp4, receiver_reg);
  if (member_reg   != noreg)  assert_different_registers(temp1, temp2, temp3, temp4, member_reg);

  if (iid == vmIntrinsics::_invokeBasic) {
    // indirect through MH.form.vmentry.vmtarget
    jump_to_lambda_form(_masm, receiver_reg, G5_method, temp1, temp2, for_compiler_entry);

  } else {
    // The method is a member invoker used by direct method handles.
    if (VerifyMethodHandles) {
      // make sure the trailing argument really is a MemberName (caller responsibility)
      verify_klass(_masm, member_reg, SystemDictionary::WK_KLASS_ENUM_NAME(MemberName_klass),
                   temp1, temp2,
                   "MemberName required for invokeVirtual etc.");
    }

    Address member_clazz(    member_reg, NONZERO(java_lang_invoke_MemberName::clazz_offset_in_bytes()));
    Address member_vmindex(  member_reg, NONZERO(java_lang_invoke_MemberName::vmindex_offset_in_bytes()));
    Address member_vmtarget( member_reg, NONZERO(java_lang_invoke_MemberName::vmtarget_offset_in_bytes()));

    Register temp1_recv_klass = temp1;
    if (iid != vmIntrinsics::_linkToStatic) {
      __ verify_oop(receiver_reg);
      if (iid == vmIntrinsics::_linkToSpecial) {
        // Don't actually load the klass; just null-check the receiver.
        __ null_check(receiver_reg);
      } else {
        // load receiver klass itself
        __ null_check(receiver_reg, oopDesc::klass_offset_in_bytes());
        __ load_klass(receiver_reg, temp1_recv_klass);
        __ verify_klass_ptr(temp1_recv_klass);
      }
      BLOCK_COMMENT("check_receiver {");
      // The receiver for the MemberName must be in receiver_reg.
      // Check the receiver against the MemberName.clazz
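      // (Sketch of the check: MemberName.clazz holds the defining class as a
      //  java.lang.Class oop; load_klass_from_Class unwraps it to a Klass*,
      //  and check_klass_subtype then requires recv_klass <: defc, i.e. the
      //  stacked receiver is an instance of the resolved member's class.)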
      if (VerifyMethodHandles && iid == vmIntrinsics::_linkToSpecial) {
        // Did not load it above...
        __ load_klass(receiver_reg, temp1_recv_klass);
        __ verify_klass_ptr(temp1_recv_klass);
      }
      if (VerifyMethodHandles && iid != vmIntrinsics::_linkToInterface) {
        Label L_ok;
        Register temp2_defc = temp2;
        __ load_heap_oop(member_clazz, temp2_defc);
        load_klass_from_Class(_masm, temp2_defc, temp3, temp4);
        __ verify_klass_ptr(temp2_defc);
        __ check_klass_subtype(temp1_recv_klass, temp2_defc, temp3, temp4, L_ok);
        // If we get here, the type check failed!
        __ STOP("receiver class disagrees with MemberName.clazz");
        __ bind(L_ok);
      }
      BLOCK_COMMENT("} check_receiver");
    }
    if (iid == vmIntrinsics::_linkToSpecial ||
        iid == vmIntrinsics::_linkToStatic) {
      DEBUG_ONLY(temp1_recv_klass = noreg);  // these guys didn't load the recv_klass
    }

    // Live registers at this point:
    //  member_reg - MemberName that was the trailing argument
    //  temp1_recv_klass - klass of stacked receiver, if needed
    //  O5_savedSP - interpreter linkage (if interpreted)
    //  O0..O5 - compiler arguments (if compiled)

    Label L_incompatible_class_change_error;
    switch (iid) {
    case vmIntrinsics::_linkToSpecial:
      if (VerifyMethodHandles) {
        verify_ref_kind(_masm, JVM_REF_invokeSpecial, member_reg, temp2);
      }
      __ ld_ptr(member_vmtarget, G5_method);
      break;

    case vmIntrinsics::_linkToStatic:
      if (VerifyMethodHandles) {
        verify_ref_kind(_masm, JVM_REF_invokeStatic, member_reg, temp2);
      }
      __ ld_ptr(member_vmtarget, G5_method);
      break;

    case vmIntrinsics::_linkToVirtual:
    {
      // same as TemplateTable::invokevirtual,
      // minus the CP setup and profiling:

      if (VerifyMethodHandles) {
        verify_ref_kind(_masm, JVM_REF_invokeVirtual, member_reg, temp2);
      }

      // pick out the vtable index from the MemberName, and then we can discard it:
      Register temp2_index = temp2;
      __ ld_ptr(member_vmindex, temp2_index);

      if (VerifyMethodHandles) {
        Label L_index_ok;
        __ cmp_and_br_short(temp2_index, (int) 0, Assembler::greaterEqual, Assembler::pn, L_index_ok);
        __ STOP("no virtual index");
        __ BIND(L_index_ok);
      }

      // Note: The verifier invariants allow us to ignore MemberName.clazz and vmtarget
      // at this point. And VerifyMethodHandles has already checked clazz, if needed.
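      // (Illustrative expansion of the lookup below, assuming the usual
      //  HotSpot vtable layout: the target Method* sits in the receiver's
      //  klass at
      //    recv_klass + vtable_start_offset + index * vtableEntry_size
      //               + vtableEntry::method_offset
      //  so a single scaled load materializes G5_method.)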

      // get target Method* & entry point
      __ lookup_virtual_method(temp1_recv_klass, temp2_index, G5_method);
      break;
    }

    case vmIntrinsics::_linkToInterface:
    {
      // same as TemplateTable::invokeinterface
      // (minus the CP setup and profiling, with different argument motion)
      if (VerifyMethodHandles) {
        verify_ref_kind(_masm, JVM_REF_invokeInterface, member_reg, temp2);
      }

      Register temp2_intf = temp2;
      __ load_heap_oop(member_clazz, temp2_intf);
      load_klass_from_Class(_masm, temp2_intf, temp3, temp4);
      __ verify_klass_ptr(temp2_intf);

      Register G5_index = G5_method;
      __ ld_ptr(member_vmindex, G5_index);
      if (VerifyMethodHandles) {
        Label L;
        __ cmp_and_br_short(G5_index, 0, Assembler::greaterEqual, Assembler::pt, L);
        __ STOP("invalid vtable index for MH.invokeInterface");
        __ bind(L);
      }

      // given intf, index, and recv klass, dispatch to the implementation method
      __ lookup_interface_method(temp1_recv_klass, temp2_intf,
                                 // note: next two args must be the same:
                                 G5_index, G5_method,
                                 temp3, temp4,
                                 L_incompatible_class_change_error);
      break;
    }

    default:
      fatal("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid));
      break;
    }

    // Live at this point:
    //   G5_method
    //   O5_savedSP (if interpreted)

    // After figuring out which concrete method to call, jump into it.
    // Note that this works in the interpreter with no data motion.
    // But the compiled version will require that the receiver register be shifted out.
    __ verify_method_ptr(G5_method);
    jump_from_method_handle(_masm, G5_method, temp1, temp2, for_compiler_entry);

    if (iid == vmIntrinsics::_linkToInterface) {
      __ BIND(L_incompatible_class_change_error);
      AddressLiteral icce(StubRoutines::throw_IncompatibleClassChangeError_entry());
      __ jump_to(icce, temp1);
      __ delayed()->nop();
    }
  }
}

#ifndef PRODUCT
void trace_method_handle_stub(const char* adaptername,
                              oopDesc* mh,
                              intptr_t* saved_sp,
                              intptr_t* args,
                              intptr_t* tracing_fp) {
  bool has_mh = (strstr(adaptername, "/static") == NULL &&
                 strstr(adaptername, "linkTo") == NULL);  // static linkers don't have MH
  const char* mh_reg_name = has_mh ? "G3_mh" : "G3";
  tty->print_cr("MH %s %s=" INTPTR_FORMAT " saved_sp=" INTPTR_FORMAT " args=" INTPTR_FORMAT,
                adaptername, mh_reg_name,
                p2i(mh), p2i(saved_sp), p2i(args));

  if (Verbose) {
    // dumping last frame with frame::describe

    JavaThread* p = JavaThread::active();

    ResourceMark rm;
    PRESERVE_EXCEPTION_MARK;  // may not be needed, but safer and inexpensive here
    FrameValues values;

    // Note: We want to allow trace_method_handle from any call site.
    // While trace_method_handle creates a frame, it may be entered
    // without a valid return PC in O7 (e.g. not just after a call).
    // Walking that frame could lead to failures due to that invalid PC.
    // => carefully detect that frame when doing the stack walking
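    // (SPARC-specific: the caller's register windows must be flushed to the
    //  stack before a C-level walk, otherwise frames above us still live in
    //  the CPU's windows and cannot be parsed; the call through
    //  flush_callers_register_windows_func() below does exactly that.)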

    // walk up to the right frame using the "tracing_fp" argument
    intptr_t* cur_sp = StubRoutines::Sparc::flush_callers_register_windows_func()();
    frame cur_frame(cur_sp, frame::unpatchable, NULL);

    while (cur_frame.fp() != (intptr_t *)(STACK_BIAS+(uintptr_t)tracing_fp)) {
      cur_frame = os::get_sender_for_C_frame(&cur_frame);
    }

    // safely create a frame and call frame::describe
    intptr_t *dump_sp = cur_frame.sender_sp();
    intptr_t *dump_fp = cur_frame.link();

    bool walkable = has_mh;  // whether the traced frame should be walkable

    // the sender for cur_frame is the caller of trace_method_handle
    if (walkable) {
      // The previous definition of walkable may have to be refined
      // if new call sites cause the next frame constructor to start
      // failing. Alternatively, frame constructors could be
      // modified to support the current or future non-walkable
      // frames (but this is more intrusive and is not considered as
      // part of this RFE, which will instead use a simpler output).
      frame dump_frame = frame(dump_sp,
                               cur_frame.sp(),  // younger_sp
                               false);          // no adaptation
      dump_frame.describe(values, 1);
    } else {
      // Robust dump for frames which cannot be constructed from sp/younger_sp
      // Add descriptions without building a Java frame to avoid issues
      values.describe(-1, dump_fp, "fp for #1 <not parsed, cannot trust pc>");
      values.describe(-1, dump_sp, "sp");
    }

    bool has_args = has_mh;  // whether Gargs is meaningful

    // mark args, if they seem valid (may not be valid for some adapters)
    if (has_args) {
      if ((args >= dump_sp) && (args < dump_fp)) {
        values.describe(-1, args, "*G4_args");
      }
    }

    // mark saved_sp, if it seems valid (may not be valid for some adapters)
    intptr_t *unbiased_sp = (intptr_t *)(STACK_BIAS+(uintptr_t)saved_sp);
    const int ARG_LIMIT = 255, SLOP = 45, UNREASONABLE_STACK_MOVE = (ARG_LIMIT + SLOP);
    if ((unbiased_sp >= dump_sp - UNREASONABLE_STACK_MOVE) && (unbiased_sp < dump_fp)) {
      values.describe(-1, unbiased_sp, "*saved_sp+STACK_BIAS");
    }

    // Note: the unextended_sp may not be correct
    tty->print_cr("  stack layout:");
    values.print(p);
    if (has_mh && mh->is_oop()) {
      mh->print();
      if (java_lang_invoke_MethodHandle::is_instance(mh)) {
        if (java_lang_invoke_MethodHandle::form_offset_in_bytes() != 0)
          java_lang_invoke_MethodHandle::form(mh)->print();
      }
    }
  }
}

void MethodHandles::trace_method_handle(MacroAssembler* _masm, const char* adaptername) {
  if (!TraceMethodHandles)  return;
  BLOCK_COMMENT("trace_method_handle {");
  // save: Gargs, O5_savedSP
  __ save_frame(16);  // need space for saving required FPU state

  __ set((intptr_t) adaptername, O0);
  __ mov(G3_method_handle, O1);
  __ mov(I5_savedSP, O2);
  __ mov(Gargs, O3);
  __ mov(I6, O4);  // frame identifier for safe stack walking

  // Save scratched registers that might be needed. Robustness is more
  // important than optimizing the saves for this debug only code.

  // save FP result, valid at some call sites (adapter_opt_return_float, ...)
  Address d_save(FP, -sizeof(jdouble) + STACK_BIAS);
  __ stf(FloatRegisterImpl::D, Ftos_d, d_save);
  // Safely save all globals but G2 (handled by call_VM_leaf) and G7
  // (OS reserved).
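  // (The saves below go into the locals L1..L6 of the window established by
  //  save_frame(16) above, so they cannot collide with the outgoing O
  //  registers already loaded with the arguments for the stub call.)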
  __ mov(G3_method_handle, L3);
  __ mov(Gargs, L4);
  __ mov(G5_method_type, L5);
  __ mov(G6, L6);
  __ mov(G1, L1);

  __ call_VM_leaf(L2 /* for G2 */, CAST_FROM_FN_PTR(address, trace_method_handle_stub));

  __ mov(L3, G3_method_handle);
  __ mov(L4, Gargs);
  __ mov(L5, G5_method_type);
  __ mov(L6, G6);
  __ mov(L1, G1);
  __ ldf(FloatRegisterImpl::D, d_save, Ftos_d);

  __ restore();
  BLOCK_COMMENT("} trace_method_handle");
}
#endif // PRODUCT