methodHandles_sparc.cpp revision 1484:61b2245abf36
/*
 * Copyright 2008-2010 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_methodHandles_sparc.cpp.incl"

#define __ _masm->

address MethodHandleEntry::start_compiled_entry(MacroAssembler* _masm,
                                                address interpreted_entry) {
  // Just before the actual machine code entry point, allocate space
  // for a MethodHandleEntry::Data record, so that we can manage everything
  // from one base pointer.
  __ align(wordSize);
  address target = __ pc() + sizeof(Data);
  while (__ pc() < target) {
    __ nop();
    __ align(wordSize);
  }

  MethodHandleEntry* me = (MethodHandleEntry*) __ pc();
  me->set_end_address(__ pc());         // set a temporary end_address
  me->set_from_interpreted_entry(interpreted_entry);
  me->set_type_checking_entry(NULL);

  return (address) me;
}

MethodHandleEntry* MethodHandleEntry::finish_compiled_entry(MacroAssembler* _masm,
                                                            address start_addr) {
  MethodHandleEntry* me = (MethodHandleEntry*) start_addr;
  assert(me->end_address() == start_addr, "valid ME");

  // Fill in the real end_address:
  __ align(wordSize);
  me->set_end_address(__ pc());

  return me;
}
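
// (Taken together: each entry carries a small MethodHandleEntry record in the
// nop padding allocated just before its code; start_compiled_entry records
// the from-interpreted entry there, and finish_compiled_entry patches in the
// real end_address once the stub has been fully emitted.)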

// Code generation
address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler* _masm) {
  // I5_savedSP: sender SP (must preserve)
  // G4 (Gargs): incoming argument list (must preserve)
  // G5_method:  invoke methodOop; becomes method type.
  // G3_method_handle: receiver method handle (must load from sp[MethodTypeForm.vmslots])
  // O0, O1: garbage temps, blown away
  Register O0_argslot = O0;
  Register O1_scratch = O1;

  // emit WrongMethodType path first, to enable back-branch from main path
  Label wrong_method_type;
  __ bind(wrong_method_type);
  __ jump_to(AddressLiteral(Interpreter::throw_WrongMethodType_entry()), O1_scratch);
  __ delayed()->nop();

  // here's where control starts out:
  __ align(CodeEntryAlignment);
  address entry_point = __ pc();

  // fetch the MethodType from the method handle into G5_method_type
  {
    Register tem = G5_method;
    assert(tem == G5_method_type, "yes, it's the same register");
    for (jint* pchase = methodOopDesc::method_type_offsets_chain(); (*pchase) != -1; pchase++) {
      __ ld_ptr(Address(tem, *pchase), G5_method_type);
    }
  }

  // given the MethodType, find out where the MH argument is buried
  __ ld_ptr(Address(G5_method_type, __ delayed_value(java_dyn_MethodType::form_offset_in_bytes,        O1_scratch)), O0_argslot);
  __ ldsw(  Address(O0_argslot,     __ delayed_value(java_dyn_MethodTypeForm::vmslots_offset_in_bytes, O1_scratch)), O0_argslot);
  __ ld_ptr(__ argument_address(O0_argslot), G3_method_handle);

  __ check_method_handle_type(G5_method_type, G3_method_handle, O1_scratch, wrong_method_type);
  __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);

  return entry_point;
}


#ifdef ASSERT
static void verify_argslot(MacroAssembler* _masm, Register argslot_reg, Register temp_reg, const char* error_message) {
  // Verify that argslot lies within (Gargs, FP].
  Label L_ok, L_bad;
#ifdef _LP64
  __ add(FP, STACK_BIAS, temp_reg);
  __ cmp(argslot_reg, temp_reg);
#else
  __ cmp(argslot_reg, FP);
#endif
  __ brx(Assembler::greaterUnsigned, false, Assembler::pn, L_bad);
  __ delayed()->nop();
  __ cmp(Gargs, argslot_reg);
  __ brx(Assembler::lessEqualUnsigned, false, Assembler::pt, L_ok);
  __ delayed()->nop();
  __ bind(L_bad);
  __ stop(error_message);
  __ bind(L_ok);
}
#endif


// Helper to insert argument slots into the stack.
// arg_slots must be a multiple of stack_move_unit() and <= 0
void MethodHandles::insert_arg_slots(MacroAssembler* _masm,
                                     RegisterOrConstant arg_slots,
                                     int arg_mask,
                                     Register argslot_reg,
                                     Register temp_reg, Register temp2_reg, Register temp3_reg) {
  assert(temp3_reg != noreg, "temp3 required");
  assert_different_registers(argslot_reg, temp_reg, temp2_reg, temp3_reg,
                             (!arg_slots.is_register() ? Gargs : arg_slots.as_register()));

#ifdef ASSERT
  verify_argslot(_masm, argslot_reg, temp_reg, "insertion point must fall within current frame");
  if (arg_slots.is_register()) {
    Label L_ok, L_bad;
    __ cmp(arg_slots.as_register(), (int32_t) NULL_WORD);
    __ br(Assembler::greater, false, Assembler::pn, L_bad);
    __ delayed()->nop();
    __ btst(-stack_move_unit() - 1, arg_slots.as_register());
    __ br(Assembler::zero, false, Assembler::pt, L_ok);
    __ delayed()->nop();
    __ bind(L_bad);
    __ stop("assert arg_slots <= 0 and clear low bits");
    __ bind(L_ok);
  } else {
    assert(arg_slots.as_constant() <= 0, "");
    assert(arg_slots.as_constant() % -stack_move_unit() == 0, "");
  }
#endif // ASSERT

#ifdef _LP64
  if (arg_slots.is_register()) {
    // Was arg_slots register loaded as signed int?
    Label L_ok;
    __ sll(arg_slots.as_register(), BitsPerInt, temp_reg);
    __ sra(temp_reg, BitsPerInt, temp_reg);
    __ cmp(arg_slots.as_register(), temp_reg);
    __ br(Assembler::equal, false, Assembler::pt, L_ok);
    __ delayed()->nop();
    __ stop("arg_slots register not loaded as signed int");
    __ bind(L_ok);
  }
#endif

  // Make space on the stack for the inserted argument(s).
  // Then pull down everything shallower than argslot_reg.
  // The stacked return address gets pulled down with everything else.
  // That is, copy [sp, argslot) downward by -size words.  In pseudo-code:
  //   sp -= size;
  //   for (temp = sp + size; temp < argslot; temp++)
  //     temp[-size] = temp[0]
  //   argslot -= size;
  RegisterOrConstant offset = __ regcon_sll_ptr(arg_slots, LogBytesPerWord, temp3_reg);

  // Keep the stack pointer 2*wordSize aligned.
  const int TwoWordAlignmentMask = right_n_bits(LogBytesPerWord + 1);
  RegisterOrConstant masked_offset = __ regcon_andn_ptr(offset, TwoWordAlignmentMask, temp_reg);
  __ add(SP, masked_offset, SP);

  __ mov(Gargs, temp_reg);  // source pointer for copy
  __ add(Gargs, offset, Gargs);

  {
    Label loop;
    __ bind(loop);
    // pull one word down each time through the loop
    __ ld_ptr(Address(temp_reg, 0), temp2_reg);
    __ st_ptr(temp2_reg, Address(temp_reg, offset));
    __ add(temp_reg, wordSize, temp_reg);
    __ cmp(temp_reg, argslot_reg);
    __ brx(Assembler::less, false, Assembler::pt, loop);
    __ delayed()->nop();  // FILLME
  }

  // Now move the argslot down, to point to the opened-up space.
  __ add(argslot_reg, offset, argslot_reg);
}
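
// For example (assuming an LP64 build, so wordSize == 8): inserting a single
// slot (arg_slots == -1) gives offset == -8.  SP moves down by 16 bytes (the
// offset rounded so SP stays 2*wordSize aligned), Gargs moves down by 8,
// every word in [old Gargs, argslot) slides down one word, and argslot_reg
// is left pointing at the newly freed slot.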

// Helper to remove argument slots from the stack.
// arg_slots must be a multiple of stack_move_unit() and >= 0
void MethodHandles::remove_arg_slots(MacroAssembler* _masm,
                                     RegisterOrConstant arg_slots,
                                     Register argslot_reg,
                                     Register temp_reg, Register temp2_reg, Register temp3_reg) {
  assert(temp3_reg != noreg, "temp3 required");
  assert_different_registers(argslot_reg, temp_reg, temp2_reg, temp3_reg,
                             (!arg_slots.is_register() ? Gargs : arg_slots.as_register()));

  RegisterOrConstant offset = __ regcon_sll_ptr(arg_slots, LogBytesPerWord, temp3_reg);

#ifdef ASSERT
  // Verify that [argslot..argslot+size) lies within (Gargs, FP).
  __ add(argslot_reg, offset, temp2_reg);
  verify_argslot(_masm, temp2_reg, temp_reg, "deleted argument(s) must fall within current frame");
  if (arg_slots.is_register()) {
    Label L_ok, L_bad;
    __ cmp(arg_slots.as_register(), (int32_t) NULL_WORD);
    __ br(Assembler::less, false, Assembler::pn, L_bad);
    __ delayed()->nop();
    __ btst(-stack_move_unit() - 1, arg_slots.as_register());
    __ br(Assembler::zero, false, Assembler::pt, L_ok);
    __ delayed()->nop();
    __ bind(L_bad);
    __ stop("assert arg_slots >= 0 and clear low bits");
    __ bind(L_ok);
  } else {
    assert(arg_slots.as_constant() >= 0, "");
    assert(arg_slots.as_constant() % -stack_move_unit() == 0, "");
  }
#endif // ASSERT

  // Pull up everything shallower than argslot.
  // Then remove the excess space on the stack.
  // The stacked return address gets pulled up with everything else.
  // That is, copy [sp, argslot) upward by size words.  In pseudo-code:
  //   for (temp = argslot-1; temp >= sp; --temp)
  //     temp[size] = temp[0]
  //   argslot += size;
  //   sp += size;
  __ sub(argslot_reg, wordSize, temp_reg);  // source pointer for copy
  {
    Label loop;
    __ bind(loop);
    // pull one word up each time through the loop
    __ ld_ptr(Address(temp_reg, 0), temp2_reg);
    __ st_ptr(temp2_reg, Address(temp_reg, offset));
    __ sub(temp_reg, wordSize, temp_reg);
    __ cmp(temp_reg, Gargs);
    __ brx(Assembler::greaterEqual, false, Assembler::pt, loop);
    __ delayed()->nop();  // FILLME
  }

  // Now move Gargs up, to point to the just-copied block.
  __ add(Gargs, offset, Gargs);
  // And adjust the argslot address to point at the deletion point.
  __ add(argslot_reg, offset, argslot_reg);

  // Keep the stack pointer 2*wordSize aligned.
  const int TwoWordAlignmentMask = right_n_bits(LogBytesPerWord + 1);
  RegisterOrConstant masked_offset = __ regcon_andn_ptr(offset, TwoWordAlignmentMask, temp_reg);
  __ add(SP, masked_offset, SP);
}
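
// For example (again assuming LP64): removing two slots (arg_slots == 2,
// offset == 16) copies every word shallower than argslot up by 16 bytes,
// bumps Gargs and argslot_reg up by 16, and releases the same 16 bytes
// from SP.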

#ifndef PRODUCT
extern "C" void print_method_handle(oop mh);
void trace_method_handle_stub(const char* adaptername,
                              oop mh) {
#if 0
                              intptr_t* entry_sp,
                              intptr_t* saved_sp,
                              intptr_t* saved_bp) {
  // called as a leaf from native code: do not block the JVM!
  intptr_t* last_sp = (intptr_t*) saved_bp[frame::interpreter_frame_last_sp_offset];
  intptr_t* base_sp = (intptr_t*) saved_bp[frame::interpreter_frame_monitor_block_top_offset];
  printf("MH %s mh="INTPTR_FORMAT" sp=("INTPTR_FORMAT"+"INTX_FORMAT") stack_size="INTX_FORMAT" bp="INTPTR_FORMAT"\n",
         adaptername, (intptr_t)mh, (intptr_t)entry_sp, (intptr_t)(saved_sp - entry_sp), (intptr_t)(base_sp - last_sp), (intptr_t)saved_bp);
  if (last_sp != saved_sp)
    printf("*** last_sp="INTPTR_FORMAT"\n", (intptr_t)last_sp);
#endif

  printf("MH %s mh="INTPTR_FORMAT"\n", adaptername, (intptr_t) mh);
  print_method_handle(mh);
}
#endif // PRODUCT

// which conversion op types are implemented here?
int MethodHandles::adapter_conversion_ops_supported_mask() {
  return ((1<<sun_dyn_AdapterMethodHandle::OP_RETYPE_ONLY)
         |(1<<sun_dyn_AdapterMethodHandle::OP_RETYPE_RAW)
         |(1<<sun_dyn_AdapterMethodHandle::OP_CHECK_CAST)
         |(1<<sun_dyn_AdapterMethodHandle::OP_PRIM_TO_PRIM)
         |(1<<sun_dyn_AdapterMethodHandle::OP_REF_TO_PRIM)
         |(1<<sun_dyn_AdapterMethodHandle::OP_SWAP_ARGS)
         |(1<<sun_dyn_AdapterMethodHandle::OP_ROT_ARGS)
         |(1<<sun_dyn_AdapterMethodHandle::OP_DUP_ARGS)
         |(1<<sun_dyn_AdapterMethodHandle::OP_DROP_ARGS)
         //|(1<<sun_dyn_AdapterMethodHandle::OP_SPREAD_ARGS) //BUG!
         );
  // FIXME: MethodHandlesTest gets a crash if we enable OP_SPREAD_ARGS.
}

//------------------------------------------------------------------------------
// MethodHandles::generate_method_handle_stub
//
// Generate an "entry" field for a method handle.
// This determines how the method handle will respond to calls.
void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHandles::EntryKind ek) {
  // Here is the register state during an interpreted call,
  // as set up by generate_method_handle_interpreter_entry():
  // - G5: garbage temp (was MethodHandle.invoke methodOop, unused)
  // - G3: receiver method handle
  // - O5_savedSP: sender SP (must preserve)

  Register O0_argslot = O0;
  Register O1_scratch = O1;
  Register O2_scratch = O2;
  Register O3_scratch = O3;
  Register G5_index   = G5;

  guarantee(java_dyn_MethodHandle::vmentry_offset_in_bytes() != 0, "must have offsets");

  // Some handy addresses:
  Address G5_method_fie(    G5_method,        in_bytes(methodOopDesc::from_interpreted_offset()));

  Address G3_mh_vmtarget(   G3_method_handle, java_dyn_MethodHandle::vmtarget_offset_in_bytes());

  Address G3_dmh_vmindex(   G3_method_handle, sun_dyn_DirectMethodHandle::vmindex_offset_in_bytes());

  Address G3_bmh_vmargslot( G3_method_handle, sun_dyn_BoundMethodHandle::vmargslot_offset_in_bytes());
  Address G3_bmh_argument(  G3_method_handle, sun_dyn_BoundMethodHandle::argument_offset_in_bytes());

  Address G3_amh_vmargslot( G3_method_handle, sun_dyn_AdapterMethodHandle::vmargslot_offset_in_bytes());
  Address G3_amh_argument ( G3_method_handle, sun_dyn_AdapterMethodHandle::argument_offset_in_bytes());
  Address G3_amh_conversion(G3_method_handle, sun_dyn_AdapterMethodHandle::conversion_offset_in_bytes());

  const int java_mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes();

  if (have_entry(ek)) {
    __ nop();  // empty stubs make SG sick
    return;
  }

  address interp_entry = __ pc();
  if (UseCompressedOops) __ unimplemented("UseCompressedOops");
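  // (interp_entry is handed to MethodHandleEntry::start_compiled_entry at the
  // bottom of this function, so the finished stub records where its
  // from-interpreted code begins.)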

#ifndef PRODUCT
  if (TraceMethodHandles) {
    // save: Gargs, O5_savedSP
    __ save(SP, -16*wordSize, SP);
    __ set((intptr_t) entry_name(ek), O0);
    __ mov(G3_method_handle, O1);
    __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, trace_method_handle_stub));
    __ restore(SP, 16*wordSize, SP);
  }
#endif // PRODUCT

  switch ((int) ek) {
  case _raise_exception:
    {
      // Not a real MH entry, but rather shared code for raising an
      // exception.  Extra local arguments are passed in scratch
      // registers, as required type in O3, failing object (or NULL)
      // in O2, failing bytecode type in O1.

      __ mov(O5_savedSP, SP);  // Cut the stack back to where the caller started.

      // Push arguments as if coming from the interpreter.
      Register O0_scratch = O0_argslot;
      int stackElementSize = Interpreter::stackElementSize;

      // Make space on the stack for the arguments and set Gargs
      // correctly.
      __ sub(SP, 4*stackElementSize, SP);  // Keep stack aligned.
      __ add(SP, (frame::varargs_offset)*wordSize - 1*Interpreter::stackElementSize + STACK_BIAS + BytesPerWord, Gargs);

      // void raiseException(int code, Object actual, Object required)
      __ st(    O1_scratch, Address(Gargs, 2*stackElementSize));  // code
      __ st_ptr(O2_scratch, Address(Gargs, 1*stackElementSize));  // actual
      __ st_ptr(O3_scratch, Address(Gargs, 0*stackElementSize));  // required

      Label no_method;
      // FIXME: fill in _raise_exception_method with a suitable sun.dyn method
      __ set(AddressLiteral((address) &_raise_exception_method), G5_method);
      __ ld_ptr(Address(G5_method, 0), G5_method);
      __ tst(G5_method);
      __ brx(Assembler::zero, false, Assembler::pn, no_method);
      __ delayed()->nop();

      int jobject_oop_offset = 0;
      __ ld_ptr(Address(G5_method, jobject_oop_offset), G5_method);
      __ tst(G5_method);
      __ brx(Assembler::zero, false, Assembler::pn, no_method);
      __ delayed()->nop();

      __ verify_oop(G5_method);
      __ jump_indirect_to(G5_method_fie, O1_scratch);
      __ delayed()->nop();

      // If we get here, the Java runtime did not do its job of creating the exception.
      // Do something that at least causes a valid throw from the interpreter.
      __ bind(no_method);
      __ unimplemented("_raise_exception no method");
    }
    break;

  case _invokestatic_mh:
  case _invokespecial_mh:
    {
      __ ld_ptr(G3_mh_vmtarget, G5_method);  // target is a methodOop
      __ verify_oop(G5_method);
      // Same as TemplateTable::invokestatic or invokespecial,
      // minus the CP setup and profiling:
      if (ek == _invokespecial_mh) {
        // Must load & check the first argument before entering the target method.
        __ load_method_handle_vmslots(O0_argslot, G3_method_handle, O1_scratch);
        __ ld_ptr(__ argument_address(O0_argslot), G3_method_handle);
        __ null_check(G3_method_handle);
        __ verify_oop(G3_method_handle);
      }
      __ jump_indirect_to(G5_method_fie, O1_scratch);
      __ delayed()->nop();
    }
    break;

  case _invokevirtual_mh:
    {
      // Same as TemplateTable::invokevirtual,
      // minus the CP setup and profiling:

      // Pick out the vtable index and receiver offset from the MH,
      // and then we can discard it:
      __ load_method_handle_vmslots(O0_argslot, G3_method_handle, O1_scratch);
      __ ldsw(G3_dmh_vmindex, G5_index);
      // Note: The verifier allows us to ignore G3_mh_vmtarget.
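      // In effect, the code below loads G5_method from the receiver klass's
      // vtable entry at index vmindex (the index scaled by wordSize).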
      __ ld_ptr(__ argument_address(O0_argslot, -1), G3_method_handle);
      __ null_check(G3_method_handle, oopDesc::klass_offset_in_bytes());

      // Get receiver klass:
      Register O0_klass = O0_argslot;
      __ load_klass(G3_method_handle, O0_klass);
      __ verify_oop(O0_klass);

      // Get target methodOop & entry point:
      const int base = instanceKlass::vtable_start_offset() * wordSize;
      assert(vtableEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");

      __ sll_ptr(G5_index, LogBytesPerWord, G5_index);
      __ add(O0_klass, G5_index, O0_klass);
      Address vtable_entry_addr(O0_klass, base + vtableEntry::method_offset_in_bytes());
      __ ld_ptr(vtable_entry_addr, G5_method);

      __ verify_oop(G5_method);
      __ jump_indirect_to(G5_method_fie, O1_scratch);
      __ delayed()->nop();
    }
    break;

  case _invokeinterface_mh:
    {
      // Same as TemplateTable::invokeinterface,
      // minus the CP setup and profiling:
      __ load_method_handle_vmslots(O0_argslot, G3_method_handle, O1_scratch);
      Register O1_intf = O1_scratch;
      __ ld_ptr(G3_mh_vmtarget, O1_intf);
      __ ldsw(G3_dmh_vmindex, G5_index);
      __ ld_ptr(__ argument_address(O0_argslot, -1), G3_method_handle);
      __ null_check(G3_method_handle, oopDesc::klass_offset_in_bytes());

      // Get receiver klass:
      Register O0_klass = O0_argslot;
      __ load_klass(G3_method_handle, O0_klass);
      __ verify_oop(O0_klass);

      // Get interface:
      Label no_such_interface;
      __ verify_oop(O1_intf);
      __ lookup_interface_method(O0_klass, O1_intf,
                                 // Note: next two args must be the same:
                                 G5_index, G5_method,
                                 O2_scratch,
                                 O3_scratch,
                                 no_such_interface);

      __ verify_oop(G5_method);
      __ jump_indirect_to(G5_method_fie, O1_scratch);
      __ delayed()->nop();

      __ bind(no_such_interface);
      // Throw an exception.
      // For historical reasons, it will be IncompatibleClassChangeError.
      __ unimplemented("not tested yet");
      __ ld_ptr(Address(O1_intf, java_mirror_offset), O3_scratch);  // required interface
      __ mov(O0_klass, O2_scratch);  // bad receiver
      __ jump_to(AddressLiteral(from_interpreted_entry(_raise_exception)), O0_argslot);
      __ delayed()->mov(Bytecodes::_invokeinterface, O1_scratch);  // who is complaining?
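      // (Control resumes at the shared _raise_exception path above, with the
      // complaining bytecode in O1, the bad receiver in O2, and the required
      // interface in O3.)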
    }
    break;

  case _bound_ref_mh:
  case _bound_int_mh:
  case _bound_long_mh:
  case _bound_ref_direct_mh:
  case _bound_int_direct_mh:
  case _bound_long_direct_mh:
    {
      const bool direct_to_method = (ek >= _bound_ref_direct_mh);
      BasicType arg_type  = T_ILLEGAL;
      int       arg_mask  = _INSERT_NO_MASK;
      int       arg_slots = -1;
      get_ek_bound_mh_info(ek, arg_type, arg_mask, arg_slots);

      // Make room for the new argument:
      __ ldsw(G3_bmh_vmargslot, O0_argslot);
      __ add(Gargs, __ argument_offset(O0_argslot), O0_argslot);

      insert_arg_slots(_masm, arg_slots * stack_move_unit(), arg_mask, O0_argslot, O1_scratch, O2_scratch, G5_index);

      // Store bound argument into the new stack slot:
      __ ld_ptr(G3_bmh_argument, O1_scratch);
      if (arg_type == T_OBJECT) {
        __ st_ptr(O1_scratch, Address(O0_argslot, 0));
      } else {
        Address prim_value_addr(O1_scratch, java_lang_boxing_object::value_offset_in_bytes(arg_type));
        __ load_sized_value(prim_value_addr, O2_scratch, type2aelembytes(arg_type), is_signed_subword_type(arg_type));
        if (arg_slots == 2) {
          __ unimplemented("not yet tested");
#ifndef _LP64
          __ signx(O2_scratch, O3_scratch);  // Sign extend
#endif
          __ st_long(O2_scratch, Address(O0_argslot, 0));  // Uses O2/O3 on !_LP64
        } else {
          __ st_ptr( O2_scratch, Address(O0_argslot, 0));
        }
      }

      if (direct_to_method) {
        __ ld_ptr(G3_mh_vmtarget, G5_method);  // target is a methodOop
        __ verify_oop(G5_method);
        __ jump_indirect_to(G5_method_fie, O1_scratch);
        __ delayed()->nop();
      } else {
        __ ld_ptr(G3_mh_vmtarget, G3_method_handle);  // target is another method handle
        __ verify_oop(G3_method_handle);
        __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
      }
    }
    break;

  case _adapter_retype_only:
  case _adapter_retype_raw:
    // Immediately jump to the next MH layer:
    __ ld_ptr(G3_mh_vmtarget, G3_method_handle);
    __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
    // This is OK when all parameter types widen.
    // It is also OK when a return type narrows.
    break;

  case _adapter_check_cast:
    {
      // Temps:
      Register G5_klass = G5_index;  // Interesting AMH data.

      // Check a reference argument before jumping to the next layer of MH:
      __ ldsw(G3_amh_vmargslot, O0_argslot);
      Address vmarg = __ argument_address(O0_argslot);

      // What class are we casting to?
      __ ld_ptr(G3_amh_argument, G5_klass);  // This is a Class object!
      __ ld_ptr(Address(G5_klass, java_lang_Class::klass_offset_in_bytes()), G5_klass);

      Label done;
      __ ld_ptr(vmarg, O1_scratch);
      __ tst(O1_scratch);
      __ brx(Assembler::zero, false, Assembler::pn, done);  // No cast if null.
      __ delayed()->nop();
      __ load_klass(O1_scratch, O1_scratch);

      // Live at this point:
      // - G5_klass        :  klass required by the target method
      // - O1_scratch      :  argument klass to test
      // - G3_method_handle: adapter method handle
      __ check_klass_subtype(O1_scratch, G5_klass, O0_argslot, O2_scratch, done);

      // If we get here, the type check failed!
      __ ldsw(G3_amh_vmargslot, O0_argslot);   // reload argslot field
      __ ld_ptr(G3_amh_argument, O3_scratch);  // required class
      __ ld_ptr(vmarg, O2_scratch);            // bad object
      __ jump_to(AddressLiteral(from_interpreted_entry(_raise_exception)), O0_argslot);
      __ delayed()->mov(Bytecodes::_checkcast, O1_scratch);  // who is complaining?
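
      // The cast succeeded (or the argument was null); fall through and
      // tail-call the adapter's target.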
      __ bind(done);
      // Get the new MH:
      __ ld_ptr(G3_mh_vmtarget, G3_method_handle);
      __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
    }
    break;

  case _adapter_prim_to_prim:
  case _adapter_ref_to_prim:
    // Handled completely by optimized cases.
    __ stop("init_AdapterMethodHandle should not issue this");
    break;

  case _adapter_opt_i2i:        // optimized subcase of adapt_prim_to_prim
//case _adapter_opt_f2i:        // optimized subcase of adapt_prim_to_prim
  case _adapter_opt_l2i:        // optimized subcase of adapt_prim_to_prim
  case _adapter_opt_unboxi:     // optimized subcase of adapt_ref_to_prim
    {
      // Perform an in-place conversion to int or an int subword.
      __ ldsw(G3_amh_vmargslot, O0_argslot);
      Address vmarg = __ argument_address(O0_argslot);
      Address value;
      bool value_left_justified = false;

      switch (ek) {
      case _adapter_opt_i2i:
      case _adapter_opt_l2i:
        __ unimplemented(entry_name(ek));
        value = vmarg;
        break;
      case _adapter_opt_unboxi:
        {
          // Load the value up from the heap.
          __ ld_ptr(vmarg, O1_scratch);
          int value_offset = java_lang_boxing_object::value_offset_in_bytes(T_INT);
#ifdef ASSERT
          for (int bt = T_BOOLEAN; bt < T_INT; bt++) {
            if (is_subword_type(BasicType(bt)))
              assert(value_offset == java_lang_boxing_object::value_offset_in_bytes(BasicType(bt)), "");
          }
#endif
          __ null_check(O1_scratch, value_offset);
          value = Address(O1_scratch, value_offset);
#ifdef _BIG_ENDIAN
          // Values stored in objects are packed.
          value_left_justified = true;
#endif
        }
        break;
      default:
        ShouldNotReachHere();
      }

      // This check is required on _BIG_ENDIAN
      Register G5_vminfo = G5_index;
      __ ldsw(G3_amh_conversion, G5_vminfo);
      assert(CONV_VMINFO_SHIFT == 0, "preshifted");

      // Original 32-bit vmdata word must be of this form:
      //   | MBZ:6 | signBitCount:8 | srcDstTypes:8 | conversionOp:8 |
      __ lduw(value, O1_scratch);
      if (!value_left_justified)
        __ sll(O1_scratch, G5_vminfo, O1_scratch);
      Label zero_extend, done;
      __ btst(CONV_VMINFO_SIGN_FLAG, G5_vminfo);
      __ br(Assembler::zero, false, Assembler::pn, zero_extend);
      __ delayed()->nop();

      // this path is taken for int->byte, int->short
      __ sra(O1_scratch, G5_vminfo, O1_scratch);
      __ ba(false, done);
      __ delayed()->nop();

      __ bind(zero_extend);
      // this is taken for int->char
      __ srl(O1_scratch, G5_vminfo, O1_scratch);

      __ bind(done);
      __ st(O1_scratch, vmarg);

      // Get the new MH:
      __ ld_ptr(G3_mh_vmtarget, G3_method_handle);
      __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
    }
    break;

  case _adapter_opt_i2l:        // optimized subcase of adapt_prim_to_prim
  case _adapter_opt_unboxl:     // optimized subcase of adapt_ref_to_prim
    {
      // Perform an in-place int-to-long or ref-to-long conversion.
      __ ldsw(G3_amh_vmargslot, O0_argslot);

      // On big-endian machine we duplicate the slot and store the MSW
      // in the first slot.
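      // (insert_arg_slots below opens up one additional stack element for the
      // widened value before the 64-bit store.)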
      __ add(Gargs, __ argument_offset(O0_argslot, 1), O0_argslot);

      insert_arg_slots(_masm, stack_move_unit(), _INSERT_INT_MASK, O0_argslot, O1_scratch, O2_scratch, G5_index);

      Address arg_lsw(O0_argslot, 0);
      Address arg_msw(O0_argslot, -Interpreter::stackElementSize);

      switch (ek) {
      case _adapter_opt_i2l:
        {
          __ ldsw(arg_lsw, O2_scratch);      // Load LSW
#ifndef _LP64
          __ signx(O2_scratch, O3_scratch);  // Sign extend
#endif
          __ st_long(O2_scratch, arg_msw);   // Uses O2/O3 on !_LP64
        }
        break;
      case _adapter_opt_unboxl:
        {
          // Load the value up from the heap.
          __ ld_ptr(arg_lsw, O1_scratch);
          int value_offset = java_lang_boxing_object::value_offset_in_bytes(T_LONG);
          assert(value_offset == java_lang_boxing_object::value_offset_in_bytes(T_DOUBLE), "");
          __ null_check(O1_scratch, value_offset);
          __ ld_long(Address(O1_scratch, value_offset), O2_scratch);  // Uses O2/O3 on !_LP64
          __ st_long(O2_scratch, arg_msw);
        }
        break;
      default:
        ShouldNotReachHere();
      }

      __ ld_ptr(G3_mh_vmtarget, G3_method_handle);
      __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
    }
    break;

  case _adapter_opt_f2d:        // optimized subcase of adapt_prim_to_prim
  case _adapter_opt_d2f:        // optimized subcase of adapt_prim_to_prim
    {
      // perform an in-place floating primitive conversion
      __ unimplemented(entry_name(ek));
    }
    break;

  case _adapter_prim_to_ref:
    __ unimplemented(entry_name(ek)); // %%% FIXME: NYI
    break;

  case _adapter_swap_args:
  case _adapter_rot_args:
    // handled completely by optimized cases
    __ stop("init_AdapterMethodHandle should not issue this");
    break;

  case _adapter_opt_swap_1:
  case _adapter_opt_swap_2:
  case _adapter_opt_rot_1_up:
  case _adapter_opt_rot_1_down:
  case _adapter_opt_rot_2_up:
  case _adapter_opt_rot_2_down:
    {
      int swap_bytes = 0, rotate = 0;
      get_ek_adapter_opt_swap_rot_info(ek, swap_bytes, rotate);

      // 'argslot' is the position of the first argument to swap.
      __ ldsw(G3_amh_vmargslot, O0_argslot);
      __ add(Gargs, __ argument_offset(O0_argslot), O0_argslot);

      // 'vminfo' is the second.
      Register O1_destslot = O1_scratch;
      __ ldsw(G3_amh_conversion, O1_destslot);
      assert(CONV_VMINFO_SHIFT == 0, "preshifted");
      __ and3(O1_destslot, CONV_VMINFO_MASK, O1_destslot);
      __ add(Gargs, __ argument_offset(O1_destslot), O1_destslot);

      if (!rotate) {
        for (int i = 0; i < swap_bytes; i += wordSize) {
          __ ld_ptr(Address(O0_argslot, i), O2_scratch);
          __ ld_ptr(Address(O1_destslot, i), O3_scratch);
          __ st_ptr(O3_scratch, Address(O0_argslot, i));
          __ st_ptr(O2_scratch, Address(O1_destslot, i));
        }
      } else {
        // Save the first chunk, which is going to get overwritten.
        switch (swap_bytes) {
        case 4 : __ lduw(Address(O0_argslot, 0), O2_scratch); break;
        case 16: __ ldx( Address(O0_argslot, 8), O3_scratch); //fall-thru
        case 8 : __ ldx( Address(O0_argslot, 0), O2_scratch); break;
        default: ShouldNotReachHere();
        }

        if (rotate > 0) {
          // Rotate upward.
          __ sub(O0_argslot, swap_bytes, O0_argslot);
#ifdef ASSERT
          {
            // Verify that argslot > destslot, by at least swap_bytes.
            Label L_ok;
            __ cmp(O0_argslot, O1_destslot);
            __ brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, L_ok);
            __ delayed()->nop();
            __ stop("source must be above destination (upward rotation)");
            __ bind(L_ok);
          }
#endif
          // Work argslot down to destslot, copying contiguous data upwards.
          // Pseudo-code:
          //   argslot  = src_addr - swap_bytes
          //   destslot = dest_addr
          //   while (argslot >= destslot) {
          //     *(argslot + swap_bytes) = *(argslot + 0);
          //     argslot--;
          //   }
          Label loop;
          __ bind(loop);
          __ ld_ptr(Address(O0_argslot, 0), G5_index);
          __ st_ptr(G5_index, Address(O0_argslot, swap_bytes));
          __ sub(O0_argslot, wordSize, O0_argslot);
          __ cmp(O0_argslot, O1_destslot);
          __ brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, loop);
          __ delayed()->nop();  // FILLME
        } else {
          __ add(O0_argslot, swap_bytes, O0_argslot);
#ifdef ASSERT
          {
            // Verify that argslot < destslot, by at least swap_bytes.
            Label L_ok;
            __ cmp(O0_argslot, O1_destslot);
            __ brx(Assembler::lessEqualUnsigned, false, Assembler::pt, L_ok);
            __ delayed()->nop();
            __ stop("source must be below destination (downward rotation)");
            __ bind(L_ok);
          }
#endif
          // Work argslot up to destslot, copying contiguous data downwards.
          // Pseudo-code:
          //   argslot  = src_addr + swap_bytes
          //   destslot = dest_addr
          //   while (argslot <= destslot) {
          //     *(argslot - swap_bytes) = *(argslot + 0);
          //     argslot++;
          //   }
          Label loop;
          __ bind(loop);
          __ ld_ptr(Address(O0_argslot, 0), G5_index);
          __ st_ptr(G5_index, Address(O0_argslot, -swap_bytes));
          __ add(O0_argslot, wordSize, O0_argslot);
          __ cmp(O0_argslot, O1_destslot);
          __ brx(Assembler::lessEqualUnsigned, false, Assembler::pt, loop);
          __ delayed()->nop();  // FILLME
        }

        // Store the original first chunk into the destination slot, now free.
        switch (swap_bytes) {
        case 4 : __ stw(O2_scratch, Address(O1_destslot, 0)); break;
        case 16: __ stx(O3_scratch, Address(O1_destslot, 8));  // fall-thru
        case 8 : __ stx(O2_scratch, Address(O1_destslot, 0)); break;
        default: ShouldNotReachHere();
        }
      }

      __ ld_ptr(G3_mh_vmtarget, G3_method_handle);
      __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
    }
    break;

  case _adapter_dup_args:
    {
      // 'argslot' is the position of the first argument to duplicate.
      __ ldsw(G3_amh_vmargslot, O0_argslot);
      __ add(Gargs, __ argument_offset(O0_argslot), O0_argslot);

      // 'stack_move' is negative number of words to duplicate.
      Register G5_stack_move = G5_index;
      __ ldsw(G3_amh_conversion, G5_stack_move);
      __ sra(G5_stack_move, CONV_STACK_MOVE_SHIFT, G5_stack_move);

      // Remember the old Gargs (argslot[0]).
      Register O1_oldarg = O1_scratch;
      __ mov(Gargs, O1_oldarg);

      // Move Gargs down to make room for dups.
      __ sll_ptr(G5_stack_move, LogBytesPerWord, G5_stack_move);
      __ add(Gargs, G5_stack_move, Gargs);

      // Compute the new Gargs (argslot[0]).
      Register O2_newarg = O2_scratch;
      __ mov(Gargs, O2_newarg);

      // Copy from oldarg[0...] down to newarg[0...]
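      // (For example, when two slots are duplicated Gargs moves down by two
      // words and the loop below re-pushes argslot[0..1] into the newly
      // exposed space.)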
      // Pseudo-code:
      //   O1_oldarg  = old-Gargs
      //   O2_newarg  = new-Gargs
      //   O0_argslot = argslot
      //   while (O2_newarg < O1_oldarg) *O2_newarg = *O0_argslot++
      Label loop;
      __ bind(loop);
      __ ld_ptr(Address(O0_argslot, 0), O3_scratch);
      __ st_ptr(O3_scratch, Address(O2_newarg, 0));
      __ add(O0_argslot, wordSize, O0_argslot);
      __ add(O2_newarg,  wordSize, O2_newarg);
      __ cmp(O2_newarg, O1_oldarg);
      __ brx(Assembler::less, false, Assembler::pt, loop);
      __ delayed()->nop();  // FILLME

      __ ld_ptr(G3_mh_vmtarget, G3_method_handle);
      __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
    }
    break;

  case _adapter_drop_args:
    {
      // 'argslot' is the position of the first argument to nuke.
      __ ldsw(G3_amh_vmargslot, O0_argslot);
      __ add(Gargs, __ argument_offset(O0_argslot), O0_argslot);

      // 'stack_move' is number of words to drop.
      Register G5_stack_move = G5_index;
      __ ldsw(G3_amh_conversion, G5_stack_move);
      __ sra(G5_stack_move, CONV_STACK_MOVE_SHIFT, G5_stack_move);

      remove_arg_slots(_masm, G5_stack_move, O0_argslot, O1_scratch, O2_scratch, O3_scratch);

      __ ld_ptr(G3_mh_vmtarget, G3_method_handle);
      __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
    }
    break;

  case _adapter_collect_args:
    __ unimplemented(entry_name(ek)); // %%% FIXME: NYI
    break;

  case _adapter_spread_args:
    // Handled completely by optimized cases.
    __ stop("init_AdapterMethodHandle should not issue this");
    break;

  case _adapter_opt_spread_0:
  case _adapter_opt_spread_1:
  case _adapter_opt_spread_more:
    {
      // spread an array out into a group of arguments
      __ unimplemented(entry_name(ek));
    }
    break;

  case _adapter_flyby:
  case _adapter_ricochet:
    __ unimplemented(entry_name(ek)); // %%% FIXME: NYI
    break;

  default:
    ShouldNotReachHere();
  }

  address me_cookie = MethodHandleEntry::start_compiled_entry(_masm, interp_entry);
  __ unimplemented(entry_name(ek)); // %%% FIXME: NYI

  init_entry(ek, MethodHandleEntry::finish_compiled_entry(_masm, me_cookie));
}