methodHandles_sparc.cpp revision 1836:e62345fd6a46
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_methodHandles_sparc.cpp.incl"

#define __ _masm->

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif

#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")

address MethodHandleEntry::start_compiled_entry(MacroAssembler* _masm,
                                                address interpreted_entry) {
  // Just before the actual machine code entry point, allocate space
  // for a MethodHandleEntry::Data record, so that we can manage everything
  // from one base pointer.
  __ align(wordSize);
  address target = __ pc() + sizeof(Data);
  while (__ pc() < target) {
    __ nop();
    __ align(wordSize);
  }

  MethodHandleEntry* me = (MethodHandleEntry*) __ pc();
  me->set_end_address(__ pc());         // set a temporary end_address
  me->set_from_interpreted_entry(interpreted_entry);
  me->set_type_checking_entry(NULL);

  return (address) me;
}

MethodHandleEntry* MethodHandleEntry::finish_compiled_entry(MacroAssembler* _masm,
                                                            address start_addr) {
  MethodHandleEntry* me = (MethodHandleEntry*) start_addr;
  assert(me->end_address() == start_addr, "valid ME");

  // Fill in the real end_address:
  __ align(wordSize);
  me->set_end_address(__ pc());

  return me;
}


// Code generation
address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler* _masm) {
  // I5_savedSP/O5_savedSP: sender SP (must preserve)
  // G4 (Gargs): incoming argument list (must preserve)
  // G5_method:  invoke methodOop
  // G3_method_handle: receiver method handle (must load from sp[MethodTypeForm.vmslots])
  // O0, O1, O2, O3, O4: garbage temps, blown away
  Register O0_mtype   = O0;
  Register O1_scratch = O1;
  Register O2_scratch = O2;
  Register O3_scratch = O3;
  Register O4_argslot = O4;
  Register O4_argbase = O4;

  // emit WrongMethodType path first, to enable back-branch from main path
  Label wrong_method_type;
  __ bind(wrong_method_type);
  Label invoke_generic_slow_path;
  assert(methodOopDesc::intrinsic_id_size_in_bytes() == sizeof(u1), "");
  __ ldub(Address(G5_method, methodOopDesc::intrinsic_id_offset_in_bytes()), O1_scratch);
  __ cmp(O1_scratch, (int) vmIntrinsics::_invokeExact);
  __ brx(Assembler::notEqual, false, Assembler::pt, invoke_generic_slow_path);
  __ delayed()->nop();
  __ mov(O0_mtype, G5_method_type);  // required by throw_WrongMethodType
  // mov(G3_method_handle, G3_method_handle);  // already in this register
  __ jump_to(AddressLiteral(Interpreter::throw_WrongMethodType_entry()), O1_scratch);
  __ delayed()->nop();

  // here's where control starts out:
  __ align(CodeEntryAlignment);
  address entry_point = __ pc();

  // fetch the MethodType from the method handle
  {
    Register tem = G5_method;
    for (jint* pchase = methodOopDesc::method_type_offsets_chain(); (*pchase) != -1; pchase++) {
      __ ld_ptr(Address(tem, *pchase), O0_mtype);
      tem = O0_mtype;  // in case there is another indirection
    }
  }

  // given the MethodType, find out where the MH argument is buried
  __ load_heap_oop(Address(O0_mtype,   __ delayed_value(java_dyn_MethodType::form_offset_in_bytes,        O1_scratch)), O4_argslot);
  __ ldsw(         Address(O4_argslot, __ delayed_value(java_dyn_MethodTypeForm::vmslots_offset_in_bytes, O1_scratch)), O4_argslot);
  __ add(Gargs, __ argument_offset(O4_argslot, 1), O4_argbase);
  // Note: argument_address uses its input as a scratch register!
  __ ld_ptr(Address(O4_argbase, -Interpreter::stackElementSize), G3_method_handle);

  trace_method_handle(_masm, "invokeExact");

  __ check_method_handle_type(O0_mtype, G3_method_handle, O1_scratch, wrong_method_type);
  __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);

  // for invokeGeneric (only), apply argument and result conversions on the fly
  __ bind(invoke_generic_slow_path);
#ifdef ASSERT
  { Label L;
    __ ldub(Address(G5_method, methodOopDesc::intrinsic_id_offset_in_bytes()), O1_scratch);
    __ cmp(O1_scratch, (int) vmIntrinsics::_invokeGeneric);
    __ brx(Assembler::equal, false, Assembler::pt, L);
    __ delayed()->nop();
    __ stop("bad methodOop::intrinsic_id");
    __ bind(L);
  }
#endif //ASSERT

  // make room on the stack for another pointer:
  insert_arg_slots(_masm, 2 * stack_move_unit(), _INSERT_REF_MASK, O4_argbase, O1_scratch, O2_scratch, O3_scratch);
  // load up an adapter from the calling type (Java weaves this)
  Register O2_form    = O2_scratch;
  Register O3_adapter = O3_scratch;
  __ load_heap_oop(Address(O0_mtype, __ delayed_value(java_dyn_MethodType::form_offset_in_bytes,              O1_scratch)), O2_form);
  // load_heap_oop(Address(O2_form,  __ delayed_value(java_dyn_MethodTypeForm::genericInvoker_offset_in_bytes, O1_scratch)), O3_adapter);
  // deal with old JDK versions:
  __ add(          Address(O2_form, __ delayed_value(java_dyn_MethodTypeForm::genericInvoker_offset_in_bytes, O1_scratch)), O3_adapter);
  __ cmp(O3_adapter, O2_form);
  Label sorry_no_invoke_generic;
  __ brx(Assembler::lessUnsigned, false, Assembler::pn, sorry_no_invoke_generic);
  __ delayed()->nop();

  __ load_heap_oop(Address(O3_adapter, 0), O3_adapter);
  __ tst(O3_adapter);
  __ brx(Assembler::zero, false, Assembler::pn, sorry_no_invoke_generic);
  __ delayed()->nop();
  __ st_ptr(O3_adapter, Address(O4_argbase, 1 * Interpreter::stackElementSize));
  // As a trusted first argument, pass the type being called, so the adapter knows
  // the actual types of the arguments and return values.
  // (Generic invokers are shared among form-families of method-type.)
  __ st_ptr(O0_mtype, Address(O4_argbase, 0 * Interpreter::stackElementSize));
  // FIXME: assert that O3_adapter is of the right method-type.
  __ mov(O3_adapter, G3_method_handle);
  trace_method_handle(_masm, "invokeGeneric");
  __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);

  __ bind(sorry_no_invoke_generic);  // no invokeGeneric implementation available!
  __ mov(O0_mtype, G5_method_type);  // required by throw_WrongMethodType
  // mov(G3_method_handle, G3_method_handle);  // already in this register
  __ jump_to(AddressLiteral(Interpreter::throw_WrongMethodType_entry()), O1_scratch);
  __ delayed()->nop();

  return entry_point;
}


#ifdef ASSERT
static void verify_argslot(MacroAssembler* _masm, Register argslot_reg, Register temp_reg, const char* error_message) {
  // Verify that argslot lies within (Gargs, FP].
  Label L_ok, L_bad;
  BLOCK_COMMENT("{ verify_argslot");
#ifdef _LP64
  __ add(FP, STACK_BIAS, temp_reg);
  __ cmp(argslot_reg, temp_reg);
#else
  __ cmp(argslot_reg, FP);
#endif
  __ brx(Assembler::greaterUnsigned, false, Assembler::pn, L_bad);
  __ delayed()->nop();
  __ cmp(Gargs, argslot_reg);
  __ brx(Assembler::lessEqualUnsigned, false, Assembler::pt, L_ok);
  __ delayed()->nop();
  __ bind(L_bad);
  __ stop(error_message);
  __ bind(L_ok);
  BLOCK_COMMENT("} verify_argslot");
}
#endif


// Helper to insert argument slots into the stack.
// arg_slots must be a multiple of stack_move_unit() and <= 0
void MethodHandles::insert_arg_slots(MacroAssembler* _masm,
                                     RegisterOrConstant arg_slots,
                                     int arg_mask,
                                     Register argslot_reg,
                                     Register temp_reg, Register temp2_reg, Register temp3_reg) {
  assert(temp3_reg != noreg, "temp3 required");
  assert_different_registers(argslot_reg, temp_reg, temp2_reg, temp3_reg,
                             (!arg_slots.is_register() ? Gargs : arg_slots.as_register()));

#ifdef ASSERT
  verify_argslot(_masm, argslot_reg, temp_reg, "insertion point must fall within current frame");
  if (arg_slots.is_register()) {
    Label L_ok, L_bad;
    __ cmp(arg_slots.as_register(), (int32_t) NULL_WORD);
    __ br(Assembler::greater, false, Assembler::pn, L_bad);
    __ delayed()->nop();
    __ btst(-stack_move_unit() - 1, arg_slots.as_register());
    __ br(Assembler::zero, false, Assembler::pt, L_ok);
    __ delayed()->nop();
    __ bind(L_bad);
    __ stop("assert arg_slots <= 0 and clear low bits");
    __ bind(L_ok);
  } else {
    assert(arg_slots.as_constant() <= 0, "");
    assert(arg_slots.as_constant() % -stack_move_unit() == 0, "");
  }
#endif // ASSERT

#ifdef _LP64
  if (arg_slots.is_register()) {
    // Was arg_slots register loaded as signed int?
    Label L_ok;
    __ sll(arg_slots.as_register(), BitsPerInt, temp_reg);
    __ sra(temp_reg, BitsPerInt, temp_reg);
    __ cmp(arg_slots.as_register(), temp_reg);
    __ br(Assembler::equal, false, Assembler::pt, L_ok);
    __ delayed()->nop();
    __ stop("arg_slots register not loaded as signed int");
    __ bind(L_ok);
  }
#endif

  // Make space on the stack for the inserted argument(s).
  // Then pull down everything shallower than argslot_reg.
  // The stacked return address gets pulled down with everything else.
  // That is, copy [sp, argslot) downward by -size words.  In pseudo-code:
  //   sp -= size;
  //   for (temp = sp + size; temp < argslot; temp++)
  //     temp[-size] = temp[0]
  //   argslot -= size;
  BLOCK_COMMENT("insert_arg_slots {");
  RegisterOrConstant offset = __ regcon_sll_ptr(arg_slots, LogBytesPerWord, temp3_reg);

  // Keep the stack pointer 2*wordSize aligned.
  const int TwoWordAlignmentMask = right_n_bits(LogBytesPerWord + 1);
  RegisterOrConstant masked_offset = __ regcon_andn_ptr(offset, TwoWordAlignmentMask, temp_reg);
  __ add(SP, masked_offset, SP);

  __ mov(Gargs, temp_reg);  // source pointer for copy
  __ add(Gargs, offset, Gargs);

  {
    Label loop;
    __ BIND(loop);
    // pull one word down each time through the loop
    __ ld_ptr(Address(temp_reg, 0), temp2_reg);
    __ st_ptr(temp2_reg, Address(temp_reg, offset));
    __ add(temp_reg, wordSize, temp_reg);
    __ cmp(temp_reg, argslot_reg);
    __ brx(Assembler::less, false, Assembler::pt, loop);
    __ delayed()->nop();  // FILLME
  }

  // Now move the argslot down, to point to the opened-up space.
  __ add(argslot_reg, offset, argslot_reg);
  BLOCK_COMMENT("} insert_arg_slots");
}


// Helper to remove argument slots from the stack.
// arg_slots must be a multiple of stack_move_unit() and >= 0
void MethodHandles::remove_arg_slots(MacroAssembler* _masm,
                                     RegisterOrConstant arg_slots,
                                     Register argslot_reg,
                                     Register temp_reg, Register temp2_reg, Register temp3_reg) {
  assert(temp3_reg != noreg, "temp3 required");
  assert_different_registers(argslot_reg, temp_reg, temp2_reg, temp3_reg,
                             (!arg_slots.is_register() ? Gargs : arg_slots.as_register()));

  RegisterOrConstant offset = __ regcon_sll_ptr(arg_slots, LogBytesPerWord, temp3_reg);

#ifdef ASSERT
  // Verify that [argslot..argslot+size) lies within (Gargs, FP).
  __ add(argslot_reg, offset, temp2_reg);
  verify_argslot(_masm, temp2_reg, temp_reg, "deleted argument(s) must fall within current frame");
  if (arg_slots.is_register()) {
    Label L_ok, L_bad;
    __ cmp(arg_slots.as_register(), (int32_t) NULL_WORD);
    __ br(Assembler::less, false, Assembler::pn, L_bad);
    __ delayed()->nop();
    __ btst(-stack_move_unit() - 1, arg_slots.as_register());
    __ br(Assembler::zero, false, Assembler::pt, L_ok);
    __ delayed()->nop();
    __ bind(L_bad);
    __ stop("assert arg_slots >= 0 and clear low bits");
    __ bind(L_ok);
  } else {
    assert(arg_slots.as_constant() >= 0, "");
    assert(arg_slots.as_constant() % -stack_move_unit() == 0, "");
  }
#endif // ASSERT

  BLOCK_COMMENT("remove_arg_slots {");
  // Pull up everything shallower than argslot.
  // Then remove the excess space on the stack.
  // The stacked return address gets pulled up with everything else.
  // That is, copy [sp, argslot) upward by size words.  In pseudo-code:
  //   for (temp = argslot-1; temp >= sp; --temp)
  //     temp[size] = temp[0]
  //   argslot += size;
  //   sp += size;
  __ sub(argslot_reg, wordSize, temp_reg);  // source pointer for copy
  {
    Label loop;
    __ BIND(loop);
    // pull one word up each time through the loop
    __ ld_ptr(Address(temp_reg, 0), temp2_reg);
    __ st_ptr(temp2_reg, Address(temp_reg, offset));
    __ sub(temp_reg, wordSize, temp_reg);
    __ cmp(temp_reg, Gargs);
    __ brx(Assembler::greaterEqual, false, Assembler::pt, loop);
    __ delayed()->nop();  // FILLME
  }

  // Now move the argslot up, to point to the just-copied block.
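  // (Note: both Gargs and argslot_reg are bumped by 'offset' below, so the
  //  vacated slots drop out of the interpreter's argument window.)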
  __ add(Gargs, offset, Gargs);
  // And adjust the argslot address to point at the deletion point.
  __ add(argslot_reg, offset, argslot_reg);

  // Keep the stack pointer 2*wordSize aligned.
  const int TwoWordAlignmentMask = right_n_bits(LogBytesPerWord + 1);
  RegisterOrConstant masked_offset = __ regcon_andn_ptr(offset, TwoWordAlignmentMask, temp_reg);
  __ add(SP, masked_offset, SP);
  BLOCK_COMMENT("} remove_arg_slots");
}


#ifndef PRODUCT
extern "C" void print_method_handle(oop mh);
void trace_method_handle_stub(const char* adaptername,
                              oopDesc* mh) {
  printf("MH %s mh="INTPTR_FORMAT"\n", adaptername, (intptr_t) mh);
  print_method_handle(mh);
}
void MethodHandles::trace_method_handle(MacroAssembler* _masm, const char* adaptername) {
  if (!TraceMethodHandles)  return;
  BLOCK_COMMENT("trace_method_handle {");
  // save: Gargs, O5_savedSP
  __ save_frame(16);
  __ set((intptr_t) adaptername, O0);
  __ mov(G3_method_handle, O1);
  __ mov(G3_method_handle, L3);
  __ mov(Gargs, L4);
  __ mov(G5_method_type, L5);
  __ call_VM_leaf(L7, CAST_FROM_FN_PTR(address, trace_method_handle_stub));

  __ mov(L3, G3_method_handle);
  __ mov(L4, Gargs);
  __ mov(L5, G5_method_type);
  __ restore();
  BLOCK_COMMENT("} trace_method_handle");
}
#endif // PRODUCT

// which conversion op types are implemented here?
int MethodHandles::adapter_conversion_ops_supported_mask() {
  return ((1<<sun_dyn_AdapterMethodHandle::OP_RETYPE_ONLY)
         |(1<<sun_dyn_AdapterMethodHandle::OP_RETYPE_RAW)
         |(1<<sun_dyn_AdapterMethodHandle::OP_CHECK_CAST)
         |(1<<sun_dyn_AdapterMethodHandle::OP_PRIM_TO_PRIM)
         |(1<<sun_dyn_AdapterMethodHandle::OP_REF_TO_PRIM)
         |(1<<sun_dyn_AdapterMethodHandle::OP_SWAP_ARGS)
         |(1<<sun_dyn_AdapterMethodHandle::OP_ROT_ARGS)
         |(1<<sun_dyn_AdapterMethodHandle::OP_DUP_ARGS)
         |(1<<sun_dyn_AdapterMethodHandle::OP_DROP_ARGS)
         //|(1<<sun_dyn_AdapterMethodHandle::OP_SPREAD_ARGS) //BUG!
         );
  // FIXME: MethodHandlesTest gets a crash if we enable OP_SPREAD_ARGS.
}

//------------------------------------------------------------------------------
// MethodHandles::generate_method_handle_stub
//
// Generate an "entry" field for a method handle.
// This determines how the method handle will respond to calls.
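//
// Informal overview of the cases below (the case bodies are authoritative):
//   1. Load the relevant fields of G3_method_handle (vmargslot, argument,
//      conversion, vmtarget) at their fixed offsets.
//   2. Adjust the outgoing interpreter argument area, either in place or via
//      insert_arg_slots / remove_arg_slots, keeping Gargs consistent.
//   3. Tail-jump to the target: a methodOop's from-interpreted entry
//      (G5_method_fie) or the next method handle (jump_to_method_handle_entry).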
void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHandles::EntryKind ek) {
  // Here is the register state during an interpreted call,
  // as set up by generate_method_handle_interpreter_entry():
  // - G5: garbage temp (was MethodHandle.invoke methodOop, unused)
  // - G3: receiver method handle
  // - O5_savedSP: sender SP (must preserve)

  Register O0_argslot = O0;
  Register O1_scratch = O1;
  Register O2_scratch = O2;
  Register O3_scratch = O3;
  Register G5_index   = G5;

  guarantee(java_dyn_MethodHandle::vmentry_offset_in_bytes() != 0, "must have offsets");

  // Some handy addresses:
  Address G5_method_fie(    G5_method,        in_bytes(methodOopDesc::from_interpreted_offset()));

  Address G3_mh_vmtarget(   G3_method_handle, java_dyn_MethodHandle::vmtarget_offset_in_bytes());

  Address G3_dmh_vmindex(   G3_method_handle, sun_dyn_DirectMethodHandle::vmindex_offset_in_bytes());

  Address G3_bmh_vmargslot( G3_method_handle, sun_dyn_BoundMethodHandle::vmargslot_offset_in_bytes());
  Address G3_bmh_argument(  G3_method_handle, sun_dyn_BoundMethodHandle::argument_offset_in_bytes());

  Address G3_amh_vmargslot( G3_method_handle, sun_dyn_AdapterMethodHandle::vmargslot_offset_in_bytes());
  Address G3_amh_argument ( G3_method_handle, sun_dyn_AdapterMethodHandle::argument_offset_in_bytes());
  Address G3_amh_conversion(G3_method_handle, sun_dyn_AdapterMethodHandle::conversion_offset_in_bytes());

  const int java_mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes();

  if (have_entry(ek)) {
    __ nop();  // empty stubs make SG sick
    return;
  }

  address interp_entry = __ pc();

  trace_method_handle(_masm, entry_name(ek));

  switch ((int) ek) {
  case _raise_exception:
    {
      // Not a real MH entry, but rather shared code for raising an
      // exception.  Extra local arguments are passed in scratch
      // registers, as required type in O3, failing object (or NULL)
      // in O2, failing bytecode type in O1.

      __ mov(O5_savedSP, SP);  // Cut the stack back to where the caller started.

      // Push arguments as if coming from the interpreter.
      Register O0_scratch = O0_argslot;
      int stackElementSize = Interpreter::stackElementSize;

      // Make space on the stack for the arguments and set Gargs
      // correctly.
      __ sub(SP, 4*stackElementSize, SP);  // Keep stack aligned.
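      // (The add below repositions Gargs into the just-reserved area so that
      //  the three outgoing arguments can be stored at element offsets 0..2
      //  from Gargs.)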
      __ add(SP, (frame::varargs_offset)*wordSize - 1*Interpreter::stackElementSize + STACK_BIAS + BytesPerWord, Gargs);

      // void raiseException(int code, Object actual, Object required)
      __ st(    O1_scratch, Address(Gargs, 2*stackElementSize));  // code
      __ st_ptr(O2_scratch, Address(Gargs, 1*stackElementSize));  // actual
      __ st_ptr(O3_scratch, Address(Gargs, 0*stackElementSize));  // required

      Label no_method;
      // FIXME: fill in _raise_exception_method with a suitable sun.dyn method
      __ set(AddressLiteral((address) &_raise_exception_method), G5_method);
      __ ld_ptr(Address(G5_method, 0), G5_method);
      __ tst(G5_method);
      __ brx(Assembler::zero, false, Assembler::pn, no_method);
      __ delayed()->nop();

      int jobject_oop_offset = 0;
      __ ld_ptr(Address(G5_method, jobject_oop_offset), G5_method);
      __ tst(G5_method);
      __ brx(Assembler::zero, false, Assembler::pn, no_method);
      __ delayed()->nop();

      __ verify_oop(G5_method);
      __ jump_indirect_to(G5_method_fie, O1_scratch);
      __ delayed()->nop();

      // If we get here, the Java runtime did not do its job of creating the exception.
      // Do something that at least causes a valid throw from the interpreter.
      __ bind(no_method);
      __ unimplemented("_raise_exception no method");
    }
    break;

  case _invokestatic_mh:
  case _invokespecial_mh:
    {
      __ load_heap_oop(G3_mh_vmtarget, G5_method);  // target is a methodOop
      __ verify_oop(G5_method);
      // Same as TemplateTable::invokestatic or invokespecial,
      // minus the CP setup and profiling:
      if (ek == _invokespecial_mh) {
        // Must load & check the first argument before entering the target method.
        __ load_method_handle_vmslots(O0_argslot, G3_method_handle, O1_scratch);
        __ ld_ptr(__ argument_address(O0_argslot), G3_method_handle);
        __ null_check(G3_method_handle);
        __ verify_oop(G3_method_handle);
      }
      __ jump_indirect_to(G5_method_fie, O1_scratch);
      __ delayed()->nop();
    }
    break;

  case _invokevirtual_mh:
    {
      // Same as TemplateTable::invokevirtual,
      // minus the CP setup and profiling:

      // Pick out the vtable index and receiver offset from the MH,
      // and then we can discard it:
      __ load_method_handle_vmslots(O0_argslot, G3_method_handle, O1_scratch);
      __ ldsw(G3_dmh_vmindex, G5_index);
      // Note: The verifier allows us to ignore G3_mh_vmtarget.
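      // (Sketch of the dispatch below: the receiver is reloaded from stack
      //  slot vmslots-1, null-checked via its klass field, and G5_index, the
      //  DirectMethodHandle vmindex, is scaled into the receiver klass's
      //  vtable to fetch the target methodOop.)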
      __ ld_ptr(__ argument_address(O0_argslot, -1), G3_method_handle);
      __ null_check(G3_method_handle, oopDesc::klass_offset_in_bytes());

      // Get receiver klass:
      Register O0_klass = O0_argslot;
      __ load_klass(G3_method_handle, O0_klass);
      __ verify_oop(O0_klass);

      // Get target methodOop & entry point:
      const int base = instanceKlass::vtable_start_offset() * wordSize;
      assert(vtableEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");

      __ sll_ptr(G5_index, LogBytesPerWord, G5_index);
      __ add(O0_klass, G5_index, O0_klass);
      Address vtable_entry_addr(O0_klass, base + vtableEntry::method_offset_in_bytes());
      __ ld_ptr(vtable_entry_addr, G5_method);

      __ verify_oop(G5_method);
      __ jump_indirect_to(G5_method_fie, O1_scratch);
      __ delayed()->nop();
    }
    break;

  case _invokeinterface_mh:
    {
      // Same as TemplateTable::invokeinterface,
      // minus the CP setup and profiling:
      __ load_method_handle_vmslots(O0_argslot, G3_method_handle, O1_scratch);
      Register O1_intf = O1_scratch;
      __ load_heap_oop(G3_mh_vmtarget, O1_intf);
      __ ldsw(G3_dmh_vmindex, G5_index);
      __ ld_ptr(__ argument_address(O0_argslot, -1), G3_method_handle);
      __ null_check(G3_method_handle, oopDesc::klass_offset_in_bytes());

      // Get receiver klass:
      Register O0_klass = O0_argslot;
      __ load_klass(G3_method_handle, O0_klass);
      __ verify_oop(O0_klass);

      // Get interface:
      Label no_such_interface;
      __ verify_oop(O1_intf);
      __ lookup_interface_method(O0_klass, O1_intf,
                                 // Note: next two args must be the same:
                                 G5_index, G5_method,
                                 O2_scratch,
                                 O3_scratch,
                                 no_such_interface);

      __ verify_oop(G5_method);
      __ jump_indirect_to(G5_method_fie, O1_scratch);
      __ delayed()->nop();

      __ bind(no_such_interface);
      // Throw an exception.
      // For historical reasons, it will be IncompatibleClassChangeError.
      __ unimplemented("not tested yet");
      __ ld_ptr(Address(O1_intf, java_mirror_offset), O3_scratch);  // required interface
      __ mov(O0_klass, O2_scratch);  // bad receiver
      __ jump_to(AddressLiteral(from_interpreted_entry(_raise_exception)), O0_argslot);
      __ delayed()->mov(Bytecodes::_invokeinterface, O1_scratch);  // who is complaining?
    }
    break;

  case _bound_ref_mh:
  case _bound_int_mh:
  case _bound_long_mh:
  case _bound_ref_direct_mh:
  case _bound_int_direct_mh:
  case _bound_long_direct_mh:
    {
      const bool direct_to_method = (ek >= _bound_ref_direct_mh);
      BasicType arg_type  = T_ILLEGAL;
      int       arg_mask  = _INSERT_NO_MASK;
      int       arg_slots = -1;
      get_ek_bound_mh_info(ek, arg_type, arg_mask, arg_slots);

      // Make room for the new argument:
      __ ldsw(G3_bmh_vmargslot, O0_argslot);
      __ add(Gargs, __ argument_offset(O0_argslot), O0_argslot);

      insert_arg_slots(_masm, arg_slots * stack_move_unit(), arg_mask, O0_argslot, O1_scratch, O2_scratch, G5_index);

      // Store bound argument into the new stack slot:
      __ load_heap_oop(G3_bmh_argument, O1_scratch);
      if (arg_type == T_OBJECT) {
        __ st_ptr(O1_scratch, Address(O0_argslot, 0));
      } else {
        Address prim_value_addr(O1_scratch, java_lang_boxing_object::value_offset_in_bytes(arg_type));
        __ load_sized_value(prim_value_addr, O2_scratch, type2aelembytes(arg_type), is_signed_subword_type(arg_type));
        if (arg_slots == 2) {
          __ unimplemented("not yet tested");
#ifndef _LP64
          __ signx(O2_scratch, O3_scratch);  // Sign extend
#endif
          __ st_long(O2_scratch, Address(O0_argslot, 0));  // Uses O2/O3 on !_LP64
        } else {
          __ st_ptr( O2_scratch, Address(O0_argslot, 0));
        }
      }

      if (direct_to_method) {
        __ load_heap_oop(G3_mh_vmtarget, G5_method);  // target is a methodOop
        __ verify_oop(G5_method);
        __ jump_indirect_to(G5_method_fie, O1_scratch);
        __ delayed()->nop();
      } else {
        __ load_heap_oop(G3_mh_vmtarget, G3_method_handle);  // target is a method handle
        __ verify_oop(G3_method_handle);
        __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
      }
    }
    break;

  case _adapter_retype_only:
  case _adapter_retype_raw:
    // Immediately jump to the next MH layer:
    __ load_heap_oop(G3_mh_vmtarget, G3_method_handle);
    __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
    // This is OK when all parameter types widen.
    // It is also OK when a return type narrows.
    break;

  case _adapter_check_cast:
    {
      // Temps:
      Register G5_klass = G5_index;  // Interesting AMH data.

      // Check a reference argument before jumping to the next layer of MH:
      __ ldsw(G3_amh_vmargslot, O0_argslot);
      Address vmarg = __ argument_address(O0_argslot);

      // What class are we casting to?
      __ load_heap_oop(G3_amh_argument, G5_klass);  // This is a Class object!
      __ load_heap_oop(Address(G5_klass, java_lang_Class::klass_offset_in_bytes()), G5_klass);

      Label done;
      __ ld_ptr(vmarg, O1_scratch);
      __ tst(O1_scratch);
      __ brx(Assembler::zero, false, Assembler::pn, done);  // No cast if null.
      __ delayed()->nop();
      __ load_klass(O1_scratch, O1_scratch);

      // Live at this point:
      // - G5_klass        :  klass required by the target method
      // - O1_scratch      :  argument klass to test
      // - G3_method_handle:  adapter method handle
      __ check_klass_subtype(O1_scratch, G5_klass, O0_argslot, O2_scratch, done);

      // If we get here, the type check failed!
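      // (Failure path: the code below reloads the arguments expected by the
      //  shared _raise_exception stub, namely the required class in O3, the
      //  failing object in O2, and the complaining bytecode (_checkcast) in
      //  O1, then tail-jumps to that stub.)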
      __ ldsw(G3_amh_vmargslot, O0_argslot);          // reload argslot field
      __ load_heap_oop(G3_amh_argument, O3_scratch);  // required class
      __ ld_ptr(vmarg, O2_scratch);                   // bad object
      __ jump_to(AddressLiteral(from_interpreted_entry(_raise_exception)), O0_argslot);
      __ delayed()->mov(Bytecodes::_checkcast, O1_scratch);  // who is complaining?

      __ bind(done);
      // Get the new MH:
      __ load_heap_oop(G3_mh_vmtarget, G3_method_handle);
      __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
    }
    break;

  case _adapter_prim_to_prim:
  case _adapter_ref_to_prim:
    // Handled completely by optimized cases.
    __ stop("init_AdapterMethodHandle should not issue this");
    break;

  case _adapter_opt_i2i:     // optimized subcase of adapt_prim_to_prim
//case _adapter_opt_f2i:     // optimized subcase of adapt_prim_to_prim
  case _adapter_opt_l2i:     // optimized subcase of adapt_prim_to_prim
  case _adapter_opt_unboxi:  // optimized subcase of adapt_ref_to_prim
    {
      // Perform an in-place conversion to int or an int subword.
      __ ldsw(G3_amh_vmargslot, O0_argslot);
      Address vmarg = __ argument_address(O0_argslot);
      Address value;
      bool value_left_justified = false;

      switch (ek) {
      case _adapter_opt_i2i:
        value = vmarg;
        break;
      case _adapter_opt_l2i:
        {
          // just delete the extra slot
          __ add(Gargs, __ argument_offset(O0_argslot), O0_argslot);
          remove_arg_slots(_masm, -stack_move_unit(), O0_argslot, O1_scratch, O2_scratch, O3_scratch);
          value = vmarg = Address(O0_argslot, 0);
        }
        break;
      case _adapter_opt_unboxi:
        {
          // Load the value up from the heap.
          __ ld_ptr(vmarg, O1_scratch);
          int value_offset = java_lang_boxing_object::value_offset_in_bytes(T_INT);
#ifdef ASSERT
          for (int bt = T_BOOLEAN; bt < T_INT; bt++) {
            if (is_subword_type(BasicType(bt)))
              assert(value_offset == java_lang_boxing_object::value_offset_in_bytes(BasicType(bt)), "");
          }
#endif
          __ null_check(O1_scratch, value_offset);
          value = Address(O1_scratch, value_offset);
#ifdef _BIG_ENDIAN
          // Values stored in objects are packed.
          value_left_justified = true;
#endif
        }
        break;
      default:
        ShouldNotReachHere();
      }

      // This check is required on _BIG_ENDIAN
      Register G5_vminfo = G5_index;
      __ ldsw(G3_amh_conversion, G5_vminfo);
      assert(CONV_VMINFO_SHIFT == 0, "preshifted");

      // Original 32-bit vmdata word must be of this form:
      // | MBZ:6 | signBitCount:8 | srcDstTypes:8 | conversionOp:8 |
      __ lduw(value, O1_scratch);
      if (!value_left_justified)
        __ sll(O1_scratch, G5_vminfo, O1_scratch);
      Label zero_extend, done;
      __ btst(CONV_VMINFO_SIGN_FLAG, G5_vminfo);
      __ br(Assembler::zero, false, Assembler::pn, zero_extend);
      __ delayed()->nop();

      // this path is taken for int->byte, int->short
      __ sra(O1_scratch, G5_vminfo, O1_scratch);
      __ ba(false, done);
      __ delayed()->nop();

      __ bind(zero_extend);
      // this is taken for int->char
      __ srl(O1_scratch, G5_vminfo, O1_scratch);

      __ bind(done);
      __ st(O1_scratch, vmarg);

      // Get the new MH:
      __ load_heap_oop(G3_mh_vmtarget, G3_method_handle);
      __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
    }
    break;

  case _adapter_opt_i2l:     // optimized subcase of adapt_prim_to_prim
  case _adapter_opt_unboxl:  // optimized subcase of adapt_ref_to_prim
    {
      // Perform an in-place int-to-long or ref-to-long conversion.
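      // (Overview: insert_arg_slots opens one extra interpreter slot; the
      //  64-bit store at arg_msw then places the most-significant word in the
      //  new slot and the least-significant word back in the original slot,
      //  matching this big-endian target's layout.)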
      __ ldsw(G3_amh_vmargslot, O0_argslot);

      // On big-endian machine we duplicate the slot and store the MSW
      // in the first slot.
      __ add(Gargs, __ argument_offset(O0_argslot, 1), O0_argslot);

      insert_arg_slots(_masm, stack_move_unit(), _INSERT_INT_MASK, O0_argslot, O1_scratch, O2_scratch, G5_index);

      Address arg_lsw(O0_argslot, 0);
      Address arg_msw(O0_argslot, -Interpreter::stackElementSize);

      switch (ek) {
      case _adapter_opt_i2l:
        {
          __ ldsw(arg_lsw, O2_scratch);      // Load LSW
#ifndef _LP64
          __ signx(O2_scratch, O3_scratch);  // Sign extend
#endif
          __ st_long(O2_scratch, arg_msw);   // Uses O2/O3 on !_LP64
        }
        break;
      case _adapter_opt_unboxl:
        {
          // Load the value up from the heap.
          __ ld_ptr(arg_lsw, O1_scratch);
          int value_offset = java_lang_boxing_object::value_offset_in_bytes(T_LONG);
          assert(value_offset == java_lang_boxing_object::value_offset_in_bytes(T_DOUBLE), "");
          __ null_check(O1_scratch, value_offset);
          __ ld_long(Address(O1_scratch, value_offset), O2_scratch);  // Uses O2/O3 on !_LP64
          __ st_long(O2_scratch, arg_msw);
        }
        break;
      default:
        ShouldNotReachHere();
      }

      __ load_heap_oop(G3_mh_vmtarget, G3_method_handle);
      __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
    }
    break;

  case _adapter_opt_f2d:  // optimized subcase of adapt_prim_to_prim
  case _adapter_opt_d2f:  // optimized subcase of adapt_prim_to_prim
    {
      // perform an in-place floating primitive conversion
      __ unimplemented(entry_name(ek));
    }
    break;

  case _adapter_prim_to_ref:
    __ unimplemented(entry_name(ek));  // %%% FIXME: NYI
    break;

  case _adapter_swap_args:
  case _adapter_rot_args:
    // handled completely by optimized cases
    __ stop("init_AdapterMethodHandle should not issue this");
    break;

  case _adapter_opt_swap_1:
  case _adapter_opt_swap_2:
  case _adapter_opt_rot_1_up:
  case _adapter_opt_rot_1_down:
  case _adapter_opt_rot_2_up:
  case _adapter_opt_rot_2_down:
    {
      int swap_bytes = 0, rotate = 0;
      get_ek_adapter_opt_swap_rot_info(ek, swap_bytes, rotate);

      // 'argslot' is the position of the first argument to swap.
      __ ldsw(G3_amh_vmargslot, O0_argslot);
      __ add(Gargs, __ argument_offset(O0_argslot), O0_argslot);

      // 'vminfo' is the second.
      Register O1_destslot = O1_scratch;
      __ ldsw(G3_amh_conversion, O1_destslot);
      assert(CONV_VMINFO_SHIFT == 0, "preshifted");
      __ and3(O1_destslot, CONV_VMINFO_MASK, O1_destslot);
      __ add(Gargs, __ argument_offset(O1_destslot), O1_destslot);

      if (!rotate) {
        for (int i = 0; i < swap_bytes; i += wordSize) {
          __ ld_ptr(Address(O0_argslot, i), O2_scratch);
          __ ld_ptr(Address(O1_destslot, i), O3_scratch);
          __ st_ptr(O3_scratch, Address(O0_argslot, i));
          __ st_ptr(O2_scratch, Address(O1_destslot, i));
        }
      } else {
        // Save the first chunk, which is going to get overwritten.
        switch (swap_bytes) {
        case 4 : __ lduw(Address(O0_argslot, 0), O2_scratch); break;
        case 16: __ ldx( Address(O0_argslot, 8), O3_scratch); //fall-thru
        case 8 : __ ldx( Address(O0_argslot, 0), O2_scratch); break;
        default: ShouldNotReachHere();
        }

        if (rotate > 0) {
          // Rotate upward.
          __ sub(O0_argslot, swap_bytes, O0_argslot);
#ifdef ASSERT
          {
            // Verify that argslot > destslot, by at least swap_bytes.
            Label L_ok;
            __ cmp(O0_argslot, O1_destslot);
            __ brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, L_ok);
            __ delayed()->nop();
            __ stop("source must be above destination (upward rotation)");
            __ bind(L_ok);
          }
#endif
          // Work argslot down to destslot, copying contiguous data upwards.
          // Pseudo-code:
          //   argslot  = src_addr - swap_bytes
          //   destslot = dest_addr
          //   while (argslot >= destslot) {
          //     *(argslot + swap_bytes) = *(argslot + 0);
          //     argslot--;
          //   }
          Label loop;
          __ bind(loop);
          __ ld_ptr(Address(O0_argslot, 0), G5_index);
          __ st_ptr(G5_index, Address(O0_argslot, swap_bytes));
          __ sub(O0_argslot, wordSize, O0_argslot);
          __ cmp(O0_argslot, O1_destslot);
          __ brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, loop);
          __ delayed()->nop();  // FILLME
        } else {
          __ add(O0_argslot, swap_bytes, O0_argslot);
#ifdef ASSERT
          {
            // Verify that argslot < destslot, by at least swap_bytes.
            Label L_ok;
            __ cmp(O0_argslot, O1_destslot);
            __ brx(Assembler::lessEqualUnsigned, false, Assembler::pt, L_ok);
            __ delayed()->nop();
            __ stop("source must be below destination (downward rotation)");
            __ bind(L_ok);
          }
#endif
          // Work argslot up to destslot, copying contiguous data downwards.
          // Pseudo-code:
          //   argslot  = src_addr + swap_bytes
          //   destslot = dest_addr
          //   while (argslot <= destslot) {
          //     *(argslot - swap_bytes) = *(argslot + 0);
          //     argslot++;
          //   }
          Label loop;
          __ bind(loop);
          __ ld_ptr(Address(O0_argslot, 0), G5_index);
          __ st_ptr(G5_index, Address(O0_argslot, -swap_bytes));
          __ add(O0_argslot, wordSize, O0_argslot);
          __ cmp(O0_argslot, O1_destslot);
          __ brx(Assembler::lessEqualUnsigned, false, Assembler::pt, loop);
          __ delayed()->nop();  // FILLME
        }

        // Store the original first chunk into the destination slot, now free.
        switch (swap_bytes) {
        case 4 : __ stw(O2_scratch, Address(O1_destslot, 0)); break;
        case 16: __ stx(O3_scratch, Address(O1_destslot, 8)); // fall-thru
        case 8 : __ stx(O2_scratch, Address(O1_destslot, 0)); break;
        default: ShouldNotReachHere();
        }
      }

      __ load_heap_oop(G3_mh_vmtarget, G3_method_handle);
      __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
    }
    break;

  case _adapter_dup_args:
    {
      // 'argslot' is the position of the first argument to duplicate.
      __ ldsw(G3_amh_vmargslot, O0_argslot);
      __ add(Gargs, __ argument_offset(O0_argslot), O0_argslot);

      // 'stack_move' is negative number of words to duplicate.
      Register G5_stack_move = G5_index;
      __ ldsw(G3_amh_conversion, G5_stack_move);
      __ sra(G5_stack_move, CONV_STACK_MOVE_SHIFT, G5_stack_move);

      // Remember the old Gargs (argslot[0]).
      Register O1_oldarg = O1_scratch;
      __ mov(Gargs, O1_oldarg);

      // Move Gargs down to make room for dups.
      __ sll_ptr(G5_stack_move, LogBytesPerWord, G5_stack_move);
      __ add(Gargs, G5_stack_move, Gargs);

      // Compute the new Gargs (argslot[0]).
      Register O2_newarg = O2_scratch;
      __ mov(Gargs, O2_newarg);

      // Copy from oldarg[0...] down to newarg[0...]
      // Pseudo-code:
      //   O1_oldarg  = old-Gargs
      //   O2_newarg  = new-Gargs
      //   O0_argslot = argslot
      //   while (O2_newarg < O1_oldarg) *O2_newarg = *O0_argslot++
      Label loop;
      __ bind(loop);
      __ ld_ptr(Address(O0_argslot, 0), O3_scratch);
      __ st_ptr(O3_scratch, Address(O2_newarg, 0));
      __ add(O0_argslot, wordSize, O0_argslot);
      __ add(O2_newarg, wordSize, O2_newarg);
      __ cmp(O2_newarg, O1_oldarg);
      __ brx(Assembler::less, false, Assembler::pt, loop);
      __ delayed()->nop();  // FILLME

      __ load_heap_oop(G3_mh_vmtarget, G3_method_handle);
      __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
    }
    break;

  case _adapter_drop_args:
    {
      // 'argslot' is the position of the first argument to nuke.
      __ ldsw(G3_amh_vmargslot, O0_argslot);
      __ add(Gargs, __ argument_offset(O0_argslot), O0_argslot);

      // 'stack_move' is number of words to drop.
      Register G5_stack_move = G5_index;
      __ ldsw(G3_amh_conversion, G5_stack_move);
      __ sra(G5_stack_move, CONV_STACK_MOVE_SHIFT, G5_stack_move);

      remove_arg_slots(_masm, G5_stack_move, O0_argslot, O1_scratch, O2_scratch, O3_scratch);

      __ load_heap_oop(G3_mh_vmtarget, G3_method_handle);
      __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
    }
    break;

  case _adapter_collect_args:
    __ unimplemented(entry_name(ek));  // %%% FIXME: NYI
    break;

  case _adapter_spread_args:
    // Handled completely by optimized cases.
    __ stop("init_AdapterMethodHandle should not issue this");
    break;

  case _adapter_opt_spread_0:
  case _adapter_opt_spread_1:
  case _adapter_opt_spread_more:
    {
      // spread an array out into a group of arguments
      __ unimplemented(entry_name(ek));
    }
    break;

  case _adapter_flyby:
  case _adapter_ricochet:
    __ unimplemented(entry_name(ek));  // %%% FIXME: NYI
    break;

  default:
    ShouldNotReachHere();
  }

  address me_cookie = MethodHandleEntry::start_compiled_entry(_masm, interp_entry);
  __ unimplemented(entry_name(ek));  // %%% FIXME: NYI

  init_entry(ek, MethodHandleEntry::finish_compiled_entry(_masm, me_cookie));
}