methodHandles_sparc.cpp revision 1472:c18cbe5936b8
1/*
2 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25#include "incls/_precompiled.incl"
26#include "incls/_methodHandles_sparc.cpp.incl"
27
28#define __ _masm->
29
// Reserve room for a MethodHandleEntry::Data record immediately before the
// machine-code entry point and initialize its fields, so that the entry's
// metadata and its code can be managed from one base pointer.  Returns that
// base pointer ('me'); code emission continues at the same address.
address MethodHandleEntry::start_compiled_entry(MacroAssembler* _masm,
                                                address interpreted_entry) {
  // Just before the actual machine code entry point, allocate space
  // for a MethodHandleEntry::Data record, so that we can manage everything
  // from one base pointer.
  __ align(wordSize);
  address target = __ pc() + sizeof(Data);
  // Pad with nops (re-aligning after each) until a Data-sized gap is reserved.
  while (__ pc() < target) {
    __ nop();
    __ align(wordSize);
  }

  // NOTE(review): 'me' points just past the reserved gap; the set_* accessors
  // presumably store the Data fields at negative offsets from this base --
  // confirm against MethodHandleEntry's declaration in the shared header.
  MethodHandleEntry* me = (MethodHandleEntry*) __ pc();
  me->set_end_address(__ pc());         // set a temporary end_address
  me->set_from_interpreted_entry(interpreted_entry);
  me->set_type_checking_entry(NULL);

  return (address) me;
}
49
// Seal an entry begun by start_compiled_entry(): record the real (word-aligned)
// end of the emitted code in the entry's Data record and return the entry.
MethodHandleEntry* MethodHandleEntry::finish_compiled_entry(MacroAssembler* _masm,
                                                address start_addr) {
  MethodHandleEntry* me = (MethodHandleEntry*) start_addr;
  // start_compiled_entry() left end_address == start_addr as a sentinel.
  assert(me->end_address() == start_addr, "valid ME");

  // Fill in the real end_address:
  __ align(wordSize);
  me->set_end_address(__ pc());

  return me;
}
61
62
// Code generation
// Emit the interpreter's universal entry for MethodHandle.invoke: fetch the
// MethodType from the invoked methodOop, locate the receiver method handle in
// the interpreter argument area, type-check it against the MethodType, and
// tail-jump to the handle's own entry.  The WrongMethodType throw path is
// emitted first so the main path can branch backward to it.  Returns the
// actual code entry point (which follows the throw path).
address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler* _masm) {
  // I5_savedSP: sender SP (must preserve)
  // G4 (Gargs): incoming argument list (must preserve)
  // G5_method:  invoke methodOop; becomes method type.
  // G3_method_handle: receiver method handle (must load from sp[MethodTypeForm.vmslots])
  // O0, O1: garbage temps, blown away
  Register O0_argslot = O0;
  Register O1_scratch = O1;

  // emit WrongMethodType path first, to enable back-branch from main path
  Label wrong_method_type;
  __ bind(wrong_method_type);
  __ jump_to(AddressLiteral(Interpreter::throw_WrongMethodType_entry()), O1_scratch);
  __ delayed()->nop();

  // here's where control starts out:
  __ align(CodeEntryAlignment);
  address entry_point = __ pc();

  // fetch the MethodType from the method handle into G5_method_type
  {
    Register tem = G5_method;
    assert(tem == G5_method_type, "yes, it's the same register");
    // Walk the -1-terminated chain of field offsets from the methodOop down
    // to the MethodType, reloading through the same register each step.
    for (jint* pchase = methodOopDesc::method_type_offsets_chain(); (*pchase) != -1; pchase++) {
      __ ld_ptr(Address(tem, *pchase), G5_method_type);
    }
  }

  // given the MethodType, find out where the MH argument is buried
  // (delayed_value: these Java field offsets are presumably not yet computed
  // at stub-generation time and get patched in later -- standard HotSpot idiom.)
  __ ld_ptr(Address(G5_method_type, __ delayed_value(java_dyn_MethodType::form_offset_in_bytes, O1_scratch)),        O0_argslot);
  __ ldsw(  Address(O0_argslot,     __ delayed_value(java_dyn_MethodTypeForm::vmslots_offset_in_bytes, O1_scratch)), O0_argslot);
  __ ld_ptr(__ argument_address(O0_argslot), G3_method_handle);

  // On type mismatch this branches back to wrong_method_type above.
  __ check_method_handle_type(G5_method_type, G3_method_handle, O1_scratch, wrong_method_type);
  __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);

  return entry_point;
}
102
103
104#ifdef ASSERT
// Debug-only helper: emit code that verifies argslot_reg points into the
// interpreter argument area, i.e. lies within (Gargs, FP].  Stops the VM with
// error_message otherwise.  Clobbers condition codes (and temp_reg on LP64).
static void verify_argslot(MacroAssembler* _masm, Register argslot_reg, Register temp_reg, const char* error_message) {
  // Verify that argslot lies within (Gargs, FP].
  Label L_ok, L_bad;
#ifdef _LP64
  // On 64-bit SPARC the frame pointer is biased; un-bias into temp_reg first.
  __ add(FP, STACK_BIAS, temp_reg);
  __ cmp(argslot_reg, temp_reg);
#else
  __ cmp(argslot_reg, FP);
#endif
  // Bad if argslot > FP (unsigned compare: these are addresses) ...
  __ brx(Assembler::greaterUnsigned, false, Assembler::pn, L_bad);
  __ delayed()->nop();
  __ cmp(Gargs, argslot_reg);
  // ... ok if Gargs <= argslot.
  __ brx(Assembler::lessEqualUnsigned, false, Assembler::pt, L_ok);
  __ delayed()->nop();
  __ bind(L_bad);
  __ stop(error_message);
  __ bind(L_ok);
}
123#endif
124
125
// Helper to insert argument slots into the stack.
// arg_slots must be a multiple of stack_move_unit() and <= 0
// (arg_slots is expressed in stack-move units; the <= 0 sign convention is
// enforced by the asserts below).  On exit argslot_reg points at the
// opened-up space.  temp3_reg is consumed by the shift; temp_reg/temp2_reg
// drive the copy loop.
void MethodHandles::insert_arg_slots(MacroAssembler* _masm,
                                     RegisterOrConstant arg_slots,
                                     int arg_mask,
                                     Register argslot_reg,
                                     Register temp_reg, Register temp2_reg, Register temp3_reg) {
  assert(temp3_reg != noreg, "temp3 required");
  // All working registers must be distinct (including a register-form arg_slots).
  assert_different_registers(argslot_reg, temp_reg, temp2_reg, temp3_reg,
                             (!arg_slots.is_register() ? Gargs : arg_slots.as_register()));

#ifdef ASSERT
  verify_argslot(_masm, argslot_reg, temp_reg, "insertion point must fall within current frame");
  if (arg_slots.is_register()) {
    // Emit a runtime check: arg_slots <= 0 and its low bits
    // (below stack_move_unit granularity) are clear.
    Label L_ok, L_bad;
    __ cmp(arg_slots.as_register(), (int32_t) NULL_WORD);
    __ br(Assembler::greater, false, Assembler::pn, L_bad);
    __ delayed()->nop();
    __ btst(-stack_move_unit() - 1, arg_slots.as_register());
    __ br(Assembler::zero, false, Assembler::pt, L_ok);
    __ delayed()->nop();
    __ bind(L_bad);
    __ stop("assert arg_slots <= 0 and clear low bits");
    __ bind(L_ok);
  } else {
    // Constant form: check the same conditions at stub-generation time.
    assert(arg_slots.as_constant() <= 0, "");
    assert(arg_slots.as_constant() % -stack_move_unit() == 0, "");
  }
#endif // ASSERT

#ifdef _LP64
  if (arg_slots.is_register()) {
    // Was arg_slots register loaded as signed int?
    // (Shift up and back down; a properly sign-extended value round-trips.)
    Label L_ok;
    __ sll(arg_slots.as_register(), BitsPerInt, temp_reg);
    __ sra(temp_reg, BitsPerInt, temp_reg);
    __ cmp(arg_slots.as_register(), temp_reg);
    __ br(Assembler::equal, false, Assembler::pt, L_ok);
    __ delayed()->nop();
    __ stop("arg_slots register not loaded as signed int");
    __ bind(L_ok);
  }
#endif

  // Make space on the stack for the inserted argument(s).
  // Then pull down everything shallower than argslot_reg.
  // The stacked return address gets pulled down with everything else.
  // That is, copy [sp, argslot) downward by -size words.  In pseudo-code:
  //   sp -= size;
  //   for (temp = sp + size; temp < argslot; temp++)
  //     temp[-size] = temp[0]
  //   argslot -= size;
  // offset = arg_slots scaled to bytes (negative, per sign convention above).
  RegisterOrConstant offset = __ regcon_sll_ptr(arg_slots, LogBytesPerWord, temp3_reg);

  // Keep the stack pointer 2*wordSize aligned.
  const int TwoWordAlignmentMask = right_n_bits(LogBytesPerWord + 1);
  RegisterOrConstant masked_offset = __ regcon_andn_ptr(offset, TwoWordAlignmentMask, temp_reg);
  __ add(SP, masked_offset, SP);

  __ mov(Gargs, temp_reg);  // source pointer for copy
  __ add(Gargs, offset, Gargs);

  {
    // Copy each word from [Gargs, argslot) down by 'offset' bytes.
    Label loop;
    __ bind(loop);
    // pull one word down each time through the loop
    __ ld_ptr(Address(temp_reg, 0), temp2_reg);
    __ st_ptr(temp2_reg, Address(temp_reg, offset));
    __ add(temp_reg, wordSize, temp_reg);
    __ cmp(temp_reg, argslot_reg);
    __ brx(Assembler::less, false, Assembler::pt, loop);
    __ delayed()->nop();  // FILLME
  }

  // Now move the argslot down, to point to the opened-up space.
  __ add(argslot_reg, offset, argslot_reg);
}
203
204
// Helper to remove argument slots from the stack.
// arg_slots must be a multiple of stack_move_unit() and >= 0
// (mirror image of insert_arg_slots: here the slide is upward and the stack
// shrinks).  On exit argslot_reg points at the deletion point and Gargs/SP
// have been moved up past the removed slots.
void MethodHandles::remove_arg_slots(MacroAssembler* _masm,
                                     RegisterOrConstant arg_slots,
                                     Register argslot_reg,
                                     Register temp_reg, Register temp2_reg, Register temp3_reg) {
  assert(temp3_reg != noreg, "temp3 required");
  // All working registers must be distinct (including a register-form arg_slots).
  assert_different_registers(argslot_reg, temp_reg, temp2_reg, temp3_reg,
                             (!arg_slots.is_register() ? Gargs : arg_slots.as_register()));

  // offset = arg_slots scaled to bytes (non-negative, per sign convention above).
  RegisterOrConstant offset = __ regcon_sll_ptr(arg_slots, LogBytesPerWord, temp3_reg);

#ifdef ASSERT
  // Verify that [argslot..argslot+size) lies within (Gargs, FP).
  __ add(argslot_reg, offset, temp2_reg);
  verify_argslot(_masm, temp2_reg, temp_reg, "deleted argument(s) must fall within current frame");
  if (arg_slots.is_register()) {
    // Emit a runtime check: arg_slots >= 0 and its low bits
    // (below stack_move_unit granularity) are clear.
    Label L_ok, L_bad;
    __ cmp(arg_slots.as_register(), (int32_t) NULL_WORD);
    __ br(Assembler::less, false, Assembler::pn, L_bad);
    __ delayed()->nop();
    __ btst(-stack_move_unit() - 1, arg_slots.as_register());
    __ br(Assembler::zero, false, Assembler::pt, L_ok);
    __ delayed()->nop();
    __ bind(L_bad);
    __ stop("assert arg_slots >= 0 and clear low bits");
    __ bind(L_ok);
  } else {
    // Constant form: check the same conditions at stub-generation time.
    assert(arg_slots.as_constant() >= 0, "");
    assert(arg_slots.as_constant() % -stack_move_unit() == 0, "");
  }
#endif // ASSERT

  // Pull up everything shallower than argslot.
  // Then remove the excess space on the stack.
  // The stacked return address gets pulled up with everything else.
  // That is, copy [sp, argslot) upward by size words.  In pseudo-code:
  //   for (temp = argslot-1; temp >= sp; --temp)
  //     temp[size] = temp[0]
  //   argslot += size;
  //   sp += size;
  __ sub(argslot_reg, wordSize, temp_reg);  // source pointer for copy
  {
    // Copy each word from [Gargs, argslot) up by 'offset' bytes,
    // walking downward so no word is overwritten before it is read.
    Label loop;
    __ bind(loop);
    // pull one word up each time through the loop
    __ ld_ptr(Address(temp_reg, 0), temp2_reg);
    __ st_ptr(temp2_reg, Address(temp_reg, offset));
    __ sub(temp_reg, wordSize, temp_reg);
    __ cmp(temp_reg, Gargs);
    __ brx(Assembler::greaterEqual, false, Assembler::pt, loop);
    __ delayed()->nop();  // FILLME
  }

  // Now move the argslot up, to point to the just-copied block.
  __ add(Gargs, offset, Gargs);
  // And adjust the argslot address to point at the deletion point.
  __ add(argslot_reg, offset, argslot_reg);

  // Keep the stack pointer 2*wordSize aligned.
  const int TwoWordAlignmentMask = right_n_bits(LogBytesPerWord + 1);
  RegisterOrConstant masked_offset = __ regcon_andn_ptr(offset, TwoWordAlignmentMask, temp_reg);
  __ add(SP, masked_offset, SP);
}
269
270
271#ifndef PRODUCT
272extern "C" void print_method_handle(oop mh);
273void trace_method_handle_stub(const char* adaptername,
274                              oop mh) {
275#if 0
276                              intptr_t* entry_sp,
277                              intptr_t* saved_sp,
278                              intptr_t* saved_bp) {
279  // called as a leaf from native code: do not block the JVM!
280  intptr_t* last_sp = (intptr_t*) saved_bp[frame::interpreter_frame_last_sp_offset];
281  intptr_t* base_sp = (intptr_t*) saved_bp[frame::interpreter_frame_monitor_block_top_offset];
282  printf("MH %s mh="INTPTR_FORMAT" sp=("INTPTR_FORMAT"+"INTX_FORMAT") stack_size="INTX_FORMAT" bp="INTPTR_FORMAT"\n",
283         adaptername, (intptr_t)mh, (intptr_t)entry_sp, (intptr_t)(saved_sp - entry_sp), (intptr_t)(base_sp - last_sp), (intptr_t)saved_bp);
284  if (last_sp != saved_sp)
285    printf("*** last_sp="INTPTR_FORMAT"\n", (intptr_t)last_sp);
286#endif
287
288  printf("MH %s mh="INTPTR_FORMAT"\n", adaptername, (intptr_t) mh);
289  print_method_handle(mh);
290}
291#endif // PRODUCT
292
293// which conversion op types are implemented here?
294int MethodHandles::adapter_conversion_ops_supported_mask() {
295  return ((1<<sun_dyn_AdapterMethodHandle::OP_RETYPE_ONLY)
296         |(1<<sun_dyn_AdapterMethodHandle::OP_RETYPE_RAW)
297         |(1<<sun_dyn_AdapterMethodHandle::OP_CHECK_CAST)
298         |(1<<sun_dyn_AdapterMethodHandle::OP_PRIM_TO_PRIM)
299         |(1<<sun_dyn_AdapterMethodHandle::OP_REF_TO_PRIM)
300         |(1<<sun_dyn_AdapterMethodHandle::OP_SWAP_ARGS)
301         |(1<<sun_dyn_AdapterMethodHandle::OP_ROT_ARGS)
302         |(1<<sun_dyn_AdapterMethodHandle::OP_DUP_ARGS)
303         |(1<<sun_dyn_AdapterMethodHandle::OP_DROP_ARGS)
304         //|(1<<sun_dyn_AdapterMethodHandle::OP_SPREAD_ARGS) //BUG!
305         );
306  // FIXME: MethodHandlesTest gets a crash if we enable OP_SPREAD_ARGS.
307}
308
309//------------------------------------------------------------------------------
310// MethodHandles::generate_method_handle_stub
311//
312// Generate an "entry" field for a method handle.
313// This determines how the method handle will respond to calls.
314void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHandles::EntryKind ek) {
315  // Here is the register state during an interpreted call,
316  // as set up by generate_method_handle_interpreter_entry():
317  // - G5: garbage temp (was MethodHandle.invoke methodOop, unused)
318  // - G3: receiver method handle
319  // - O5_savedSP: sender SP (must preserve)
320
321  Register O0_argslot = O0;
322  Register O1_scratch = O1;
323  Register O2_scratch = O2;
324  Register O3_scratch = O3;
325  Register G5_index   = G5;
326
327  guarantee(java_dyn_MethodHandle::vmentry_offset_in_bytes() != 0, "must have offsets");
328
329  // Some handy addresses:
330  Address G5_method_fie(    G5_method,        in_bytes(methodOopDesc::from_interpreted_offset()));
331
332  Address G3_mh_vmtarget(   G3_method_handle, java_dyn_MethodHandle::vmtarget_offset_in_bytes());
333
334  Address G3_dmh_vmindex(   G3_method_handle, sun_dyn_DirectMethodHandle::vmindex_offset_in_bytes());
335
336  Address G3_bmh_vmargslot( G3_method_handle, sun_dyn_BoundMethodHandle::vmargslot_offset_in_bytes());
337  Address G3_bmh_argument(  G3_method_handle, sun_dyn_BoundMethodHandle::argument_offset_in_bytes());
338
339  Address G3_amh_vmargslot( G3_method_handle, sun_dyn_AdapterMethodHandle::vmargslot_offset_in_bytes());
340  Address G3_amh_argument ( G3_method_handle, sun_dyn_AdapterMethodHandle::argument_offset_in_bytes());
341  Address G3_amh_conversion(G3_method_handle, sun_dyn_AdapterMethodHandle::conversion_offset_in_bytes());
342
343  const int java_mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes();
344
345  if (have_entry(ek)) {
346    __ nop();  // empty stubs make SG sick
347    return;
348  }
349
350  address interp_entry = __ pc();
351  if (UseCompressedOops)  __ unimplemented("UseCompressedOops");
352
353#ifndef PRODUCT
354  if (TraceMethodHandles) {
355    // save: Gargs, O5_savedSP
356    __ save(SP, -16*wordSize, SP);
357    __ set((intptr_t) entry_name(ek), O0);
358    __ mov(G3_method_handle, O1);
359    __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, trace_method_handle_stub));
360    __ restore(SP, 16*wordSize, SP);
361  }
362#endif // PRODUCT
363
364  switch ((int) ek) {
365  case _raise_exception:
366    {
367      // Not a real MH entry, but rather shared code for raising an
368      // exception.  Extra local arguments are passed in scratch
369      // registers, as required type in O3, failing object (or NULL)
370      // in O2, failing bytecode type in O1.
371
372      __ mov(O5_savedSP, SP);  // Cut the stack back to where the caller started.
373
374      // Push arguments as if coming from the interpreter.
375      Register O0_scratch = O0_argslot;
376      int stackElementSize = Interpreter::stackElementSize;
377
378      // Make space on the stack for the arguments.
379      __ sub(SP,    4*stackElementSize, SP);
380      __ sub(Gargs, 3*stackElementSize, Gargs);
381      //__ sub(Lesp,  3*stackElementSize, Lesp);
382
383      // void raiseException(int code, Object actual, Object required)
384      __ st(    O1_scratch, Address(Gargs, 2*stackElementSize));  // code
385      __ st_ptr(O2_scratch, Address(Gargs, 1*stackElementSize));  // actual
386      __ st_ptr(O3_scratch, Address(Gargs, 0*stackElementSize));  // required
387
388      Label no_method;
389      // FIXME: fill in _raise_exception_method with a suitable sun.dyn method
390      __ set(AddressLiteral((address) &_raise_exception_method), G5_method);
391      __ ld_ptr(Address(G5_method, 0), G5_method);
392      __ tst(G5_method);
393      __ brx(Assembler::zero, false, Assembler::pn, no_method);
394      __ delayed()->nop();
395
396      int jobject_oop_offset = 0;
397      __ ld_ptr(Address(G5_method, jobject_oop_offset), G5_method);
398      __ tst(G5_method);
399      __ brx(Assembler::zero, false, Assembler::pn, no_method);
400      __ delayed()->nop();
401
402      __ verify_oop(G5_method);
403      __ jump_indirect_to(G5_method_fie, O1_scratch);
404      __ delayed()->nop();
405
406      // If we get here, the Java runtime did not do its job of creating the exception.
      // Do something that at least causes a valid throw from the interpreter.
408      __ bind(no_method);
409      __ unimplemented("_raise_exception no method");
410    }
411    break;
412
413  case _invokestatic_mh:
414  case _invokespecial_mh:
415    {
416      __ ld_ptr(G3_mh_vmtarget, G5_method);  // target is a methodOop
417      __ verify_oop(G5_method);
418      // Same as TemplateTable::invokestatic or invokespecial,
419      // minus the CP setup and profiling:
420      if (ek == _invokespecial_mh) {
421        // Must load & check the first argument before entering the target method.
422        __ load_method_handle_vmslots(O0_argslot, G3_method_handle, O1_scratch);
423        __ ld_ptr(__ argument_address(O0_argslot), G3_method_handle);
424        __ null_check(G3_method_handle);
425        __ verify_oop(G3_method_handle);
426      }
427      __ jump_indirect_to(G5_method_fie, O1_scratch);
428      __ delayed()->nop();
429    }
430    break;
431
432  case _invokevirtual_mh:
433    {
434      // Same as TemplateTable::invokevirtual,
435      // minus the CP setup and profiling:
436
437      // Pick out the vtable index and receiver offset from the MH,
438      // and then we can discard it:
439      __ load_method_handle_vmslots(O0_argslot, G3_method_handle, O1_scratch);
440      __ ldsw(G3_dmh_vmindex, G5_index);
441      // Note:  The verifier allows us to ignore G3_mh_vmtarget.
442      __ ld_ptr(__ argument_address(O0_argslot, -1), G3_method_handle);
443      __ null_check(G3_method_handle, oopDesc::klass_offset_in_bytes());
444
445      // Get receiver klass:
446      Register O0_klass = O0_argslot;
447      __ load_klass(G3_method_handle, O0_klass);
448      __ verify_oop(O0_klass);
449
450      // Get target methodOop & entry point:
451      const int base = instanceKlass::vtable_start_offset() * wordSize;
452      assert(vtableEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");
453
454      __ sll_ptr(G5_index, LogBytesPerWord, G5_index);
455      __ add(O0_klass, G5_index, O0_klass);
456      Address vtable_entry_addr(O0_klass, base + vtableEntry::method_offset_in_bytes());
457      __ ld_ptr(vtable_entry_addr, G5_method);
458
459      __ verify_oop(G5_method);
460      __ jump_indirect_to(G5_method_fie, O1_scratch);
461      __ delayed()->nop();
462    }
463    break;
464
465  case _invokeinterface_mh:
466    {
467      // Same as TemplateTable::invokeinterface,
468      // minus the CP setup and profiling:
469      __ load_method_handle_vmslots(O0_argslot, G3_method_handle, O1_scratch);
470      Register O1_intf  = O1_scratch;
471      __ ld_ptr(G3_mh_vmtarget, O1_intf);
472      __ ldsw(G3_dmh_vmindex, G5_index);
473      __ ld_ptr(__ argument_address(O0_argslot, -1), G3_method_handle);
474      __ null_check(G3_method_handle, oopDesc::klass_offset_in_bytes());
475
476      // Get receiver klass:
477      Register O0_klass = O0_argslot;
478      __ load_klass(G3_method_handle, O0_klass);
479      __ verify_oop(O0_klass);
480
481      // Get interface:
482      Label no_such_interface;
483      __ verify_oop(O1_intf);
484      __ lookup_interface_method(O0_klass, O1_intf,
485                                 // Note: next two args must be the same:
486                                 G5_index, G5_method,
487                                 O2_scratch,
488                                 O3_scratch,
489                                 no_such_interface);
490
491      __ verify_oop(G5_method);
492      __ jump_indirect_to(G5_method_fie, O1_scratch);
493      __ delayed()->nop();
494
495      __ bind(no_such_interface);
496      // Throw an exception.
497      // For historical reasons, it will be IncompatibleClassChangeError.
498      __ unimplemented("not tested yet");
499      __ ld_ptr(Address(O1_intf, java_mirror_offset), O3_scratch);  // required interface
500      __ mov(O0_klass, O2_scratch);  // bad receiver
501      __ jump_to(AddressLiteral(from_interpreted_entry(_raise_exception)), O0_argslot);
502      __ delayed()->mov(Bytecodes::_invokeinterface, O1_scratch);  // who is complaining?
503    }
504    break;
505
506  case _bound_ref_mh:
507  case _bound_int_mh:
508  case _bound_long_mh:
509  case _bound_ref_direct_mh:
510  case _bound_int_direct_mh:
511  case _bound_long_direct_mh:
512    {
513      const bool direct_to_method = (ek >= _bound_ref_direct_mh);
514      BasicType arg_type  = T_ILLEGAL;
515      int       arg_mask  = _INSERT_NO_MASK;
516      int       arg_slots = -1;
517      get_ek_bound_mh_info(ek, arg_type, arg_mask, arg_slots);
518
519      // Make room for the new argument:
520      __ ldsw(G3_bmh_vmargslot, O0_argslot);
521      __ add(Gargs, __ argument_offset(O0_argslot), O0_argslot);
522
523      insert_arg_slots(_masm, arg_slots * stack_move_unit(), arg_mask, O0_argslot, O1_scratch, O2_scratch, G5_index);
524
525      // Store bound argument into the new stack slot:
526      __ ld_ptr(G3_bmh_argument, O1_scratch);
527      if (arg_type == T_OBJECT) {
528        __ st_ptr(O1_scratch, Address(O0_argslot, 0));
529      } else {
530        Address prim_value_addr(O1_scratch, java_lang_boxing_object::value_offset_in_bytes(arg_type));
531        __ load_sized_value(prim_value_addr, O2_scratch, type2aelembytes(arg_type), is_signed_subword_type(arg_type));
532        if (arg_slots == 2) {
533          __ unimplemented("not yet tested");
534#ifndef _LP64
535          __ signx(O2_scratch, O3_scratch);  // Sign extend
536#endif
537          __ st_long(O2_scratch, Address(O0_argslot, 0));  // Uses O2/O3 on !_LP64
538        } else {
539          __ st_ptr( O2_scratch, Address(O0_argslot, 0));
540        }
541      }
542
543      if (direct_to_method) {
544        __ ld_ptr(G3_mh_vmtarget, G5_method);  // target is a methodOop
545        __ verify_oop(G5_method);
546        __ jump_indirect_to(G5_method_fie, O1_scratch);
547        __ delayed()->nop();
548      } else {
549        __ ld_ptr(G3_mh_vmtarget, G3_method_handle);  // target is a methodOop
550        __ verify_oop(G3_method_handle);
551        __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
552      }
553    }
554    break;
555
556  case _adapter_retype_only:
557  case _adapter_retype_raw:
558    // Immediately jump to the next MH layer:
559    __ ld_ptr(G3_mh_vmtarget, G3_method_handle);
560    __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
561    // This is OK when all parameter types widen.
562    // It is also OK when a return type narrows.
563    break;
564
565  case _adapter_check_cast:
566    {
567      // Temps:
568      Register G5_klass = G5_index;  // Interesting AMH data.
569
570      // Check a reference argument before jumping to the next layer of MH:
571      __ ldsw(G3_amh_vmargslot, O0_argslot);
572      Address vmarg = __ argument_address(O0_argslot);
573
574      // What class are we casting to?
575      __ ld_ptr(G3_amh_argument, G5_klass);  // This is a Class object!
576      __ ld_ptr(Address(G5_klass, java_lang_Class::klass_offset_in_bytes()), G5_klass);
577
578      Label done;
579      __ ld_ptr(vmarg, O1_scratch);
580      __ tst(O1_scratch);
581      __ brx(Assembler::zero, false, Assembler::pn, done);  // No cast if null.
582      __ delayed()->nop();
583      __ load_klass(O1_scratch, O1_scratch);
584
585      // Live at this point:
586      // - G5_klass        :  klass required by the target method
587      // - O1_scratch      :  argument klass to test
588      // - G3_method_handle:  adapter method handle
589      __ check_klass_subtype(O1_scratch, G5_klass, O0_argslot, O2_scratch, done);
590
591      // If we get here, the type check failed!
592      __ ldsw(G3_amh_vmargslot, O0_argslot);  // reload argslot field
593      __ ld_ptr(G3_amh_argument, O3_scratch);  // required class
594      __ ld_ptr(vmarg, O2_scratch);  // bad object
595      __ jump_to(AddressLiteral(from_interpreted_entry(_raise_exception)), O0_argslot);
596      __ delayed()->mov(Bytecodes::_checkcast, O1_scratch);  // who is complaining?
597
598      __ bind(done);
599      // Get the new MH:
600      __ ld_ptr(G3_mh_vmtarget, G3_method_handle);
601      __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
602    }
603    break;
604
605  case _adapter_prim_to_prim:
606  case _adapter_ref_to_prim:
607    // Handled completely by optimized cases.
608    __ stop("init_AdapterMethodHandle should not issue this");
609    break;
610
611  case _adapter_opt_i2i:        // optimized subcase of adapt_prim_to_prim
612//case _adapter_opt_f2i:        // optimized subcase of adapt_prim_to_prim
613  case _adapter_opt_l2i:        // optimized subcase of adapt_prim_to_prim
614  case _adapter_opt_unboxi:     // optimized subcase of adapt_ref_to_prim
615    {
616      // Perform an in-place conversion to int or an int subword.
617      __ ldsw(G3_amh_vmargslot, O0_argslot);
618      Address vmarg = __ argument_address(O0_argslot);
619      Address value;
620      bool value_left_justified = false;
621
622      switch (ek) {
623      case _adapter_opt_i2i:
624      case _adapter_opt_l2i:
625        __ unimplemented(entry_name(ek));
626        value = vmarg;
627        break;
628      case _adapter_opt_unboxi:
629        {
630          // Load the value up from the heap.
631          __ ld_ptr(vmarg, O1_scratch);
632          int value_offset = java_lang_boxing_object::value_offset_in_bytes(T_INT);
633#ifdef ASSERT
634          for (int bt = T_BOOLEAN; bt < T_INT; bt++) {
635            if (is_subword_type(BasicType(bt)))
636              assert(value_offset == java_lang_boxing_object::value_offset_in_bytes(BasicType(bt)), "");
637          }
638#endif
639          __ null_check(O1_scratch, value_offset);
640          value = Address(O1_scratch, value_offset);
641#ifdef _BIG_ENDIAN
642          // Values stored in objects are packed.
643          value_left_justified = true;
644#endif
645        }
646        break;
647      default:
648        ShouldNotReachHere();
649      }
650
651      // This check is required on _BIG_ENDIAN
652      Register G5_vminfo = G5_index;
653      __ ldsw(G3_amh_conversion, G5_vminfo);
654      assert(CONV_VMINFO_SHIFT == 0, "preshifted");
655
656      // Original 32-bit vmdata word must be of this form:
657      // | MBZ:6 | signBitCount:8 | srcDstTypes:8 | conversionOp:8 |
658      __ lduw(value, O1_scratch);
659      if (!value_left_justified)
660        __ sll(O1_scratch, G5_vminfo, O1_scratch);
661      Label zero_extend, done;
662      __ btst(CONV_VMINFO_SIGN_FLAG, G5_vminfo);
663      __ br(Assembler::zero, false, Assembler::pn, zero_extend);
664      __ delayed()->nop();
665
666      // this path is taken for int->byte, int->short
667      __ sra(O1_scratch, G5_vminfo, O1_scratch);
668      __ ba(false, done);
669      __ delayed()->nop();
670
671      __ bind(zero_extend);
672      // this is taken for int->char
673      __ srl(O1_scratch, G5_vminfo, O1_scratch);
674
675      __ bind(done);
676      __ st(O1_scratch, vmarg);
677
678      // Get the new MH:
679      __ ld_ptr(G3_mh_vmtarget, G3_method_handle);
680      __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
681    }
682    break;
683
684  case _adapter_opt_i2l:        // optimized subcase of adapt_prim_to_prim
685  case _adapter_opt_unboxl:     // optimized subcase of adapt_ref_to_prim
686    {
687      // Perform an in-place int-to-long or ref-to-long conversion.
688      __ ldsw(G3_amh_vmargslot, O0_argslot);
689
690      // On big-endian machine we duplicate the slot and store the MSW
691      // in the first slot.
692      __ add(Gargs, __ argument_offset(O0_argslot, 1), O0_argslot);
693
694      insert_arg_slots(_masm, stack_move_unit(), _INSERT_INT_MASK, O0_argslot, O1_scratch, O2_scratch, G5_index);
695
696      Address arg_lsw(O0_argslot, 0);
697      Address arg_msw(O0_argslot, -Interpreter::stackElementSize);
698
699      switch (ek) {
700      case _adapter_opt_i2l:
701        {
702          __ ldsw(arg_lsw, O2_scratch);      // Load LSW
703#ifndef _LP64
704          __ signx(O2_scratch, O3_scratch);  // Sign extend
705#endif
706          __ st_long(O2_scratch, arg_msw);   // Uses O2/O3 on !_LP64
707        }
708        break;
709      case _adapter_opt_unboxl:
710        {
711          // Load the value up from the heap.
712          __ ld_ptr(arg_lsw, O1_scratch);
713          int value_offset = java_lang_boxing_object::value_offset_in_bytes(T_LONG);
714          assert(value_offset == java_lang_boxing_object::value_offset_in_bytes(T_DOUBLE), "");
715          __ null_check(O1_scratch, value_offset);
716          __ ld_long(Address(O1_scratch, value_offset), O2_scratch);  // Uses O2/O3 on !_LP64
717          __ st_long(O2_scratch, arg_msw);
718        }
719        break;
720      default:
721        ShouldNotReachHere();
722      }
723
724      __ ld_ptr(G3_mh_vmtarget, G3_method_handle);
725      __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
726    }
727    break;
728
729  case _adapter_opt_f2d:        // optimized subcase of adapt_prim_to_prim
730  case _adapter_opt_d2f:        // optimized subcase of adapt_prim_to_prim
731    {
732      // perform an in-place floating primitive conversion
733      __ unimplemented(entry_name(ek));
734    }
735    break;
736
737  case _adapter_prim_to_ref:
738    __ unimplemented(entry_name(ek)); // %%% FIXME: NYI
739    break;
740
741  case _adapter_swap_args:
742  case _adapter_rot_args:
743    // handled completely by optimized cases
744    __ stop("init_AdapterMethodHandle should not issue this");
745    break;
746
  case _adapter_opt_swap_1:
  case _adapter_opt_swap_2:
  case _adapter_opt_rot_1_up:
  case _adapter_opt_rot_1_down:
  case _adapter_opt_rot_2_up:
  case _adapter_opt_rot_2_down:
    {
      // Swap or rotate a chunk of argument slots in place.
      // 'swap_bytes' is the size of the chunk being moved; 'rotate' is
      // zero for a plain swap, positive for an upward rotation, and
      // otherwise a downward rotation (see get_ek_adapter_opt_swap_rot_info).
      int swap_bytes = 0, rotate = 0;
      get_ek_adapter_opt_swap_rot_info(ek, swap_bytes, rotate);

      // 'argslot' is the position of the first argument to swap.
      __ ldsw(G3_amh_vmargslot, O0_argslot);
      __ add(Gargs, __ argument_offset(O0_argslot), O0_argslot);

      // 'vminfo' is the second.
      Register O1_destslot = O1_scratch;
      __ ldsw(G3_amh_conversion, O1_destslot);
      assert(CONV_VMINFO_SHIFT == 0, "preshifted");
      __ and3(O1_destslot, CONV_VMINFO_MASK, O1_destslot);
      __ add(Gargs, __ argument_offset(O1_destslot), O1_destslot);

      if (!rotate) {
        // Plain swap: exchange the two chunks one word at a time.
        for (int i = 0; i < swap_bytes; i += wordSize) {
          __ ld_ptr(Address(O0_argslot,  i), O2_scratch);
          __ ld_ptr(Address(O1_destslot, i), O3_scratch);
          __ st_ptr(O3_scratch, Address(O0_argslot,  i));
          __ st_ptr(O2_scratch, Address(O1_destslot, i));
        }
      } else {
        // Save the first chunk, which is going to get overwritten.
        // (A 16-byte chunk needs two loads; note the deliberate fall-through.)
        switch (swap_bytes) {
        case 4 : __ lduw(Address(O0_argslot, 0), O2_scratch); break;
        case 16: __ ldx( Address(O0_argslot, 8), O3_scratch); //fall-thru
        case 8 : __ ldx( Address(O0_argslot, 0), O2_scratch); break;
        default: ShouldNotReachHere();
        }

        if (rotate > 0) {
          // Rotate upward.
          __ sub(O0_argslot, swap_bytes, O0_argslot);
#if ASSERT  // NOTE(review): HotSpot convention is "#ifdef ASSERT" — confirm "#if" is intended
          {
            // Verify that argslot > destslot, by at least swap_bytes.
            Label L_ok;
            __ cmp(O0_argslot, O1_destslot);
            __ brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, L_ok);
            __ delayed()->nop();
            __ stop("source must be above destination (upward rotation)");
            __ bind(L_ok);
          }
#endif
          // Work argslot down to destslot, copying contiguous data upwards.
          // Pseudo-code:
          //   argslot  = src_addr - swap_bytes
          //   destslot = dest_addr
          //   while (argslot >= destslot) {
          //     *(argslot + swap_bytes) = *(argslot + 0);
          //     argslot--;            // i.e. argslot -= wordSize
          //   }
          Label loop;
          __ bind(loop);
          __ ld_ptr(Address(O0_argslot, 0), G5_index);
          __ st_ptr(G5_index, Address(O0_argslot, swap_bytes));
          __ sub(O0_argslot, wordSize, O0_argslot);
          __ cmp(O0_argslot, O1_destslot);
          __ brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, loop);
          __ delayed()->nop();  // FILLME
        } else {
          // Rotate downward.
          __ add(O0_argslot, swap_bytes, O0_argslot);
#if ASSERT
          {
            // Verify that argslot < destslot, by at least swap_bytes.
            Label L_ok;
            __ cmp(O0_argslot, O1_destslot);
            __ brx(Assembler::lessEqualUnsigned, false, Assembler::pt, L_ok);
            __ delayed()->nop();
            // NOTE(review): this stop message looks copy-pasted from the
            // upward branch; it should read "source must be below
            // destination (downward rotation)".  (Diagnostic string only;
            // left unchanged here.)
            __ stop("source must be above destination (upward rotation)");
            __ bind(L_ok);
          }
#endif
          // Work argslot up to destslot, copying contiguous data downwards.
          // Pseudo-code:
          //   argslot  = src_addr + swap_bytes
          //   destslot = dest_addr
          //   while (argslot <= destslot) {
          //     *(argslot - swap_bytes) = *(argslot + 0);
          //     argslot++;            // i.e. argslot += wordSize
          //   }
          Label loop;
          __ bind(loop);
          __ ld_ptr(Address(O0_argslot, 0), G5_index);
          __ st_ptr(G5_index, Address(O0_argslot, -swap_bytes));
          __ add(O0_argslot, wordSize, O0_argslot);
          __ cmp(O0_argslot, O1_destslot);
          __ brx(Assembler::lessEqualUnsigned, false, Assembler::pt, loop);
          __ delayed()->nop();  // FILLME
        }

        // Store the original first chunk into the destination slot, now free.
        switch (swap_bytes) {
        case 4 : __ stw(O2_scratch, Address(O1_destslot, 0)); break;
        case 16: __ stx(O3_scratch, Address(O1_destslot, 8)); // fall-thru
        case 8 : __ stx(O2_scratch, Address(O1_destslot, 0)); break;
        default: ShouldNotReachHere();
        }
      }

      // Arguments are rearranged; tail-call the adapter's target.
      __ ld_ptr(G3_mh_vmtarget, G3_method_handle);
      __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
    }
    break;
858
  case _adapter_dup_args:
    {
      // 'argslot' is the position of the first argument to duplicate.
      __ ldsw(G3_amh_vmargslot, O0_argslot);
      __ add(Gargs, __ argument_offset(O0_argslot), O0_argslot);

      // 'stack_move' is negative number of words to duplicate.
      Register G5_stack_move = G5_index;
      __ ldsw(G3_amh_conversion, G5_stack_move);
      __ sra(G3_amh_conversion == G3_amh_conversion ? G5_stack_move : G5_stack_move, CONV_STACK_MOVE_SHIFT, G5_stack_move);

      // Remember the old Gargs (argslot[0]).
      Register O1_oldarg = O1_scratch;
      __ mov(Gargs, O1_oldarg);

      // Move Gargs down to make room for dups.
      // (stack_move is negative, so adding the scaled value lowers Gargs.)
      __ sll_ptr(G5_stack_move, LogBytesPerWord, G5_stack_move);
      __ add(Gargs, G5_stack_move, Gargs);

      // Compute the new Gargs (argslot[0]).
      Register O2_newarg = O2_scratch;
      __ mov(Gargs, O2_newarg);

      // Copy from oldarg[0...] down to newarg[0...]
      // Pseudo-code:
      //   O1_oldarg  = old-Gargs
      //   O2_newarg  = new-Gargs
      //   O0_argslot = argslot
      //   while (O2_newarg < O1_oldarg) *O2_newarg++ = *O0_argslot++
      Label loop;
      __ bind(loop);
      __ ld_ptr(Address(O0_argslot, 0), O3_scratch);
      __ st_ptr(O3_scratch, Address(O2_newarg, 0));
      __ add(O0_argslot, wordSize, O0_argslot);
      __ add(O2_newarg,  wordSize, O2_newarg);
      __ cmp(O2_newarg, O1_oldarg);
      __ brx(Assembler::less, false, Assembler::pt, loop);
      __ delayed()->nop();  // FILLME

      // Duplicates are in place; tail-call the adapter's target.
      __ ld_ptr(G3_mh_vmtarget, G3_method_handle);
      __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
    }
    break;
902
  case _adapter_drop_args:
    {
      // 'argslot' is the position of the first argument to nuke.
      __ ldsw(G3_amh_vmargslot, O0_argslot);
      __ add(Gargs, __ argument_offset(O0_argslot), O0_argslot);

      // 'stack_move' is number of words to drop.
      Register G5_stack_move = G5_index;
      __ ldsw(G3_amh_conversion, G5_stack_move);
      __ sra(G5_stack_move, CONV_STACK_MOVE_SHIFT, G5_stack_move);

      // Close the gap: slide the remaining arguments over the dropped slots.
      remove_arg_slots(_masm, G5_stack_move, O0_argslot, O1_scratch, O2_scratch, O3_scratch);

      // Slots removed; tail-call the adapter's target.
      __ ld_ptr(G3_mh_vmtarget, G3_method_handle);
      __ jump_to_method_handle_entry(G3_method_handle, O1_scratch);
    }
    break;
920
  case _adapter_collect_args:
    __ unimplemented(entry_name(ek)); // %%% FIXME: NYI
    break;

  case _adapter_spread_args:
    // Handled completely by optimized cases (_adapter_opt_spread_* below).
    __ stop("init_AdapterMethodHandle should not issue this");
    break;

  case _adapter_opt_spread_0:
  case _adapter_opt_spread_1:
  case _adapter_opt_spread_more:
    {
      // spread an array out into a group of arguments
      __ unimplemented(entry_name(ek));  // %%% FIXME: NYI
    }
    break;

  case _adapter_flyby:
  case _adapter_ricochet:
    __ unimplemented(entry_name(ek)); // %%% FIXME: NYI
    break;
943
  default:
    ShouldNotReachHere();
  }

  // Lay down the MethodHandleEntry record plus a (not yet implemented)
  // compiled-call entry point for this adapter, then register the finished
  // entry for this entry kind via init_entry.
  address me_cookie = MethodHandleEntry::start_compiled_entry(_masm, interp_entry);
  __ unimplemented(entry_name(ek)); // %%% FIXME: NYI

  init_entry(ek, MethodHandleEntry::finish_compiled_entry(_masm, me_cookie));
}
953