// methodHandles_x86.cpp revision 1787:b6aedd1acdc0
/*
 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_methodHandles_x86.cpp.incl"

#define __ _masm->

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) __ block_comment(str)
#endif

#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")

address MethodHandleEntry::start_compiled_entry(MacroAssembler* _masm,
                                                address interpreted_entry) {
  // Just before the actual machine code entry point, allocate space
  // for a MethodHandleEntry::Data record, so that we can manage everything
  // from one base pointer.
  __ align(wordSize);
  address target = __ pc() + sizeof(Data);
  while (__ pc() < target) {
    __ nop();
    __ align(wordSize);
  }

  MethodHandleEntry* me = (MethodHandleEntry*) __ pc();
  me->set_end_address(__ pc());         // set a temporary end_address
  me->set_from_interpreted_entry(interpreted_entry);
  me->set_type_checking_entry(NULL);

  return (address) me;
}

MethodHandleEntry* MethodHandleEntry::finish_compiled_entry(MacroAssembler* _masm,
                                                address start_addr) {
  MethodHandleEntry* me = (MethodHandleEntry*) start_addr;
  assert(me->end_address() == start_addr, "valid ME");

  // Fill in the real end_address:
  __ align(wordSize);
  me->set_end_address(__ pc());

  return me;
}

#ifdef ASSERT
static void verify_argslot(MacroAssembler* _masm, Register argslot_reg,
                           const char* error_message) {
  // Verify that argslot lies within (rsp, rbp].
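  // (Illustrative note, not in the original: in the interpreter-style frames these
  //  stubs operate on, the outgoing argument slots and the stacked return PC all sit
  //  between the current rsp and the saved rbp, so any legitimate argslot pointer
  //  must fall within that range.)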
  Label L_ok, L_bad;
  BLOCK_COMMENT("{ verify_argslot");
  __ cmpptr(argslot_reg, rbp);
  __ jccb(Assembler::above, L_bad);
  __ cmpptr(rsp, argslot_reg);
  __ jccb(Assembler::below, L_ok);
  __ bind(L_bad);
  __ stop(error_message);
  __ bind(L_ok);
  BLOCK_COMMENT("} verify_argslot");
}
#endif


// Code generation
address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler* _masm) {
  // rbx: methodOop
  // rcx: receiver method handle (must load from sp[MethodTypeForm.vmslots])
  // rsi/r13: sender SP (must preserve; see prepare_to_jump_from_interpreted)
  // rdx, rdi: garbage temp, blown away

  Register rbx_method = rbx;
  Register rcx_recv   = rcx;
  Register rax_mtype  = rax;
  Register rdx_temp   = rdx;
  Register rdi_temp   = rdi;

  // emit WrongMethodType path first, to enable jccb back-branch from main path
  Label wrong_method_type;
  __ bind(wrong_method_type);
  Label invoke_generic_slow_path;
  assert(methodOopDesc::intrinsic_id_size_in_bytes() == sizeof(u1), "");
  __ cmpb(Address(rbx_method, methodOopDesc::intrinsic_id_offset_in_bytes()), (int) vmIntrinsics::_invokeExact);
  __ jcc(Assembler::notEqual, invoke_generic_slow_path);
  __ push(rax_mtype);       // required mtype
  __ push(rcx_recv);        // bad mh (1st stacked argument)
  __ jump(ExternalAddress(Interpreter::throw_WrongMethodType_entry()));

  // here's where control starts out:
  __ align(CodeEntryAlignment);
  address entry_point = __ pc();

  // fetch the MethodType from the method handle into rax (the 'check' register)
  {
    Register tem = rbx_method;
    for (jint* pchase = methodOopDesc::method_type_offsets_chain(); (*pchase) != -1; pchase++) {
      __ movptr(rax_mtype, Address(tem, *pchase));
      tem = rax_mtype;          // in case there is another indirection
    }
  }

  // given the MethodType, find out where the MH argument is buried
  __ movptr(rdx_temp, Address(rax_mtype,
                              __ delayed_value(java_dyn_MethodType::form_offset_in_bytes, rdi_temp)));
  Register rdx_vmslots = rdx_temp;
  __ movl(rdx_vmslots, Address(rdx_temp,
                               __ delayed_value(java_dyn_MethodTypeForm::vmslots_offset_in_bytes, rdi_temp)));
  __ movptr(rcx_recv, __ argument_address(rdx_vmslots));
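  // (Illustrative example, not in the original: for a call like
  //  mh.invokeExact(1, 2L) against a MethodType (int,long)V, vmslots is 3
  //  -- one slot for the int plus two for the long -- so the method handle
  //  itself sits three stack elements above the last pushed argument, which
  //  is exactly the slot loaded into rcx here.)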

  trace_method_handle(_masm, "invokeExact");

  __ check_method_handle_type(rax_mtype, rcx_recv, rdi_temp, wrong_method_type);
  __ jump_to_method_handle_entry(rcx_recv, rdi_temp);

  // for invokeGeneric (only), apply argument and result conversions on the fly
  __ bind(invoke_generic_slow_path);
#ifdef ASSERT
  { Label L;
    __ cmpb(Address(rbx_method, methodOopDesc::intrinsic_id_offset_in_bytes()), (int) vmIntrinsics::_invokeGeneric);
    __ jcc(Assembler::equal, L);
    __ stop("bad methodOop::intrinsic_id");
    __ bind(L);
  }
#endif //ASSERT
  Register rbx_temp = rbx_method;  // don't need it now

  // make room on the stack for another pointer:
  Register rcx_argslot = rcx_recv;
  __ lea(rcx_argslot, __ argument_address(rdx_vmslots, 1));
  insert_arg_slots(_masm, 2 * stack_move_unit(), _INSERT_REF_MASK,
                   rcx_argslot, rbx_temp, rdx_temp);

  // load up an adapter from the calling type (Java weaves this)
  __ movptr(rdx_temp, Address(rax_mtype,
                              __ delayed_value(java_dyn_MethodType::form_offset_in_bytes, rdi_temp)));
  Register rdx_adapter = rdx_temp;
  // movptr(rdx_adapter, Address(rdx_temp, java_dyn_MethodTypeForm::genericInvoker_offset_in_bytes()));
  // deal with old JDK versions:
  __ lea(rdi_temp, Address(rdx_temp,
                           __ delayed_value(java_dyn_MethodTypeForm::genericInvoker_offset_in_bytes, rdi_temp)));
  __ cmpptr(rdi_temp, rdx_temp);
  Label sorry_no_invoke_generic;
  __ jccb(Assembler::below, sorry_no_invoke_generic);

  __ movptr(rdx_adapter, Address(rdi_temp, 0));
  __ testptr(rdx_adapter, rdx_adapter);
  __ jccb(Assembler::zero, sorry_no_invoke_generic);
  __ movptr(Address(rcx_argslot, 1 * Interpreter::stackElementSize), rdx_adapter);
  // As a trusted first argument, pass the type being called, so the adapter knows
  // the actual types of the arguments and return values.
  // (Generic invokers are shared among form-families of method-type.)
  __ movptr(Address(rcx_argslot, 0 * Interpreter::stackElementSize), rax_mtype);
  // FIXME: assert that rdx_adapter is of the right method-type.
  __ mov(rcx, rdx_adapter);
  trace_method_handle(_masm, "invokeGeneric");
  __ jump_to_method_handle_entry(rcx, rdi_temp);

  __ bind(sorry_no_invoke_generic); // no invokeGeneric implementation available!
  __ movptr(rcx_recv, Address(rcx_argslot, -1 * Interpreter::stackElementSize));  // recover original MH
  __ push(rax_mtype);       // required mtype
  __ push(rcx_recv);        // bad mh (1st stacked argument)
  __ jump(ExternalAddress(Interpreter::throw_WrongMethodType_entry()));

  return entry_point;
}

// Helper to insert argument slots into the stack.
// arg_slots must be a multiple of stack_move_unit() and <= 0
void MethodHandles::insert_arg_slots(MacroAssembler* _masm,
                                     RegisterOrConstant arg_slots,
                                     int arg_mask,
                                     Register rax_argslot,
                                     Register rbx_temp, Register rdx_temp, Register temp3_reg) {
  assert(temp3_reg == noreg, "temp3 not required");
  assert_different_registers(rax_argslot, rbx_temp, rdx_temp,
                             (!arg_slots.is_register() ? rsp : arg_slots.as_register()));

#ifdef ASSERT
  verify_argslot(_masm, rax_argslot, "insertion point must fall within current frame");
  if (arg_slots.is_register()) {
    Label L_ok, L_bad;
    __ cmpptr(arg_slots.as_register(), (int32_t) NULL_WORD);
    __ jccb(Assembler::greater, L_bad);
    __ testl(arg_slots.as_register(), -stack_move_unit() - 1);
    __ jccb(Assembler::zero, L_ok);
    __ bind(L_bad);
    __ stop("assert arg_slots <= 0 and clear low bits");
    __ bind(L_ok);
  } else {
    assert(arg_slots.as_constant() <= 0, "");
    assert(arg_slots.as_constant() % -stack_move_unit() == 0, "");
  }
#endif //ASSERT

#ifdef _LP64
  if (arg_slots.is_register()) {
    // clean high bits of stack motion register (was loaded as an int)
    __ movslq(arg_slots.as_register(), arg_slots.as_register());
  }
#endif

  // Make space on the stack for the inserted argument(s).
  // Then pull down everything shallower than rax_argslot.
  // The stacked return address gets pulled down with everything else.
  // That is, copy [rsp, argslot) downward by -size words.  In pseudo-code:
  //   rsp -= size;
  //   for (rdx = rsp + size; rdx < argslot; rdx++)
  //     rdx[-size] = rdx[0]
  //   argslot -= size;
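  // (Illustrative example, not in the original, assuming one-word stack elements:
  //  opening a single slot means arg_slots == -1, so rsp drops by one word and every
  //  word from the old rsp up to, but not including, argslot is copied one word
  //  lower; the freed word just below the original argslot is the new, uninitialized
  //  slot, and argslot is adjusted to point at it.)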
  BLOCK_COMMENT("insert_arg_slots {");
  __ mov(rdx_temp, rsp);                        // source pointer for copy
  __ lea(rsp, Address(rsp, arg_slots, Address::times_ptr));
  {
    Label loop;
    __ BIND(loop);
    // pull one word down each time through the loop
    __ movptr(rbx_temp, Address(rdx_temp, 0));
    __ movptr(Address(rdx_temp, arg_slots, Address::times_ptr), rbx_temp);
    __ addptr(rdx_temp, wordSize);
    __ cmpptr(rdx_temp, rax_argslot);
    __ jccb(Assembler::less, loop);
  }

  // Now move the argslot down, to point to the opened-up space.
  __ lea(rax_argslot, Address(rax_argslot, arg_slots, Address::times_ptr));
  BLOCK_COMMENT("} insert_arg_slots");
}

// Helper to remove argument slots from the stack.
// arg_slots must be a multiple of stack_move_unit() and >= 0
void MethodHandles::remove_arg_slots(MacroAssembler* _masm,
                                    RegisterOrConstant arg_slots,
                                    Register rax_argslot,
                                     Register rbx_temp, Register rdx_temp, Register temp3_reg) {
  assert(temp3_reg == noreg, "temp3 not required");
  assert_different_registers(rax_argslot, rbx_temp, rdx_temp,
                             (!arg_slots.is_register() ? rsp : arg_slots.as_register()));

#ifdef ASSERT
  // Verify that [argslot..argslot+size) lies within (rsp, rbp).
  __ lea(rbx_temp, Address(rax_argslot, arg_slots, Address::times_ptr));
  verify_argslot(_masm, rbx_temp, "deleted argument(s) must fall within current frame");
  if (arg_slots.is_register()) {
    Label L_ok, L_bad;
    __ cmpptr(arg_slots.as_register(), (int32_t) NULL_WORD);
    __ jccb(Assembler::less, L_bad);
    __ testl(arg_slots.as_register(), -stack_move_unit() - 1);
    __ jccb(Assembler::zero, L_ok);
    __ bind(L_bad);
    __ stop("assert arg_slots >= 0 and clear low bits");
    __ bind(L_ok);
  } else {
    assert(arg_slots.as_constant() >= 0, "");
    assert(arg_slots.as_constant() % -stack_move_unit() == 0, "");
  }
#endif //ASSERT

#ifdef _LP64
  if (false) {                  // not needed, since register is positive
    // clean high bits of stack motion register (was loaded as an int)
    if (arg_slots.is_register())
      __ movslq(arg_slots.as_register(), arg_slots.as_register());
  }
#endif

  BLOCK_COMMENT("remove_arg_slots {");
  // Pull up everything shallower than rax_argslot.
  // Then remove the excess space on the stack.
  // The stacked return address gets pulled up with everything else.
  // That is, copy [rsp, argslot) upward by size words.  In pseudo-code:
  //   for (rdx = argslot-1; rdx >= rsp; --rdx)
  //     rdx[size] = rdx[0]
  //   argslot += size;
  //   rsp += size;
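  // (Illustrative example, not in the original, assuming one-word stack elements:
  //  removing a single slot means arg_slots == +1.  The loop copies each word from
  //  argslot-1 down to rsp one word higher, overwriting the deleted word at argslot;
  //  rsp then rises by one word and argslot is advanced one word, so it points just
  //  past the closed-up gap.)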
  __ lea(rdx_temp, Address(rax_argslot, -wordSize)); // source pointer for copy
  {
    Label loop;
    __ BIND(loop);
    // pull one word up each time through the loop
    __ movptr(rbx_temp, Address(rdx_temp, 0));
    __ movptr(Address(rdx_temp, arg_slots, Address::times_ptr), rbx_temp);
    __ addptr(rdx_temp, -wordSize);
    __ cmpptr(rdx_temp, rsp);
    __ jccb(Assembler::greaterEqual, loop);
  }

  // Now move the argslot up, to point to the just-copied block.
  __ lea(rsp, Address(rsp, arg_slots, Address::times_ptr));
  // And adjust the argslot address to point at the deletion point.
  __ lea(rax_argslot, Address(rax_argslot, arg_slots, Address::times_ptr));
  BLOCK_COMMENT("} remove_arg_slots");
}

#ifndef PRODUCT
extern "C" void print_method_handle(oop mh);
void trace_method_handle_stub(const char* adaptername,
                              oop mh,
                              intptr_t* saved_regs,
                              intptr_t* entry_sp,
                              intptr_t* saved_sp,
                              intptr_t* saved_bp) {
  // called as a leaf from native code: do not block the JVM!
  intptr_t* last_sp = (intptr_t*) saved_bp[frame::interpreter_frame_last_sp_offset];
  intptr_t* base_sp = (intptr_t*) saved_bp[frame::interpreter_frame_monitor_block_top_offset];
  printf("MH %s mh="INTPTR_FORMAT" sp=("INTPTR_FORMAT"+"INTX_FORMAT") stack_size="INTX_FORMAT" bp="INTPTR_FORMAT"\n",
         adaptername, (intptr_t)mh, (intptr_t)entry_sp, (intptr_t)(saved_sp - entry_sp), (intptr_t)(base_sp - last_sp), (intptr_t)saved_bp);
  if (last_sp != saved_sp && last_sp != NULL)
    printf("*** last_sp="INTPTR_FORMAT"\n", (intptr_t)last_sp);
  if (Verbose) {
    printf(" reg dump: ");
    int saved_regs_count = (entry_sp-1) - saved_regs;
    // 32 bit: rdi rsi rbp rsp; rbx rdx rcx (*) rax
    int i;
    for (i = 0; i <= saved_regs_count; i++) {
      if (i > 0 && i % 4 == 0 && i != saved_regs_count)
        printf("\n   + dump: ");
      printf(" %d: "INTPTR_FORMAT, i, saved_regs[i]);
    }
    printf("\n");
    int stack_dump_count = 16;
    if (stack_dump_count < (int)(saved_bp + 2 - saved_sp))
      stack_dump_count = (int)(saved_bp + 2 - saved_sp);
    if (stack_dump_count > 64)  stack_dump_count = 48;
    for (i = 0; i < stack_dump_count; i += 4) {
      printf(" dump at SP[%d] "INTPTR_FORMAT": "INTPTR_FORMAT" "INTPTR_FORMAT" "INTPTR_FORMAT" "INTPTR_FORMAT"\n",
             i, (intptr_t)&entry_sp[i+0], entry_sp[i+0], entry_sp[i+1], entry_sp[i+2], entry_sp[i+3]);
    }
    print_method_handle(mh);
  }
}
void MethodHandles::trace_method_handle(MacroAssembler* _masm, const char* adaptername) {
  if (!TraceMethodHandles)  return;
  BLOCK_COMMENT("trace_method_handle {");
  __ push(rax);
  __ lea(rax, Address(rsp, wordSize*6)); // entry_sp
  __ pusha();
  // arguments:
  __ push(rbp);               // interpreter frame pointer
  __ push(rsi);               // saved_sp
  __ push(rax);               // entry_sp
  __ push(rcx);               // mh
  __ push(rcx);
  __ movptr(Address(rsp, 0), (intptr_t) adaptername);
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, trace_method_handle_stub), 5);
  __ popa();
  __ pop(rax);
  BLOCK_COMMENT("} trace_method_handle");
}
#endif //PRODUCT

// which conversion op types are implemented here?
int MethodHandles::adapter_conversion_ops_supported_mask() {
  return ((1<<sun_dyn_AdapterMethodHandle::OP_RETYPE_ONLY)
         |(1<<sun_dyn_AdapterMethodHandle::OP_RETYPE_RAW)
         |(1<<sun_dyn_AdapterMethodHandle::OP_CHECK_CAST)
         |(1<<sun_dyn_AdapterMethodHandle::OP_PRIM_TO_PRIM)
         |(1<<sun_dyn_AdapterMethodHandle::OP_REF_TO_PRIM)
         |(1<<sun_dyn_AdapterMethodHandle::OP_SWAP_ARGS)
         |(1<<sun_dyn_AdapterMethodHandle::OP_ROT_ARGS)
         |(1<<sun_dyn_AdapterMethodHandle::OP_DUP_ARGS)
         |(1<<sun_dyn_AdapterMethodHandle::OP_DROP_ARGS)
         //|(1<<sun_dyn_AdapterMethodHandle::OP_SPREAD_ARGS) //BUG!
         );
  // FIXME: MethodHandlesTest gets a crash if we enable OP_SPREAD_ARGS.
}

// Generate an "entry" field for a method handle.
// This determines how the method handle will respond to calls.
void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHandles::EntryKind ek) {
  // Here is the register state during an interpreted call,
  // as set up by generate_method_handle_interpreter_entry():
  // - rbx: garbage temp (was MethodHandle.invoke methodOop, unused)
  // - rcx: receiver method handle
  // - rax: method handle type (only used by the check_mtype entry point)
  // - rsi/r13: sender SP (must preserve; see prepare_to_jump_from_interpreted)
  // - rdx: garbage temp, can blow away

  Register rcx_recv    = rcx;
  Register rax_argslot = rax;
  Register rbx_temp    = rbx;
  Register rdx_temp    = rdx;

  // This guy is set up by prepare_to_jump_from_interpreted (from interpreted calls)
  // and gen_c2i_adapter (from compiled calls):
  Register saved_last_sp = LP64_ONLY(r13) NOT_LP64(rsi);

  guarantee(java_dyn_MethodHandle::vmentry_offset_in_bytes() != 0, "must have offsets");

  // some handy addresses
  Address rbx_method_fie(     rbx,      methodOopDesc::from_interpreted_offset() );

  Address rcx_mh_vmtarget(    rcx_recv, java_dyn_MethodHandle::vmtarget_offset_in_bytes() );
  Address rcx_dmh_vmindex(    rcx_recv, sun_dyn_DirectMethodHandle::vmindex_offset_in_bytes() );

  Address rcx_bmh_vmargslot(  rcx_recv, sun_dyn_BoundMethodHandle::vmargslot_offset_in_bytes() );
  Address rcx_bmh_argument(   rcx_recv, sun_dyn_BoundMethodHandle::argument_offset_in_bytes() );

  Address rcx_amh_vmargslot(  rcx_recv, sun_dyn_AdapterMethodHandle::vmargslot_offset_in_bytes() );
  Address rcx_amh_argument(   rcx_recv, sun_dyn_AdapterMethodHandle::argument_offset_in_bytes() );
  Address rcx_amh_conversion( rcx_recv, sun_dyn_AdapterMethodHandle::conversion_offset_in_bytes() );
  Address vmarg;                // __ argument_address(vmargslot)

  const int java_mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes();

  if (have_entry(ek)) {
    __ nop();                   // empty stubs make SG sick
    return;
  }

  address interp_entry = __ pc();
  if (UseCompressedOops)  __ unimplemented("UseCompressedOops");

  trace_method_handle(_masm, entry_name(ek));

  BLOCK_COMMENT(entry_name(ek));

  switch ((int) ek) {
  case _raise_exception:
    {
      // Not a real MH entry, but rather shared code for raising an exception.
      // Extra local arguments are pushed on stack, as required type at TOS+8,
      // failing object (or NULL) at TOS+4, failing bytecode type at TOS.
      // Beyond those local arguments are the PC, of course.
      Register rdx_code = rdx_temp;
      Register rcx_fail = rcx_recv;
      Register rax_want = rax_argslot;
      Register rdi_pc   = rdi;
      __ pop(rdx_code);  // TOS+0
      __ pop(rcx_fail);  // TOS+4
      __ pop(rax_want);  // TOS+8
      __ pop(rdi_pc);    // caller PC

      __ mov(rsp, rsi);   // cut the stack back to where the caller started

      // Repush the arguments as if coming from the interpreter.
      __ push(rdx_code);
      __ push(rcx_fail);
      __ push(rax_want);

      Register rbx_method = rbx_temp;
      Label no_method;
      // FIXME: fill in _raise_exception_method with a suitable sun.dyn method
      __ movptr(rbx_method, ExternalAddress((address) &_raise_exception_method));
      __ testptr(rbx_method, rbx_method);
      __ jccb(Assembler::zero, no_method);
      int jobject_oop_offset = 0;
      __ movptr(rbx_method, Address(rbx_method, jobject_oop_offset));  // dereference the jobject
      __ testptr(rbx_method, rbx_method);
      __ jccb(Assembler::zero, no_method);
      __ verify_oop(rbx_method);
      __ push(rdi_pc);          // and restore caller PC
      __ jmp(rbx_method_fie);

      // If we get here, the Java runtime did not do its job of creating the exception.
      // Do something that at least causes a valid throw from the interpreter.
      __ bind(no_method);
      __ pop(rax_want);
      __ pop(rcx_fail);
      __ push(rax_want);
      __ push(rcx_fail);
      __ jump(ExternalAddress(Interpreter::throw_WrongMethodType_entry()));
    }
    break;

  case _invokestatic_mh:
  case _invokespecial_mh:
    {
      Register rbx_method = rbx_temp;
      __ movptr(rbx_method, rcx_mh_vmtarget); // target is a methodOop
      __ verify_oop(rbx_method);
      // same as TemplateTable::invokestatic or invokespecial,
      // minus the CP setup and profiling:
      if (ek == _invokespecial_mh) {
        // Must load & check the first argument before entering the target method.
        __ load_method_handle_vmslots(rax_argslot, rcx_recv, rdx_temp);
        __ movptr(rcx_recv, __ argument_address(rax_argslot, -1));
        __ null_check(rcx_recv);
        __ verify_oop(rcx_recv);
      }
      __ jmp(rbx_method_fie);
    }
    break;

  case _invokevirtual_mh:
    {
      // same as TemplateTable::invokevirtual,
      // minus the CP setup and profiling:

      // pick out the vtable index and receiver offset from the MH,
      // and then we can discard it:
      __ load_method_handle_vmslots(rax_argslot, rcx_recv, rdx_temp);
      Register rbx_index = rbx_temp;
      __ movl(rbx_index, rcx_dmh_vmindex);
      // Note:  The verifier allows us to ignore rcx_mh_vmtarget.
      __ movptr(rcx_recv, __ argument_address(rax_argslot, -1));
      __ null_check(rcx_recv, oopDesc::klass_offset_in_bytes());

      // get receiver klass
      Register rax_klass = rax_argslot;
      __ load_klass(rax_klass, rcx_recv);
      __ verify_oop(rax_klass);

      // get target methodOop & entry point
      const int base = instanceKlass::vtable_start_offset() * wordSize;
      assert(vtableEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");
      Address vtable_entry_addr(rax_klass,
                                rbx_index, Address::times_ptr,
                                base + vtableEntry::method_offset_in_bytes());
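      // (Illustrative note, not in the original: this address computes
      //  rax_klass + vtable_start_offset()*wordSize + rbx_index*wordSize
      //            + vtableEntry::method_offset_in_bytes(),
      //  i.e. the methodOop field of vtable entry number rbx_index, relying on the
      //  assert above that each vtableEntry occupies exactly one word.)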
      Register rbx_method = rbx_temp;
      __ movptr(rbx_method, vtable_entry_addr);

      __ verify_oop(rbx_method);
      __ jmp(rbx_method_fie);
    }
    break;

  case _invokeinterface_mh:
    {
      // same as TemplateTable::invokeinterface,
      // minus the CP setup and profiling:

      // pick out the interface and itable index from the MH.
      __ load_method_handle_vmslots(rax_argslot, rcx_recv, rdx_temp);
      Register rdx_intf  = rdx_temp;
      Register rbx_index = rbx_temp;
      __ movptr(rdx_intf,  rcx_mh_vmtarget);
      __ movl(rbx_index,   rcx_dmh_vmindex);
      __ movptr(rcx_recv, __ argument_address(rax_argslot, -1));
      __ null_check(rcx_recv, oopDesc::klass_offset_in_bytes());

      // get receiver klass
      Register rax_klass = rax_argslot;
      __ load_klass(rax_klass, rcx_recv);
      __ verify_oop(rax_klass);

      Register rdi_temp   = rdi;
      Register rbx_method = rbx_index;

      // get interface klass
      Label no_such_interface;
      __ verify_oop(rdx_intf);
      __ lookup_interface_method(rax_klass, rdx_intf,
                                 // note: next two args must be the same:
                                 rbx_index, rbx_method,
                                 rdi_temp,
                                 no_such_interface);

      __ verify_oop(rbx_method);
      __ jmp(rbx_method_fie);
      __ hlt();

      __ bind(no_such_interface);
      // Throw an exception.
      // For historical reasons, it will be IncompatibleClassChangeError.
      __ pushptr(Address(rdx_intf, java_mirror_offset));  // required interface
      __ push(rcx_recv);        // bad receiver
      __ push((int)Bytecodes::_invokeinterface);  // who is complaining?
      __ jump(ExternalAddress(from_interpreted_entry(_raise_exception)));
    }
    break;

  case _bound_ref_mh:
  case _bound_int_mh:
  case _bound_long_mh:
  case _bound_ref_direct_mh:
  case _bound_int_direct_mh:
  case _bound_long_direct_mh:
    {
      bool direct_to_method = (ek >= _bound_ref_direct_mh);
      BasicType arg_type  = T_ILLEGAL;
      int       arg_mask  = _INSERT_NO_MASK;
      int       arg_slots = -1;
      get_ek_bound_mh_info(ek, arg_type, arg_mask, arg_slots);

      // make room for the new argument:
      __ movl(rax_argslot, rcx_bmh_vmargslot);
      __ lea(rax_argslot, __ argument_address(rax_argslot));
      insert_arg_slots(_masm, arg_slots * stack_move_unit(), arg_mask,
                       rax_argslot, rbx_temp, rdx_temp);

      // store bound argument into the new stack slot:
      __ movptr(rbx_temp, rcx_bmh_argument);
      Address prim_value_addr(rbx_temp, java_lang_boxing_object::value_offset_in_bytes(arg_type));
      if (arg_type == T_OBJECT) {
        __ movptr(Address(rax_argslot, 0), rbx_temp);
      } else {
        __ load_sized_value(rdx_temp, prim_value_addr,
                            type2aelembytes(arg_type), is_signed_subword_type(arg_type));
        __ movptr(Address(rax_argslot, 0), rdx_temp);
#ifndef _LP64
        if (arg_slots == 2) {
          __ movl(rdx_temp, prim_value_addr.plus_disp(wordSize));
          __ movl(Address(rax_argslot, Interpreter::stackElementSize), rdx_temp);
        }
#endif //_LP64
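        // (Illustrative note, not in the original: on 32-bit, a bound long or
        //  double occupies two interpreter slots, so the second word of the boxed
        //  value is copied into the adjacent stack slot above.)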
      }

      if (direct_to_method) {
        Register rbx_method = rbx_temp;
        __ movptr(rbx_method, rcx_mh_vmtarget);
        __ verify_oop(rbx_method);
        __ jmp(rbx_method_fie);
      } else {
        __ movptr(rcx_recv, rcx_mh_vmtarget);
        __ verify_oop(rcx_recv);
        __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
      }
    }
    break;

  case _adapter_retype_only:
  case _adapter_retype_raw:
    // immediately jump to the next MH layer:
    __ movptr(rcx_recv, rcx_mh_vmtarget);
    __ verify_oop(rcx_recv);
    __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
    // This is OK when all parameter types widen.
    // It is also OK when a return type narrows.
    break;

  case _adapter_check_cast:
    {
      // temps:
      Register rbx_klass = rbx_temp; // interesting AMH data

      // check a reference argument before jumping to the next layer of MH:
      __ movl(rax_argslot, rcx_amh_vmargslot);
      vmarg = __ argument_address(rax_argslot);

      // What class are we casting to?
      __ movptr(rbx_klass, rcx_amh_argument); // this is a Class object!
      __ movptr(rbx_klass, Address(rbx_klass, java_lang_Class::klass_offset_in_bytes()));

      Label done;
      __ movptr(rdx_temp, vmarg);
      __ testptr(rdx_temp, rdx_temp);
      __ jccb(Assembler::zero, done);         // no cast if null
      __ load_klass(rdx_temp, rdx_temp);

      // live at this point:
      // - rbx_klass:  klass required by the target method
      // - rdx_temp:   argument klass to test
      // - rcx_recv:   adapter method handle
      __ check_klass_subtype(rdx_temp, rbx_klass, rax_argslot, done);

      // If we get here, the type check failed!
      // Call the wrong_method_type stub, passing the failing argument type in rax.
      Register rax_mtype = rax_argslot;
      __ movl(rax_argslot, rcx_amh_vmargslot);  // reload argslot field
      __ movptr(rdx_temp, vmarg);

      __ pushptr(rcx_amh_argument); // required class
      __ push(rdx_temp);            // bad object
      __ push((int)Bytecodes::_checkcast);  // who is complaining?
      __ jump(ExternalAddress(from_interpreted_entry(_raise_exception)));

      __ bind(done);
      // get the new MH:
      __ movptr(rcx_recv, rcx_mh_vmtarget);
      __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
    }
    break;

  case _adapter_prim_to_prim:
  case _adapter_ref_to_prim:
    // handled completely by optimized cases
    __ stop("init_AdapterMethodHandle should not issue this");
    break;

  case _adapter_opt_i2i:        // optimized subcase of adapt_prim_to_prim
//case _adapter_opt_f2i:        // optimized subcase of adapt_prim_to_prim
  case _adapter_opt_l2i:        // optimized subcase of adapt_prim_to_prim
  case _adapter_opt_unboxi:     // optimized subcase of adapt_ref_to_prim
    {
      // perform an in-place conversion to int or an int subword
      __ movl(rax_argslot, rcx_amh_vmargslot);
      vmarg = __ argument_address(rax_argslot);

      switch (ek) {
      case _adapter_opt_i2i:
        __ movl(rdx_temp, vmarg);
        break;
      case _adapter_opt_l2i:
        {
          // just delete the extra slot; on a little-endian machine we keep the first
          __ lea(rax_argslot, __ argument_address(rax_argslot, 1));
          remove_arg_slots(_masm, -stack_move_unit(),
                           rax_argslot, rbx_temp, rdx_temp);
          vmarg = Address(rax_argslot, -Interpreter::stackElementSize);
          __ movl(rdx_temp, vmarg);
        }
        break;
      case _adapter_opt_unboxi:
        {
          // Load the value up from the heap.
          __ movptr(rdx_temp, vmarg);
          int value_offset = java_lang_boxing_object::value_offset_in_bytes(T_INT);
#ifdef ASSERT
          for (int bt = T_BOOLEAN; bt < T_INT; bt++) {
            if (is_subword_type(BasicType(bt)))
              assert(value_offset == java_lang_boxing_object::value_offset_in_bytes(BasicType(bt)), "");
          }
#endif
          __ null_check(rdx_temp, value_offset);
          __ movl(rdx_temp, Address(rdx_temp, value_offset));
          // We load this as a word.  Because we are little-endian,
          // the low bits will be correct, but the high bits may need cleaning.
          // The vminfo will guide us to clean those bits.
        }
        break;
      default:
        ShouldNotReachHere();
      }

      // Do the requested conversion and store the value.
      Register rbx_vminfo = rbx_temp;
      __ movl(rbx_vminfo, rcx_amh_conversion);
      assert(CONV_VMINFO_SHIFT == 0, "preshifted");

      // get the new MH:
      __ movptr(rcx_recv, rcx_mh_vmtarget);
      // (now we are done with the old MH)

      // original 32-bit vmdata word must be of this form:
      //    | MBZ:6 | signBitCount:8 | srcDstTypes:8 | conversionOp:8 |
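      // (Illustrative example, not in the original, assuming the vminfo byte that
      //  ends up in CL below holds the shift count: an int->byte conversion would
      //  use a count of 24, so shll 24 followed by sarl 24 leaves the sign-extended
      //  low byte, while int->char takes the shrl path to zero-extend instead.)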
      __ xchgptr(rcx, rbx_vminfo);                // free rcx for shifts
      __ shll(rdx_temp /*, rcx*/);
      Label zero_extend, done;
      __ testl(rcx, CONV_VMINFO_SIGN_FLAG);
      __ jccb(Assembler::zero, zero_extend);

      // this path is taken for int->byte, int->short
      __ sarl(rdx_temp /*, rcx*/);
      __ jmpb(done);

      __ bind(zero_extend);
      // this is taken for int->char
      __ shrl(rdx_temp /*, rcx*/);

      __ bind(done);
      __ movl(vmarg, rdx_temp);  // Store the value.
      __ xchgptr(rcx, rbx_vminfo);                // restore rcx_recv

      __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
    }
    break;

  case _adapter_opt_i2l:        // optimized subcase of adapt_prim_to_prim
  case _adapter_opt_unboxl:     // optimized subcase of adapt_ref_to_prim
    {
      // perform an in-place int-to-long or ref-to-long conversion
      __ movl(rax_argslot, rcx_amh_vmargslot);

      // on a little-endian machine we keep the first slot and add another after
      __ lea(rax_argslot, __ argument_address(rax_argslot, 1));
      insert_arg_slots(_masm, stack_move_unit(), _INSERT_INT_MASK,
                       rax_argslot, rbx_temp, rdx_temp);
      Address vmarg1(rax_argslot, -Interpreter::stackElementSize);
      Address vmarg2 = vmarg1.plus_disp(Interpreter::stackElementSize);

      switch (ek) {
      case _adapter_opt_i2l:
        {
#ifdef _LP64
          __ movslq(rdx_temp, vmarg1);  // Load sign-extended
          __ movq(vmarg1, rdx_temp);    // Store into first slot
#else
          __ movl(rdx_temp, vmarg1);
          __ sarl(rdx_temp, BitsPerInt - 1);  // __ extend_sign()
          __ movl(vmarg2, rdx_temp); // store second word
#endif
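          // (Illustrative example, not in the original: on 32-bit, sarl by 31 turns
          //  the loaded int into 0 or -1, e.g. -5 (0xFFFFFFFB) yields 0xFFFFFFFF,
          //  which becomes the high word of the widened long while the original int
          //  remains in vmarg1 as the low word.)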
        }
        break;
      case _adapter_opt_unboxl:
        {
          // Load the value up from the heap.
          __ movptr(rdx_temp, vmarg1);
          int value_offset = java_lang_boxing_object::value_offset_in_bytes(T_LONG);
          assert(value_offset == java_lang_boxing_object::value_offset_in_bytes(T_DOUBLE), "");
          __ null_check(rdx_temp, value_offset);
#ifdef _LP64
          __ movq(rbx_temp, Address(rdx_temp, value_offset));
          __ movq(vmarg1, rbx_temp);
#else
          __ movl(rbx_temp, Address(rdx_temp, value_offset + 0*BytesPerInt));
          __ movl(rdx_temp, Address(rdx_temp, value_offset + 1*BytesPerInt));
          __ movl(vmarg1, rbx_temp);
          __ movl(vmarg2, rdx_temp);
#endif
        }
        break;
      default:
        ShouldNotReachHere();
      }

      __ movptr(rcx_recv, rcx_mh_vmtarget);
      __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
    }
    break;

  case _adapter_opt_f2d:        // optimized subcase of adapt_prim_to_prim
  case _adapter_opt_d2f:        // optimized subcase of adapt_prim_to_prim
    {
      // perform an in-place floating primitive conversion
      __ movl(rax_argslot, rcx_amh_vmargslot);
      __ lea(rax_argslot, __ argument_address(rax_argslot, 1));
      if (ek == _adapter_opt_f2d) {
        insert_arg_slots(_masm, stack_move_unit(), _INSERT_INT_MASK,
                         rax_argslot, rbx_temp, rdx_temp);
      }
      Address vmarg(rax_argslot, -Interpreter::stackElementSize);

#ifdef _LP64
      if (ek == _adapter_opt_f2d) {
        __ movflt(xmm0, vmarg);
        __ cvtss2sd(xmm0, xmm0);
        __ movdbl(vmarg, xmm0);
      } else {
        __ movdbl(xmm0, vmarg);
        __ cvtsd2ss(xmm0, xmm0);
        __ movflt(vmarg, xmm0);
      }
#else //_LP64
      if (ek == _adapter_opt_f2d) {
        __ fld_s(vmarg);        // load float to ST0
        __ fstp_s(vmarg);       // store single
      } else {
        __ fld_d(vmarg);        // load double to ST0
        __ fstp_s(vmarg);       // store single
      }
#endif //_LP64

      if (ek == _adapter_opt_d2f) {
        remove_arg_slots(_masm, -stack_move_unit(),
                         rax_argslot, rbx_temp, rdx_temp);
      }

      __ movptr(rcx_recv, rcx_mh_vmtarget);
      __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
    }
    break;

  case _adapter_prim_to_ref:
    __ unimplemented(entry_name(ek)); // %%% FIXME: NYI
    break;

  case _adapter_swap_args:
  case _adapter_rot_args:
    // handled completely by optimized cases
    __ stop("init_AdapterMethodHandle should not issue this");
    break;

  case _adapter_opt_swap_1:
  case _adapter_opt_swap_2:
  case _adapter_opt_rot_1_up:
  case _adapter_opt_rot_1_down:
  case _adapter_opt_rot_2_up:
  case _adapter_opt_rot_2_down:
    {
      int swap_bytes = 0, rotate = 0;
      get_ek_adapter_opt_swap_rot_info(ek, swap_bytes, rotate);

      // 'argslot' is the position of the first argument to swap
      __ movl(rax_argslot, rcx_amh_vmargslot);
      __ lea(rax_argslot, __ argument_address(rax_argslot));

      // 'vminfo' is the second
      Register rbx_destslot = rbx_temp;
      __ movl(rbx_destslot, rcx_amh_conversion);
      assert(CONV_VMINFO_SHIFT == 0, "preshifted");
      __ andl(rbx_destslot, CONV_VMINFO_MASK);
      __ lea(rbx_destslot, __ argument_address(rbx_destslot));
      DEBUG_ONLY(verify_argslot(_masm, rbx_destslot, "swap point must fall within current frame"));

      if (!rotate) {
        for (int i = 0; i < swap_bytes; i += wordSize) {
          __ movptr(rdx_temp, Address(rax_argslot , i));
          __ push(rdx_temp);
          __ movptr(rdx_temp, Address(rbx_destslot, i));
          __ movptr(Address(rax_argslot, i), rdx_temp);
          __ pop(rdx_temp);
          __ movptr(Address(rbx_destslot, i), rdx_temp);
        }
      } else {
        // push the first chunk, which is going to get overwritten
        for (int i = swap_bytes; (i -= wordSize) >= 0; ) {
          __ movptr(rdx_temp, Address(rax_argslot, i));
          __ push(rdx_temp);
        }

        if (rotate > 0) {
          // rotate upward
          __ subptr(rax_argslot, swap_bytes);
#ifdef ASSERT
          {
            // Verify that argslot > destslot, by at least swap_bytes.
            Label L_ok;
            __ cmpptr(rax_argslot, rbx_destslot);
            __ jccb(Assembler::aboveEqual, L_ok);
            __ stop("source must be above destination (upward rotation)");
            __ bind(L_ok);
          }
#endif
          // work argslot down to destslot, copying contiguous data upwards
          // pseudo-code:
          //   rax = src_addr - swap_bytes
          //   rbx = dest_addr
          //   while (rax >= rbx) *(rax + swap_bytes) = *(rax + 0), rax--;
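          // (Illustrative example, not in the original: with one-word chunks and
          //  argslot two words above destslot, the chunk at argslot has already been
          //  pushed; the loop then copies the word just below argslot up into
          //  argslot and the word at destslot up by one word, and the pop below
          //  drops the saved chunk into destslot, so the argslot chunk lands at
          //  destslot and everything in between moves up by one chunk.)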
          Label loop;
          __ bind(loop);
          __ movptr(rdx_temp, Address(rax_argslot, 0));
          __ movptr(Address(rax_argslot, swap_bytes), rdx_temp);
          __ addptr(rax_argslot, -wordSize);
          __ cmpptr(rax_argslot, rbx_destslot);
          __ jccb(Assembler::aboveEqual, loop);
        } else {
          __ addptr(rax_argslot, swap_bytes);
#ifdef ASSERT
          {
            // Verify that argslot < destslot, by at least swap_bytes.
            Label L_ok;
            __ cmpptr(rax_argslot, rbx_destslot);
            __ jccb(Assembler::belowEqual, L_ok);
            __ stop("source must be below destination (downward rotation)");
            __ bind(L_ok);
          }
#endif
          // work argslot up to destslot, copying contiguous data downwards
          // pseudo-code:
          //   rax = src_addr + swap_bytes
          //   rbx = dest_addr
          //   while (rax <= rbx) *(rax - swap_bytes) = *(rax + 0), rax++;
          Label loop;
          __ bind(loop);
          __ movptr(rdx_temp, Address(rax_argslot, 0));
          __ movptr(Address(rax_argslot, -swap_bytes), rdx_temp);
          __ addptr(rax_argslot, wordSize);
          __ cmpptr(rax_argslot, rbx_destslot);
          __ jccb(Assembler::belowEqual, loop);
        }

        // pop the original first chunk into the destination slot, now free
        for (int i = 0; i < swap_bytes; i += wordSize) {
          __ pop(rdx_temp);
          __ movptr(Address(rbx_destslot, i), rdx_temp);
        }
      }

      __ movptr(rcx_recv, rcx_mh_vmtarget);
      __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
    }
    break;

  case _adapter_dup_args:
    {
      // 'argslot' is the position of the first argument to duplicate
      __ movl(rax_argslot, rcx_amh_vmargslot);
      __ lea(rax_argslot, __ argument_address(rax_argslot));

      // 'stack_move' is negative number of words to duplicate
      Register rdx_stack_move = rdx_temp;
      __ movl2ptr(rdx_stack_move, rcx_amh_conversion);
      __ sarptr(rdx_stack_move, CONV_STACK_MOVE_SHIFT);

      int argslot0_num = 0;
      Address argslot0 = __ argument_address(RegisterOrConstant(argslot0_num));
      assert(argslot0.base() == rsp, "");
      int pre_arg_size = argslot0.disp();
      assert(pre_arg_size % wordSize == 0, "");
      assert(pre_arg_size > 0, "must include PC");

      // remember the old rsp+1 (argslot[0])
      Register rbx_oldarg = rbx_temp;
      __ lea(rbx_oldarg, argslot0);

      // move rsp down to make room for dups
      __ lea(rsp, Address(rsp, rdx_stack_move, Address::times_ptr));

      // compute the new rsp+1 (argslot[0])
      Register rdx_newarg = rdx_temp;
      __ lea(rdx_newarg, argslot0);

      __ push(rdi);             // need a temp
      // (preceding push must be done after arg addresses are taken!)

      // pull down the pre_arg_size data (PC)
      for (int i = -pre_arg_size; i < 0; i += wordSize) {
        __ movptr(rdi, Address(rbx_oldarg, i));
        __ movptr(Address(rdx_newarg, i), rdi);
      }

      // copy from rax_argslot[0...] down to new_rsp[1...]
      // pseudo-code:
      //   rbx = old_rsp+1
      //   rdx = new_rsp+1
      //   rax = argslot
      //   while (rdx < rbx) *rdx++ = *rax++
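      // (Illustrative example, not in the original: duplicating two one-word
      //  argument slots means stack_move == -2, so rsp dropped two words above; the
      //  loop below then fills the two freshly exposed bottom slots with copies of
      //  the two words starting at the original argslot, leaving the rest of the
      //  frame untouched.)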
      Label loop;
      __ bind(loop);
      __ movptr(rdi, Address(rax_argslot, 0));
      __ movptr(Address(rdx_newarg, 0), rdi);
      __ addptr(rax_argslot, wordSize);
      __ addptr(rdx_newarg, wordSize);
      __ cmpptr(rdx_newarg, rbx_oldarg);
      __ jccb(Assembler::less, loop);

      __ pop(rdi);              // restore temp

      __ movptr(rcx_recv, rcx_mh_vmtarget);
      __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
    }
    break;

  case _adapter_drop_args:
    {
      // 'argslot' is the position of the first argument to nuke
      __ movl(rax_argslot, rcx_amh_vmargslot);
      __ lea(rax_argslot, __ argument_address(rax_argslot));

      __ push(rdi);             // need a temp
      // (must do previous push after argslot address is taken)

      // 'stack_move' is number of words to drop
      Register rdi_stack_move = rdi;
      __ movl2ptr(rdi_stack_move, rcx_amh_conversion);
      __ sarptr(rdi_stack_move, CONV_STACK_MOVE_SHIFT);
      remove_arg_slots(_masm, rdi_stack_move,
                       rax_argslot, rbx_temp, rdx_temp);

      __ pop(rdi);              // restore temp

      __ movptr(rcx_recv, rcx_mh_vmtarget);
      __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
    }
    break;

  case _adapter_collect_args:
    __ unimplemented(entry_name(ek)); // %%% FIXME: NYI
    break;

  case _adapter_spread_args:
    // handled completely by optimized cases
    __ stop("init_AdapterMethodHandle should not issue this");
    break;

  case _adapter_opt_spread_0:
  case _adapter_opt_spread_1:
  case _adapter_opt_spread_more:
    {
      // spread an array out into a group of arguments
      int length_constant = get_ek_adapter_opt_spread_info(ek);

      // find the address of the array argument
      __ movl(rax_argslot, rcx_amh_vmargslot);
      __ lea(rax_argslot, __ argument_address(rax_argslot));

      // grab some temps
      { __ push(rsi); __ push(rdi); }
      // (preceding pushes must be done after argslot address is taken!)
#define UNPUSH_RSI_RDI \
      { __ pop(rdi); __ pop(rsi); }

      // rax_argslot points both to the array and to the first output arg
      vmarg = Address(rax_argslot, 0);

      // Get the array value.
      Register  rsi_array       = rsi;
      Register  rdx_array_klass = rdx_temp;
      BasicType elem_type       = T_OBJECT;
      int       length_offset   = arrayOopDesc::length_offset_in_bytes();
      int       elem0_offset    = arrayOopDesc::base_offset_in_bytes(elem_type);
      __ movptr(rsi_array, vmarg);
      Label skip_array_check;
      if (length_constant == 0) {
        __ testptr(rsi_array, rsi_array);
        __ jcc(Assembler::zero, skip_array_check);
      }
      __ null_check(rsi_array, oopDesc::klass_offset_in_bytes());
      __ load_klass(rdx_array_klass, rsi_array);

      // Check the array type.
      Register rbx_klass = rbx_temp;
      __ movptr(rbx_klass, rcx_amh_argument); // this is a Class object!
      __ movptr(rbx_klass, Address(rbx_klass, java_lang_Class::klass_offset_in_bytes()));

      Label ok_array_klass, bad_array_klass, bad_array_length;
      __ check_klass_subtype(rdx_array_klass, rbx_klass, rdi, ok_array_klass);
      // If we get here, the type check failed!
      __ jmp(bad_array_klass);
      __ bind(ok_array_klass);

      // Check length.
      if (length_constant >= 0) {
        __ cmpl(Address(rsi_array, length_offset), length_constant);
      } else {
        Register rbx_vminfo = rbx_temp;
        __ movl(rbx_vminfo, rcx_amh_conversion);
        assert(CONV_VMINFO_SHIFT == 0, "preshifted");
        __ andl(rbx_vminfo, CONV_VMINFO_MASK);
        __ cmpl(rbx_vminfo, Address(rsi_array, length_offset));
      }
      __ jcc(Assembler::notEqual, bad_array_length);

      Register rdx_argslot_limit = rdx_temp;

      // Array length checks out.  Now insert any required stack slots.
      if (length_constant == -1) {
        // Form a pointer to the end of the affected region.
        __ lea(rdx_argslot_limit, Address(rax_argslot, Interpreter::stackElementSize));
        // 'stack_move' is negative number of words to insert
        Register rdi_stack_move = rdi;
        __ movl2ptr(rdi_stack_move, rcx_amh_conversion);
        __ sarptr(rdi_stack_move, CONV_STACK_MOVE_SHIFT);
        Register rsi_temp = rsi_array;  // spill this
        insert_arg_slots(_masm, rdi_stack_move, -1,
                         rax_argslot, rbx_temp, rsi_temp);
        // reload the array (since rsi was killed)
        __ movptr(rsi_array, vmarg);
      } else if (length_constant > 1) {
        int arg_mask = 0;
        int new_slots = (length_constant - 1);
        for (int i = 0; i < new_slots; i++) {
          arg_mask <<= 1;
          arg_mask |= _INSERT_REF_MASK;
        }
        insert_arg_slots(_masm, new_slots * stack_move_unit(), arg_mask,
                         rax_argslot, rbx_temp, rdx_temp);
      } else if (length_constant == 1) {
        // no stack resizing required
      } else if (length_constant == 0) {
        remove_arg_slots(_masm, -stack_move_unit(),
                         rax_argslot, rbx_temp, rdx_temp);
      }

      // Copy from the array to the new slots.
      // Note: Stack change code preserves integrity of rax_argslot pointer.
      // So even after slot insertions, rax_argslot still points to first argument.
      if (length_constant == -1) {
        // [rax_argslot, rdx_argslot_limit) is the area we are inserting into.
        Register rsi_source = rsi_array;
        __ lea(rsi_source, Address(rsi_array, elem0_offset));
        Label loop;
        __ bind(loop);
        __ movptr(rbx_temp, Address(rsi_source, 0));
        __ movptr(Address(rax_argslot, 0), rbx_temp);
        __ addptr(rsi_source, type2aelembytes(elem_type));
        __ addptr(rax_argslot, Interpreter::stackElementSize);
        __ cmpptr(rax_argslot, rdx_argslot_limit);
        __ jccb(Assembler::less, loop);
      } else if (length_constant == 0) {
        __ bind(skip_array_check);
        // nothing to copy
      } else {
        int elem_offset = elem0_offset;
        int slot_offset = 0;
        for (int index = 0; index < length_constant; index++) {
          __ movptr(rbx_temp, Address(rsi_array, elem_offset));
          __ movptr(Address(rax_argslot, slot_offset), rbx_temp);
          elem_offset += type2aelembytes(elem_type);
          slot_offset += Interpreter::stackElementSize;
        }
      }

      // Arguments are spread.  Move to next method handle.
      UNPUSH_RSI_RDI;
      __ movptr(rcx_recv, rcx_mh_vmtarget);
      __ jump_to_method_handle_entry(rcx_recv, rdx_temp);

      __ bind(bad_array_klass);
      UNPUSH_RSI_RDI;
      __ pushptr(Address(rdx_array_klass, java_mirror_offset)); // required type
      __ pushptr(vmarg);                // bad array
      __ push((int)Bytecodes::_aaload); // who is complaining?
      __ jump(ExternalAddress(from_interpreted_entry(_raise_exception)));

      __ bind(bad_array_length);
      UNPUSH_RSI_RDI;
      __ push(rcx_recv);        // AMH requiring a certain length
      __ pushptr(vmarg);        // bad array
      __ push((int)Bytecodes::_arraylength); // who is complaining?
      __ jump(ExternalAddress(from_interpreted_entry(_raise_exception)));

#undef UNPUSH_RSI_RDI
    }
    break;

  case _adapter_flyby:
  case _adapter_ricochet:
    __ unimplemented(entry_name(ek)); // %%% FIXME: NYI
    break;

  default:  ShouldNotReachHere();
  }
  __ hlt();

  address me_cookie = MethodHandleEntry::start_compiled_entry(_masm, interp_entry);
  __ unimplemented(entry_name(ek)); // %%% FIXME: NYI

  init_entry(ek, MethodHandleEntry::finish_compiled_entry(_masm, me_cookie));
}
