sharedRuntime_x86_64.cpp revision 9898:2794bc7859f5
/*
 * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#ifndef _WINDOWS
#include "alloca.h"
#endif
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/debugInfoRec.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
#include "oops/compiledICHolder.hpp"
#include "prims/jvmtiRedefineClassesTrace.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/vframeArray.hpp"
#include "vmreg_x86.inline.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif
#if INCLUDE_JVMCI
#include "jvmci/jvmciJavaClasses.hpp"
#endif

#define __ masm->

const int StackAlignmentInSlots = StackAlignmentInBytes / VMRegImpl::stack_slot_size;

class SimpleRuntimeFrame {

  public:

  // Most of the runtime stubs have this simple frame layout.
  // This class exists to make the layout shared in one place.
  // Offsets are for compiler stack slots, which are jints.
  enum layout {
    // The frame sender code expects that rbp will be in the "natural" place and
    // will override any oopMap setting for it. We must therefore force the layout
    // so that it agrees with the frame sender code.
    rbp_off = frame::arg_reg_save_area_bytes/BytesPerInt,
    rbp_off2,
    return_off, return_off2,
    framesize
  };
};

class RegisterSaver {
  // Capture info about frame layout.  Layout offsets are in jint
  // units because compiler frame slots are jints.
#define XSAVE_AREA_BEGIN 160
#define XSAVE_AREA_YMM_BEGIN 576
#define XSAVE_AREA_ZMM_BEGIN 1152
#define XSAVE_AREA_UPPERBANK 1664
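// Note (assumes the standard, non-compacted XSAVE format that the
// push_CPU_state/pop_CPU_state code below relies on): the XMM save area
// begins at byte 160 of the legacy FXSAVE image, the YMM high halves
// (YMM_Hi128 state) at byte 576, the ZMM high halves (ZMM_Hi256) at byte
// 1152, and the upper-bank registers zmm16..zmm31 (Hi16_ZMM) at byte 1664,
// matching the offsets CPUID enumerates on current EVEX-capable parts.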
#define DEF_XMM_OFFS(regnum) xmm ## regnum ## _off = xmm_off + (regnum)*16/BytesPerInt, xmm ## regnum ## H_off
#define DEF_YMM_OFFS(regnum) ymm ## regnum ## _off = ymm_off + (regnum)*16/BytesPerInt, ymm ## regnum ## H_off
#define DEF_ZMM_OFFS(regnum) zmm ## regnum ## _off = zmm_off + (regnum-16)*64/BytesPerInt, zmm ## regnum ## H_off
  enum layout {
    fpu_state_off = frame::arg_reg_save_area_bytes/BytesPerInt, // fxsave save area
    xmm_off       = fpu_state_off + XSAVE_AREA_BEGIN/BytesPerInt,            // offset in fxsave save area
    DEF_XMM_OFFS(0),
    DEF_XMM_OFFS(1),
    // 2..15 are implied in range usage
    ymm_off = xmm_off + (XSAVE_AREA_YMM_BEGIN - XSAVE_AREA_BEGIN)/BytesPerInt,
    DEF_YMM_OFFS(0),
    DEF_YMM_OFFS(1),
    // 2..15 are implied in range usage
    zmm_high = xmm_off + (XSAVE_AREA_ZMM_BEGIN - XSAVE_AREA_BEGIN)/BytesPerInt,
    zmm_off = xmm_off + (XSAVE_AREA_UPPERBANK - XSAVE_AREA_BEGIN)/BytesPerInt,
    DEF_ZMM_OFFS(16),
    DEF_ZMM_OFFS(17),
    // 18..31 are implied in range usage
    fpu_state_end = fpu_state_off + ((FPUStateSizeInWords-1)*wordSize / BytesPerInt),
    fpu_stateH_end,
    r15_off, r15H_off,
    r14_off, r14H_off,
    r13_off, r13H_off,
    r12_off, r12H_off,
    r11_off, r11H_off,
    r10_off, r10H_off,
    r9_off,  r9H_off,
    r8_off,  r8H_off,
    rdi_off, rdiH_off,
    rsi_off, rsiH_off,
    ignore_off, ignoreH_off,  // extra copy of rbp
    rsp_off, rspH_off,
    rbx_off, rbxH_off,
    rdx_off, rdxH_off,
    rcx_off, rcxH_off,
    rax_off, raxH_off,
    // 16-byte stack alignment fill word: see MacroAssembler::push/pop_IU_state
    align_off, alignH_off,
    flags_off, flagsH_off,
    // The frame sender code expects that rbp will be in the "natural" place and
    // will override any oopMap setting for it. We must therefore force the layout
    // so that it agrees with the frame sender code.
    rbp_off, rbpH_off,        // copy of rbp we will restore
    return_off, returnH_off,  // slot for return address
    reg_save_size             // size in compiler stack slots
  };

 public:
  static OopMap* save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words, bool save_vectors = false);
  static void restore_live_registers(MacroAssembler* masm, bool restore_vectors = false);

  // Offsets into the register save area
  // Used by deoptimization when it is managing result register
  // values on its own

  static int rax_offset_in_bytes(void)    { return BytesPerInt * rax_off; }
  static int rdx_offset_in_bytes(void)    { return BytesPerInt * rdx_off; }
  static int rbx_offset_in_bytes(void)    { return BytesPerInt * rbx_off; }
  static int xmm0_offset_in_bytes(void)   { return BytesPerInt * xmm0_off; }
  static int return_offset_in_bytes(void) { return BytesPerInt * return_off; }

  // During deoptimization only the result registers need to be restored,
  // all the other values have already been extracted.
  static void restore_result_registers(MacroAssembler* masm);
};

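// Typical calling pattern, sketched (mirrors the safepoint and exception
// blobs later in this file; surrounding details omitted):
//
//   int frame_size_in_words;
//   OopMap* map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words, save_vectors);
//   ... call into the VM, recording the pc ...
//   oop_maps->add_gc_map(the_pc - start, map);
//   RegisterSaver::restore_live_registers(masm, save_vectors);
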
OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words, bool save_vectors) {
  int off = 0;
  int num_xmm_regs = XMMRegisterImpl::number_of_registers;
  if (UseAVX < 3) {
    num_xmm_regs = num_xmm_regs/2;
  }
#if defined(COMPILER2) || INCLUDE_JVMCI
  if (save_vectors) {
    assert(UseAVX > 0, "512bit vectors are supported only with EVEX");
    assert(MaxVectorSize == 64, "only 512bit vectors are supported now");
  }
#else
  assert(!save_vectors, "vectors are generated only by C2 and JVMCI");
#endif

  // Always make the frame size 16-byte aligned; vector and non-vector
  // stacks are allocated the same way.
  int frame_size_in_bytes = round_to(reg_save_size*BytesPerInt, num_xmm_regs);
  // OopMap frame size is in compiler stack slots (jint's) not bytes or words
  int frame_size_in_slots = frame_size_in_bytes / BytesPerInt;
  // CodeBlob frame size is in words.
  int frame_size_in_words = frame_size_in_bytes / wordSize;
  *total_frame_words = frame_size_in_words;

  // Save registers, fpu state, and flags.
  // We assume the caller has already pushed the return address onto the
  // stack, so rsp is 8-byte aligned here.
  // We push rbp twice in this sequence because we want the real rbp
  // to be under the return address like a normal enter.

  __ enter();          // rsp becomes 16-byte aligned here
  __ push_CPU_state(); // Push a multiple of 16 bytes

  // push_CPU_state handles this on EVEX-enabled targets
  if (save_vectors) {
    // Save upper half of YMM registers (0..15)
    int base_addr = XSAVE_AREA_YMM_BEGIN;
    for (int n = 0; n < 16; n++) {
      __ vextractf128h(Address(rsp, base_addr+n*16), as_XMMRegister(n));
    }
    if (VM_Version::supports_evex()) {
      // Save upper half of ZMM registers (0..15)
      base_addr = XSAVE_AREA_ZMM_BEGIN;
      for (int n = 0; n < 16; n++) {
        __ vextractf64x4h(Address(rsp, base_addr+n*32), as_XMMRegister(n), 1);
      }
      // Save full ZMM registers (16..num_xmm_regs)
      base_addr = XSAVE_AREA_UPPERBANK;
      off = 0;
      int vector_len = Assembler::AVX_512bit;
      for (int n = 16; n < num_xmm_regs; n++) {
        __ evmovdqul(Address(rsp, base_addr+(off++*64)), as_XMMRegister(n), vector_len);
      }
    }
  } else {
    if (VM_Version::supports_evex()) {
      // Save upper bank of ZMM registers (16..31) for double/float usage
      int base_addr = XSAVE_AREA_UPPERBANK;
      off = 0;
      for (int n = 16; n < num_xmm_regs; n++) {
        __ movsd(Address(rsp, base_addr+(off++*64)), as_XMMRegister(n));
      }
    }
  }
  if (frame::arg_reg_save_area_bytes != 0) {
    // Allocate argument register save area
    __ subptr(rsp, frame::arg_reg_save_area_bytes);
  }

  // Set an oopmap for the call site.  This oopmap will map all
  // oop-registers and debug-info registers as callee-saved.  This
  // will allow deoptimization at this safepoint to find all possible
  // debug-info recordings, as well as let GC find all oops.

  OopMapSet *oop_maps = new OopMapSet();
  OopMap* map = new OopMap(frame_size_in_slots, 0);

#define STACK_OFFSET(x) VMRegImpl::stack2reg((x))

  map->set_callee_saved(STACK_OFFSET( rax_off ), rax->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( rcx_off ), rcx->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( rdx_off ), rdx->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( rbx_off ), rbx->as_VMReg());
  // rbp location is known implicitly by the frame sender code, needs no oopmap,
  // and the location where rbp was saved is ignored
  map->set_callee_saved(STACK_OFFSET( rsi_off ), rsi->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( rdi_off ), rdi->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( r8_off  ), r8->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( r9_off  ), r9->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( r10_off ), r10->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( r11_off ), r11->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( r12_off ), r12->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( r13_off ), r13->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( r14_off ), r14->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( r15_off ), r15->as_VMReg());
  // For both AVX and EVEX we will use the legacy FXSAVE area for xmm0..xmm15;
  // on EVEX-enabled targets it is included in the XSAVE area
  off = xmm0_off;
  int delta = xmm1_off - off;
  for (int n = 0; n < 16; n++) {
    XMMRegister xmm_name = as_XMMRegister(n);
    map->set_callee_saved(STACK_OFFSET(off), xmm_name->as_VMReg());
    off += delta;
  }
  if (UseAVX > 2) {
    // Obtain xmm16..xmm31 from the XSAVE area on EVEX-enabled targets
    off = zmm16_off;
    delta = zmm17_off - off;
    for (int n = 16; n < num_xmm_regs; n++) {
      XMMRegister zmm_name = as_XMMRegister(n);
      map->set_callee_saved(STACK_OFFSET(off), zmm_name->as_VMReg());
      off += delta;
    }
  }

#if defined(COMPILER2) || INCLUDE_JVMCI
  if (save_vectors) {
    off = ymm0_off;
    int delta = ymm1_off - off;
    for (int n = 0; n < 16; n++) {
      XMMRegister ymm_name = as_XMMRegister(n);
      map->set_callee_saved(STACK_OFFSET(off), ymm_name->as_VMReg()->next(4));
      off += delta;
    }
  }
#endif // COMPILER2 || INCLUDE_JVMCI

  // %%% These should all be a waste but we'll keep things as they were for now
  if (true) {
    map->set_callee_saved(STACK_OFFSET( raxH_off ), rax->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET( rcxH_off ), rcx->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET( rdxH_off ), rdx->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET( rbxH_off ), rbx->as_VMReg()->next());
    // rbp location is known implicitly by the frame sender code, needs no oopmap
    map->set_callee_saved(STACK_OFFSET( rsiH_off ), rsi->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET( rdiH_off ), rdi->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET( r8H_off  ), r8->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET( r9H_off  ), r9->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET( r10H_off ), r10->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET( r11H_off ), r11->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET( r12H_off ), r12->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET( r13H_off ), r13->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET( r14H_off ), r14->as_VMReg()->next());
    map->set_callee_saved(STACK_OFFSET( r15H_off ), r15->as_VMReg()->next());
    // For both AVX and EVEX we will use the legacy FXSAVE area for xmm0..xmm15;
    // on EVEX-enabled targets it is included in the XSAVE area
    off = xmm0H_off;
    delta = xmm1H_off - off;
    for (int n = 0; n < 16; n++) {
      XMMRegister xmm_name = as_XMMRegister(n);
      map->set_callee_saved(STACK_OFFSET(off), xmm_name->as_VMReg()->next());
      off += delta;
    }
    if (UseAVX > 2) {
      // Obtain xmm16..xmm31 from the XSAVE area on EVEX-enabled targets
      off = zmm16H_off;
      delta = zmm17H_off - off;
      for (int n = 16; n < num_xmm_regs; n++) {
        XMMRegister zmm_name = as_XMMRegister(n);
        map->set_callee_saved(STACK_OFFSET(off), zmm_name->as_VMReg()->next());
        off += delta;
      }
    }
  }

  return map;
}

void RegisterSaver::restore_live_registers(MacroAssembler* masm, bool restore_vectors) {
  int num_xmm_regs = XMMRegisterImpl::number_of_registers;
  if (UseAVX < 3) {
    num_xmm_regs = num_xmm_regs/2;
  }
  if (frame::arg_reg_save_area_bytes != 0) {
    // Pop arg register save area
    __ addptr(rsp, frame::arg_reg_save_area_bytes);
  }

#if defined(COMPILER2) || INCLUDE_JVMCI
  if (restore_vectors) {
    assert(UseAVX > 0, "512bit vectors are supported only with EVEX");
    assert(MaxVectorSize == 64, "only 512bit vectors are supported now");
  }
#else
  assert(!restore_vectors, "vectors are generated only by C2 and JVMCI");
#endif

  // On EVEX-enabled targets everything is handled by pop_CPU_state
  if (restore_vectors) {
    // Restore upper half of YMM registers (0..15)
    int base_addr = XSAVE_AREA_YMM_BEGIN;
    for (int n = 0; n < 16; n++) {
      __ vinsertf128h(as_XMMRegister(n), Address(rsp,  base_addr+n*16));
    }
    if (VM_Version::supports_evex()) {
      // Restore upper half of ZMM registers (0..15)
      base_addr = XSAVE_AREA_ZMM_BEGIN;
      for (int n = 0; n < 16; n++) {
        __ vinsertf64x4h(as_XMMRegister(n), Address(rsp, base_addr+n*32), 1);
      }
      // Restore full ZMM registers (16..num_xmm_regs)
      base_addr = XSAVE_AREA_UPPERBANK;
      int vector_len = Assembler::AVX_512bit;
      int off = 0;
      for (int n = 16; n < num_xmm_regs; n++) {
        __ evmovdqul(as_XMMRegister(n), Address(rsp, base_addr+(off++*64)), vector_len);
      }
    }
  } else {
    if (VM_Version::supports_evex()) {
      // Restore upper bank of ZMM registers (16..31) for double/float usage
      int base_addr = XSAVE_AREA_UPPERBANK;
      int off = 0;
      for (int n = 16; n < num_xmm_regs; n++) {
        __ movsd(as_XMMRegister(n), Address(rsp, base_addr+(off++*64)));
      }
    }
  }

  // Recover CPU state
  __ pop_CPU_state();
  // Get the rbp described implicitly by the calling convention (no oopMap)
  __ pop(rbp);
}

void RegisterSaver::restore_result_registers(MacroAssembler* masm) {

  // Just restore the result registers. Only used by deoptimization. By
  // now any callee-saved register that needs to be restored to a c2
  // caller of the deoptee has been extracted into the vframeArray
  // and will be stuffed into the c2i adapter we create for later
  // restoration, so only the result registers need to be restored here.

  // Restore fp result register
  __ movdbl(xmm0, Address(rsp, xmm0_offset_in_bytes()));
  // Restore integer result register
  __ movptr(rax, Address(rsp, rax_offset_in_bytes()));
  __ movptr(rdx, Address(rsp, rdx_offset_in_bytes()));

  // Pop the whole register save area off the stack except the return address
  __ addptr(rsp, return_offset_in_bytes());
}

// Is the vector's size (in bytes) bigger than the size saved by default?
// 16-byte XMM registers are saved by default using fxsave/fxrstor instructions.
bool SharedRuntime::is_wide_vector(int size) {
  return size > 16;
}
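// (fxsave/fxrstor cover only the low 128 bits of each XMM register, so
// 32-byte YMM and 64-byte ZMM vectors count as wide and are saved
// explicitly by RegisterSaver above.)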

// The java_calling_convention describes stack locations as ideal slots on
// a frame with no abi restrictions. Since we must observe abi restrictions
// (like the placement of the register window) the slots must be biased by
// the following value.
static int reg2offset_in(VMReg r) {
  // Account for saved rbp and return address
  // This should really be in_preserve_stack_slots
  return (r->reg2stack() + 4) * VMRegImpl::stack_slot_size;
}
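// For example: the first incoming stack arg (slot 0) is addressed at
// rbp + 16, just past the saved rbp and the return address
// (2 words == 4 stack slots of 4 bytes each).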

static int reg2offset_out(VMReg r) {
  return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
}

// ---------------------------------------------------------------------------
// Read the array of BasicTypes from a signature, and compute where the
// arguments should go.  Values in the VMRegPair regs array refer to 4-byte
// quantities.  Values less than VMRegImpl::stack0 are registers, those above
// refer to 4-byte stack slots.  All stack slots are based off of the stack pointer
// as framesizes are fixed.
// VMRegImpl::stack0 refers to the first slot, 0(sp), and VMRegImpl::stack0+1
// refers to the memory word 4 bytes higher.  Register values 0 up to
// RegisterImpl::number_of_registers are the 64-bit integer registers.

// Note: the INPUTS in sig_bt are in units of Java argument words, which are
// either 32-bit or 64-bit depending on the build.  The OUTPUTS are in 32-bit
// units regardless of build. Of course for i486 there is no 64-bit build.

// The Java calling convention is a "shifted" version of the C ABI.
// By skipping the first C ABI register we can call non-static JNI methods
// with small numbers of arguments without having to shuffle the arguments
// at all. Since we control the Java ABI we ought to at least get some
// advantage out of it.

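// For illustration (a hypothetical signature, not taken from this file):
// for static int f(int, long, Object, double), sig_bt is
//   { T_INT, T_LONG, T_VOID, T_OBJECT, T_DOUBLE, T_VOID }
// and this convention assigns
//   T_INT    -> j_rarg0
//   T_LONG   -> j_rarg1  (its T_VOID half is set_bad())
//   T_OBJECT -> j_rarg2
//   T_DOUBLE -> j_farg0  (its T_VOID half is set_bad())
// No stack slots are needed, so the routine below returns 0.
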
int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
                                           VMRegPair *regs,
                                           int total_args_passed,
                                           int is_outgoing) {

  // Create the mapping between argument positions and
  // registers.
  static const Register INT_ArgReg[Argument::n_int_register_parameters_j] = {
    j_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4, j_rarg5
  };
  static const XMMRegister FP_ArgReg[Argument::n_float_register_parameters_j] = {
    j_farg0, j_farg1, j_farg2, j_farg3,
    j_farg4, j_farg5, j_farg6, j_farg7
  };


  uint int_args = 0;
  uint fp_args = 0;
  uint stk_args = 0; // inc by 2 each time

  for (int i = 0; i < total_args_passed; i++) {
    switch (sig_bt[i]) {
    case T_BOOLEAN:
    case T_CHAR:
    case T_BYTE:
    case T_SHORT:
    case T_INT:
      if (int_args < Argument::n_int_register_parameters_j) {
        regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
      } else {
        regs[i].set1(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_VOID:
      // halves of T_LONG or T_DOUBLE
      assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
      regs[i].set_bad();
      break;
    case T_LONG:
      assert(sig_bt[i + 1] == T_VOID, "expecting half");
      // fall through
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS:
      if (int_args < Argument::n_int_register_parameters_j) {
        regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
      } else {
        regs[i].set2(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_FLOAT:
      if (fp_args < Argument::n_float_register_parameters_j) {
        regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
      } else {
        regs[i].set1(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    case T_DOUBLE:
      assert(sig_bt[i + 1] == T_VOID, "expecting half");
      if (fp_args < Argument::n_float_register_parameters_j) {
        regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
      } else {
        regs[i].set2(VMRegImpl::stack2reg(stk_args));
        stk_args += 2;
      }
      break;
    default:
      ShouldNotReachHere();
      break;
    }
  }

  return round_to(stk_args, 2);
}

// Patch the caller's callsite with entry to compiled code if it exists.
static void patch_callers_callsite(MacroAssembler *masm) {
  Label L;
  __ cmpptr(Address(rbx, in_bytes(Method::code_offset())), (int32_t)NULL_WORD);
  __ jcc(Assembler::equal, L);

  // Save the current stack pointer
  __ mov(r13, rsp);
  // Schedule the branch target address early.
  // Call into the VM to patch the caller, then jump to compiled callee
  // rax isn't live so capture return address while we easily can
  __ movptr(rax, Address(rsp, 0));

  // align stack so push_CPU_state doesn't fault
  __ andptr(rsp, -(StackAlignmentInBytes));
  __ push_CPU_state();

  // VM needs caller's callsite
  // VM needs target method
  // This needs to be a long call since we will relocate this adapter to
  // the codeBuffer and it may not reach

  // Allocate argument register save area
  if (frame::arg_reg_save_area_bytes != 0) {
    __ subptr(rsp, frame::arg_reg_save_area_bytes);
  }
  __ mov(c_rarg0, rbx);
  __ mov(c_rarg1, rax);
  __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite)));

  // De-allocate argument register save area
  if (frame::arg_reg_save_area_bytes != 0) {
    __ addptr(rsp, frame::arg_reg_save_area_bytes);
  }

  __ pop_CPU_state();
  // restore sp
  __ mov(rsp, r13);
  __ bind(L);
}


static void gen_c2i_adapter(MacroAssembler *masm,
                            int total_args_passed,
                            int comp_args_on_stack,
                            const BasicType *sig_bt,
                            const VMRegPair *regs,
                            Label& skip_fixup) {
  // Before we get into the guts of the C2I adapter, see if we should be here
  // at all.  We've come from compiled code and are attempting to jump to the
  // interpreter, which means the caller made a static call to get here
  // (vcalls always get a compiled target if there is one).  Check for a
  // compiled target.  If there is one, we need to patch the caller's call.
  patch_callers_callsite(masm);

  __ bind(skip_fixup);

  // Since all args are passed on the stack, total_args_passed *
  // Interpreter::stackElementSize is the space we need, plus one word for
  // the return address location, since we store it first rather than
  // holding it in rax across all the shuffling.

  int extraspace = (total_args_passed * Interpreter::stackElementSize) + wordSize;

  // stack is aligned, keep it that way
  extraspace = round_to(extraspace, 2*wordSize);
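  // e.g. for 3 args: 3*8 + 8 == 32 bytes, already a multiple of 16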

  // Get return address
  __ pop(rax);

  // set senderSP value
  __ mov(r13, rsp);

  __ subptr(rsp, extraspace);

  // Store the return address in the expected location
  __ movptr(Address(rsp, 0), rax);

  // Now write the args into the outgoing interpreter space
  for (int i = 0; i < total_args_passed; i++) {
    if (sig_bt[i] == T_VOID) {
      assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
      continue;
    }

    // offset to start parameters
    int st_off   = (total_args_passed - i) * Interpreter::stackElementSize;
    int next_off = st_off - Interpreter::stackElementSize;

    // Say 4 args:
    // i   st_off
    // 0   32 T_LONG
    // 1   24 T_VOID
    // 2   16 T_OBJECT
    // 3    8 T_BOOL
    // -    0 return address
    //
    // However, to make things extra confusing: because we can fit a long/double
    // in a single slot on a 64-bit VM and it would be silly to break them up,
    // the interpreter leaves one slot empty and only stores to a single slot.
    // In this case the slot that is occupied is the T_VOID slot. See, I said
    // it was confusing.

    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_stack()) {
      // memory to memory: use rax as a temp
      int ld_off = r_1->reg2stack() * VMRegImpl::stack_slot_size + extraspace;
      if (!r_2->is_valid()) {
        // sign extend??
        __ movl(rax, Address(rsp, ld_off));
        __ movptr(Address(rsp, st_off), rax);

      } else {

        __ movq(rax, Address(rsp, ld_off));

        // Two VMRegs|OptoRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG
        // T_DOUBLE and T_LONG use two slots in the interpreter
        if ( sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
          // ld_off == LSW, ld_off+wordSize == MSW
          // st_off == MSW, next_off == LSW
          __ movq(Address(rsp, next_off), rax);
#ifdef ASSERT
          // Overwrite the unused slot with known junk
          __ mov64(rax, CONST64(0xdeadffffdeadaaaa));
          __ movptr(Address(rsp, st_off), rax);
#endif /* ASSERT */
        } else {
          __ movq(Address(rsp, st_off), rax);
        }
      }
    } else if (r_1->is_Register()) {
      Register r = r_1->as_Register();
      if (!r_2->is_valid()) {
        // must be only an int (or smaller), so move only 32 bits to the slot
        // why not sign extend??
        __ movl(Address(rsp, st_off), r);
      } else {
        // Two VMRegs|OptoRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG
        // T_DOUBLE and T_LONG use two slots in the interpreter
        if ( sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
          // long/double in gpr
#ifdef ASSERT
          // Overwrite the unused slot with known junk
          __ mov64(rax, CONST64(0xdeadffffdeadaaab));
          __ movptr(Address(rsp, st_off), rax);
#endif /* ASSERT */
          __ movq(Address(rsp, next_off), r);
        } else {
          __ movptr(Address(rsp, st_off), r);
        }
      }
    } else {
      assert(r_1->is_XMMRegister(), "");
      if (!r_2->is_valid()) {
        // only a float; use just part of the slot
        __ movflt(Address(rsp, st_off), r_1->as_XMMRegister());
      } else {
#ifdef ASSERT
        // Overwrite the unused slot with known junk
        __ mov64(rax, CONST64(0xdeadffffdeadaaac));
        __ movptr(Address(rsp, st_off), rax);
#endif /* ASSERT */
        __ movdbl(Address(rsp, next_off), r_1->as_XMMRegister());
      }
    }
  }

  // Schedule the branch target address early.
  __ movptr(rcx, Address(rbx, in_bytes(Method::interpreter_entry_offset())));
  __ jmp(rcx);
}

static void range_check(MacroAssembler* masm, Register pc_reg, Register temp_reg,
                        address code_start, address code_end,
                        Label& L_ok) {
  Label L_fail;
  __ lea(temp_reg, ExternalAddress(code_start));
  __ cmpptr(pc_reg, temp_reg);
  __ jcc(Assembler::belowEqual, L_fail);
  __ lea(temp_reg, ExternalAddress(code_end));
  __ cmpptr(pc_reg, temp_reg);
  __ jcc(Assembler::below, L_ok);
  __ bind(L_fail);
}

void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,
                                    int total_args_passed,
                                    int comp_args_on_stack,
                                    const BasicType *sig_bt,
                                    const VMRegPair *regs) {

  // Note: r13 contains the senderSP on entry. We must preserve it since
  // we may do an i2c -> c2i transition if we lose a race where compiled
  // code goes non-entrant while we get args ready.
  // In addition we use r13 to locate all the interpreter args because
  // we must align the stack to 16 bytes on an i2c entry; otherwise we
  // lose the alignment we expect in all compiled code, and the register
  // save code can segv when fxsave instructions find an improperly
  // aligned stack pointer.

  // Adapters can be frameless because they do not require the caller
  // to perform additional cleanup work, such as correcting the stack pointer.
  // An i2c adapter is frameless because the *caller* frame, which is interpreted,
  // routinely repairs its own stack pointer (from interpreter_frame_last_sp),
  // even if a callee has modified the stack pointer.
  // A c2i adapter is frameless because the *callee* frame, which is interpreted,
  // routinely repairs its caller's stack pointer (from sender_sp, which is set
  // up via the senderSP register).
  // In other words, if *either* the caller or callee is interpreted, we can
  // get the stack pointer repaired after a call.
  // This is why c2i and i2c adapters cannot be indefinitely composed.
  // In particular, if a c2i adapter were to somehow call an i2c adapter,
  // both caller and callee would be compiled methods, and neither would
  // clean up the stack pointer changes performed by the two adapters.
  // If this happens, control eventually transfers back to the compiled
  // caller, but with an uncorrected stack, causing delayed havoc.

  // Pick up the return address
  __ movptr(rax, Address(rsp, 0));

  if (VerifyAdapterCalls &&
      (Interpreter::code() != NULL || StubRoutines::code1() != NULL)) {
    // So, let's test for cascading c2i/i2c adapters right now.
    //  assert(Interpreter::contains($return_addr) ||
    //         StubRoutines::contains($return_addr),
    //         "i2c adapter must return to an interpreter frame");
    __ block_comment("verify_i2c { ");
    Label L_ok;
    if (Interpreter::code() != NULL)
      range_check(masm, rax, r11,
                  Interpreter::code()->code_start(), Interpreter::code()->code_end(),
                  L_ok);
    if (StubRoutines::code1() != NULL)
      range_check(masm, rax, r11,
                  StubRoutines::code1()->code_begin(), StubRoutines::code1()->code_end(),
                  L_ok);
    if (StubRoutines::code2() != NULL)
      range_check(masm, rax, r11,
                  StubRoutines::code2()->code_begin(), StubRoutines::code2()->code_end(),
                  L_ok);
    const char* msg = "i2c adapter must return to an interpreter frame";
    __ block_comment(msg);
    __ stop(msg);
    __ bind(L_ok);
    __ block_comment("} verify_i2c ");
  }

  // Must preserve original SP for loading incoming arguments because
  // we need to align the outgoing SP for compiled code.
  __ movptr(r11, rsp);

  // Cut-out for having no stack args.  Since up to 2 int/oop args are passed
  // in registers, we will occasionally have no stack args.
  int comp_words_on_stack = 0;
  if (comp_args_on_stack) {
    // Sig words on the stack are greater-than VMRegImpl::stack0.  Those in
    // registers are below.  By subtracting stack0, we either get a negative
    // number (all values in registers) or the maximum stack slot accessed.

    // Convert 4-byte c2 stack slots to words.
    comp_words_on_stack = round_to(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
    // Round up to minimum stack alignment, in wordSize units
    comp_words_on_stack = round_to(comp_words_on_stack, 2);
    __ subptr(rsp, comp_words_on_stack * wordSize);
  }


  // Ensure compiled code always sees stack at proper alignment
  __ andptr(rsp, -16);

  // Push the return address, misaligning the stack in the way the youngest
  // frame always sees it, given the placement of the call instruction.
  __ push(rax);

  // Put saved SP in another register
  const Register saved_sp = rax;
  __ movptr(saved_sp, r11);

  // Will jump to the compiled code just as if compiled code was doing it.
  // Pre-load the register-jump target early, to schedule it better.
  __ movptr(r11, Address(rbx, in_bytes(Method::from_compiled_offset())));

#if INCLUDE_JVMCI
  if (EnableJVMCI) {
    // check if this call should be routed towards a specific entry point
    __ cmpptr(Address(r15_thread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())), 0);
    Label no_alternative_target;
    __ jcc(Assembler::equal, no_alternative_target);
    __ movptr(r11, Address(r15_thread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));
    __ movptr(Address(r15_thread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())), 0);
    __ bind(no_alternative_target);
  }
#endif // INCLUDE_JVMCI

  // Now generate the shuffle code.  Pick up all register args and move the
  // rest through the floating point stack top.
  for (int i = 0; i < total_args_passed; i++) {
    if (sig_bt[i] == T_VOID) {
      // Longs and doubles are passed in native word order, but misaligned
      // in the 32-bit build.
      assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
      continue;
    }

    // Pick up 0, 1 or 2 words from SP+offset.

    assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(),
            "scrambled load targets?");
    // Load in argument order going down.
    int ld_off = (total_args_passed - i)*Interpreter::stackElementSize;
    // Point to interpreter value (vs. tag)
    int next_off = ld_off - Interpreter::stackElementSize;

    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_stack()) {
      // Convert stack slot to an SP offset (+ wordSize to account for return address )
      int st_off = regs[i].first()->reg2stack()*VMRegImpl::stack_slot_size + wordSize;

      // We can use r13 as a temp here because compiled code doesn't need r13 as an input
      // and if we end up going thru a c2i because of a miss, a reasonable value of r13
      // will be generated.
      if (!r_2->is_valid()) {
        // sign extend???
        __ movl(r13, Address(saved_sp, ld_off));
        __ movptr(Address(rsp, st_off), r13);
      } else {
        //
        // We are using two optoregs. This can be either T_OBJECT, T_ADDRESS, T_LONG, or T_DOUBLE;
        // the interpreter allocates two slots but only uses one for the T_LONG or T_DOUBLE case,
        // so we must adjust where to pick up the data to match the interpreter.
        //
        // Interpreter local[n] == MSW, local[n+1] == LSW; however, locals
        // are accessed as negative so LSW is at LOW address

        // ld_off is MSW so get LSW
        const int offset = (sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)?
                           next_off : ld_off;
        __ movq(r13, Address(saved_sp, offset));
        // st_off is LSW (i.e. reg.first())
        __ movq(Address(rsp, st_off), r13);
      }
    } else if (r_1->is_Register()) {  // Register argument
      Register r = r_1->as_Register();
      assert(r != rax, "must be different");
      if (r_2->is_valid()) {
        //
        // We are using two VMRegs. This can be either T_OBJECT, T_ADDRESS, T_LONG, or T_DOUBLE;
        // the interpreter allocates two slots but only uses one for the T_LONG or T_DOUBLE case,
        // so we must adjust where to pick up the data to match the interpreter.

        const int offset = (sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)?
                           next_off : ld_off;

        // this can be a misaligned move
        __ movq(r, Address(saved_sp, offset));
      } else {
        // sign extend and use a full word?
        __ movl(r, Address(saved_sp, ld_off));
      }
    } else {
      if (!r_2->is_valid()) {
        __ movflt(r_1->as_XMMRegister(), Address(saved_sp, ld_off));
      } else {
        __ movdbl(r_1->as_XMMRegister(), Address(saved_sp, next_off));
      }
    }
  }

  // 6243940 We might end up in handle_wrong_method if
  // the callee is deoptimized as we race thru here. If that
  // happens we don't want to take a safepoint because the
  // caller frame will look interpreted and arguments are now
  // "compiled" so it is much better to make this transition
  // invisible to the stack walking code. Unfortunately if
  // we try and find the callee by normal means a safepoint
  // is possible. So we stash the desired callee in the thread
  // and the VM will find it there should this case occur.

  __ movptr(Address(r15_thread, JavaThread::callee_target_offset()), rbx);

  // Put the Method* where a c2i would expect it, should we end up there;
  // only needed because c2's resolve stubs return the Method* as a result
  // in rax.
  __ mov(rax, rbx);
  __ jmp(r11);
}

// ---------------------------------------------------------------
AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
                                                            int total_args_passed,
                                                            int comp_args_on_stack,
                                                            const BasicType *sig_bt,
                                                            const VMRegPair *regs,
                                                            AdapterFingerPrint* fingerprint) {
  address i2c_entry = __ pc();

  gen_i2c_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs);

  // -------------------------------------------------------------------------
  // Generate a C2I adapter.  On entry we know rbx holds the Method* during calls
  // to the interpreter.  The args start out packed in the compiled layout.  They
  // need to be unpacked into the interpreter layout.  This will almost always
  // require some stack space.  We grow the current (compiled) stack, then repack
  // the args.  We finally end in a jump to the generic interpreter entry point.
  // On exit from the interpreter, the interpreter will restore our SP (lest the
  // compiled code, which relies solely on SP and not RBP, get sick).

  address c2i_unverified_entry = __ pc();
  Label skip_fixup;
  Label ok;

  Register holder = rax;
  Register receiver = j_rarg0;
  Register temp = rbx;

  {
    __ load_klass(temp, receiver);
    __ cmpptr(temp, Address(holder, CompiledICHolder::holder_klass_offset()));
    __ movptr(rbx, Address(holder, CompiledICHolder::holder_method_offset()));
    __ jcc(Assembler::equal, ok);
    __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));

    __ bind(ok);
    // The method might have been compiled since the call site was patched
    // to interpreted; if that is the case, treat it as a miss so we can get
    // the call site corrected.
    __ cmpptr(Address(rbx, in_bytes(Method::code_offset())), (int32_t)NULL_WORD);
    __ jcc(Assembler::equal, skip_fixup);
    __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
  }

  address c2i_entry = __ pc();

  gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);

  __ flush();
  return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);
}

int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
                                         VMRegPair *regs,
                                         VMRegPair *regs2,
                                         int total_args_passed) {
  assert(regs2 == NULL, "not needed on x86");
// We return the amount of VMRegImpl stack slots we need to reserve for all
// the arguments NOT counting out_preserve_stack_slots.

// NOTE: These arrays will have to change when c1 is ported
#ifdef _WIN64
    static const Register INT_ArgReg[Argument::n_int_register_parameters_c] = {
      c_rarg0, c_rarg1, c_rarg2, c_rarg3
    };
    static const XMMRegister FP_ArgReg[Argument::n_float_register_parameters_c] = {
      c_farg0, c_farg1, c_farg2, c_farg3
    };
#else
    static const Register INT_ArgReg[Argument::n_int_register_parameters_c] = {
      c_rarg0, c_rarg1, c_rarg2, c_rarg3, c_rarg4, c_rarg5
    };
    static const XMMRegister FP_ArgReg[Argument::n_float_register_parameters_c] = {
      c_farg0, c_farg1, c_farg2, c_farg3,
      c_farg4, c_farg5, c_farg6, c_farg7
    };
#endif // _WIN64


    uint int_args = 0;
    uint fp_args = 0;
    uint stk_args = 0; // inc by 2 each time

    for (int i = 0; i < total_args_passed; i++) {
      switch (sig_bt[i]) {
      case T_BOOLEAN:
      case T_CHAR:
      case T_BYTE:
      case T_SHORT:
      case T_INT:
        if (int_args < Argument::n_int_register_parameters_c) {
          regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
#ifdef _WIN64
          fp_args++;
          // Allocate slots for the callee to stuff register args on the stack.
          stk_args += 2;
#endif
        } else {
          regs[i].set1(VMRegImpl::stack2reg(stk_args));
          stk_args += 2;
        }
        break;
      case T_LONG:
        assert(sig_bt[i + 1] == T_VOID, "expecting half");
        // fall through
      case T_OBJECT:
      case T_ARRAY:
      case T_ADDRESS:
      case T_METADATA:
        if (int_args < Argument::n_int_register_parameters_c) {
          regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
#ifdef _WIN64
          fp_args++;
          stk_args += 2;
#endif
        } else {
          regs[i].set2(VMRegImpl::stack2reg(stk_args));
          stk_args += 2;
        }
        break;
      case T_FLOAT:
        if (fp_args < Argument::n_float_register_parameters_c) {
          regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
#ifdef _WIN64
          int_args++;
          // Allocate slots for the callee to stuff register args on the stack.
          stk_args += 2;
#endif
        } else {
          regs[i].set1(VMRegImpl::stack2reg(stk_args));
          stk_args += 2;
        }
        break;
      case T_DOUBLE:
        assert(sig_bt[i + 1] == T_VOID, "expecting half");
        if (fp_args < Argument::n_float_register_parameters_c) {
          regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
#ifdef _WIN64
          int_args++;
          // Allocate slots for the callee to stuff register args on the stack.
          stk_args += 2;
#endif
        } else {
          regs[i].set2(VMRegImpl::stack2reg(stk_args));
          stk_args += 2;
        }
        break;
      case T_VOID: // Halves of longs and doubles
        assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
        regs[i].set_bad();
        break;
      default:
        ShouldNotReachHere();
        break;
      }
    }
#ifdef _WIN64
  // The Windows ABI requires that we always allocate enough stack space
  // for 4 64-bit registers to be stored down.
  if (stk_args < 8) {
    stk_args = 8;
  }
#endif // _WIN64

  return stk_args;
}

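// For illustration (a hypothetical signature, not from this file): for
// f(jint, jdouble), the SysV path assigns rdi (c_rarg0) and xmm0 (c_farg0)
// and returns 0 stack slots, while the Win64 path assigns rcx (c_rarg0)
// and xmm1 (the second parameter position) and still returns the 8-slot
// minimum for the required 32-byte register spill area.
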
// On 64-bit we will store integer-like items to the stack as
// 64-bit items (SPARC ABI style), even though Java only stores
// 32 bits for a parameter. On 32-bit it will simply be 32 bits,
// so this routine does 32->32 on 32-bit and 32->64 on 64-bit.
static void move32_64(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      __ movslq(rax, Address(rbp, reg2offset_in(src.first())));
      __ movq(Address(rsp, reg2offset_out(dst.first())), rax);
    } else {
      // stack to reg
      __ movslq(dst.first()->as_Register(), Address(rbp, reg2offset_in(src.first())));
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    // Do we really have to sign extend???
    // __ movslq(src.first()->as_Register(), src.first()->as_Register());
    __ movq(Address(rsp, reg2offset_out(dst.first())), src.first()->as_Register());
  } else {
    // Do we really have to sign extend???
    // __ movslq(dst.first()->as_Register(), src.first()->as_Register());
    if (dst.first() != src.first()) {
      __ movq(dst.first()->as_Register(), src.first()->as_Register());
    }
  }
}

static void move_ptr(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      __ movq(rax, Address(rbp, reg2offset_in(src.first())));
      __ movq(Address(rsp, reg2offset_out(dst.first())), rax);
    } else {
      // stack to reg
      __ movq(dst.first()->as_Register(), Address(rbp, reg2offset_in(src.first())));
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    __ movq(Address(rsp, reg2offset_out(dst.first())), src.first()->as_Register());
  } else {
    if (dst.first() != src.first()) {
      __ movq(dst.first()->as_Register(), src.first()->as_Register());
    }
  }
}

// An oop arg. Must pass a handle not the oop itself
static void object_move(MacroAssembler* masm,
                        OopMap* map,
                        int oop_handle_offset,
                        int framesize_in_slots,
                        VMRegPair src,
                        VMRegPair dst,
                        bool is_receiver,
                        int* receiver_offset) {

  // must pass a handle. First figure out the location we use as a handle

  Register rHandle = dst.first()->is_stack() ? rax : dst.first()->as_Register();

  // See if the oop is NULL; if it is, we need no handle

  if (src.first()->is_stack()) {

    // Oop is already on the stack as an argument
    int offset_in_older_frame = src.first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
    map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots));
    if (is_receiver) {
      *receiver_offset = (offset_in_older_frame + framesize_in_slots) * VMRegImpl::stack_slot_size;
    }

    __ cmpptr(Address(rbp, reg2offset_in(src.first())), (int32_t)NULL_WORD);
    __ lea(rHandle, Address(rbp, reg2offset_in(src.first())));
    // conditionally move a NULL
    __ cmovptr(Assembler::equal, rHandle, Address(rbp, reg2offset_in(src.first())));
  } else {

    // Oop is in a register; we must store it to the space we reserve
    // on the stack for oop_handles and pass a handle if the oop is non-NULL

    const Register rOop = src.first()->as_Register();
    int oop_slot;
    if (rOop == j_rarg0)
      oop_slot = 0;
    else if (rOop == j_rarg1)
      oop_slot = 1;
    else if (rOop == j_rarg2)
      oop_slot = 2;
    else if (rOop == j_rarg3)
      oop_slot = 3;
    else if (rOop == j_rarg4)
      oop_slot = 4;
    else {
      assert(rOop == j_rarg5, "wrong register");
      oop_slot = 5;
    }

    oop_slot = oop_slot * VMRegImpl::slots_per_word + oop_handle_offset;
    int offset = oop_slot*VMRegImpl::stack_slot_size;

    map->set_oop(VMRegImpl::stack2reg(oop_slot));
    // Store oop in handle area, may be NULL
    __ movptr(Address(rsp, offset), rOop);
    if (is_receiver) {
      *receiver_offset = offset;
    }

    __ cmpptr(rOop, (int32_t)NULL_WORD);
    __ lea(rHandle, Address(rsp, offset));
    // conditionally move a NULL from the handle area where it was just stored
    __ cmovptr(Assembler::equal, rHandle, Address(rsp, offset));
  }

  // If the arg is on the stack then place it; otherwise it is already in the correct register.
  if (dst.first()->is_stack()) {
    __ movptr(Address(rsp, reg2offset_out(dst.first())), rHandle);
  }
}

// A float arg may have to do a float-reg to int-reg conversion
static void float_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  assert(!src.second()->is_valid() && !dst.second()->is_valid(), "bad float_move");

  // The calling conventions assure us that each VMRegPair is either
  // all really one physical register or adjacent stack slots.
  // This greatly simplifies the cases here compared to sparc.

  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      __ movl(rax, Address(rbp, reg2offset_in(src.first())));
      __ movptr(Address(rsp, reg2offset_out(dst.first())), rax);
    } else {
      // stack to reg
      assert(dst.first()->is_XMMRegister(), "only expect xmm registers as parameters");
      __ movflt(dst.first()->as_XMMRegister(), Address(rbp, reg2offset_in(src.first())));
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    assert(src.first()->is_XMMRegister(), "only expect xmm registers as parameters");
    __ movflt(Address(rsp, reg2offset_out(dst.first())), src.first()->as_XMMRegister());
  } else {
    // reg to reg
    // In theory these overlap but the ordering is such that this is likely a nop
    if ( src.first() != dst.first()) {
      __ movdbl(dst.first()->as_XMMRegister(),  src.first()->as_XMMRegister());
    }
  }
}

// A long move
static void long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {

  // The calling conventions assure us that each VMRegPair is either
  // all really one physical register or adjacent stack slots.
  // This greatly simplifies the cases here compared to sparc.

  if (src.is_single_phys_reg() ) {
    if (dst.is_single_phys_reg()) {
      if (dst.first() != src.first()) {
        __ mov(dst.first()->as_Register(), src.first()->as_Register());
      }
    } else {
      assert(dst.is_single_reg(), "not a stack pair");
      __ movq(Address(rsp, reg2offset_out(dst.first())), src.first()->as_Register());
    }
  } else if (dst.is_single_phys_reg()) {
    assert(src.is_single_reg(),  "not a stack pair");
    __ movq(dst.first()->as_Register(), Address(rbp, reg2offset_in(src.first())));
  } else {
    assert(src.is_single_reg() && dst.is_single_reg(), "not stack pairs");
    __ movq(rax, Address(rbp, reg2offset_in(src.first())));
    __ movq(Address(rsp, reg2offset_out(dst.first())), rax);
  }
}

// A double move
static void double_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {

  // The calling conventions assure us that each VMRegPair is either
  // all really one physical register or adjacent stack slots.
  // This greatly simplifies the cases here compared to sparc.

  if (src.is_single_phys_reg() ) {
    if (dst.is_single_phys_reg()) {
      // In theory these overlap but the ordering is such that this is likely a nop
      if ( src.first() != dst.first()) {
        __ movdbl(dst.first()->as_XMMRegister(), src.first()->as_XMMRegister());
      }
    } else {
      assert(dst.is_single_reg(), "not a stack pair");
      __ movdbl(Address(rsp, reg2offset_out(dst.first())), src.first()->as_XMMRegister());
    }
  } else if (dst.is_single_phys_reg()) {
    assert(src.is_single_reg(),  "not a stack pair");
    __ movdbl(dst.first()->as_XMMRegister(), Address(rbp, reg2offset_in(src.first())));
  } else {
    assert(src.is_single_reg() && dst.is_single_reg(), "not stack pairs");
    __ movq(rax, Address(rbp, reg2offset_in(src.first())));
    __ movq(Address(rsp, reg2offset_out(dst.first())), rax);
  }
}


void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  // We always ignore the frame_slots arg and just use the space just below the
  // frame pointer, which by this time is free to use.
  switch (ret_type) {
  case T_FLOAT:
    __ movflt(Address(rbp, -wordSize), xmm0);
    break;
  case T_DOUBLE:
    __ movdbl(Address(rbp, -wordSize), xmm0);
    break;
  case T_VOID:  break;
  default: {
    __ movptr(Address(rbp, -wordSize), rax);
    }
  }
}

void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  // We always ignore the frame_slots arg and just use the space just below the
  // frame pointer, which by this time is free to use.
  switch (ret_type) {
  case T_FLOAT:
    __ movflt(xmm0, Address(rbp, -wordSize));
    break;
  case T_DOUBLE:
    __ movdbl(xmm0, Address(rbp, -wordSize));
    break;
  case T_VOID:  break;
  default: {
    __ movptr(rax, Address(rbp, -wordSize));
    }
  }
}

static void save_args(MacroAssembler *masm, int arg_count, int first_arg, VMRegPair *args) {
    for ( int i = first_arg ; i < arg_count ; i++ ) {
      if (args[i].first()->is_Register()) {
        __ push(args[i].first()->as_Register());
      } else if (args[i].first()->is_XMMRegister()) {
        __ subptr(rsp, 2*wordSize);
        __ movdbl(Address(rsp, 0), args[i].first()->as_XMMRegister());
      }
    }
}

static void restore_args(MacroAssembler *masm, int arg_count, int first_arg, VMRegPair *args) {
    for ( int i = arg_count - 1 ; i >= first_arg ; i-- ) {
      if (args[i].first()->is_Register()) {
        __ pop(args[i].first()->as_Register());
      } else if (args[i].first()->is_XMMRegister()) {
        __ movdbl(args[i].first()->as_XMMRegister(), Address(rsp, 0));
        __ addptr(rsp, 2*wordSize);
      }
    }
}


1334static void save_or_restore_arguments(MacroAssembler* masm,
1335                                      const int stack_slots,
1336                                      const int total_in_args,
1337                                      const int arg_save_area,
1338                                      OopMap* map,
1339                                      VMRegPair* in_regs,
1340                                      BasicType* in_sig_bt) {
1341  // if map is non-NULL then the code should store the values,
1342  // otherwise it should load them.
1343  int slot = arg_save_area;
1344  // Save down double word first
1345  for ( int i = 0; i < total_in_args; i++) {
1346    if (in_regs[i].first()->is_XMMRegister() && in_sig_bt[i] == T_DOUBLE) {
1347      int offset = slot * VMRegImpl::stack_slot_size;
1348      slot += VMRegImpl::slots_per_word;
1349      assert(slot <= stack_slots, "overflow");
1350      if (map != NULL) {
1351        __ movdbl(Address(rsp, offset), in_regs[i].first()->as_XMMRegister());
1352      } else {
1353        __ movdbl(in_regs[i].first()->as_XMMRegister(), Address(rsp, offset));
1354      }
1355    }
1356    if (in_regs[i].first()->is_Register() &&
1357        (in_sig_bt[i] == T_LONG || in_sig_bt[i] == T_ARRAY)) {
1358      int offset = slot * VMRegImpl::stack_slot_size;
1359      if (map != NULL) {
1360        __ movq(Address(rsp, offset), in_regs[i].first()->as_Register());
1361        if (in_sig_bt[i] == T_ARRAY) {
1362          map->set_oop(VMRegImpl::stack2reg(slot));
1363        }
1364      } else {
1365        __ movq(in_regs[i].first()->as_Register(), Address(rsp, offset));
1366      }
1367      slot += VMRegImpl::slots_per_word;
1368    }
1369  }
1370  // Save or restore single word registers
1371  for ( int i = 0; i < total_in_args; i++) {
1372    if (in_regs[i].first()->is_Register()) {
1373      int offset = slot * VMRegImpl::stack_slot_size;
1374      slot++;
1375      assert(slot <= stack_slots, "overflow");
1376
1377      // Value is in an input register; we must flush it to the stack
1378      const Register reg = in_regs[i].first()->as_Register();
1379      switch (in_sig_bt[i]) {
1380        case T_BOOLEAN:
1381        case T_CHAR:
1382        case T_BYTE:
1383        case T_SHORT:
1384        case T_INT:
1385          if (map != NULL) {
1386            __ movl(Address(rsp, offset), reg);
1387          } else {
1388            __ movl(reg, Address(rsp, offset));
1389          }
1390          break;
1391        case T_ARRAY:
1392        case T_LONG:
1393          // handled above
1394          break;
1395        case T_OBJECT:
1396        default: ShouldNotReachHere();
1397      }
1398    } else if (in_regs[i].first()->is_XMMRegister()) {
1399      if (in_sig_bt[i] == T_FLOAT) {
1400        int offset = slot * VMRegImpl::stack_slot_size;
1401        slot++;
1402        assert(slot <= stack_slots, "overflow");
1403        if (map != NULL) {
1404          __ movflt(Address(rsp, offset), in_regs[i].first()->as_XMMRegister());
1405        } else {
1406          __ movflt(in_regs[i].first()->as_XMMRegister(), Address(rsp, offset));
1407        }
1408      }
1409    } else if (in_regs[i].first()->is_stack()) {
1410      if (in_sig_bt[i] == T_ARRAY && map != NULL) {
1411        int offset_in_older_frame = in_regs[i].first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
1412        map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + stack_slots));
1413      }
1414    }
1415  }
1416}
1417
1418
1419// Check GC_locker::needs_gc and enter the runtime if it's true.  This
1420// keeps a new JNI critical region from starting until a GC has been
1421// forced.  Save down any oops in registers and describe them in an
1422// OopMap.
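// In rough pseudo-C, the code emitted below behaves like (a sketch, not the
// generated code itself):
//   if (GC_locker::needs_gc()) {
//     save the oop arguments and describe them in a fresh OopMap;
//     SharedRuntime::block_for_jni_critical(thread);  // may block for a GC
//     restore the saved arguments;
//   }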
1423static void check_needs_gc_for_critical_native(MacroAssembler* masm,
1424                                               int stack_slots,
1425                                               int total_c_args,
1426                                               int total_in_args,
1427                                               int arg_save_area,
1428                                               OopMapSet* oop_maps,
1429                                               VMRegPair* in_regs,
1430                                               BasicType* in_sig_bt) {
1431  __ block_comment("check GC_locker::needs_gc");
1432  Label cont;
1433  __ cmp8(ExternalAddress((address)GC_locker::needs_gc_address()), false);
1434  __ jcc(Assembler::equal, cont);
1435
1436  // Save down any incoming oops and call into the runtime to halt for a GC
1437
1438  OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
1439  save_or_restore_arguments(masm, stack_slots, total_in_args,
1440                            arg_save_area, map, in_regs, in_sig_bt);
1441
1442  address the_pc = __ pc();
1443  oop_maps->add_gc_map( __ offset(), map);
1444  __ set_last_Java_frame(rsp, noreg, the_pc);
1445
1446  __ block_comment("block_for_jni_critical");
1447  __ movptr(c_rarg0, r15_thread);
1448  __ mov(r12, rsp); // remember sp
1449  __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
1450  __ andptr(rsp, -16); // align stack as required by ABI
1451  __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::block_for_jni_critical)));
1452  __ mov(rsp, r12); // restore sp
1453  __ reinit_heapbase();
1454
1455  __ reset_last_Java_frame(false, true);
1456
1457  save_or_restore_arguments(masm, stack_slots, total_in_args,
1458                            arg_save_area, NULL, in_regs, in_sig_bt);
1459
1460  __ bind(cont);
1461#ifdef ASSERT
1462  if (StressCriticalJNINatives) {
1463    // Stress register saving
1464    OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
1465    save_or_restore_arguments(masm, stack_slots, total_in_args,
1466                              arg_save_area, map, in_regs, in_sig_bt);
1467    // Destroy argument registers
1468    for (int i = 0; i < total_in_args - 1; i++) {
1469      if (in_regs[i].first()->is_Register()) {
1470        const Register reg = in_regs[i].first()->as_Register();
1471        __ xorptr(reg, reg);
1472      } else if (in_regs[i].first()->is_XMMRegister()) {
1473        __ xorpd(in_regs[i].first()->as_XMMRegister(), in_regs[i].first()->as_XMMRegister());
1474      } else if (in_regs[i].first()->is_FloatRegister()) {
1475        ShouldNotReachHere();
1476      } else if (in_regs[i].first()->is_stack()) {
1477        // Nothing to do
1478      } else {
1479        ShouldNotReachHere();
1480      }
1481      if (in_sig_bt[i] == T_LONG || in_sig_bt[i] == T_DOUBLE) {
1482        i++;
1483      }
1484    }
1485
1486    save_or_restore_arguments(masm, stack_slots, total_in_args,
1487                              arg_save_area, NULL, in_regs, in_sig_bt);
1488  }
1489#endif
1490}
1491
1492// Unpack an array argument into a pointer to the body and the length
1493// if the array is non-null, otherwise pass 0 for both.
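// For example (illustrative): a Java parameter declared byte[] reaches the
// critical native as the pair (jint length, jbyte* body), where body points
// at the first element of the array; for a null array the callee sees
// (0, NULL) instead.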
1494static void unpack_array_argument(MacroAssembler* masm, VMRegPair reg, BasicType in_elem_type, VMRegPair body_arg, VMRegPair length_arg) {
1495  Register tmp_reg = rax;
1496  assert(!body_arg.first()->is_Register() || body_arg.first()->as_Register() != tmp_reg,
1497         "possible collision");
1498  assert(!length_arg.first()->is_Register() || length_arg.first()->as_Register() != tmp_reg,
1499         "possible collision");
1500
1501  __ block_comment("unpack_array_argument {");
1502
1503  // Pass the length, ptr pair
1504  Label is_null, done;
1505  VMRegPair tmp;
1506  tmp.set_ptr(tmp_reg->as_VMReg());
1507  if (reg.first()->is_stack()) {
1508    // Load the arg up from the stack
1509    move_ptr(masm, reg, tmp);
1510    reg = tmp;
1511  }
1512  __ testptr(reg.first()->as_Register(), reg.first()->as_Register());
1513  __ jccb(Assembler::equal, is_null);
1514  __ lea(tmp_reg, Address(reg.first()->as_Register(), arrayOopDesc::base_offset_in_bytes(in_elem_type)));
1515  move_ptr(masm, tmp, body_arg);
1516  // load the length relative to the body.
1517  __ movl(tmp_reg, Address(tmp_reg, arrayOopDesc::length_offset_in_bytes() -
1518                           arrayOopDesc::base_offset_in_bytes(in_elem_type)));
1519  move32_64(masm, tmp, length_arg);
1520  __ jmpb(done);
1521  __ bind(is_null);
1522  // Pass zeros
1523  __ xorptr(tmp_reg, tmp_reg);
1524  move_ptr(masm, tmp, body_arg);
1525  move32_64(masm, tmp, length_arg);
1526  __ bind(done);
1527
1528  __ block_comment("} unpack_array_argument");
1529}
1530
1531
1532// Different signatures may require very different orders for the move
1533// to avoid clobbering other arguments.  There's no simple way to
1534// order them safely.  Compute a safe order for issuing stores and
1535// break any cycles in those stores.  This code is fairly general but
1536// it's not necessary on the other platforms so we keep it in the
1537// platform dependent code instead of moving it into a shared file.
1538// (See bugs 7013347 & 7145024.)
1539// Note that this code is specific to LP64.
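// Example of a cycle (illustrative): if one argument must move rdi -> rsi
// while another must move rsi -> rdi, neither store can safely go first.
// break_cycle() rewrites the chain using the caller-supplied temp register
// (rbx below) so the stores become
//   rdi -> rbx;  rsi -> rdi;  rbx -> rsi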
1540class ComputeMoveOrder: public StackObj {
1541  class MoveOperation: public ResourceObj {
1542    friend class ComputeMoveOrder;
1543   private:
1544    VMRegPair        _src;
1545    VMRegPair        _dst;
1546    int              _src_index;
1547    int              _dst_index;
1548    bool             _processed;
1549    MoveOperation*  _next;
1550    MoveOperation*  _prev;
1551
1552    static int get_id(VMRegPair r) {
1553      return r.first()->value();
1554    }
1555
1556   public:
1557    MoveOperation(int src_index, VMRegPair src, int dst_index, VMRegPair dst):
1558      _src(src)
1559    , _src_index(src_index)
1560    , _dst(dst)
1561    , _dst_index(dst_index)
1562    , _next(NULL)
1563    , _prev(NULL)
1564    , _processed(false) {
1565    }
1566
1567    VMRegPair src() const              { return _src; }
1568    int src_id() const                 { return get_id(src()); }
1569    int src_index() const              { return _src_index; }
1570    VMRegPair dst() const              { return _dst; }
1571    void set_dst(int i, VMRegPair dst) { _dst_index = i; _dst = dst; }
1572    int dst_index() const              { return _dst_index; }
1573    int dst_id() const                 { return get_id(dst()); }
1574    MoveOperation* next() const       { return _next; }
1575    MoveOperation* prev() const       { return _prev; }
1576    void set_processed()               { _processed = true; }
1577    bool is_processed() const          { return _processed; }
1578
1579    // insert
1580    void break_cycle(VMRegPair temp_register) {
1581      // create a new store following the last store
1582      // to move from the temp_register to the original
1583      MoveOperation* new_store = new MoveOperation(-1, temp_register, dst_index(), dst());
1584
1585      // break the cycle of links and insert new_store at the end
1586      // break the reverse link.
1587      MoveOperation* p = prev();
1588      assert(p->next() == this, "must be");
1589      _prev = NULL;
1590      p->_next = new_store;
1591      new_store->_prev = p;
1592
1593      // change the original store to save its value in the temp.
1594      set_dst(-1, temp_register);
1595    }
1596
1597    void link(GrowableArray<MoveOperation*>& killer) {
1598      // link this store in front of the store that it depends on
1599      MoveOperation* n = killer.at_grow(src_id(), NULL);
1600      if (n != NULL) {
1601        assert(_next == NULL && n->_prev == NULL, "shouldn't have been set yet");
1602        _next = n;
1603        n->_prev = this;
1604      }
1605    }
1606  };
1607
1608 private:
1609  GrowableArray<MoveOperation*> edges;
1610
1611 public:
1612  ComputeMoveOrder(int total_in_args, VMRegPair* in_regs, int total_c_args, VMRegPair* out_regs,
1613                    BasicType* in_sig_bt, GrowableArray<int>& arg_order, VMRegPair tmp_vmreg) {
1614    // Move operations where the dest is the stack can all be
1615    // scheduled first since they can't interfere with the other moves.
1616    for (int i = total_in_args - 1, c_arg = total_c_args - 1; i >= 0; i--, c_arg--) {
1617      if (in_sig_bt[i] == T_ARRAY) {
1618        c_arg--;
1619        if (out_regs[c_arg].first()->is_stack() &&
1620            out_regs[c_arg + 1].first()->is_stack()) {
1621          arg_order.push(i);
1622          arg_order.push(c_arg);
1623        } else {
1624          if (out_regs[c_arg].first()->is_stack() ||
1625              in_regs[i].first() == out_regs[c_arg].first()) {
1626            add_edge(i, in_regs[i].first(), c_arg, out_regs[c_arg + 1]);
1627          } else {
1628            add_edge(i, in_regs[i].first(), c_arg, out_regs[c_arg]);
1629          }
1630        }
1631      } else if (in_sig_bt[i] == T_VOID) {
1632        arg_order.push(i);
1633        arg_order.push(c_arg);
1634      } else {
1635        if (out_regs[c_arg].first()->is_stack() ||
1636            in_regs[i].first() == out_regs[c_arg].first()) {
1637          arg_order.push(i);
1638          arg_order.push(c_arg);
1639        } else {
1640          add_edge(i, in_regs[i].first(), c_arg, out_regs[c_arg]);
1641        }
1642      }
1643    }
1644    // Break any cycles in the register moves and emit them in the
1645    // proper order.
1646    GrowableArray<MoveOperation*>* stores = get_store_order(tmp_vmreg);
1647    for (int i = 0; i < stores->length(); i++) {
1648      arg_order.push(stores->at(i)->src_index());
1649      arg_order.push(stores->at(i)->dst_index());
1650    }
1651  }
1652
1653  // Collect all the move operations
1654  void add_edge(int src_index, VMRegPair src, int dst_index, VMRegPair dst) {
1655    if (src.first() == dst.first()) return;
1656    edges.append(new MoveOperation(src_index, src, dst_index, dst));
1657  }
1658
1659  // Walk the edges breaking cycles between moves.  The result list
1660  // can be walked in order to produce the proper set of loads
1661  GrowableArray<MoveOperation*>* get_store_order(VMRegPair temp_register) {
1662    // Record which moves kill which values
1663    GrowableArray<MoveOperation*> killer;
1664    for (int i = 0; i < edges.length(); i++) {
1665      MoveOperation* s = edges.at(i);
1666      assert(killer.at_grow(s->dst_id(), NULL) == NULL, "only one killer");
1667      killer.at_put_grow(s->dst_id(), s, NULL);
1668    }
1669    assert(killer.at_grow(MoveOperation::get_id(temp_register), NULL) == NULL,
1670           "make sure temp isn't in the registers that are killed");
1671
1672    // create links between loads and stores
1673    for (int i = 0; i < edges.length(); i++) {
1674      edges.at(i)->link(killer);
1675    }
1676
1677    // at this point, all the move operations are chained together
1678    // in a doubly linked list.  Processing it backwards finds
1679    // the beginning of the chain, forwards finds the end.  If there's
1680    // a cycle it can be broken at any point,  so pick an edge and walk
1681    // backward until the list ends or we end where we started.
1682    GrowableArray<MoveOperation*>* stores = new GrowableArray<MoveOperation*>();
1683    for (int e = 0; e < edges.length(); e++) {
1684      MoveOperation* s = edges.at(e);
1685      if (!s->is_processed()) {
1686        MoveOperation* start = s;
1687        // search for the beginning of the chain or cycle
1688        while (start->prev() != NULL && start->prev() != s) {
1689          start = start->prev();
1690        }
1691        if (start->prev() == s) {
1692          start->break_cycle(temp_register);
1693        }
1694        // walk the chain forward inserting to store list
1695        while (start != NULL) {
1696          stores->append(start);
1697          start->set_processed();
1698          start = start->next();
1699        }
1700      }
1701    }
1702    return stores;
1703  }
1704};
1705
1706static void verify_oop_args(MacroAssembler* masm,
1707                            const methodHandle& method,
1708                            const BasicType* sig_bt,
1709                            const VMRegPair* regs) {
1710  Register temp_reg = rbx;  // not part of any compiled calling seq
1711  if (VerifyOops) {
1712    for (int i = 0; i < method->size_of_parameters(); i++) {
1713      if (sig_bt[i] == T_OBJECT ||
1714          sig_bt[i] == T_ARRAY) {
1715        VMReg r = regs[i].first();
1716        assert(r->is_valid(), "bad oop arg");
1717        if (r->is_stack()) {
1718          __ movptr(temp_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
1719          __ verify_oop(temp_reg);
1720        } else {
1721          __ verify_oop(r->as_Register());
1722        }
1723      }
1724    }
1725  }
1726}
1727
1728static void gen_special_dispatch(MacroAssembler* masm,
1729                                 methodHandle method,
1730                                 const BasicType* sig_bt,
1731                                 const VMRegPair* regs) {
1732  verify_oop_args(masm, method, sig_bt, regs);
1733  vmIntrinsics::ID iid = method->intrinsic_id();
1734
1735  // Now write the args into the outgoing interpreter space
1736  bool     has_receiver   = false;
1737  Register receiver_reg   = noreg;
1738  int      member_arg_pos = -1;
1739  Register member_reg     = noreg;
1740  int      ref_kind       = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid);
1741  if (ref_kind != 0) {
1742    member_arg_pos = method->size_of_parameters() - 1;  // trailing MemberName argument
1743    member_reg = rbx;  // known to be free at this point
1744    has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
1745  } else if (iid == vmIntrinsics::_invokeBasic) {
1746    has_receiver = true;
1747  } else {
1748    fatal("unexpected intrinsic id %d", iid);
1749  }
1750
1751  if (member_reg != noreg) {
1752    // Load the member_arg into register, if necessary.
1753    SharedRuntime::check_member_name_argument_is_last_argument(method, sig_bt, regs);
1754    VMReg r = regs[member_arg_pos].first();
1755    if (r->is_stack()) {
1756      __ movptr(member_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
1757    } else {
1758      // no data motion is needed
1759      member_reg = r->as_Register();
1760    }
1761  }
1762
1763  if (has_receiver) {
1764    // Make sure the receiver is loaded into a register.
1765    assert(method->size_of_parameters() > 0, "oob");
1766    assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object");
1767    VMReg r = regs[0].first();
1768    assert(r->is_valid(), "bad receiver arg");
1769    if (r->is_stack()) {
1770      // Porting note:  This assumes that compiled calling conventions always
1771      // pass the receiver oop in a register.  If this is not true on some
1772      // platform, pick a temp and load the receiver from stack.
1773      fatal("receiver always in a register");
1774      receiver_reg = j_rarg0;  // known to be free at this point
1775      __ movptr(receiver_reg, Address(rsp, r->reg2stack() * VMRegImpl::stack_slot_size + wordSize));
1776    } else {
1777      // no data motion is needed
1778      receiver_reg = r->as_Register();
1779    }
1780  }
1781
1782  // Figure out which address we are really jumping to:
1783  MethodHandles::generate_method_handle_dispatch(masm, iid,
1784                                                 receiver_reg, member_reg, /*for_compiler_entry:*/ true);
1785}
1786
1787// ---------------------------------------------------------------------------
1788// Generate a native wrapper for a given method.  The method takes arguments
1789// in the Java compiled code convention, marshals them to the native
1790// convention (handlizes oops, etc), transitions to native, makes the call,
1791// returns to java state (possibly blocking), unhandlizes any result and
1792// returns.
1793//
1794// Critical native functions are a shorthand for the use of
1795// GetPrimitiveArrayCritical and disallow the use of any other JNI
1796// functions.  The wrapper is expected to unpack the arguments before
1797// passing them to the callee and perform checks before and after the
1798// native call to ensure that the GC_locker
1799// lock_critical/unlock_critical semantics are followed.  Some other
1800// parts of JNI setup are skipped, like the tear-down of the JNI handle
1801// block and the check for pending exceptions, since it's impossible for
1802// them to be thrown.
1803//
1804// They are roughly structured like this:
1805//    if (GC_locker::needs_gc())
1806//      SharedRuntime::block_for_jni_critical();
1807//    transition to thread_in_native
1808//    unpack array arguments and call native entry point
1809//    check for safepoint in progress
1810//    check if any thread suspend flags are set
1811//      call into JVM and possibly unlock the JNI critical region
1812//      if a GC was suppressed while in the critical native.
1813//    transition back to thread_in_Java
1814//    return to caller
1815//
1816nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
1817                                                const methodHandle& method,
1818                                                int compile_id,
1819                                                BasicType* in_sig_bt,
1820                                                VMRegPair* in_regs,
1821                                                BasicType ret_type) {
1822  if (method->is_method_handle_intrinsic()) {
1823    vmIntrinsics::ID iid = method->intrinsic_id();
1824    intptr_t start = (intptr_t)__ pc();
1825    int vep_offset = ((intptr_t)__ pc()) - start;
1826    gen_special_dispatch(masm,
1827                         method,
1828                         in_sig_bt,
1829                         in_regs);
1830    int frame_complete = ((intptr_t)__ pc()) - start;  // not complete, period
1831    __ flush();
1832    int stack_slots = SharedRuntime::out_preserve_stack_slots();  // no out slots at all, actually
1833    return nmethod::new_native_nmethod(method,
1834                                       compile_id,
1835                                       masm->code(),
1836                                       vep_offset,
1837                                       frame_complete,
1838                                       stack_slots / VMRegImpl::slots_per_word,
1839                                       in_ByteSize(-1),
1840                                       in_ByteSize(-1),
1841                                       (OopMapSet*)NULL);
1842  }
1843  bool is_critical_native = true;
1844  address native_func = method->critical_native_function();
1845  if (native_func == NULL) {
1846    native_func = method->native_function();
1847    is_critical_native = false;
1848  }
1849  assert(native_func != NULL, "must have function");
1850
1851  // An OopMap for lock (and class if static)
1852  OopMapSet *oop_maps = new OopMapSet();
1853  intptr_t start = (intptr_t)__ pc();
1854
1855  // We have received a description of where all the java args are located
1856  // on entry to the wrapper. We need to convert these args to where
1857  // the jni function will expect them. To figure out where they go
1858  // we convert the java signature to a C signature by inserting
1859  // the hidden arguments as arg[0] and possibly arg[1] (static method)
1860
1861  const int total_in_args = method->size_of_parameters();
1862  int total_c_args = total_in_args;
1863  if (!is_critical_native) {
1864    total_c_args += 1;
1865    if (method->is_static()) {
1866      total_c_args++;
1867    }
1868  } else {
1869    for (int i = 0; i < total_in_args; i++) {
1870      if (in_sig_bt[i] == T_ARRAY) {
1871        total_c_args++;
1872      }
1873    }
1874  }
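  // For example (illustrative): a critical native taking (byte[], int) has
  // total_in_args == 2 but total_c_args == 3, since the array expands into a
  // (length, body) pair and no JNIEnv*/jclass arguments are prepended.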
1875
1876  BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
1877  VMRegPair* out_regs   = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
1878  BasicType* in_elem_bt = NULL;
1879
1880  int argc = 0;
1881  if (!is_critical_native) {
1882    out_sig_bt[argc++] = T_ADDRESS;
1883    if (method->is_static()) {
1884      out_sig_bt[argc++] = T_OBJECT;
1885    }
1886
1887    for (int i = 0; i < total_in_args ; i++ ) {
1888      out_sig_bt[argc++] = in_sig_bt[i];
1889    }
1890  } else {
1891    Thread* THREAD = Thread::current();
1892    in_elem_bt = NEW_RESOURCE_ARRAY(BasicType, total_in_args);
1893    SignatureStream ss(method->signature());
1894    for (int i = 0; i < total_in_args ; i++ ) {
1895      if (in_sig_bt[i] == T_ARRAY) {
1896        // Arrays are passed as int, elem* pair
1897        out_sig_bt[argc++] = T_INT;
1898        out_sig_bt[argc++] = T_ADDRESS;
1899        Symbol* atype = ss.as_symbol(CHECK_NULL);
1900        const char* at = atype->as_C_string();
1901        if (strlen(at) == 2) {
1902          assert(at[0] == '[', "must be");
1903          switch (at[1]) {
1904            case 'B': in_elem_bt[i]  = T_BYTE; break;
1905            case 'C': in_elem_bt[i]  = T_CHAR; break;
1906            case 'D': in_elem_bt[i]  = T_DOUBLE; break;
1907            case 'F': in_elem_bt[i]  = T_FLOAT; break;
1908            case 'I': in_elem_bt[i]  = T_INT; break;
1909            case 'J': in_elem_bt[i]  = T_LONG; break;
1910            case 'S': in_elem_bt[i]  = T_SHORT; break;
1911            case 'Z': in_elem_bt[i]  = T_BOOLEAN; break;
1912            default: ShouldNotReachHere();
1913          }
1914        }
1915      } else {
1916        out_sig_bt[argc++] = in_sig_bt[i];
1917        in_elem_bt[i] = T_VOID;
1918      }
1919      if (in_sig_bt[i] != T_VOID) {
1920        assert(in_sig_bt[i] == ss.type(), "must match");
1921        ss.next();
1922      }
1923    }
1924  }
1925
1926  // Now figure out where the args must be stored and how much stack space
1927  // they require.
1928  int out_arg_slots;
1929  out_arg_slots = c_calling_convention(out_sig_bt, out_regs, NULL, total_c_args);
1930
1931  // Compute framesize for the wrapper.  We need to handlize all oops in
1932  // incoming registers
1933
1934  // Calculate the total number of stack slots we will need.
1935
1936  // First count the abi requirement plus all of the outgoing args
1937  int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;
1938
1939  // Now the space for the inbound oop handle area
1940  int total_save_slots = 6 * VMRegImpl::slots_per_word;  // 6 arguments passed in registers
1941  if (is_critical_native) {
1942    // Critical natives may have to call out so they need a save area
1943    // for register arguments.
1944    int double_slots = 0;
1945    int single_slots = 0;
1946    for ( int i = 0; i < total_in_args; i++) {
1947      if (in_regs[i].first()->is_Register()) {
1948        const Register reg = in_regs[i].first()->as_Register();
1949        switch (in_sig_bt[i]) {
1950          case T_BOOLEAN:
1951          case T_BYTE:
1952          case T_SHORT:
1953          case T_CHAR:
1954          case T_INT:  single_slots++; break;
1955          case T_ARRAY:  // specific to LP64 (7145024)
1956          case T_LONG: double_slots++; break;
1957          default:  ShouldNotReachHere();
1958        }
1959      } else if (in_regs[i].first()->is_XMMRegister()) {
1960        switch (in_sig_bt[i]) {
1961          case T_FLOAT:  single_slots++; break;
1962          case T_DOUBLE: double_slots++; break;
1963          default:  ShouldNotReachHere();
1964        }
1965      } else if (in_regs[i].first()->is_FloatRegister()) {
1966        ShouldNotReachHere();
1967      }
1968    }
1969    total_save_slots = double_slots * 2 + single_slots;
1970    // align the save area
1971    if (double_slots != 0) {
1972      stack_slots = round_to(stack_slots, 2);
1973    }
1974  }
1975
1976  int oop_handle_offset = stack_slots;
1977  stack_slots += total_save_slots;
1978
1979  // Now any space we need for handlizing a klass if static method
1980
1981  int klass_slot_offset = 0;
1982  int klass_offset = -1;
1983  int lock_slot_offset = 0;
1984  bool is_static = false;
1985
1986  if (method->is_static()) {
1987    klass_slot_offset = stack_slots;
1988    stack_slots += VMRegImpl::slots_per_word;
1989    klass_offset = klass_slot_offset * VMRegImpl::stack_slot_size;
1990    is_static = true;
1991  }
1992
1993  // Plus a lock if needed
1994
1995  if (method->is_synchronized()) {
1996    lock_slot_offset = stack_slots;
1997    stack_slots += VMRegImpl::slots_per_word;
1998  }
1999
2000  // Now a place (+2) to save return values or temp during shuffling
2001  // + 4 for return address (which we own) and saved rbp
2002  stack_slots += 6;
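  // (Reading of the 6 slots: one word of scratch at rbp-8 used by
  // save_native_result/restore_native_result, plus the two words for the
  // return address and saved rbp that __ enter() below accounts for.)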
2003
2004  // OK, the space we have allocated will look like:
2005  //
2006  //
2007  // FP-> |                     |
2008  //      |---------------------|
2009  //      | 2 slots for moves   |
2010  //      |---------------------|
2011  //      | lock box (if sync)  |
2012  //      |---------------------| <- lock_slot_offset
2013  //      | klass (if static)   |
2014  //      |---------------------| <- klass_slot_offset
2015  //      | oopHandle area      |
2016  //      |---------------------| <- oop_handle_offset (6 java arg registers)
2017  //      | outbound memory     |
2018  //      | based arguments     |
2019  //      |                     |
2020  //      |---------------------|
2021  //      |                     |
2022  // SP-> | out_preserved_slots |
2023  //
2024  //
2025
2026
2027  // Now compute actual number of stack words we need rounding to make
2028  // stack properly aligned.
2029  stack_slots = round_to(stack_slots, StackAlignmentInSlots);
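  // (Illustrative: with StackAlignmentInBytes == 16 and 4-byte stack slots,
  // StackAlignmentInSlots == 4, so e.g. 45 slots round up to 48 == 192 bytes.)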
2030
2031  int stack_size = stack_slots * VMRegImpl::stack_slot_size;
2032
2033  // First thing make an ic check to see if we should even be here
2034
2035  // We are free to use all registers as temps without saving them and
2036  // restoring them except rbp. rbp is the only callee save register
2037  // as far as the interpreter and the compiler(s) are concerned.
2038
2039
2040  const Register ic_reg = rax;
2041  const Register receiver = j_rarg0;
2042
2043  Label hit;
2044  Label exception_pending;
2045
2046  assert_different_registers(ic_reg, receiver, rscratch1);
2047  __ verify_oop(receiver);
2048  __ load_klass(rscratch1, receiver);
2049  __ cmpq(ic_reg, rscratch1);
2050  __ jcc(Assembler::equal, hit);
2051
2052  __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
2053
2054  // Verified entry point must be aligned
2055  __ align(8);
2056
2057  __ bind(hit);
2058
2059  int vep_offset = ((intptr_t)__ pc()) - start;
2060
2061  // The instruction at the verified entry point must be 5 bytes or longer
2062  // because it can be patched on the fly by make_non_entrant. The stack bang
2063  // instruction fits that requirement.
2064
2065  // Generate stack overflow check
2066
2067  if (UseStackBanging) {
2068    __ bang_stack_with_offset((int)JavaThread::stack_shadow_zone_size());
2069  } else {
2070    // need a 5 byte instruction to allow MT safe patching to non-entrant
2071    __ fat_nop();
2072  }
2073
2074  // Generate a new frame for the wrapper.
2075  __ enter();
2076  // -2 because return address is already present and so is saved rbp
2077  __ subptr(rsp, stack_size - 2*wordSize);
2078
2079  // Frame is now completed as far as size and linkage.
2080  int frame_complete = ((intptr_t)__ pc()) - start;
2081
2082  if (UseRTMLocking) {
2083    // Abort RTM transaction before calling JNI
2084    // because critical section will be large and will be
2085    // aborted anyway. Also nmethod could be deoptimized.
2086    __ xabort(0);
2087  }
2088
2089#ifdef ASSERT
2090  {
2091    Label L;
2092    __ mov(rax, rsp);
2093    __ andptr(rax, -16); // must be 16 byte boundary (see amd64 ABI)
2094    __ cmpptr(rax, rsp);
2095    __ jcc(Assembler::equal, L);
2096    __ stop("improperly aligned stack");
2097    __ bind(L);
2098  }
2099#endif /* ASSERT */
2100
2101
2102  // We use r14 as the oop handle for the receiver/klass
2103  // It is callee save so it survives the call to native
2104
2105  const Register oop_handle_reg = r14;
2106
2107  if (is_critical_native) {
2108    check_needs_gc_for_critical_native(masm, stack_slots, total_c_args, total_in_args,
2109                                       oop_handle_offset, oop_maps, in_regs, in_sig_bt);
2110  }
2111
2112  //
2113  // We immediately shuffle the arguments so that any vm call we have to
2114  // make from here on out (sync slow path, jvmti, etc.) we will have
2115  // captured the oops from our caller and have a valid oopMap for
2116  // them.
2117
2118  // -----------------
2119  // The Grand Shuffle
2120
2121  // The Java calling convention is either equal (linux) or denser (win64) than the
2122  // c calling convention. However, because of the jni_env argument, the c calling
2123  // convention always has at least one more (and two for static) arguments than Java.
2124  // Therefore if we move the args from java -> c backwards then we will never have
2125  // a register->register conflict and we don't have to build a dependency graph
2126  // and figure out how to break any cycles.
2127  //
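  // For example (illustrative, Linux x86_64, where j_rarg0..j_rarg5 are
  // rsi, rdx, rcx, r8, r9, rdi and c_rarg0..c_rarg5 are rdi, rsi, rdx, rcx,
  // r8, r9): for a static native, Java arg 1 must move rdx -> rcx, but rcx
  // still holds Java arg 2, which itself moves rcx -> r8.  Copying the last
  // Java argument first drains each destination before it is overwritten.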
2128
2129  // Record esp-based slot for receiver on stack for non-static methods
2130  int receiver_offset = -1;
2131
2132  // This is a trick. We double the stack slots so we can claim
2133  // the oops in the caller's frame. Since we are sure to have
2134  // more args than the caller, doubling is enough to make
2135  // sure we can capture all the incoming oop args from the
2136  // caller.
2137  //
2138  OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
2139
2140  // Mark location of rbp (someday)
2141  // map->set_callee_saved(VMRegImpl::stack2reg( stack_slots - 2), stack_slots * 2, 0, vmreg(rbp));
2142
2143  // Use eax, ebx as temporaries during any memory-memory moves we have to do
2144  // All inbound args are referenced based on rbp and all outbound args via rsp.
2145
2146
2147#ifdef ASSERT
2148  bool reg_destroyed[RegisterImpl::number_of_registers];
2149  bool freg_destroyed[XMMRegisterImpl::number_of_registers];
2150  for ( int r = 0 ; r < RegisterImpl::number_of_registers ; r++ ) {
2151    reg_destroyed[r] = false;
2152  }
2153  for ( int f = 0 ; f < XMMRegisterImpl::number_of_registers ; f++ ) {
2154    freg_destroyed[f] = false;
2155  }
2156
2157#endif /* ASSERT */
2158
2159  // This may iterate in two different directions depending on the
2160  // kind of native it is.  The reason is that for regular JNI natives
2161  // the incoming and outgoing registers are offset upwards and for
2162  // critical natives they are offset down.
2163  GrowableArray<int> arg_order(2 * total_in_args);
2164  VMRegPair tmp_vmreg;
2165  tmp_vmreg.set1(rbx->as_VMReg());
2166
2167  if (!is_critical_native) {
2168    for (int i = total_in_args - 1, c_arg = total_c_args - 1; i >= 0; i--, c_arg--) {
2169      arg_order.push(i);
2170      arg_order.push(c_arg);
2171    }
2172  } else {
2173    // Compute a valid move order, using tmp_vmreg to break any cycles
2174    ComputeMoveOrder cmo(total_in_args, in_regs, total_c_args, out_regs, in_sig_bt, arg_order, tmp_vmreg);
2175  }
2176
2177  int temploc = -1;
2178  for (int ai = 0; ai < arg_order.length(); ai += 2) {
2179    int i = arg_order.at(ai);
2180    int c_arg = arg_order.at(ai + 1);
2181    __ block_comment(err_msg("move %d -> %d", i, c_arg));
2182    if (c_arg == -1) {
2183      assert(is_critical_native, "should only be required for critical natives");
2184      // This arg needs to be moved to a temporary
2185      __ mov(tmp_vmreg.first()->as_Register(), in_regs[i].first()->as_Register());
2186      in_regs[i] = tmp_vmreg;
2187      temploc = i;
2188      continue;
2189    } else if (i == -1) {
2190      assert(is_critical_native, "should only be required for critical natives");
2191      // Read from the temporary location
2192      assert(temploc != -1, "must be valid");
2193      i = temploc;
2194      temploc = -1;
2195    }
2196#ifdef ASSERT
2197    if (in_regs[i].first()->is_Register()) {
2198      assert(!reg_destroyed[in_regs[i].first()->as_Register()->encoding()], "destroyed reg!");
2199    } else if (in_regs[i].first()->is_XMMRegister()) {
2200      assert(!freg_destroyed[in_regs[i].first()->as_XMMRegister()->encoding()], "destroyed reg!");
2201    }
2202    if (out_regs[c_arg].first()->is_Register()) {
2203      reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
2204    } else if (out_regs[c_arg].first()->is_XMMRegister()) {
2205      freg_destroyed[out_regs[c_arg].first()->as_XMMRegister()->encoding()] = true;
2206    }
2207#endif /* ASSERT */
2208    switch (in_sig_bt[i]) {
2209      case T_ARRAY:
2210        if (is_critical_native) {
2211          unpack_array_argument(masm, in_regs[i], in_elem_bt[i], out_regs[c_arg + 1], out_regs[c_arg]);
2212          c_arg++;
2213#ifdef ASSERT
2214          if (out_regs[c_arg].first()->is_Register()) {
2215            reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
2216          } else if (out_regs[c_arg].first()->is_XMMRegister()) {
2217            freg_destroyed[out_regs[c_arg].first()->as_XMMRegister()->encoding()] = true;
2218          }
2219#endif
2220          break;
2221        }
2222      case T_OBJECT:
2223        assert(!is_critical_native, "no oop arguments");
2224        object_move(masm, map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg],
2225                    ((i == 0) && (!is_static)),
2226                    &receiver_offset);
2227        break;
2228      case T_VOID:
2229        break;
2230
2231      case T_FLOAT:
2232        float_move(masm, in_regs[i], out_regs[c_arg]);
2233        break;
2234
2235      case T_DOUBLE:
2236        assert( i + 1 < total_in_args &&
2237                in_sig_bt[i + 1] == T_VOID &&
2238                out_sig_bt[c_arg+1] == T_VOID, "bad arg list");
2239        double_move(masm, in_regs[i], out_regs[c_arg]);
2240        break;
2241
2242      case T_LONG :
2243        long_move(masm, in_regs[i], out_regs[c_arg]);
2244        break;
2245
2246      case T_ADDRESS: assert(false, "found T_ADDRESS in java args");
2247
2248      default:
2249        move32_64(masm, in_regs[i], out_regs[c_arg]);
2250    }
2251  }
2252
2253  int c_arg;
2254
2255  // Pre-load a static method's oop into r14.  Used both by locking code and
2256  // the normal JNI call code.
2257  if (!is_critical_native) {
2258    // point c_arg at the first arg that is already loaded in case we
2259    // need to spill before we call out
2260    c_arg = total_c_args - total_in_args;
2261
2262    if (method->is_static()) {
2263
2264      //  load oop into a register
2265      __ movoop(oop_handle_reg, JNIHandles::make_local(method->method_holder()->java_mirror()));
2266
2267      // Now handlize the static class mirror; it's known not-null.
2268      __ movptr(Address(rsp, klass_offset), oop_handle_reg);
2269      map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));
2270
2271      // Now get the handle
2272      __ lea(oop_handle_reg, Address(rsp, klass_offset));
2273      // store the klass handle as second argument
2274      __ movptr(c_rarg1, oop_handle_reg);
2275      // and protect the arg if we must spill
2276      c_arg--;
2277    }
2278  } else {
2279    // For JNI critical methods we need to save all registers in save_args.
2280    c_arg = 0;
2281  }
2282
2283  // Change state to native (we save the return address in the thread, since it might not
2284  // be pushed on the stack when we do a stack traversal). It is enough that the pc()
2285  // points into the right code segment. It does not have to be the correct return pc.
2286  // We use the same pc/oopMap repeatedly when we call out
2287
2288  intptr_t the_pc = (intptr_t) __ pc();
2289  oop_maps->add_gc_map(the_pc - start, map);
2290
2291  __ set_last_Java_frame(rsp, noreg, (address)the_pc);
2292
2293
2294  // We have all of the arguments set up at this point. We must not touch any of the
2295  // argument registers at this point (if we had to save/restore them there would be no oopMap covering them).
2296
2297  {
2298    SkipIfEqual skip(masm, &DTraceMethodProbes, false);
2299    // protect the args we've loaded
2300    save_args(masm, total_c_args, c_arg, out_regs);
2301    __ mov_metadata(c_rarg1, method());
2302    __ call_VM_leaf(
2303      CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
2304      r15_thread, c_rarg1);
2305    restore_args(masm, total_c_args, c_arg, out_regs);
2306  }
2307
2308  // RedefineClasses() tracing support for obsolete method entry
2309  if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) {
2310    // protect the args we've loaded
2311    save_args(masm, total_c_args, c_arg, out_regs);
2312    __ mov_metadata(c_rarg1, method());
2313    __ call_VM_leaf(
2314      CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
2315      r15_thread, c_rarg1);
2316    restore_args(masm, total_c_args, c_arg, out_regs);
2317  }
2318
2319  // Lock a synchronized method
2320
2321  // Register definitions used by locking and unlocking
2322
2323  const Register swap_reg = rax;  // Must use rax for cmpxchg instruction
2324  const Register obj_reg  = rbx;  // Will contain the oop
2325  const Register lock_reg = r13;  // Address of compiler lock object (BasicLock)
2326  const Register old_hdr  = r13;  // value of old header at unlock time
2327
2328  Label slow_path_lock;
2329  Label lock_done;
2330
2331  if (method->is_synchronized()) {
2332    assert(!is_critical_native, "unhandled");
2333
2334
2335    const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();
2336
2337    // Get the handle (the 2nd argument)
2338    __ mov(oop_handle_reg, c_rarg1);
2339
2340    // Get address of the box
2341
2342    __ lea(lock_reg, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
2343
2344    // Load the oop from the handle
2345    __ movptr(obj_reg, Address(oop_handle_reg, 0));
2346
2347    if (UseBiasedLocking) {
2348      __ biased_locking_enter(lock_reg, obj_reg, swap_reg, rscratch1, false, lock_done, &slow_path_lock);
2349    }
2350
2351    // Load immediate 1 into swap_reg %rax
2352    __ movl(swap_reg, 1);
2353
2354    // Load (object->mark() | 1) into swap_reg %rax
2355    __ orptr(swap_reg, Address(obj_reg, 0));
2356
2357    // Save (object->mark() | 1) into BasicLock's displaced header
2358    __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
2359
2360    if (os::is_MP()) {
2361      __ lock();
2362    }
2363
2364    // src -> dest iff dest == rax else rax <- dest
2365    __ cmpxchgptr(lock_reg, Address(obj_reg, 0));
2366    __ jcc(Assembler::equal, lock_done);
2367
2368    // Hmm should this move to the slow path code area???
2369
2370    // Test if the oopMark is an obvious stack pointer, i.e.,
2371    //  1) (mark & 3) == 0, and
2372    //  2) rsp <= mark < mark + os::pagesize()
2373    // These 3 tests can be done by evaluating the following
2374    // expression: ((mark - rsp) & (3 - os::vm_page_size())),
2375    // assuming both stack pointer and pagesize have their
2376    // least significant 2 bits clear.
2377    // NOTE: the oopMark is in swap_reg %rax as the result of cmpxchg
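    // Worked example (illustrative, 4K pages): 3 - os::vm_page_size() is
    // 3 - 4096 == -4093 == 0x...fffff003, so the and keeps the low two bits
    // of (mark - rsp) plus every bit at or above the page size.  A zero
    // result therefore means the mark is 4-byte aligned and lies less than
    // one page above rsp, i.e. it is our own displaced header: recursive.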
2378
2379    __ subptr(swap_reg, rsp);
2380    __ andptr(swap_reg, 3 - os::vm_page_size());
2381
2382    // Save the test result, for recursive case, the result is zero
2383    __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
2384    __ jcc(Assembler::notEqual, slow_path_lock);
2385
2386    // Slow path will re-enter here
2387
2388    __ bind(lock_done);
2389  }
2390
2391
2392  // Finally just about ready to make the JNI call
2393
2394
2395  // get JNIEnv* which is first argument to native
2396  if (!is_critical_native) {
2397    __ lea(c_rarg0, Address(r15_thread, in_bytes(JavaThread::jni_environment_offset())));
2398  }
2399
2400  // Now set thread in native
2401  __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_native);
2402
2403  __ call(RuntimeAddress(native_func));
2404
2405  // Verify or restore cpu control state after JNI call
2406  __ restore_cpu_control_state_after_jni();
2407
2408  // Unpack native results.
2409  switch (ret_type) {
2410  case T_BOOLEAN: __ c2bool(rax);            break;
2411  case T_CHAR   : __ movzwl(rax, rax);      break;
2412  case T_BYTE   : __ sign_extend_byte (rax); break;
2413  case T_SHORT  : __ sign_extend_short(rax); break;
2414  case T_INT    : /* nothing to do */        break;
2415  case T_DOUBLE :
2416  case T_FLOAT  :
2417    // Result is in xmm0 we'll save as needed
2418    break;
2419  case T_ARRAY:                 // Really a handle
2420  case T_OBJECT:                // Really a handle
2421      break; // can't de-handlize until after safepoint check
2422  case T_VOID: break;
2423  case T_LONG: break;
2424  default       : ShouldNotReachHere();
2425  }
2426
2427  // Switch thread to "native transition" state before reading the synchronization state.
2428  // This additional state is necessary because reading and testing the synchronization
2429  // state is not atomic w.r.t. GC, as this scenario demonstrates:
2430  //     Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
2431  //     VM thread changes sync state to synchronizing and suspends threads for GC.
2432  //     Thread A is resumed to finish this native method, but doesn't block here since it
2433  //     didn't see any synchronization in progress, and escapes.
2434  __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_native_trans);
2435
2436  if (os::is_MP()) {
2437    if (UseMembar) {
2438      // Force this write out before the read below
2439      __ membar(Assembler::Membar_mask_bits(
2440           Assembler::LoadLoad | Assembler::LoadStore |
2441           Assembler::StoreLoad | Assembler::StoreStore));
2442    } else {
2443      // Write serialization page so VM thread can do a pseudo remote membar.
2444      // We use the current thread pointer to calculate a thread specific
2445      // offset to write to within the page. This minimizes bus traffic
2446      // due to cache line collision.
2447      __ serialize_memory(r15_thread, rcx);
2448    }
2449  }
2450
2451  Label after_transition;
2452
2453  // check for safepoint operation in progress and/or pending suspend requests
2454  {
2455    Label Continue;
2456
2457    __ cmp32(ExternalAddress((address)SafepointSynchronize::address_of_state()),
2458             SafepointSynchronize::_not_synchronized);
2459
2460    Label L;
2461    __ jcc(Assembler::notEqual, L);
2462    __ cmpl(Address(r15_thread, JavaThread::suspend_flags_offset()), 0);
2463    __ jcc(Assembler::equal, Continue);
2464    __ bind(L);
2465
2466    // Don't use call_VM as it will see a possible pending exception and forward it
2467    // and never return here preventing us from clearing _last_native_pc down below.
2468    // Also can't use call_VM_leaf either as it will check to see if rsi & rdi are
2469    // preserved and correspond to the bcp/locals pointers. So we do a runtime call
2470    // by hand.
2471    //
2472    save_native_result(masm, ret_type, stack_slots);
2473    __ mov(c_rarg0, r15_thread);
2474    __ mov(r12, rsp); // remember sp
2475    __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
2476    __ andptr(rsp, -16); // align stack as required by ABI
2477    if (!is_critical_native) {
2478      __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)));
2479    } else {
2480      __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans_and_transition)));
2481    }
2482    __ mov(rsp, r12); // restore sp
2483    __ reinit_heapbase();
2484    // Restore any method result value
2485    restore_native_result(masm, ret_type, stack_slots);
2486
2487    if (is_critical_native) {
2488      // The call above performed the transition to thread_in_Java so
2489      // skip the transition logic below.
2490      __ jmpb(after_transition);
2491    }
2492
2493    __ bind(Continue);
2494  }
2495
2496  // change thread state
2497  __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_Java);
2498  __ bind(after_transition);
2499
2500  Label reguard;
2501  Label reguard_done;
2502  __ cmpl(Address(r15_thread, JavaThread::stack_guard_state_offset()), JavaThread::stack_guard_yellow_reserved_disabled);
2503  __ jcc(Assembler::equal, reguard);
2504  __ bind(reguard_done);
2505
2506  // native result if any is live
2507
2508  // Unlock
2509  Label unlock_done;
2510  Label slow_path_unlock;
2511  if (method->is_synchronized()) {
2512
2513    // Get locked oop from the handle we passed to jni
2514    __ movptr(obj_reg, Address(oop_handle_reg, 0));
2515
2516    Label done;
2517
2518    if (UseBiasedLocking) {
2519      __ biased_locking_exit(obj_reg, old_hdr, done);
2520    }
2521
2522    // Simple recursive lock?
2523
2524    __ cmpptr(Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size), (int32_t)NULL_WORD);
2525    __ jcc(Assembler::equal, done);
2526
2527    // Must save rax if it is live now because cmpxchg must use it
2528    if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
2529      save_native_result(masm, ret_type, stack_slots);
2530    }
2531
2532
2533    // get address of the stack lock
2534    __ lea(rax, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
2535    //  get old displaced header
2536    __ movptr(old_hdr, Address(rax, 0));
2537
2538    // Atomic swap old header if oop still contains the stack lock
2539    if (os::is_MP()) {
2540      __ lock();
2541    }
2542    __ cmpxchgptr(old_hdr, Address(obj_reg, 0));
2543    __ jcc(Assembler::notEqual, slow_path_unlock);
2544
2545    // slow path re-enters here
2546    __ bind(unlock_done);
2547    if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
2548      restore_native_result(masm, ret_type, stack_slots);
2549    }
2550
2551    __ bind(done);
2552
2553  }
2554  {
2555    SkipIfEqual skip(masm, &DTraceMethodProbes, false);
2556    save_native_result(masm, ret_type, stack_slots);
2557    __ mov_metadata(c_rarg1, method());
2558    __ call_VM_leaf(
2559         CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
2560         r15_thread, c_rarg1);
2561    restore_native_result(masm, ret_type, stack_slots);
2562  }
2563
2564  __ reset_last_Java_frame(false, true);
2565
2566  // Unpack oop result
2567  if (ret_type == T_OBJECT || ret_type == T_ARRAY) {
2568      Label L;
2569      __ testptr(rax, rax);
2570      __ jcc(Assembler::zero, L);
2571      __ movptr(rax, Address(rax, 0));
2572      __ bind(L);
2573      __ verify_oop(rax);
2574  }
2575
2576  if (!is_critical_native) {
2577    // reset handle block
2578    __ movptr(rcx, Address(r15_thread, JavaThread::active_handles_offset()));
2579    __ movl(Address(rcx, JNIHandleBlock::top_offset_in_bytes()), (int32_t)NULL_WORD);
2580  }
2581
2582  // pop our frame
2583
2584  __ leave();
2585
2586  if (!is_critical_native) {
2587    // Any exception pending?
2588    __ cmpptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), (int32_t)NULL_WORD);
2589    __ jcc(Assembler::notEqual, exception_pending);
2590  }
2591
2592  // Return
2593
2594  __ ret(0);
2595
2596  // Unexpected paths are out of line and go here
2597
2598  if (!is_critical_native) {
2599    // pending exceptions enter here
2600    __ bind(exception_pending);
2601
2602    // forward the exception
2603    __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
2604  }
2605
2606  // Slow path locking & unlocking
2607  if (method->is_synchronized()) {
2608
2609    // BEGIN Slow path lock
2610    __ bind(slow_path_lock);
2611
2612    // has last_Java_frame setup. No exceptions so do a vanilla call, not call_VM
2613    // args are (oop obj, BasicLock* lock, JavaThread* thread)
2614
2615    // protect the args we've loaded
2616    save_args(masm, total_c_args, c_arg, out_regs);
2617
2618    __ mov(c_rarg0, obj_reg);
2619    __ mov(c_rarg1, lock_reg);
2620    __ mov(c_rarg2, r15_thread);
2621
2622    // Not a leaf but we have last_Java_frame setup as we want
2623    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), 3);
2624    restore_args(masm, total_c_args, c_arg, out_regs);
2625
2626#ifdef ASSERT
2627    { Label L;
2628      __ cmpptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), (int32_t)NULL_WORD);
2629      __ jcc(Assembler::equal, L);
2630      __ stop("no pending exception allowed on exit from monitorenter");
2631      __ bind(L);
2632    }
2633#endif
2634    __ jmp(lock_done);
2635
2636    // END Slow path lock
2637
2638    // BEGIN Slow path unlock
2639    __ bind(slow_path_unlock);
2640
2641    // If we haven't already saved the native result we must save it now as xmm registers
2642    // are still exposed.
2643
2644    if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
2645      save_native_result(masm, ret_type, stack_slots);
2646    }
2647
2648    __ lea(c_rarg1, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
2649
2650    __ mov(c_rarg0, obj_reg);
2651    __ mov(c_rarg2, r15_thread);
2652    __ mov(r12, rsp); // remember sp
2653    __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
2654    __ andptr(rsp, -16); // align stack as required by ABI
2655
2656    // Save pending exception around call to VM (which contains an EXCEPTION_MARK)
2657    // NOTE that obj_reg == rbx currently
2658    __ movptr(rbx, Address(r15_thread, in_bytes(Thread::pending_exception_offset())));
2659    __ movptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), (int32_t)NULL_WORD);
2660
2661    // args are (oop obj, BasicLock* lock, JavaThread* thread)
2662    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C)));
2663    __ mov(rsp, r12); // restore sp
2664    __ reinit_heapbase();
2665#ifdef ASSERT
2666    {
2667      Label L;
2668      __ cmpptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), (int32_t)NULL_WORD);
2669      __ jcc(Assembler::equal, L);
2670      __ stop("no pending exception allowed on exit complete_monitor_unlocking_C");
2671      __ bind(L);
2672    }
2673#endif /* ASSERT */
2674
2675    __ movptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), rbx);
2676
2677    if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
2678      restore_native_result(masm, ret_type, stack_slots);
2679    }
2680    __ jmp(unlock_done);
2681
2682    // END Slow path unlock
2683
2684  } // synchronized
2685
2686  // SLOW PATH Reguard the stack if needed
2687
2688  __ bind(reguard);
2689  save_native_result(masm, ret_type, stack_slots);
2690  __ mov(r12, rsp); // remember sp
2691  __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
2692  __ andptr(rsp, -16); // align stack as required by ABI
2693  __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
2694  __ mov(rsp, r12); // restore sp
2695  __ reinit_heapbase();
2696  restore_native_result(masm, ret_type, stack_slots);
2697  // and continue
2698  __ jmp(reguard_done);
2699
2700
2701
2702  __ flush();
2703
2704  nmethod *nm = nmethod::new_native_nmethod(method,
2705                                            compile_id,
2706                                            masm->code(),
2707                                            vep_offset,
2708                                            frame_complete,
2709                                            stack_slots / VMRegImpl::slots_per_word,
2710                                            (is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
2711                                            in_ByteSize(lock_slot_offset*VMRegImpl::stack_slot_size),
2712                                            oop_maps);
2713
2714  if (is_critical_native) {
2715    nm->set_lazy_critical_native(true);
2716  }
2717
2718  return nm;
2719
2720}
2721
2722// this function returns the adjust size (in number of words) to a c2i adapter
2723// activation for use during deoptimization
2724int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals ) {
2725  return (callee_locals - callee_parameters) * Interpreter::stackElementWords;
2726}
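// For example (illustrative): a callee with 2 parameters and 5 locals yields
// an adjustment of (5 - 2) * Interpreter::stackElementWords words of extra
// space in the interpreter frame that replaces the compiled activation.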
2727
2728
2729uint SharedRuntime::out_preserve_stack_slots() {
2730  return 0;
2731}
2732
2733//------------------------------generate_deopt_blob----------------------------
2734void SharedRuntime::generate_deopt_blob() {
2735  // Allocate space for the code
2736  ResourceMark rm;
2737  // Setup code generation tools
2738  int pad = 0;
2739#if INCLUDE_JVMCI
2740  if (EnableJVMCI) {
2741    pad += 512; // Increase the buffer size when compiling for JVMCI
2742  }
2743#endif
2744  CodeBuffer buffer("deopt_blob", 2048+pad, 1024);
2745  MacroAssembler* masm = new MacroAssembler(&buffer);
2746  int frame_size_in_words;
2747  OopMap* map = NULL;
2748  OopMapSet *oop_maps = new OopMapSet();
2749
2750  // -------------
2751  // This code enters when returning to a de-optimized nmethod.  A return
2752  // address has been pushed on the stack, and return values are in
2753  // registers.
2754  // If we are doing a normal deopt then we were called from the patched
2755  // nmethod from the point we returned to the nmethod. So the return
2756  // address on the stack is wrong by NativeCall::instruction_size
2757  // We will adjust the value so it looks like we have the original return
2758  // address on the stack (like when we eagerly deoptimized).
2759  // In the case of an exception pending when deoptimizing, we enter
2760  // with a return address on the stack that points after the call we patched
2761  // into the exception handler. We have the following register state from,
2762  // e.g., the forward exception stub (see stubGenerator_x86_64.cpp).
2763  //    rax: exception oop
2764  //    rbx: exception handler
2765  //    rdx: throwing pc
2766  // So in this case we simply jam rdx into the useless return address and
2767  // the stack looks just like we want.
2768  //
2769  // At this point we need to de-opt.  We save the argument return
2770  // registers.  We call the first C routine, fetch_unroll_info().  This
2771  // routine captures the return values and returns a structure which
2772  // describes the current frame size and the sizes of all replacement frames.
2773  // The current frame is compiled code and may contain many inlined
2774  // functions, each with their own JVM state.  We pop the current frame, then
2775  // push all the new frames.  Then we call the C routine unpack_frames() to
2776  // populate these frames.  Finally unpack_frames() returns us the new target
2777  // address.  Notice that callee-save registers are BLOWN here; they have
2778  // already been captured in the vframeArray at the time the return PC was
2779  // patched.
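  //
  // In outline (a restatement of the steps above, not additional logic):
  //   1. save all registers (RegisterSaver::save_live_registers)
  //   2. call Deoptimization::fetch_unroll_info(thread) -> UnrollBlock*
  //   3. pop the deoptimized compiled frame
  //   4. push one skeletal interpreter frame per inlined JVM state
  //   5. call Deoptimization::unpack_frames(thread, exec_mode) to fill them in
  //   6. return into the interpreter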
  address start = __ pc();
  Label cont;

  // Prolog for the non-exception case!

  // Save everything in sight.
  map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);

  // Normal deoptimization.  Save exec mode for unpack_frames.
  __ movl(r14, Deoptimization::Unpack_deopt); // callee-saved
  __ jmp(cont);

  int reexecute_offset = __ pc() - start;
#if INCLUDE_JVMCI && !defined(COMPILER1)
  if (EnableJVMCI && UseJVMCICompiler) {
    // JVMCI does not use this kind of deoptimization
    __ should_not_reach_here();
  }
#endif

  // Reexecute case
  // The return address is the pc that describes which bci to re-execute at.

  // No need to update map as each call to save_live_registers will produce an identical oopmap
  (void) RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);

  __ movl(r14, Deoptimization::Unpack_reexecute); // callee-saved
  __ jmp(cont);
#if INCLUDE_JVMCI
  Label after_fetch_unroll_info_call;
  int implicit_exception_uncommon_trap_offset = 0;
  int uncommon_trap_offset = 0;

  if (EnableJVMCI) {
    implicit_exception_uncommon_trap_offset = __ pc() - start;

    __ pushptr(Address(r15_thread, in_bytes(JavaThread::jvmci_implicit_exception_pc_offset())));
    __ movptr(Address(r15_thread, in_bytes(JavaThread::jvmci_implicit_exception_pc_offset())), (int32_t)NULL_WORD);

    uncommon_trap_offset = __ pc() - start;

    // Save everything in sight.
    RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);
    // fetch_unroll_info needs to call last_java_frame()
    __ set_last_Java_frame(noreg, noreg, NULL);

    __ movl(c_rarg1, Address(r15_thread, in_bytes(JavaThread::pending_deoptimization_offset())));
    __ movl(Address(r15_thread, in_bytes(JavaThread::pending_deoptimization_offset())), -1);

    __ movl(r14, (int32_t)Deoptimization::Unpack_reexecute);
    __ mov(c_rarg0, r15_thread);
    __ movl(c_rarg2, r14); // exec mode
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap)));
    oop_maps->add_gc_map(__ pc() - start, map->deep_copy());

    __ reset_last_Java_frame(false, false);

    __ jmp(after_fetch_unroll_info_call);
  } // EnableJVMCI
#endif // INCLUDE_JVMCI

  int exception_offset = __ pc() - start;

  // Prolog for exception case

  // All registers are dead at this entry point, except for rax and
  // rdx, which contain the exception oop and exception pc
  // respectively.  Set them in TLS and fall thru to the
  // unpack_with_exception_in_tls entry point.

  __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), rdx);
  __ movptr(Address(r15_thread, JavaThread::exception_oop_offset()), rax);

  int exception_in_tls_offset = __ pc() - start;

  // new implementation because exception oop is now passed in JavaThread

  // Prolog for exception case
  // All registers must be preserved because they might be used by LinearScan
  // Exception oop and throwing PC are passed in JavaThread
  // tos: stack at point of call to method that threw the exception (i.e. only
  // args are on the stack, no return address)

  // make room on stack for the return address
  // It will be patched later with the throwing pc. The correct value is not
  // available now because loading it from memory would destroy registers.
  __ push(0);

  // Save everything in sight.
  map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);

  // Now it is safe to overwrite any register

  // Deopt during an exception.  Save exec mode for unpack_frames.
  __ movl(r14, Deoptimization::Unpack_exception); // callee-saved

  // load throwing pc from JavaThread and patch it as the return address
  // of the current frame. Then clear the field in JavaThread

  __ movptr(rdx, Address(r15_thread, JavaThread::exception_pc_offset()));
  __ movptr(Address(rbp, wordSize), rdx);
  __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), (int32_t)NULL_WORD);

#ifdef ASSERT
  // verify that there is really an exception oop in JavaThread
  __ movptr(rax, Address(r15_thread, JavaThread::exception_oop_offset()));
  __ verify_oop(rax);

  // verify that there is no pending exception
  Label no_pending_exception;
  __ movptr(rax, Address(r15_thread, Thread::pending_exception_offset()));
  __ testptr(rax, rax);
  __ jcc(Assembler::zero, no_pending_exception);
  __ stop("must not have pending exception here");
  __ bind(no_pending_exception);
#endif

  __ bind(cont);

  // Call C code.  Need thread and this frame, but NOT official VM entry
  // crud.  We cannot block on this call, no GC can happen.
  //
  // UnrollBlock* fetch_unroll_info(JavaThread* thread)

  // fetch_unroll_info needs to call last_java_frame().

  __ set_last_Java_frame(noreg, noreg, NULL);
#ifdef ASSERT
  { Label L;
    __ cmpptr(Address(r15_thread, JavaThread::last_Java_fp_offset()),
              (int32_t)0);
    __ jcc(Assembler::equal, L);
    __ stop("SharedRuntime::generate_deopt_blob: last_Java_fp not cleared");
    __ bind(L);
  }
#endif // ASSERT
  __ mov(c_rarg0, r15_thread);
  __ movl(c_rarg1, r14); // exec_mode
  __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info)));

  // Need to have an oopmap that tells fetch_unroll_info where to
  // find any register it might need.
  oop_maps->add_gc_map(__ pc() - start, map);

  __ reset_last_Java_frame(false, false);

#if INCLUDE_JVMCI
  if (EnableJVMCI) {
    __ bind(after_fetch_unroll_info_call);
  }
#endif

  // Load UnrollBlock* into rdi
  __ mov(rdi, rax);

  __ movl(r14, Address(rdi, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes()));
  Label noException;
  __ cmpl(r14, Deoptimization::Unpack_exception);   // Was exception pending?
  __ jcc(Assembler::notEqual, noException);
  __ movptr(rax, Address(r15_thread, JavaThread::exception_oop_offset()));
  // QQQ this is useless; exception_pc was already cleared to NULL above
  __ movptr(rdx, Address(r15_thread, JavaThread::exception_pc_offset()));
  __ movptr(Address(r15_thread, JavaThread::exception_oop_offset()), (int32_t)NULL_WORD);
  __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), (int32_t)NULL_WORD);

  __ verify_oop(rax);

  // Overwrite the result registers with the exception results.
  __ movptr(Address(rsp, RegisterSaver::rax_offset_in_bytes()), rax);
  // I think this is useless
  __ movptr(Address(rsp, RegisterSaver::rdx_offset_in_bytes()), rdx);

  __ bind(noException);

  // Only register save data is on the stack.
  // Now restore the result registers.  Everything else is either dead
  // or captured in the vframeArray.
  RegisterSaver::restore_result_registers(masm);

  // All of the register save area has been popped off the stack. Only the
  // return address remains.

  // Pop all the frames we must move/replace.
  //
  // Frame picture (youngest to oldest)
  // 1: self-frame (no frame link)
  // 2: deopting frame  (no frame link)
  // 3: caller of deopting frame (could be compiled/interpreted).
  //
  // Note: by leaving the return address of self-frame on the stack
  // and using the size of frame 2 to adjust the stack
  // when we are done the return to frame 3 will still be on the stack.

  // Pop deoptimized frame
  __ movl(rcx, Address(rdi, Deoptimization::UnrollBlock::size_of_deoptimized_frame_offset_in_bytes()));
  __ addptr(rsp, rcx);

  // rsp should be pointing at the return address to the caller (3)

  // Pick up the initial fp we should save
  // restore rbp before stack bang because if stack overflow is thrown it needs to be pushed (and preserved)
  __ movptr(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_info_offset_in_bytes()));

#ifdef ASSERT
  // Compilers generate code that bangs the stack by as much as the
  // interpreter would need. So this stack banging should never
  // trigger a fault. Verify that it does not on non-product builds.
  if (UseStackBanging) {
    __ movl(rbx, Address(rdi, Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes()));
    __ bang_stack_size(rbx, rcx);
  }
#endif

  // Load address of array of frame pcs into rcx
  __ movptr(rcx, Address(rdi, Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));

  // Trash the old pc
  __ addptr(rsp, wordSize);

  // Load address of array of frame sizes into rsi
  __ movptr(rsi, Address(rdi, Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes()));

  // Load counter into rdx
  __ movl(rdx, Address(rdi, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes()));

  // Now adjust the caller's stack to make up for the extra locals
  // but record the original sp so that we can save it in the skeletal interpreter
  // frame and the stack walking of interpreter_sender will get the unextended sp
  // value and not the "real" sp value.

  const Register sender_sp = r8;

  __ mov(sender_sp, rsp);
  __ movl(rbx, Address(rdi,
                       Deoptimization::UnrollBlock::
                       caller_adjustment_offset_in_bytes()));
  __ subptr(rsp, rbx);

  // Push interpreter frames in a loop
  Label loop;
  __ bind(loop);
  __ movptr(rbx, Address(rsi, 0));      // Load frame size
  __ subptr(rbx, 2*wordSize);           // We'll push pc and ebp by hand
  __ pushptr(Address(rcx, 0));          // Save return address
  __ enter();                           // Save old & set new ebp
  __ subptr(rsp, rbx);                  // Prolog
  // This value is corrected by layout_activation_impl
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
  __ movptr(Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize), sender_sp); // Make it walkable
  __ mov(sender_sp, rsp);               // Pass sender_sp to next frame
  __ addptr(rsi, wordSize);             // Bump array pointer (sizes)
  __ addptr(rcx, wordSize);             // Bump array pointer (pcs)
  __ decrementl(rdx);                   // Decrement counter
  __ jcc(Assembler::notZero, loop);
  __ pushptr(Address(rcx, 0));          // Save final return address
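  // A sketch of the stack at this point, youngest at the top (an
  // illustration only, not extra logic):
  //   final return address (becomes the return pc of the self-frame
  //                         re-pushed just below)
  //   skeletal interpreter frames, inner-most JVM state first
  //   caller_adjustment bytes making up for the extra locals
  //   return address to the caller (3)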

  // Re-push self-frame
  __ enter();                           // Save old & set new ebp

  // Allocate a full sized register save area.
  // Return address and rbp are in place, so we allocate two fewer words.
  __ subptr(rsp, (frame_size_in_words - 2) * wordSize);

  // Restore frame locals after moving the frame
  __ movdbl(Address(rsp, RegisterSaver::xmm0_offset_in_bytes()), xmm0);
  __ movptr(Address(rsp, RegisterSaver::rax_offset_in_bytes()), rax);

  // Call C code.  Need thread but NOT official VM entry
  // crud.  We cannot block on this call, no GC can happen.  Call should
  // restore return values to their stack-slots with the new SP.
  //
  // void Deoptimization::unpack_frames(JavaThread* thread, int exec_mode)

  // Use rbp because the frames look interpreted now
  // Save "the_pc" since it cannot easily be retrieved using the last_java_SP after we aligned SP.
  // Don't need the precise return PC here, just precise enough to point into this code blob.
  address the_pc = __ pc();
  __ set_last_Java_frame(noreg, rbp, the_pc);

  __ andptr(rsp, -(StackAlignmentInBytes));  // Fix stack alignment as required by ABI
  __ mov(c_rarg0, r15_thread);
  __ movl(c_rarg1, r14); // second arg: exec_mode
  __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames)));
  // Revert SP alignment after call since we're going to do some SP relative addressing below
  __ movptr(rsp, Address(r15_thread, JavaThread::last_Java_sp_offset()));

  // Set an oopmap for the call site
  // Use the same PC we used for the last java frame
  oop_maps->add_gc_map(the_pc - start,
                       new OopMap(frame_size_in_words, 0));

  // Clear fp AND pc
  __ reset_last_Java_frame(true, true);

  // Collect return values
  __ movdbl(xmm0, Address(rsp, RegisterSaver::xmm0_offset_in_bytes()));
  __ movptr(rax, Address(rsp, RegisterSaver::rax_offset_in_bytes()));
  // I think this is useless (throwing pc?)
  __ movptr(rdx, Address(rsp, RegisterSaver::rdx_offset_in_bytes()));

  // Pop self-frame.
  __ leave();                           // Epilog

  // Jump to interpreter
  __ ret(0);

  // Make sure all code is generated
  masm->flush();

  _deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset, reexecute_offset, frame_size_in_words);
  _deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset);
#if INCLUDE_JVMCI
  if (EnableJVMCI) {
    _deopt_blob->set_uncommon_trap_offset(uncommon_trap_offset);
    _deopt_blob->set_implicit_exception_uncommon_trap_offset(implicit_exception_uncommon_trap_offset);
  }
#endif
}

#ifdef COMPILER2
//------------------------------generate_uncommon_trap_blob--------------------
void SharedRuntime::generate_uncommon_trap_blob() {
  // Allocate space for the code
  ResourceMark rm;
  // Setup code generation tools
  CodeBuffer buffer("uncommon_trap_blob", 2048, 1024);
  MacroAssembler* masm = new MacroAssembler(&buffer);

  assert(SimpleRuntimeFrame::framesize % 4 == 0, "sp not 16-byte aligned");

  address start = __ pc();

  if (UseRTMLocking) {
    // Abort RTM transaction before possible nmethod deoptimization.
    __ xabort(0);
  }

  // Push self-frame.  We get here with a return address on the
  // stack, so rsp is 8-byte aligned until we allocate our frame.
  __ subptr(rsp, SimpleRuntimeFrame::return_off << LogBytesPerInt); // Prolog

  // No callee-saved registers. rbp is assumed implicitly saved
  __ movptr(Address(rsp, SimpleRuntimeFrame::rbp_off << LogBytesPerInt), rbp);

  // The compiler left unloaded_class_index in j_rarg0; move it to where the
  // runtime expects it.
  __ movl(c_rarg1, j_rarg0);

  __ set_last_Java_frame(noreg, noreg, NULL);

  // Call C code.  Need thread but NOT official VM entry
  // crud.  We cannot block on this call, no GC can happen.  Call should
  // capture callee-saved registers as well as return values.
  // Thread is in rdi already.
  //
  // UnrollBlock* uncommon_trap(JavaThread* thread, jint unloaded_class_index);

  __ mov(c_rarg0, r15_thread);
  __ movl(c_rarg2, Deoptimization::Unpack_uncommon_trap);
  __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap)));

  // Set an oopmap for the call site
  OopMapSet* oop_maps = new OopMapSet();
  OopMap* map = new OopMap(SimpleRuntimeFrame::framesize, 0);

  // location of rbp is known implicitly by the frame sender code

  oop_maps->add_gc_map(__ pc() - start, map);

  __ reset_last_Java_frame(false, false);

  // Load UnrollBlock* into rdi
  __ mov(rdi, rax);

#ifdef ASSERT
  { Label L;
    __ cmpptr(Address(rdi, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes()),
              (int32_t)Deoptimization::Unpack_uncommon_trap);
    __ jcc(Assembler::equal, L);
    __ stop("SharedRuntime::generate_uncommon_trap_blob: expected Unpack_uncommon_trap");
    __ bind(L);
  }
#endif

  // Pop all the frames we must move/replace.
  //
  // Frame picture (youngest to oldest)
  // 1: self-frame (no frame link)
  // 2: deopting frame  (no frame link)
  // 3: caller of deopting frame (could be compiled/interpreted).

  // Pop self-frame.  We have no frame, and must rely only on rax and rsp.
  __ addptr(rsp, (SimpleRuntimeFrame::framesize - 2) << LogBytesPerInt); // Epilog!

  // Pop deoptimized frame (int)
  __ movl(rcx, Address(rdi,
                       Deoptimization::UnrollBlock::
                       size_of_deoptimized_frame_offset_in_bytes()));
  __ addptr(rsp, rcx);

  // rsp should be pointing at the return address to the caller (3)

  // Pick up the initial fp we should save
  // restore rbp before stack bang because if stack overflow is thrown it needs to be pushed (and preserved)
  __ movptr(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_info_offset_in_bytes()));

#ifdef ASSERT
  // Compilers generate code that bangs the stack by as much as the
  // interpreter would need. So this stack banging should never
  // trigger a fault. Verify that it does not on non-product builds.
  if (UseStackBanging) {
    __ movl(rbx, Address(rdi, Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes()));
    __ bang_stack_size(rbx, rcx);
  }
#endif

  // Load address of array of frame pcs into rcx (address*)
  __ movptr(rcx, Address(rdi, Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));

  // Trash the return pc
  __ addptr(rsp, wordSize);

  // Load address of array of frame sizes into rsi (intptr_t*)
  __ movptr(rsi, Address(rdi, Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes()));

  // Counter
  __ movl(rdx, Address(rdi, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes())); // (int)

  // Now adjust the caller's stack to make up for the extra locals but
  // record the original sp so that we can save it in the skeletal
  // interpreter frame and the stack walking of interpreter_sender
  // will get the unextended sp value and not the "real" sp value.

  const Register sender_sp = r8;

  __ mov(sender_sp, rsp);
  __ movl(rbx, Address(rdi, Deoptimization::UnrollBlock::caller_adjustment_offset_in_bytes())); // (int)
  __ subptr(rsp, rbx);

  // Push interpreter frames in a loop
  Label loop;
  __ bind(loop);
  __ movptr(rbx, Address(rsi, 0)); // Load frame size
  __ subptr(rbx, 2 * wordSize);    // We'll push pc and rbp by hand
  __ pushptr(Address(rcx, 0));     // Save return address
  __ enter();                      // Save old & set new rbp
  __ subptr(rsp, rbx);             // Prolog
  __ movptr(Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize),
            sender_sp);            // Make it walkable
  // This value is corrected by layout_activation_impl
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
  __ mov(sender_sp, rsp);          // Pass sender_sp to next frame
  __ addptr(rsi, wordSize);        // Bump array pointer (sizes)
  __ addptr(rcx, wordSize);        // Bump array pointer (pcs)
  __ decrementl(rdx);              // Decrement counter
  __ jcc(Assembler::notZero, loop);
  __ pushptr(Address(rcx, 0));     // Save final return address

  // Re-push self-frame
  __ enter();                 // Save old & set new rbp
  __ subptr(rsp, (SimpleRuntimeFrame::framesize - 4) << LogBytesPerInt);
                              // Prolog

  // Use rbp because the frames look interpreted now
  // Save "the_pc" since it cannot easily be retrieved using the last_java_SP after we aligned SP.
  // Don't need the precise return PC here, just precise enough to point into this code blob.
  address the_pc = __ pc();
  __ set_last_Java_frame(noreg, rbp, the_pc);

  // Call C code.  Need thread but NOT official VM entry
  // crud.  We cannot block on this call, no GC can happen.  Call should
  // restore return values to their stack-slots with the new SP.
  // Thread is in rdi already.
  //
  // BasicType unpack_frames(JavaThread* thread, int exec_mode);

  __ andptr(rsp, -(StackAlignmentInBytes)); // Align SP as required by ABI
  __ mov(c_rarg0, r15_thread);
  __ movl(c_rarg1, Deoptimization::Unpack_uncommon_trap);
  __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames)));

  // Set an oopmap for the call site
  // Use the same PC we used for the last java frame
  oop_maps->add_gc_map(the_pc - start, new OopMap(SimpleRuntimeFrame::framesize, 0));

  // Clear fp AND pc
  __ reset_last_Java_frame(true, true);

  // Pop self-frame.
  __ leave();                 // Epilog

  // Jump to interpreter
  __ ret(0);

  // Make sure all code is generated
  masm->flush();

  _uncommon_trap_blob = UncommonTrapBlob::create(&buffer, oop_maps,
                                                 SimpleRuntimeFrame::framesize >> 1);
}
#endif // COMPILER2


//------------------------------generate_handler_blob------
//
// Generate a special Compile2Runtime blob that saves all registers,
// and sets up an oopmap.
//
SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_type) {
  assert(StubRoutines::forward_exception_entry() != NULL,
         "must be generated before");

  ResourceMark rm;
  OopMapSet *oop_maps = new OopMapSet();
  OopMap* map;

  // Allocate space for the code.  Setup code generation tools.
  CodeBuffer buffer("handler_blob", 2048, 1024);
  MacroAssembler* masm = new MacroAssembler(&buffer);

  address start   = __ pc();
  address call_pc = NULL;
  int frame_size_in_words;
  bool cause_return = (poll_type == POLL_AT_RETURN);
  bool save_vectors = (poll_type == POLL_AT_VECTOR_LOOP);
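  // When cause_return is true the safepoint poll sat at a return
  // instruction, so the return address already on the stack is correct.
  // Otherwise (a poll inside a method) a placeholder word is pushed below
  // and later patched with the saved exception pc taken from the thread.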

  if (UseRTMLocking) {
    // Abort RTM transaction before calling runtime
    // because critical section will be large and will be
    // aborted anyway. Also nmethod could be deoptimized.
    __ xabort(0);
  }

  // Make room for return address (or push it again)
  if (!cause_return) {
    __ push(rbx);
  }

  // Save registers, fpu state, and flags
  map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words, save_vectors);

  // The following is basically a call_VM.  However, we need the precise
  // address of the call in order to generate an oopmap. Hence, we do all the
  // work ourselves.

  __ set_last_Java_frame(noreg, noreg, NULL);

  // The return address must always be correct so that the frame constructor
  // never sees an invalid pc.

  if (!cause_return) {
    // overwrite the dummy value we pushed on entry
    __ movptr(c_rarg0, Address(r15_thread, JavaThread::saved_exception_pc_offset()));
    __ movptr(Address(rbp, wordSize), c_rarg0);
  }

  // Do the call
  __ mov(c_rarg0, r15_thread);
  __ call(RuntimeAddress(call_ptr));

  // Set an oopmap for the call site.  This oopmap will map all
  // oop-registers and debug-info registers as callee-saved.  This
  // will allow deoptimization at this safepoint to find all possible
  // debug-info recordings, as well as let GC find all oops.

  oop_maps->add_gc_map(__ pc() - start, map);

  Label noException;

  __ reset_last_Java_frame(false, false);

  __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
  __ jcc(Assembler::equal, noException);

  // Exception pending

  RegisterSaver::restore_live_registers(masm, save_vectors);

  __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));

  // No exception case
  __ bind(noException);

  // Normal exit, restore registers and exit.
  RegisterSaver::restore_live_registers(masm, save_vectors);

  __ ret(0);

  // Make sure all code is generated
  masm->flush();

  // Fill out other meta info
  return SafepointBlob::create(&buffer, oop_maps, frame_size_in_words);
}

//
// generate_resolve_blob - call resolution (static/virtual/opt-virtual/ic-miss)
//
// Generate a stub that calls into the vm to find out the proper destination
// of a java call. All the argument registers are live at this point
// but since this is generic code we don't know what they are and the caller
// must do any gc of the args.
//
RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const char* name) {
  assert(StubRoutines::forward_exception_entry() != NULL, "must be generated before");

  // allocate space for the code
  ResourceMark rm;

  CodeBuffer buffer(name, 1000, 512);
  MacroAssembler* masm = new MacroAssembler(&buffer);

  int frame_size_in_words;

  OopMapSet *oop_maps = new OopMapSet();
  OopMap* map = NULL;

  int start = __ offset();

  map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);

  int frame_complete = __ offset();

  __ set_last_Java_frame(noreg, noreg, NULL);

  __ mov(c_rarg0, r15_thread);

  __ call(RuntimeAddress(destination));


  // Set an oopmap for the call site.
  // We need this not only for callee-saved registers, but also for volatile
  // registers that the compiler might be keeping live across a safepoint.

  oop_maps->add_gc_map(__ offset() - start, map);

  // rax contains the address we are going to jump to assuming no exception got installed

  // clear last_Java_sp
  __ reset_last_Java_frame(false, false);
  // check for pending exceptions
  Label pending;
  __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
  __ jcc(Assembler::notEqual, pending);

  // get the returned Method*
  __ get_vm_result_2(rbx, r15_thread);
  __ movptr(Address(rsp, RegisterSaver::rbx_offset_in_bytes()), rbx);

  __ movptr(Address(rsp, RegisterSaver::rax_offset_in_bytes()), rax);

  RegisterSaver::restore_live_registers(masm);

  // We are back to the original state on entry and ready to go.

  __ jmp(rax);

  // Pending exception after the safepoint

  __ bind(pending);

  RegisterSaver::restore_live_registers(masm);

  // exception pending => remove activation and forward to exception handler

  __ movptr(Address(r15_thread, JavaThread::vm_result_offset()), (int)NULL_WORD);

  __ movptr(rax, Address(r15_thread, Thread::pending_exception_offset()));
  __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));

  // -------------
  // make sure all code is generated
  masm->flush();

  // return the blob (the frame size is in words)
  return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_in_words, oop_maps, true);
}


//------------------------------Montgomery multiplication------------------------
//

#ifndef _WINDOWS

#define ASM_SUBTRACT

#ifdef ASM_SUBTRACT
// Subtract 0:b from carry:a.  Return carry.
static unsigned long
sub(unsigned long a[], unsigned long b[], unsigned long carry, long len) {
  long i = 0, cnt = len;
  unsigned long tmp;
  asm volatile("clc; "
               "0: ; "
               "mov (%[b], %[i], 8), %[tmp]; "
               "sbb %[tmp], (%[a], %[i], 8); "
               "inc %[i]; dec %[cnt]; "
               "jne 0b; "
               "mov %[carry], %[tmp]; sbb $0, %[tmp]; "
               : [i]"+r"(i), [cnt]"+r"(cnt), [tmp]"=&r"(tmp)
               : [a]"r"(a), [b]"r"(b), [carry]"r"(carry)
               : "memory");
  return tmp;
}
#else // ASM_SUBTRACT
typedef int __attribute__((mode(TI))) int128;

// Subtract 0:b from carry:a.  Return carry.
static unsigned long
sub(unsigned long a[], unsigned long b[], unsigned long carry, int len) {
  int128 tmp = 0;
  int i;
  for (i = 0; i < len; i++) {
    tmp += a[i];
    tmp -= b[i];
    a[i] = tmp;
    tmp >>= 64;
    assert(-1 <= tmp && tmp <= 0, "invariant");
  }
  return tmp + carry;
}
#endif // ! ASM_SUBTRACT

// Multiply (unsigned) Long A by Long B, accumulating the double-
// length result into the accumulator formed of T0, T1, and T2.
#define MACC(A, B, T0, T1, T2)                                  \
do {                                                            \
  unsigned long hi, lo;                                         \
  __asm__ ("mul %5; add %%rax, %2; adc %%rdx, %3; adc $0, %4"   \
           : "=&d"(hi), "=a"(lo), "+r"(T0), "+r"(T1), "+g"(T2)  \
           : "r"(A), "a"(B) : "cc");                            \
 } while(0)

// As above, but add twice the double-length result into the
// accumulator.
#define MACC2(A, B, T0, T1, T2)                                 \
do {                                                            \
  unsigned long hi, lo;                                         \
  __asm__ ("mul %5; add %%rax, %2; adc %%rdx, %3; adc $0, %4; " \
           "add %%rax, %2; adc %%rdx, %3; adc $0, %4"           \
           : "=&d"(hi), "=a"(lo), "+r"(T0), "+r"(T1), "+g"(T2)  \
           : "r"(A), "a"(B) : "cc");                            \
 } while(0)
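
// In plain C terms, MACC computes (T2:T1:T0) += A * B.  A sketch of the
// semantics only (using the GCC unsigned __int128 extension; not a
// drop-in replacement for the asm above):
//
//   unsigned __int128 p = (unsigned __int128)A * B;       // full product
//   unsigned __int128 s = ((unsigned __int128)T1 << 64) | T0;
//   s += p;                                               // may wrap
//   T0 = (unsigned long)s;
//   T1 = (unsigned long)(s >> 64);
//   T2 += (s < p);                   // carry out of the low 128 bits
//
// MACC2 performs the same accumulation twice, i.e. adds 2*A*B.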

// Fast Montgomery multiplication.  The derivation of the algorithm is
// in "A Cryptographic Library for the Motorola DSP56000",
// Dusse and Kaliski, Proc. EUROCRYPT 90, pp. 230-237.

static void __attribute__((noinline))
montgomery_multiply(unsigned long a[], unsigned long b[], unsigned long n[],
                    unsigned long m[], unsigned long inv, int len) {
  unsigned long t0 = 0, t1 = 0, t2 = 0; // Triple-precision accumulator
  int i;

  assert(inv * n[0] == -1UL, "broken inverse in Montgomery multiply");

  for (i = 0; i < len; i++) {
    int j;
    for (j = 0; j < i; j++) {
      MACC(a[j], b[i-j], t0, t1, t2);
      MACC(m[j], n[i-j], t0, t1, t2);
    }
    MACC(a[i], b[0], t0, t1, t2);
    m[i] = t0 * inv;
    MACC(m[i], n[0], t0, t1, t2);

    assert(t0 == 0, "broken Montgomery multiply");

    t0 = t1; t1 = t2; t2 = 0;
  }

  for (i = len; i < 2*len; i++) {
    int j;
    for (j = i-len+1; j < len; j++) {
      MACC(a[j], b[i-j], t0, t1, t2);
      MACC(m[j], n[i-j], t0, t1, t2);
    }
    m[i-len] = t0;
    t0 = t1; t1 = t2; t2 = 0;
  }

  while (t0)
    t0 = sub(m, n, t0, len);
}
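
// A note on the reduction step above (an explanation, not extra logic):
// inv is chosen so that inv * n[0] == -1 (mod 2^64).  Setting
// m[i] = t0 * inv therefore makes t0 + m[i]*n[0] == 0 (mod 2^64), so the
// final MACC leaves the low limb of the accumulator exactly zero; that is
// what the assert(t0 == 0) checks.  The net effect of the routine is the
// Montgomery product a * b * R^-1 (mod n), where R = 2^(64*len).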

// Fast Montgomery squaring.  This uses asymptotically 25% fewer
// multiplies so it should be up to 25% faster than Montgomery
// multiplication.  However, its loop control is more complex and it
// may actually run slower on some machines.

static void __attribute__((noinline))
montgomery_square(unsigned long a[], unsigned long n[],
                  unsigned long m[], unsigned long inv, int len) {
  unsigned long t0 = 0, t1 = 0, t2 = 0; // Triple-precision accumulator
  int i;

  assert(inv * n[0] == -1UL, "broken inverse in Montgomery square");

  for (i = 0; i < len; i++) {
    int j;
    int end = (i+1)/2;
    for (j = 0; j < end; j++) {
      MACC2(a[j], a[i-j], t0, t1, t2);
      MACC(m[j], n[i-j], t0, t1, t2);
    }
    if ((i & 1) == 0) {
      MACC(a[j], a[j], t0, t1, t2);
    }
    for (; j < i; j++) {
      MACC(m[j], n[i-j], t0, t1, t2);
    }
    m[i] = t0 * inv;
    MACC(m[i], n[0], t0, t1, t2);

    assert(t0 == 0, "broken Montgomery square");

    t0 = t1; t1 = t2; t2 = 0;
  }

  for (i = len; i < 2*len; i++) {
    int start = i-len+1;
    int end = start + (len - start)/2;
    int j;
    for (j = start; j < end; j++) {
      MACC2(a[j], a[i-j], t0, t1, t2);
      MACC(m[j], n[i-j], t0, t1, t2);
    }
    if ((i & 1) == 0) {
      MACC(a[j], a[j], t0, t1, t2);
    }
    for (; j < len; j++) {
      MACC(m[j], n[i-j], t0, t1, t2);
    }
    m[i-len] = t0;
    t0 = t1; t1 = t2; t2 = 0;
  }

  while (t0)
    t0 = sub(m, n, t0, len);
}

// Swap words in a longword.
static unsigned long swap(unsigned long x) {
  return (x << 32) | (x >> 32);
}

// Copy len longwords from s to d, word-swapping as we go.  The
// destination array is reversed.
static void reverse_words(unsigned long *s, unsigned long *d, int len) {
  d += len;
  while(len-- > 0) {
    d--;
    *d = swap(*s);
    s++;
  }
}
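
// For example (an illustration, not extra logic): with len == 2 and
// s == { 0x0000000100000002, 0x0000000300000004 }, the result is
// d == { 0x0000000400000003, 0x0000000200000001 }, i.e. d[i] holds
// swap(s[len-1-i]).  This converts between the big-endian jint order of
// the Java arrays and the little-endian longword order used by the
// arithmetic routines here.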

// The threshold at which squaring is advantageous was determined
// experimentally on an i7-3930K (Ivy Bridge) CPU @ 3.5GHz.
#define MONTGOMERY_SQUARING_THRESHOLD 64

void SharedRuntime::montgomery_multiply(jint *a_ints, jint *b_ints, jint *n_ints,
                                        jint len, jlong inv,
                                        jint *m_ints) {
  assert(len % 2 == 0, "array length in montgomery_multiply must be even");
  int longwords = len/2;

  // Make very sure we don't use so much space that the stack might
  // overflow.  512 jints corresponds to a 16384-bit integer and uses a
  // total of 8K bytes of stack space here (4 arrays of 256 longwords of
  // 8 bytes each).
  int total_allocation = longwords * sizeof (unsigned long) * 4;
  guarantee(total_allocation <= 8192, "must be");
  unsigned long *scratch = (unsigned long *)alloca(total_allocation);

  // Local scratch arrays
  unsigned long
    *a = scratch + 0 * longwords,
    *b = scratch + 1 * longwords,
    *n = scratch + 2 * longwords,
    *m = scratch + 3 * longwords;

  reverse_words((unsigned long *)a_ints, a, longwords);
  reverse_words((unsigned long *)b_ints, b, longwords);
  reverse_words((unsigned long *)n_ints, n, longwords);

  ::montgomery_multiply(a, b, n, m, (unsigned long)inv, longwords);

  reverse_words(m, (unsigned long *)m_ints, longwords);
}
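
// These two entry points back the BigInteger.montgomeryMultiply and
// BigInteger.montgomerySquare intrinsics.  The jint arrays arrive in
// BigInteger's big-endian word order, which is why each one is reversed
// into little-endian longwords before the core routines run and the
// result is reversed back afterwards.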

void SharedRuntime::montgomery_square(jint *a_ints, jint *n_ints,
                                      jint len, jlong inv,
                                      jint *m_ints) {
  assert(len % 2 == 0, "array length in montgomery_square must be even");
  int longwords = len/2;

  // Make very sure we don't use so much space that the stack might
  // overflow.  512 jints corresponds to a 16384-bit integer and uses a
  // total of 6K bytes of stack space here (3 arrays of 256 longwords of
  // 8 bytes each).
  int total_allocation = longwords * sizeof (unsigned long) * 3;
  guarantee(total_allocation <= 8192, "must be");
  unsigned long *scratch = (unsigned long *)alloca(total_allocation);

  // Local scratch arrays
  unsigned long
    *a = scratch + 0 * longwords,
    *n = scratch + 1 * longwords,
    *m = scratch + 2 * longwords;

  reverse_words((unsigned long *)a_ints, a, longwords);
  reverse_words((unsigned long *)n_ints, n, longwords);

  if (len >= MONTGOMERY_SQUARING_THRESHOLD) {
    ::montgomery_square(a, n, m, (unsigned long)inv, longwords);
  } else {
    ::montgomery_multiply(a, a, n, m, (unsigned long)inv, longwords);
  }

  reverse_words(m, (unsigned long *)m_ints, longwords);
}

#endif // !_WINDOWS

#ifdef COMPILER2
// This is here instead of runtime_x86_64.cpp because it uses SimpleRuntimeFrame
//
//------------------------------generate_exception_blob---------------------------
// Creates the exception blob.  Compiled code jumps to this blob when it
// raises an exception (see emit_exception_handler in the x86_64.ad file).
//
// Given an exception pc at a call we call into the runtime for the
// handler in this method. This handler might merely restore state
// (i.e. callee-saved registers), unwind the frame, and jump to the
// exception handler for the nmethod if there is no Java-level handler
// for the nmethod.
//
// This code is entered with a jmp.
//
// Arguments:
//   rax: exception oop
//   rdx: exception pc
//
// Results:
//   rax: exception oop
//   rdx: exception pc in caller or ???
//   destination: exception handler of caller
//
// Note: the exception pc MUST be at a call (precise debug information)
//       Registers rax, rdx, rcx, rsi, rdi, r8-r11 are not callee saved.
//

void OptoRuntime::generate_exception_blob() {
  assert(!OptoRuntime::is_callee_saved_register(RDX_num), "");
  assert(!OptoRuntime::is_callee_saved_register(RAX_num), "");
  assert(!OptoRuntime::is_callee_saved_register(RCX_num), "");

  assert(SimpleRuntimeFrame::framesize % 4 == 0, "sp not 16-byte aligned");

  // Allocate space for the code
  ResourceMark rm;
  // Setup code generation tools
  CodeBuffer buffer("exception_blob", 2048, 1024);
  MacroAssembler* masm = new MacroAssembler(&buffer);


  address start = __ pc();

  // Exception pc is 'return address' for stack walker
  __ push(rdx);
  __ subptr(rsp, SimpleRuntimeFrame::return_off << LogBytesPerInt); // Prolog

  // Save callee-saved registers.  See x86_64.ad.

  // rbp is an implicitly saved callee-saved register (i.e., the calling
  // convention will save/restore it in the prolog/epilog). Other than that
  // there are no callee-saved registers now that adapter frames are gone.

  __ movptr(Address(rsp, SimpleRuntimeFrame::rbp_off << LogBytesPerInt), rbp);

  // Store exception in Thread object. We cannot pass any arguments to the
  // handle_exception call, since we do not want to make any assumption
  // about the size of the frame in which the exception happened.
  // c_rarg0 is either rdi (Linux) or rcx (Windows).
  __ movptr(Address(r15_thread, JavaThread::exception_oop_offset()), rax);
  __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), rdx);

  // This call does all the hard work.  It checks if an exception handler
  // exists in the method.
  // If so, it returns the handler address.
  // If not, it prepares for stack-unwinding, restoring the callee-save
  // registers of the frame being removed.
  //
  // address OptoRuntime::handle_exception_C(JavaThread* thread)

  // At a method handle call, the stack may not be properly aligned
  // when returning with an exception.
  address the_pc = __ pc();
  __ set_last_Java_frame(noreg, noreg, the_pc);
  __ mov(c_rarg0, r15_thread);
  __ andptr(rsp, -(StackAlignmentInBytes));    // Align stack
  __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, OptoRuntime::handle_exception_C)));

  // Set an oopmap for the call site.  This oopmap will only be used if we
  // are unwinding the stack.  Hence, all locations will be dead.
  // Callee-saved registers will be the same as the frame above (i.e.,
  // handle_exception_stub), since they were restored when we got the
  // exception.

  OopMapSet* oop_maps = new OopMapSet();

  oop_maps->add_gc_map(the_pc - start, new OopMap(SimpleRuntimeFrame::framesize, 0));

  __ reset_last_Java_frame(false, true);

  // Restore callee-saved registers

  // rbp is an implicitly saved callee-saved register (i.e., the calling
  // convention will save/restore it in the prolog/epilog). Other than that
  // there are no callee-saved registers now that adapter frames are gone.

  __ movptr(rbp, Address(rsp, SimpleRuntimeFrame::rbp_off << LogBytesPerInt));

  __ addptr(rsp, SimpleRuntimeFrame::return_off << LogBytesPerInt); // Epilog
  __ pop(rdx);                  // No need for exception pc anymore

  // rax: exception handler

  // We have a handler in rax (could be deopt blob).
  __ mov(r8, rax);

  // Get the exception oop
  __ movptr(rax, Address(r15_thread, JavaThread::exception_oop_offset()));
  // Get the exception pc in case we are deoptimized
  __ movptr(rdx, Address(r15_thread, JavaThread::exception_pc_offset()));
#ifdef ASSERT
  __ movptr(Address(r15_thread, JavaThread::exception_handler_pc_offset()), (int)NULL_WORD);
  __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), (int)NULL_WORD);
#endif
  // Clear the exception oop so GC no longer processes it as a root.
  __ movptr(Address(r15_thread, JavaThread::exception_oop_offset()), (int)NULL_WORD);

  // rax: exception oop
  // r8:  exception handler
  // rdx: exception pc
  // Jump to handler

  __ jmp(r8);

  // Make sure all code is generated
  masm->flush();

  // Set exception blob
  _exception_blob = ExceptionBlob::create(&buffer, oop_maps, SimpleRuntimeFrame::framesize >> 1);
}
#endif // COMPILER2