sharedRuntime_x86_32.cpp revision 1879:f95d63e2154a
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "assembler_x86.inline.hpp"
#include "code/debugInfoRec.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
#include "oops/compiledICHolderOop.hpp"
#include "prims/jvmtiRedefineClassesTrace.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/vframeArray.hpp"
#include "vmreg_x86.inline.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif

#define __ masm->
#ifdef COMPILER2
UncommonTrapBlob   *SharedRuntime::_uncommon_trap_blob;
#endif // COMPILER2

DeoptimizationBlob *SharedRuntime::_deopt_blob;
SafepointBlob      *SharedRuntime::_polling_page_safepoint_handler_blob;
SafepointBlob      *SharedRuntime::_polling_page_return_handler_blob;
RuntimeStub*       SharedRuntime::_wrong_method_blob;
RuntimeStub*       SharedRuntime::_ic_miss_blob;
RuntimeStub*       SharedRuntime::_resolve_opt_virtual_call_blob;
RuntimeStub*       SharedRuntime::_resolve_virtual_call_blob;
RuntimeStub*       SharedRuntime::_resolve_static_call_blob;

const int StackAlignmentInSlots = StackAlignmentInBytes / VMRegImpl::stack_slot_size;

class RegisterSaver {
  enum { FPU_regs_live = 8 /*for the FPU stack*/+8/*eight more for XMM registers*/ };
  // Capture info about frame layout
  enum layout {
                fpu_state_off = 0,
                fpu_state_end = fpu_state_off+FPUStateSizeInWords-1,
                st0_off, st0H_off,
                st1_off, st1H_off,
                st2_off, st2H_off,
                st3_off, st3H_off,
                st4_off, st4H_off,
                st5_off, st5H_off,
                st6_off, st6H_off,
                st7_off, st7H_off,

                xmm0_off, xmm0H_off,
                xmm1_off, xmm1H_off,
                xmm2_off, xmm2H_off,
                xmm3_off, xmm3H_off,
                xmm4_off, xmm4H_off,
                xmm5_off, xmm5H_off,
                xmm6_off, xmm6H_off,
                xmm7_off, xmm7H_off,
                flags_off,
                rdi_off,
                rsi_off,
                ignore_off,  // extra copy of rbp
                rsp_off,
                rbx_off,
                rdx_off,
                rcx_off,
                rax_off,
                // The frame sender code expects that rbp will be in the "natural" place and
                // will override any oopMap setting for it. We must therefore force the layout
                // so that it agrees with the frame sender code.
                rbp_off,
                return_off,      // slot for return address
                reg_save_size };
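
  // For orientation, a sketch of the resulting slot numbering (illustrative,
  // and valid only under the assert in save_live_registers that
  // FPUStateSizeInWords == 27): the FPU state occupies slots 0..26, so
  // st0_off == 27; the st*/xmm* pairs run through slot 58; flags_off == 59;
  // the pusha-saved GPRs follow; and rbp_off == 68, return_off == 69,
  // reg_save_size == 70 words.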


  public:

  static OopMap* save_live_registers(MacroAssembler* masm, int additional_frame_words,
                                     int* total_frame_words, bool verify_fpu = true);
  static void restore_live_registers(MacroAssembler* masm);

  static int rax_offset() { return rax_off; }
  static int rbx_offset() { return rbx_off; }

  // Offsets into the register save area
  // Used by deoptimization when it is managing result register
  // values on its own

  static int raxOffset(void) { return rax_off; }
  static int rdxOffset(void) { return rdx_off; }
  static int rbxOffset(void) { return rbx_off; }
  static int xmm0Offset(void) { return xmm0_off; }
  // This really returns a slot in the fp save area; which one is not important
  static int fpResultOffset(void) { return st0_off; }

  // During deoptimization only the result registers need to be restored;
  // all the other values have already been extracted.

  static void restore_result_registers(MacroAssembler* masm);

};

OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words,
                                           int* total_frame_words, bool verify_fpu) {

  int frame_size_in_bytes =  (reg_save_size + additional_frame_words) * wordSize;
  int frame_words = frame_size_in_bytes / wordSize;
  *total_frame_words = frame_words;

  assert(FPUStateSizeInWords == 27, "update stack layout");

  // save registers, fpu state, and flags
  // We assume the caller has already pushed a return address slot on the stack.
  // rbp gets pushed twice in this sequence: once by hand (via enter) so that
  // the real rbp sits under the return address exactly as a normal enter
  // would leave it, and once more as part of pusha (that extra copy is the
  // ignore_off slot).
  __ enter();
  __ pusha();
  __ pushf();
  __ subptr(rsp,FPU_regs_live*sizeof(jdouble)); // Push FPU registers space
  __ push_FPU_state();          // Save FPU state & init

  if (verify_fpu) {
    // Some stubs may have non-standard FPU control word settings so
    // only check and reset the value when it is required to be the
    // standard value.  The safepoint blob in particular can be used
    // in methods which are using the 24-bit control word for
    // optimized float math.

#ifdef ASSERT
    // Make sure the control word has the expected value
    Label ok;
    __ cmpw(Address(rsp, 0), StubRoutines::fpu_cntrl_wrd_std());
    __ jccb(Assembler::equal, ok);
    __ stop("corrupted control word detected");
    __ bind(ok);
#endif

    // Reset the control word to guard against exceptions being unmasked
    // since fstp_d can cause FPU stack underflow exceptions.  Write it
    // into the on-stack copy and then reload that to make sure that the
    // current and future values are correct.
    __ movw(Address(rsp, 0), StubRoutines::fpu_cntrl_wrd_std());
  }

  __ frstor(Address(rsp, 0));
  if (!verify_fpu) {
    // Set the control word so that exceptions are masked for the
    // following code.
    __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
  }

  // Save the FPU registers in de-opt-able form

  __ fstp_d(Address(rsp, st0_off*wordSize)); // st(0)
  __ fstp_d(Address(rsp, st1_off*wordSize)); // st(1)
  __ fstp_d(Address(rsp, st2_off*wordSize)); // st(2)
  __ fstp_d(Address(rsp, st3_off*wordSize)); // st(3)
  __ fstp_d(Address(rsp, st4_off*wordSize)); // st(4)
  __ fstp_d(Address(rsp, st5_off*wordSize)); // st(5)
  __ fstp_d(Address(rsp, st6_off*wordSize)); // st(6)
  __ fstp_d(Address(rsp, st7_off*wordSize)); // st(7)

  if( UseSSE == 1 ) {           // Save the XMM state
    __ movflt(Address(rsp,xmm0_off*wordSize),xmm0);
    __ movflt(Address(rsp,xmm1_off*wordSize),xmm1);
    __ movflt(Address(rsp,xmm2_off*wordSize),xmm2);
    __ movflt(Address(rsp,xmm3_off*wordSize),xmm3);
    __ movflt(Address(rsp,xmm4_off*wordSize),xmm4);
    __ movflt(Address(rsp,xmm5_off*wordSize),xmm5);
    __ movflt(Address(rsp,xmm6_off*wordSize),xmm6);
    __ movflt(Address(rsp,xmm7_off*wordSize),xmm7);
  } else if( UseSSE >= 2 ) {
    __ movdbl(Address(rsp,xmm0_off*wordSize),xmm0);
    __ movdbl(Address(rsp,xmm1_off*wordSize),xmm1);
    __ movdbl(Address(rsp,xmm2_off*wordSize),xmm2);
    __ movdbl(Address(rsp,xmm3_off*wordSize),xmm3);
    __ movdbl(Address(rsp,xmm4_off*wordSize),xmm4);
    __ movdbl(Address(rsp,xmm5_off*wordSize),xmm5);
    __ movdbl(Address(rsp,xmm6_off*wordSize),xmm6);
    __ movdbl(Address(rsp,xmm7_off*wordSize),xmm7);
  }

  // Set an oopmap for the call site.  This oopmap will map all
  // oop-registers and debug-info registers as callee-saved.  This
  // will allow deoptimization at this safepoint to find all possible
  // debug-info recordings, as well as let GC find all oops.

  OopMapSet *oop_maps = new OopMapSet();
  OopMap* map =  new OopMap( frame_words, 0 );

#define STACK_OFFSET(x) VMRegImpl::stack2reg((x) + additional_frame_words)

  map->set_callee_saved(STACK_OFFSET( rax_off), rax->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( rcx_off), rcx->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( rdx_off), rdx->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( rbx_off), rbx->as_VMReg());
  // rbp location is known implicitly, no oopMap
  map->set_callee_saved(STACK_OFFSET( rsi_off), rsi->as_VMReg());
  map->set_callee_saved(STACK_OFFSET( rdi_off), rdi->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(st0_off), as_FloatRegister(0)->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(st1_off), as_FloatRegister(1)->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(st2_off), as_FloatRegister(2)->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(st3_off), as_FloatRegister(3)->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(st4_off), as_FloatRegister(4)->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(st5_off), as_FloatRegister(5)->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(st6_off), as_FloatRegister(6)->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(st7_off), as_FloatRegister(7)->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(xmm0_off), xmm0->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(xmm1_off), xmm1->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(xmm2_off), xmm2->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(xmm3_off), xmm3->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(xmm4_off), xmm4->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(xmm5_off), xmm5->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(xmm6_off), xmm6->as_VMReg());
  map->set_callee_saved(STACK_OFFSET(xmm7_off), xmm7->as_VMReg());
  // %%% This is really a waste but we'll keep things as they were for now
  if (true) {
#define NEXTREG(x) (x)->as_VMReg()->next()
    map->set_callee_saved(STACK_OFFSET(st0H_off), NEXTREG(as_FloatRegister(0)));
    map->set_callee_saved(STACK_OFFSET(st1H_off), NEXTREG(as_FloatRegister(1)));
    map->set_callee_saved(STACK_OFFSET(st2H_off), NEXTREG(as_FloatRegister(2)));
    map->set_callee_saved(STACK_OFFSET(st3H_off), NEXTREG(as_FloatRegister(3)));
    map->set_callee_saved(STACK_OFFSET(st4H_off), NEXTREG(as_FloatRegister(4)));
    map->set_callee_saved(STACK_OFFSET(st5H_off), NEXTREG(as_FloatRegister(5)));
    map->set_callee_saved(STACK_OFFSET(st6H_off), NEXTREG(as_FloatRegister(6)));
    map->set_callee_saved(STACK_OFFSET(st7H_off), NEXTREG(as_FloatRegister(7)));
    map->set_callee_saved(STACK_OFFSET(xmm0H_off), NEXTREG(xmm0));
    map->set_callee_saved(STACK_OFFSET(xmm1H_off), NEXTREG(xmm1));
    map->set_callee_saved(STACK_OFFSET(xmm2H_off), NEXTREG(xmm2));
    map->set_callee_saved(STACK_OFFSET(xmm3H_off), NEXTREG(xmm3));
    map->set_callee_saved(STACK_OFFSET(xmm4H_off), NEXTREG(xmm4));
    map->set_callee_saved(STACK_OFFSET(xmm5H_off), NEXTREG(xmm5));
    map->set_callee_saved(STACK_OFFSET(xmm6H_off), NEXTREG(xmm6));
    map->set_callee_saved(STACK_OFFSET(xmm7H_off), NEXTREG(xmm7));
#undef NEXTREG
#undef STACK_OFFSET
  }

  return map;

}

void RegisterSaver::restore_live_registers(MacroAssembler* masm) {

  // Recover XMM & FPU state
  if( UseSSE == 1 ) {
    __ movflt(xmm0,Address(rsp,xmm0_off*wordSize));
    __ movflt(xmm1,Address(rsp,xmm1_off*wordSize));
    __ movflt(xmm2,Address(rsp,xmm2_off*wordSize));
    __ movflt(xmm3,Address(rsp,xmm3_off*wordSize));
    __ movflt(xmm4,Address(rsp,xmm4_off*wordSize));
    __ movflt(xmm5,Address(rsp,xmm5_off*wordSize));
    __ movflt(xmm6,Address(rsp,xmm6_off*wordSize));
    __ movflt(xmm7,Address(rsp,xmm7_off*wordSize));
  } else if( UseSSE >= 2 ) {
    __ movdbl(xmm0,Address(rsp,xmm0_off*wordSize));
    __ movdbl(xmm1,Address(rsp,xmm1_off*wordSize));
    __ movdbl(xmm2,Address(rsp,xmm2_off*wordSize));
    __ movdbl(xmm3,Address(rsp,xmm3_off*wordSize));
    __ movdbl(xmm4,Address(rsp,xmm4_off*wordSize));
    __ movdbl(xmm5,Address(rsp,xmm5_off*wordSize));
    __ movdbl(xmm6,Address(rsp,xmm6_off*wordSize));
    __ movdbl(xmm7,Address(rsp,xmm7_off*wordSize));
  }
  __ pop_FPU_state();
  __ addptr(rsp, FPU_regs_live*sizeof(jdouble)); // Pop FPU registers

  __ popf();
  __ popa();
  // Get the rbp described implicitly by the frame sender code (no oopMap)
  __ pop(rbp);

}

void RegisterSaver::restore_result_registers(MacroAssembler* masm) {

  // Just restore the result registers. Only used by deoptimization. By
  // now any callee-save register that needs to be restored to a c2
  // caller of the deoptee has been extracted into the vframeArray
  // and will be stuffed into the c2i adapter we create for later
  // restoration, so only result registers need to be restored here.
  //

  __ frstor(Address(rsp, 0));      // Restore fpu state

  // Recover XMM & FPU state
  if( UseSSE == 1 ) {
    __ movflt(xmm0, Address(rsp, xmm0_off*wordSize));
  } else if( UseSSE >= 2 ) {
    __ movdbl(xmm0, Address(rsp, xmm0_off*wordSize));
  }
  __ movptr(rax, Address(rsp, rax_off*wordSize));
  __ movptr(rdx, Address(rsp, rdx_off*wordSize));
  // Pop all of the register save area off the stack except the return address
  __ addptr(rsp, return_off * wordSize);
}

// The java_calling_convention describes stack locations as ideal slots on
// a frame with no abi restrictions. Since we must observe abi restrictions
// (like the placement of the register window) the slots must be biased by
// the following value.
static int reg2offset_in(VMReg r) {
  // Account for the saved rbp and return address
  // This should really be in_preserve_stack_slots
  return (r->reg2stack() + 2) * VMRegImpl::stack_slot_size;
}

static int reg2offset_out(VMReg r) {
  return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
}
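
// A quick worked check of the bias above (illustrative, 32-bit build):
// incoming stack slot 0 maps to Address(rbp, (0 + 2) * 4) == rbp + 8, i.e.
// just past the saved rbp at rbp + 0 and the return address at rbp + 4,
// which is where the caller's first stack argument actually lives.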

// ---------------------------------------------------------------------------
// Read the array of BasicTypes from a signature, and compute where the
// arguments should go.  Values in the VMRegPair regs array refer to 4-byte
// quantities.  Values less than VMRegImpl::stack0 are registers, those above
// refer to 4-byte stack slots.  All stack slots are based off of the stack
// pointer as framesizes are fixed.
// VMRegImpl::stack0 refers to the first slot 0(sp), and VMRegImpl::stack0+1
// refers to the memory word 4 bytes higher.  Register values 0 up to
// RegisterImpl::number_of_registers are the 32-bit integer registers.

// Pass first two oop/int args in registers ECX and EDX.
// Pass first two float/double args in registers XMM0 and XMM1.
// Doubles have precedence, so if you pass a mix of floats and doubles
// the doubles will grab the registers before the floats will.

// Note: the INPUTS in sig_bt are in units of Java argument words, which are
// either 32-bit or 64-bit depending on the build.  The OUTPUTS are in 32-bit
// units regardless of build. Of course for i486 there is no 64-bit build.


// ---------------------------------------------------------------------------
// The compiled Java calling convention.
// Pass first two oop/int args in registers ECX and EDX.
// Pass first two float/double args in registers XMM0 and XMM1.
// Doubles have precedence, so if you pass a mix of floats and doubles
// the doubles will grab the registers before the floats will.
int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
                                           VMRegPair *regs,
                                           int total_args_passed,
                                           int is_outgoing) {
  uint    stack = 0;          // Starting stack position for args on stack


  // Pass first two oop/int args in registers ECX and EDX.
  uint reg_arg0 = 9999;
  uint reg_arg1 = 9999;

  // Pass first two float/double args in registers XMM0 and XMM1.
  // Doubles have precedence, so if you pass a mix of floats and doubles
  // the doubles will grab the registers before the floats will.
  // CNC - TURNED OFF FOR non-SSE.
  //       On Intel we have to round all doubles (and most floats) at
  //       call sites by storing to the stack in any case.
  // UseSSE=0 ==> Don't Use ==> 9999+0
  // UseSSE=1 ==> Floats only ==> 9999+1
  // UseSSE>=2 ==> Floats or doubles ==> 9999+2
  enum { fltarg_dontuse = 9999+0, fltarg_float_only = 9999+1, fltarg_flt_dbl = 9999+2 };
  uint fargs = (UseSSE>=2) ? 2 : UseSSE;
  uint freg_arg0 = 9999+fargs;
  uint freg_arg1 = 9999+fargs;

  // Pass doubles & longs aligned on the stack.  First count stack slots for doubles
  int i;
  for( i = 0; i < total_args_passed; i++) {
    if( sig_bt[i] == T_DOUBLE ) {
      // first 2 doubles go in registers
      if( freg_arg0 == fltarg_flt_dbl ) freg_arg0 = i;
      else if( freg_arg1 == fltarg_flt_dbl ) freg_arg1 = i;
      else // Else double is passed low on the stack to be aligned.
        stack += 2;
    } else if( sig_bt[i] == T_LONG ) {
      stack += 2;
    }
  }
  int dstack = 0;             // Separate counter for placing doubles

  // Now pick where all else goes.
  for( i = 0; i < total_args_passed; i++) {
    // From the type and the argument number (count) compute the location
    switch( sig_bt[i] ) {
    case T_SHORT:
    case T_CHAR:
    case T_BYTE:
    case T_BOOLEAN:
    case T_INT:
    case T_ARRAY:
    case T_OBJECT:
    case T_ADDRESS:
      if( reg_arg0 == 9999 )  {
        reg_arg0 = i;
        regs[i].set1(rcx->as_VMReg());
      } else if( reg_arg1 == 9999 )  {
        reg_arg1 = i;
        regs[i].set1(rdx->as_VMReg());
      } else {
        regs[i].set1(VMRegImpl::stack2reg(stack++));
      }
      break;
    case T_FLOAT:
      if( freg_arg0 == fltarg_flt_dbl || freg_arg0 == fltarg_float_only ) {
        freg_arg0 = i;
        regs[i].set1(xmm0->as_VMReg());
      } else if( freg_arg1 == fltarg_flt_dbl || freg_arg1 == fltarg_float_only ) {
        freg_arg1 = i;
        regs[i].set1(xmm1->as_VMReg());
      } else {
        regs[i].set1(VMRegImpl::stack2reg(stack++));
      }
      break;
    case T_LONG:
      assert(sig_bt[i+1] == T_VOID, "missing Half" );
      regs[i].set2(VMRegImpl::stack2reg(dstack));
      dstack += 2;
      break;
    case T_DOUBLE:
      assert(sig_bt[i+1] == T_VOID, "missing Half" );
      if( freg_arg0 == (uint)i ) {
        regs[i].set2(xmm0->as_VMReg());
      } else if( freg_arg1 == (uint)i ) {
        regs[i].set2(xmm1->as_VMReg());
      } else {
        regs[i].set2(VMRegImpl::stack2reg(dstack));
        dstack += 2;
      }
      break;
    case T_VOID: regs[i].set_bad(); break;
    default:
      ShouldNotReachHere();
      break;
    }
  }

  // The return value can be an odd number of VMRegImpl stack slots; make it a multiple of 2.
  return round_to(stack, 2);
}
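
// To make the convention concrete, a worked example (illustrative only,
// assuming UseSSE >= 2) for the hypothetical signature (int, long, float,
// double, Object):
//   sig_bt = { T_INT, T_LONG, T_VOID, T_FLOAT, T_DOUBLE, T_VOID, T_OBJECT }
// The first pass reserves 2 aligned stack slots for the long (the double
// wins XMM0), so the second pass assigns:
//   int    -> ECX            long   -> stack slots 0-1
//   float  -> XMM1 (the double had precedence and took XMM0 first)
//   double -> XMM0           Object -> EDX
// and the function returns round_to(2, 2) == 2 outgoing stack slots.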

// Patch the caller's callsite with entry to compiled code if it exists.
static void patch_callers_callsite(MacroAssembler *masm) {
  Label L;
  __ verify_oop(rbx);
  __ cmpptr(Address(rbx, in_bytes(methodOopDesc::code_offset())), (int32_t)NULL_WORD);
  __ jcc(Assembler::equal, L);
  // Schedule the branch target address early.
  // Call into the VM to patch the caller, then jump to compiled callee
  // rax isn't live so capture return address while we easily can
  __ movptr(rax, Address(rsp, 0));
  __ pusha();
  __ pushf();

  if (UseSSE == 1) {
    __ subptr(rsp, 2*wordSize);
    __ movflt(Address(rsp, 0), xmm0);
    __ movflt(Address(rsp, wordSize), xmm1);
  }
  if (UseSSE >= 2) {
    __ subptr(rsp, 4*wordSize);
    __ movdbl(Address(rsp, 0), xmm0);
    __ movdbl(Address(rsp, 2*wordSize), xmm1);
  }
#ifdef COMPILER2
  // C2 may leave the stack dirty if not in SSE2+ mode
  if (UseSSE >= 2) {
    __ verify_FPU(0, "c2i transition should have clean FPU stack");
  } else {
    __ empty_FPU_stack();
  }
#endif /* COMPILER2 */

  // VM needs caller's callsite
  __ push(rax);
  // VM needs target method
  __ push(rbx);
  __ verify_oop(rbx);
  __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite)));
  __ addptr(rsp, 2*wordSize);

  if (UseSSE == 1) {
    __ movflt(xmm0, Address(rsp, 0));
    __ movflt(xmm1, Address(rsp, wordSize));
    __ addptr(rsp, 2*wordSize);
  }
  if (UseSSE >= 2) {
    __ movdbl(xmm0, Address(rsp, 0));
    __ movdbl(xmm1, Address(rsp, 2*wordSize));
    __ addptr(rsp, 4*wordSize);
  }

  __ popf();
  __ popa();
  __ bind(L);
}


static void move_c2i_double(MacroAssembler *masm, XMMRegister r, int st_off) {
  int next_off = st_off - Interpreter::stackElementSize;
  __ movdbl(Address(rsp, next_off), r);
}

static void gen_c2i_adapter(MacroAssembler *masm,
                            int total_args_passed,
                            int comp_args_on_stack,
                            const BasicType *sig_bt,
                            const VMRegPair *regs,
                            Label& skip_fixup) {
  // Before we get into the guts of the C2I adapter, see if we should be here
  // at all.  We've come from compiled code and are attempting to jump to the
  // interpreter, which means the caller made a static call to get here
  // (vcalls always get a compiled target if there is one).  Check for a
  // compiled target.  If there is one, we need to patch the caller's call.
  patch_callers_callsite(masm);

  __ bind(skip_fixup);

#ifdef COMPILER2
  // C2 may leave the stack dirty if not in SSE2+ mode
  if (UseSSE >= 2) {
    __ verify_FPU(0, "c2i transition should have clean FPU stack");
  } else {
    __ empty_FPU_stack();
  }
#endif /* COMPILER2 */

  // Since all args are passed on the stack,
  // total_args_passed * Interpreter::stackElementSize is the space we need.
  int extraspace = total_args_passed * Interpreter::stackElementSize;

  // Get return address
  __ pop(rax);

  // set senderSP value
  __ movptr(rsi, rsp);

  __ subptr(rsp, extraspace);

  // Now write the args into the outgoing interpreter space
  for (int i = 0; i < total_args_passed; i++) {
    if (sig_bt[i] == T_VOID) {
      assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
      continue;
    }

    // st_off points to lowest address on stack.
    int st_off = ((total_args_passed - 1) - i) * Interpreter::stackElementSize;
    int next_off = st_off - Interpreter::stackElementSize;

    // Say 4 args:
    // i   st_off
    // 0   12 T_LONG
    // 1    8 T_VOID
    // 2    4 T_OBJECT
    // 3    0 T_BOOL
    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }

    if (r_1->is_stack()) {
      // memory to memory copy (rdi is used as the temporary here)
      int ld_off = r_1->reg2stack() * VMRegImpl::stack_slot_size + extraspace;

      if (!r_2->is_valid()) {
        __ movl(rdi, Address(rsp, ld_off));
        __ movptr(Address(rsp, st_off), rdi);
      } else {

        // ld_off == LSW, ld_off+VMRegImpl::stack_slot_size == MSW
        // st_off == MSW, st_off-wordSize == LSW

        __ movptr(rdi, Address(rsp, ld_off));
        __ movptr(Address(rsp, next_off), rdi);
#ifndef _LP64
        __ movptr(rdi, Address(rsp, ld_off + wordSize));
        __ movptr(Address(rsp, st_off), rdi);
#else
#ifdef ASSERT
        // Overwrite the unused slot with known junk
        __ mov64(rax, CONST64(0xdeadffffdeadaaaa));
        __ movptr(Address(rsp, st_off), rax);
#endif /* ASSERT */
#endif // _LP64
      }
    } else if (r_1->is_Register()) {
      Register r = r_1->as_Register();
      if (!r_2->is_valid()) {
        __ movl(Address(rsp, st_off), r);
      } else {
        // long/double in gpr
        NOT_LP64(ShouldNotReachHere());
        // Two VMRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG.
        // T_DOUBLE and T_LONG use two slots in the interpreter.
        if ( sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
          // long/double in gpr
#ifdef ASSERT
          // Overwrite the unused slot with known junk
          LP64_ONLY(__ mov64(rax, CONST64(0xdeadffffdeadaaab)));
          __ movptr(Address(rsp, st_off), rax);
#endif /* ASSERT */
          __ movptr(Address(rsp, next_off), r);
        } else {
          __ movptr(Address(rsp, st_off), r);
        }
      }
    } else {
      assert(r_1->is_XMMRegister(), "");
      if (!r_2->is_valid()) {
        __ movflt(Address(rsp, st_off), r_1->as_XMMRegister());
      } else {
        assert(sig_bt[i] == T_DOUBLE || sig_bt[i] == T_LONG, "wrong type");
        move_c2i_double(masm, r_1->as_XMMRegister(), st_off);
      }
    }
  }

  // Schedule the branch target address early.
  __ movptr(rcx, Address(rbx, in_bytes(methodOopDesc::interpreter_entry_offset())));
  // And repush original return address
  __ push(rax);
  __ jmp(rcx);
}


static void move_i2c_double(MacroAssembler *masm, XMMRegister r, Register saved_sp, int ld_off) {
  int next_val_off = ld_off - Interpreter::stackElementSize;
  __ movdbl(r, Address(saved_sp, next_val_off));
}

static void gen_i2c_adapter(MacroAssembler *masm,
                            int total_args_passed,
                            int comp_args_on_stack,
                            const BasicType *sig_bt,
                            const VMRegPair *regs) {
  // we're being called from the interpreter but need to find the
  // compiled return entry point.  The return address on the stack
  // should point at it and we just need to pull the old value out.
  // load up the pointer to the compiled return entry point and
  // rewrite our return pc. The code is arranged like so:
  //
  // .word Interpreter::return_sentinel
  // .word address_of_compiled_return_point
  // return_entry_point: blah_blah_blah
  //
  // So we can find the appropriate return point by loading up the word
  // just prior to the current return address we have on the stack.
  //
  // We will only enter here from an interpreted frame and never from after
  // passing thru a c2i. Azul allowed this but we do not. If we lose the
  // race and use a c2i we will remain interpreted for the race loser(s).
  // This removes all sorts of headaches on the x86 side and also eliminates
  // the possibility of having c2i -> i2c -> c2i -> ... endless transitions.


  // Note: rsi contains the senderSP on entry. We must preserve it since
  // we may do an i2c -> c2i transition if we lose a race where compiled
  // code goes non-entrant while we get args ready.

  // Pick up the return address
  __ movptr(rax, Address(rsp, 0));

  // If UseSSE >= 2 then no cleanup is needed on the return to the
  // interpreter so skip fixing up the return entry point unless
  // VerifyFPU is enabled.
  if (UseSSE < 2 || VerifyFPU) {
    Label skip, chk_int;
    // If we were called from the call stub we need to do a little bit different
    // cleanup than if the interpreter returned to the call stub.

    ExternalAddress stub_return_address(StubRoutines::_call_stub_return_address);
    __ cmpptr(rax, stub_return_address.addr());
    __ jcc(Assembler::notEqual, chk_int);
    assert(StubRoutines::x86::get_call_stub_compiled_return() != NULL, "must be set");
    __ lea(rax, ExternalAddress(StubRoutines::x86::get_call_stub_compiled_return()));
    __ jmp(skip);

    // It must be the interpreter since we never get here via a c2i (unlike Azul)

    __ bind(chk_int);
#ifdef ASSERT
    {
      Label ok;
      __ cmpl(Address(rax, -2*wordSize), Interpreter::return_sentinel);
      __ jcc(Assembler::equal, ok);
      __ int3();
      __ bind(ok);
    }
#endif // ASSERT
    __ movptr(rax, Address(rax, -wordSize));
    __ bind(skip);
  }

  // rax now contains the compiled return entry point, which will do any
  // cleanup needed for the return from compiled to interpreted.

  // Must preserve original SP for loading incoming arguments because
  // we need to align the outgoing SP for compiled code.
  __ movptr(rdi, rsp);

  // Cut-out for having no stack args.  Since up to 2 int/oop args are passed
  // in registers, we will occasionally have no stack args.
  int comp_words_on_stack = 0;
  if (comp_args_on_stack) {
    // Sig words on the stack are greater-than VMRegImpl::stack0.  Those in
    // registers are below.  By subtracting stack0, we either get a negative
    // number (all values in registers) or the maximum stack slot accessed.
    // int comp_args_on_stack = VMRegImpl::reg2stack(max_arg);
    // Convert 4-byte stack slots to words.
    comp_words_on_stack = round_to(comp_args_on_stack*4, wordSize)>>LogBytesPerWord;
    // Round up to minimum stack alignment, in wordSize
    comp_words_on_stack = round_to(comp_words_on_stack, 2);
    __ subptr(rsp, comp_words_on_stack * wordSize);
  }

  // Align the outgoing SP
  __ andptr(rsp, -(StackAlignmentInBytes));

  // push the return address on the stack (note that pushing, rather
  // than storing it, yields the correct frame alignment for the callee)
  __ push(rax);

  // Put saved SP in another register
  const Register saved_sp = rax;
  __ movptr(saved_sp, rdi);


  // Will jump to the compiled code just as if compiled code was doing it.
  // Pre-load the register-jump target early, to schedule it better.
  __ movptr(rdi, Address(rbx, in_bytes(methodOopDesc::from_compiled_offset())));

  // Now generate the shuffle code.  Pick up all register args and move the
  // rest through the floating point stack top.
  for (int i = 0; i < total_args_passed; i++) {
    if (sig_bt[i] == T_VOID) {
      // Longs and doubles are passed in native word order, but misaligned
      // in the 32-bit build.
      assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
      continue;
    }

    // Pick up 0, 1 or 2 words from SP+offset.

    assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(),
            "scrambled load targets?");
    // Load in argument order going down.
    int ld_off = (total_args_passed - i) * Interpreter::stackElementSize;
    // Point to interpreter value (vs. tag)
    int next_off = ld_off - Interpreter::stackElementSize;
    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
787      // Convert stack slot to an SP offset (+ wordSize to account for return address )
788      int st_off = regs[i].first()->reg2stack()*VMRegImpl::stack_slot_size + wordSize;
789
790      // We can use rsi as a temp here because compiled code doesn't need rsi as an input
791      // and if we end up going thru a c2i because of a miss a reasonable value of rsi
792      // we be generated.
793      if (!r_2->is_valid()) {
794        // __ fld_s(Address(saved_sp, ld_off));
795        // __ fstp_s(Address(rsp, st_off));
796        __ movl(rsi, Address(saved_sp, ld_off));
797        __ movptr(Address(rsp, st_off), rsi);
798      } else {
799        // Interpreter local[n] == MSW, local[n+1] == LSW however locals
800        // are accessed as negative so LSW is at LOW address
801
802        // ld_off is MSW so get LSW
803        // st_off is LSW (i.e. reg.first())
804        // __ fld_d(Address(saved_sp, next_off));
805        // __ fstp_d(Address(rsp, st_off));
806        //
807        // We are using two VMRegs. This can be either T_OBJECT, T_ADDRESS, T_LONG, or T_DOUBLE
808        // the interpreter allocates two slots but only uses one for thr T_LONG or T_DOUBLE case
809        // So we must adjust where to pick up the data to match the interpreter.
810        //
811        // Interpreter local[n] == MSW, local[n+1] == LSW however locals
812        // are accessed as negative so LSW is at LOW address
813
814        // ld_off is MSW so get LSW
815        const int offset = (NOT_LP64(true ||) sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)?
816                           next_off : ld_off;
817        __ movptr(rsi, Address(saved_sp, offset));
818        __ movptr(Address(rsp, st_off), rsi);
819#ifndef _LP64
820        __ movptr(rsi, Address(saved_sp, ld_off));
821        __ movptr(Address(rsp, st_off + wordSize), rsi);
822#endif // _LP64
823      }
    } else if (r_1->is_Register()) {  // Register argument
      Register r = r_1->as_Register();
      assert(r != rax, "must be different");
      if (r_2->is_valid()) {
        //
        // We are using two VMRegs. This can be either T_OBJECT, T_ADDRESS, T_LONG, or T_DOUBLE.
        // The interpreter allocates two slots but only uses one for the T_LONG or T_DOUBLE case,
        // so we must adjust where to pick up the data to match the interpreter.

        const int offset = (NOT_LP64(true ||) sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)?
                           next_off : ld_off;

        // this can be a misaligned move
        __ movptr(r, Address(saved_sp, offset));
#ifndef _LP64
        assert(r_2->as_Register() != rax, "need another temporary register");
        // Remember r_1 is low address (and LSB on x86)
        // So r_2 gets loaded from high address regardless of the platform
        __ movptr(r_2->as_Register(), Address(saved_sp, ld_off));
#endif // _LP64
      } else {
        __ movl(r, Address(saved_sp, ld_off));
      }
    } else {
      assert(r_1->is_XMMRegister(), "");
      if (!r_2->is_valid()) {
        __ movflt(r_1->as_XMMRegister(), Address(saved_sp, ld_off));
      } else {
        move_i2c_double(masm, r_1->as_XMMRegister(), saved_sp, ld_off);
      }
    }
  }

  // 6243940 We might end up in handle_wrong_method if
  // the callee is deoptimized as we race thru here. If that
  // happens we don't want to take a safepoint because the
  // caller frame will look interpreted and arguments are now
  // "compiled" so it is much better to make this transition
  // invisible to the stack walking code. Unfortunately if
  // we try and find the callee by normal means a safepoint
  // is possible. So we stash the desired callee in the thread
  // and the vm will find it there should this case occur.

  __ get_thread(rax);
  __ movptr(Address(rax, JavaThread::callee_target_offset()), rbx);

  // move methodOop to rax in case we end up in a c2i adapter.
  // the c2i adapters expect methodOop in rax (c2) because c2's
  // resolve stubs return the result (the method) in rax.
  // I'd love to fix this.
  __ mov(rax, rbx);

  __ jmp(rdi);
}

// ---------------------------------------------------------------
AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
                                                            int total_args_passed,
                                                            int comp_args_on_stack,
                                                            const BasicType *sig_bt,
                                                            const VMRegPair *regs,
                                                            AdapterFingerPrint* fingerprint) {
  address i2c_entry = __ pc();

  gen_i2c_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs);

  // -------------------------------------------------------------------------
  // Generate a C2I adapter.  On entry we know rbx holds the methodOop during calls
  // to the interpreter.  The args start out packed in the compiled layout.  They
  // need to be unpacked into the interpreter layout.  This will almost always
  // require some stack space.  We grow the current (compiled) stack, then repack
  // the args.  We finally end in a jump to the generic interpreter entry point.
  // On exit from the interpreter, the interpreter will restore our SP (lest the
  // compiled code, which relies solely on SP and not EBP, get sick).

  address c2i_unverified_entry = __ pc();
  Label skip_fixup;

  Register holder = rax;
  Register receiver = rcx;
  Register temp = rbx;

  {

    Label missed;

    __ verify_oop(holder);
    __ movptr(temp, Address(receiver, oopDesc::klass_offset_in_bytes()));
    __ verify_oop(temp);

    __ cmpptr(temp, Address(holder, compiledICHolderOopDesc::holder_klass_offset()));
    __ movptr(rbx, Address(holder, compiledICHolderOopDesc::holder_method_offset()));
    __ jcc(Assembler::notEqual, missed);
    // Method might have been compiled since the call site was patched to
    // interpreted; if that is the case, treat it as a miss so we can get
    // the call site corrected.
    __ cmpptr(Address(rbx, in_bytes(methodOopDesc::code_offset())), (int32_t)NULL_WORD);
    __ jcc(Assembler::equal, skip_fixup);

    __ bind(missed);
    __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
  }

  address c2i_entry = __ pc();

  gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);

  __ flush();
  return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);
}

int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
                                         VMRegPair *regs,
                                         int total_args_passed) {
  // We return the amount of VMRegImpl stack slots we need to reserve for all
  // the arguments NOT counting out_preserve_stack_slots.

  uint    stack = 0;        // All arguments on stack

  for( int i = 0; i < total_args_passed; i++) {
    // From the type and the argument number (count) compute the location
    switch( sig_bt[i] ) {
    case T_BOOLEAN:
    case T_CHAR:
    case T_FLOAT:
    case T_BYTE:
    case T_SHORT:
    case T_INT:
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS:
      regs[i].set1(VMRegImpl::stack2reg(stack++));
      break;
    case T_LONG:
    case T_DOUBLE: // The stack numbering is reversed from Java
      // Since C arguments do not get reversed, the ordering for
      // doubles on the stack must be opposite the Java convention
      assert(sig_bt[i+1] == T_VOID, "missing Half" );
      regs[i].set2(VMRegImpl::stack2reg(stack));
      stack += 2;
      break;
    case T_VOID: regs[i].set_bad(); break;
    default:
      ShouldNotReachHere();
      break;
    }
  }
  return stack;
}
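
// As a quick illustration (hypothetical signature, not taken from any real
// caller): a native signature flattened to
//   sig_bt = { T_ADDRESS, T_OBJECT, T_INT, T_LONG, T_VOID, T_DOUBLE, T_VOID }
// (JNIEnv*, jobject, jint, jlong, jdouble) lands in slots 0, 1, 2, 3-4,
// and 5-6 respectively, so this function returns 7 VMRegImpl stack slots.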

// A simple move of an integer-like type
static void simple_move32(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      // __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5);
      // __ st(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
      __ movl2ptr(rax, Address(rbp, reg2offset_in(src.first())));
      __ movptr(Address(rsp, reg2offset_out(dst.first())), rax);
    } else {
      // stack to reg
      __ movl2ptr(dst.first()->as_Register(),  Address(rbp, reg2offset_in(src.first())));
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    // no need to sign extend on 64bit
    __ movptr(Address(rsp, reg2offset_out(dst.first())), src.first()->as_Register());
  } else {
    if (dst.first() != src.first()) {
      __ mov(dst.first()->as_Register(), src.first()->as_Register());
    }
  }
}

// An oop arg. Must pass a handle, not the oop itself.
static void object_move(MacroAssembler* masm,
                        OopMap* map,
                        int oop_handle_offset,
                        int framesize_in_slots,
                        VMRegPair src,
                        VMRegPair dst,
                        bool is_receiver,
                        int* receiver_offset) {

  // Because of the calling conventions we know that src can be a
  // register or a stack location. dst can only be a stack location.

  assert(dst.first()->is_stack(), "must be stack");
  // must pass a handle. First figure out the location we use as a handle

  if (src.first()->is_stack()) {
    // Oop is already on the stack as an argument
    Register rHandle = rax;
    Label nil;
    __ xorptr(rHandle, rHandle);
    __ cmpptr(Address(rbp, reg2offset_in(src.first())), (int32_t)NULL_WORD);
    __ jcc(Assembler::equal, nil);
    __ lea(rHandle, Address(rbp, reg2offset_in(src.first())));
    __ bind(nil);
    __ movptr(Address(rsp, reg2offset_out(dst.first())), rHandle);

    int offset_in_older_frame = src.first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
    map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots));
    if (is_receiver) {
      *receiver_offset = (offset_in_older_frame + framesize_in_slots) * VMRegImpl::stack_slot_size;
    }
  } else {
    // Oop is in a register; we must store it to the space we reserve
    // on the stack for oop_handles
    const Register rOop = src.first()->as_Register();
    const Register rHandle = rax;
    int oop_slot = (rOop == rcx ? 0 : 1) * VMRegImpl::slots_per_word + oop_handle_offset;
    int offset = oop_slot*VMRegImpl::stack_slot_size;
    Label skip;
    __ movptr(Address(rsp, offset), rOop);
    map->set_oop(VMRegImpl::stack2reg(oop_slot));
    __ xorptr(rHandle, rHandle);
    __ cmpptr(rOop, (int32_t)NULL_WORD);
    __ jcc(Assembler::equal, skip);
    __ lea(rHandle, Address(rsp, offset));
    __ bind(skip);
    // Store the handle parameter
    __ movptr(Address(rsp, reg2offset_out(dst.first())), rHandle);
    if (is_receiver) {
      *receiver_offset = offset;
    }
  }
}

// A float arg may have to do a float-reg-to-int-reg conversion
static void float_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  assert(!src.second()->is_valid() && !dst.second()->is_valid(), "bad float_move");

  // Because of the calling convention we know that src is either a stack location
  // or an xmm register. dst can only be a stack location.

  assert(dst.first()->is_stack() && ( src.first()->is_stack() || src.first()->is_XMMRegister()), "bad parameters");

  if (src.first()->is_stack()) {
    __ movl(rax, Address(rbp, reg2offset_in(src.first())));
    __ movptr(Address(rsp, reg2offset_out(dst.first())), rax);
  } else {
    // reg to stack
    __ movflt(Address(rsp, reg2offset_out(dst.first())), src.first()->as_XMMRegister());
  }
}

// A long move
static void long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {

  // The only legal possibility for a long_move VMRegPair is
  // two stack slots (possibly unaligned), as neither the Java nor the
  // C calling convention will use registers for longs.

  if (src.first()->is_stack() && dst.first()->is_stack()) {
    assert(src.second()->is_stack() && dst.second()->is_stack(), "must be all stack");
    __ movptr(rax, Address(rbp, reg2offset_in(src.first())));
    NOT_LP64(__ movptr(rbx, Address(rbp, reg2offset_in(src.second()))));
    __ movptr(Address(rsp, reg2offset_out(dst.first())), rax);
    NOT_LP64(__ movptr(Address(rsp, reg2offset_out(dst.second())), rbx));
  } else {
    ShouldNotReachHere();
  }
}

// A double move
static void double_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {

  // The painful thing here is that, as with long_move, a VMRegPair might be
  // unaligned on the stack. Because of the calling convention we know that
  // src is either
  //   1: a single physical register (xmm registers only), or
  //   2: two stack slots (possibly unaligned).
  // dst can only be a pair of stack slots.

  assert(dst.first()->is_stack() && (src.first()->is_XMMRegister() || src.first()->is_stack()), "bad args");

  if (src.first()->is_stack()) {
    // source is all stack
    __ movptr(rax, Address(rbp, reg2offset_in(src.first())));
    NOT_LP64(__ movptr(rbx, Address(rbp, reg2offset_in(src.second()))));
    __ movptr(Address(rsp, reg2offset_out(dst.first())), rax);
    NOT_LP64(__ movptr(Address(rsp, reg2offset_out(dst.second())), rbx));
  } else {
    // reg to stack
    // No worries about stack alignment
    __ movdbl(Address(rsp, reg2offset_out(dst.first())), src.first()->as_XMMRegister());
  }
}


void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  // We always ignore the frame_slots arg and just use the space just below the
  // frame pointer, which by this time is free to use.
  switch (ret_type) {
  case T_FLOAT:
    __ fstp_s(Address(rbp, -wordSize));
    break;
  case T_DOUBLE:
    __ fstp_d(Address(rbp, -2*wordSize));
    break;
  case T_VOID:  break;
  case T_LONG:
    __ movptr(Address(rbp, -wordSize), rax);
    NOT_LP64(__ movptr(Address(rbp, -2*wordSize), rdx));
    break;
  default: {
    __ movptr(Address(rbp, -wordSize), rax);
    }
  }
}

void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  // We always ignore the frame_slots arg and just use the space just below the
  // frame pointer, which by this time is free to use.
  switch (ret_type) {
  case T_FLOAT:
    __ fld_s(Address(rbp, -wordSize));
    break;
  case T_DOUBLE:
    __ fld_d(Address(rbp, -2*wordSize));
    break;
  case T_LONG:
    __ movptr(rax, Address(rbp, -wordSize));
    NOT_LP64(__ movptr(rdx, Address(rbp, -2*wordSize)));
    break;
  case T_VOID:  break;
  default: {
    __ movptr(rax, Address(rbp, -wordSize));
    }
  }
}

// ---------------------------------------------------------------------------
// Generate a native wrapper for a given method.  The method takes arguments
// in the Java compiled code convention, marshals them to the native
// convention (handlizes oops, etc), transitions to native, makes the call,
// returns to java state (possibly blocking), unhandlizes any result and
// returns.
nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
                                                methodHandle method,
                                                int total_in_args,
                                                int comp_args_on_stack,
                                                BasicType *in_sig_bt,
                                                VMRegPair *in_regs,
                                                BasicType ret_type) {

  // An OopMap for lock (and class if static)
  OopMapSet *oop_maps = new OopMapSet();

  // We have received a description of where all the java args are located
  // on entry to the wrapper. We need to convert these args to where
  // the jni function will expect them. To figure out where they go
  // we convert the java signature to a C signature by inserting
  // the hidden arguments as arg[0] and possibly arg[1] (static method)

  int total_c_args = total_in_args + 1;
  if (method->is_static()) {
    total_c_args++;
  }

  BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
  VMRegPair* out_regs   = NEW_RESOURCE_ARRAY(VMRegPair,   total_c_args);

  int argc = 0;
  out_sig_bt[argc++] = T_ADDRESS;
  if (method->is_static()) {
    out_sig_bt[argc++] = T_OBJECT;
  }

  int i;
  for (i = 0; i < total_in_args ; i++ ) {
    out_sig_bt[argc++] = in_sig_bt[i];
  }


  // Now figure out where the args must be stored and how much stack space
  // they require (neglecting out_preserve_stack_slots but counting space for
  // storing the 1st six register arguments). It's weird; see int_stk_helper.
  //
  int out_arg_slots;
  out_arg_slots = c_calling_convention(out_sig_bt, out_regs, total_c_args);

  // Compute framesize for the wrapper.  We need to handlize all oops in
  // registers, a max of 2 on x86.

  // Calculate the total number of stack slots we will need.

  // First count the abi requirement plus all of the outgoing args
  int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;

  // Now the space for the inbound oop handle area

  int oop_handle_offset = stack_slots;
  stack_slots += 2*VMRegImpl::slots_per_word;

  // Now any space we need for handlizing a klass if static method

  int klass_slot_offset = 0;
  int klass_offset = -1;
  int lock_slot_offset = 0;
  bool is_static = false;
  int oop_temp_slot_offset = 0;

  if (method->is_static()) {
    klass_slot_offset = stack_slots;
    stack_slots += VMRegImpl::slots_per_word;
    klass_offset = klass_slot_offset * VMRegImpl::stack_slot_size;
    is_static = true;
  }

  // Plus a lock if needed

  if (method->is_synchronized()) {
    lock_slot_offset = stack_slots;
    stack_slots += VMRegImpl::slots_per_word;
  }

  // Now a place (+2) to save return values or temp during shuffling
  // + 2 for return address (which we own) and saved rbp
  stack_slots += 4;

  // Ok The space we have allocated will look like:
  //
  //
  // FP-> |                     |
  //      |---------------------|
  //      | 2 slots for moves   |
  //      |---------------------|
  //      | lock box (if sync)  |
  //      |---------------------| <- lock_slot_offset  (-lock_slot_rbp_offset)
  //      | klass (if static)   |
  //      |---------------------| <- klass_slot_offset
  //      | oopHandle area      |
  //      |---------------------| <- oop_handle_offset (a max of 2 registers)
  //      | outbound memory     |
  //      | based arguments     |
  //      |                     |
  //      |---------------------|
  //      |                     |
  // SP-> | out_preserved_slots |
  //
  //
  // ****************************************************************************
  // WARNING - on Windows Java Natives use pascal calling convention and pop the
  // arguments off of the stack after the jni call. Before the call we can use
  // instructions that are SP relative. After the jni call we switch to FP
  // relative instructions instead of re-adjusting the stack on windows.
  // ****************************************************************************


  // Now compute actual number of stack words we need rounding to make
  // stack properly aligned.
  stack_slots = round_to(stack_slots, StackAlignmentInSlots);

  int stack_size = stack_slots * VMRegImpl::stack_slot_size;

  intptr_t start = (intptr_t)__ pc();

  // First thing make an ic check to see if we should even be here

  // We are free to use all registers as temps without saving them and
  // restoring them except rbp. rbp is the only callee-save register
  // as far as the interpreter and the compiler(s) are concerned.


  const Register ic_reg = rax;
  const Register receiver = rcx;
  Label hit;
  Label exception_pending;


  __ verify_oop(receiver);
  __ cmpptr(ic_reg, Address(receiver, oopDesc::klass_offset_in_bytes()));
  __ jcc(Assembler::equal, hit);

  __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));

  // verified entry must be aligned for code patching.
  // and the first 5 bytes must be in the same cache line
  // if we align at 8 then we will be sure 5 bytes are in the same line
  __ align(8);

  __ bind(hit);

  int vep_offset = ((intptr_t)__ pc()) - start;

#ifdef COMPILER1
  if (InlineObjectHash && method->intrinsic_id() == vmIntrinsics::_hashCode) {
    // Object.hashCode can pull the hashCode from the header word
    // instead of doing a full VM transition once it's been computed.
    // Since hashCode is usually polymorphic at call sites we can't do
    // this optimization at the call site without a lot of work.
    Label slowCase;
    Register receiver = rcx;
    Register result = rax;
    __ movptr(result, Address(receiver, oopDesc::mark_offset_in_bytes()));

    // check if locked
    __ testptr(result, markOopDesc::unlocked_value);
    __ jcc (Assembler::zero, slowCase);

    if (UseBiasedLocking) {
      // Check if biased and fall through to runtime if so
      __ testptr(result, markOopDesc::biased_lock_bit_in_place);
      __ jcc (Assembler::notZero, slowCase);
    }

    // get hash
    __ andptr(result, markOopDesc::hash_mask_in_place);
    // test if hashCode exists
    __ jcc  (Assembler::zero, slowCase);
    __ shrptr(result, markOopDesc::hash_shift);
    __ ret(0);
    __ bind (slowCase);
  }
#endif // COMPILER1

1344  // The instruction at the verified entry point must be 5 bytes or longer
1345  // because it can be patched on the fly by make_non_entrant. The stack bang
1346  // instruction fits that requirement.
1347
1348  // Generate stack overflow check
1349
1350  if (UseStackBanging) {
1351    __ bang_stack_with_offset(StackShadowPages*os::vm_page_size());
1352  } else {
1353    // need a 5 byte instruction to allow MT safe patching to non-entrant
1354    __ fat_nop();
1355  }
1356
1357  // Generate a new frame for the wrapper.
1358  __ enter();
1359  // -2 because return address is already present and so is saved rbp,
1360  __ subptr(rsp, stack_size - 2*wordSize);
1361
1362  // Frame is now completed as far a size and linkage.
1363
1364  int frame_complete = ((intptr_t)__ pc()) - start;
1365
1366  // Calculate the difference between rsp and rbp. We need to know it
1367  // after the native call because on Windows Java natives will pop
1368  // the arguments and it is painful to do rsp relative addressing
1369  // in a platform independent way. So after the call we switch to
1370  // rbp relative addressing.
1371
1372  int fp_adjustment = stack_size - 2*wordSize;
1373
1374#ifdef COMPILER2
1375  // C2 may leave the stack dirty if not in SSE2+ mode
1376  if (UseSSE >= 2) {
1377    __ verify_FPU(0, "c2i transition should have clean FPU stack");
1378  } else {
1379    __ empty_FPU_stack();
1380  }
1381#endif /* COMPILER2 */
1382
1383  // Compute the rbp offset for any slots used after the jni call
1384
1385  int lock_slot_rbp_offset = (lock_slot_offset*VMRegImpl::stack_slot_size) - fp_adjustment;
1386  int oop_temp_slot_rbp_offset = (oop_temp_slot_offset*VMRegImpl::stack_slot_size) - fp_adjustment;
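  // Worked example of the offset arithmetic (illustrative values): with
  // stack_size = 96 and lock_slot_offset = 8, and given that after the
  // enter()/subptr() prolog rsp == rbp - fp_adjustment:
  //   fp_adjustment        = 96 - 2*4  = 88
  //   lock_slot_rbp_offset = 8*4 - 88  = -56    // i.e. Address(rbp, -56)
  // so an rsp-relative slot at byte offset B is rbp-relative at B - fp_adjustment.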
1387
1388  // We use rdi as the thread pointer because it is callee saved and,
1389  // once loaded, it is usable through the entire wrapper
1390  const Register thread = rdi;
1391
1392  // We use rsi as the oop handle for the receiver/klass
1393  // It is callee save so it survives the call to native
1394
1395  const Register oop_handle_reg = rsi;
1396
1397  __ get_thread(thread);
1398
1399
1400  //
1401  // We immediately shuffle the arguments so that for any vm call we have
1402  // to make from here on out (sync slow path, jvmti, etc.) we will have
1403  // captured the oops from our caller and have a valid oopMap for
1404  // them.
1405
1406  // -----------------
1407  // The Grand Shuffle
1408  //
1409  // Natives require 1 or 2 extra arguments over the normal ones: the JNIEnv*
1410  // and, if static, the class mirror instead of a receiver.  This pretty much
1411  // guarantees that the register layout will not match (x86 doesn't use
1412  // register parms, though AMD64 does).  Since the native abi doesn't use
1413  // register args and the java convention does, we don't have to worry
1414  // about collisions.  All of our moves are reg->stack or stack->stack.
1415  // We ignore the extra arguments during the shuffle and handle them at the
1416  // last moment. The shuffle is described by the two calling convention
1417  // vectors we have in our possession. We simply walk the java vector to
1418  // get the source locations and the c vector to get the destinations.
1419
1420  int c_arg = method->is_static() ? 2 : 1 ;
1421
1422  // Record rsp-based slot for receiver on stack for non-static methods
1423  int receiver_offset = -1;
1424
1425  // This is a trick. We double the stack slots so we can claim
1426  // the oops in the caller's frame. Since we are sure to have
1427  // more args than the caller, doubling is enough to make
1428  // sure we can capture all the incoming oop args from the
1429  // caller.
1430  //
1431  OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
1432
1433  // Mark location of rbp,
1434  // map->set_callee_saved(VMRegImpl::stack2reg( stack_slots - 2), stack_slots * 2, 0, rbp->as_VMReg());
1435
1436  // We know that we only have args in at most two integer registers (rcx, rdx). So rax and rbx
1437  // are free to use as temporaries if we have to do stack to stack moves.
1438  // All inbound args are referenced based on rbp and all outbound args via rsp.
1439
1440  for (i = 0; i < total_in_args ; i++, c_arg++ ) {
1441    switch (in_sig_bt[i]) {
1442      case T_ARRAY:
1443      case T_OBJECT:
1444        object_move(masm, map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg],
1445                    ((i == 0) && (!is_static)),
1446                    &receiver_offset);
1447        break;
1448      case T_VOID:
1449        break;
1450
1451      case T_FLOAT:
1452        float_move(masm, in_regs[i], out_regs[c_arg]);
1453        break;
1454
1455      case T_DOUBLE:
1456        assert( i + 1 < total_in_args &&
1457                in_sig_bt[i + 1] == T_VOID &&
1458                out_sig_bt[c_arg+1] == T_VOID, "bad arg list");
1459        double_move(masm, in_regs[i], out_regs[c_arg]);
1460        break;
1461
1462      case T_LONG :
1463        long_move(masm, in_regs[i], out_regs[c_arg]);
1464        break;
1465
1466      case T_ADDRESS: assert(false, "found T_ADDRESS in java args");
1467
1468      default:
1469        simple_move32(masm, in_regs[i], out_regs[c_arg]);
1470    }
1471  }
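  // Condensed, the loop above is just this walk over the two convention
  // vectors (the 'move' dispatch stands for the typed helpers used above):
#if 0
  for (int i = 0, c_arg = method->is_static() ? 2 : 1;
       i < total_in_args; i++, c_arg++) {
    // in_regs[i]     : where the Java caller put argument i
    // out_regs[c_arg]: where the C ABI wants it (always a stack slot on x86_32)
    move(masm, in_sig_bt[i], in_regs[i], out_regs[c_arg]);
  }
  // out slots 0 and 1 stay reserved for JNIEnv* and (if static) the mirror.
#endif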
1472
1473  // Pre-load a static method's oop into rsi.  Used both by locking code and
1474  // the normal JNI call code.
1475  if (method->is_static()) {
1476
1477    //  load oop into a register
1478    __ movoop(oop_handle_reg, JNIHandles::make_local(Klass::cast(method->method_holder())->java_mirror()));
1479
1480    // Now handlize the static class mirror; it's known not-null.
1481    __ movptr(Address(rsp, klass_offset), oop_handle_reg);
1482    map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));
1483
1484    // Now get the handle
1485    __ lea(oop_handle_reg, Address(rsp, klass_offset));
1486    // store the klass handle as second argument
1487    __ movptr(Address(rsp, wordSize), oop_handle_reg);
1488  }
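  // "Handlizing" means passing the address of an oopMap-covered stack slot
  // that holds the oop, so the GC can relocate the oop under the native
  // code's feet. A rough C model (names illustrative):
#if 0
  oop* slot = (oop*)((char*)rsp + klass_offset);  // slot described by the oopMap
  *slot = mirror;                                 // store the class mirror oop
  jobject handle = (jobject)slot;                 // JNI receives the slot address
#endif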
1489
1490  // Change state to native (we save the return address in the thread, since it might not
1491  // be pushed on the stack when we do a stack traversal). It is enough that the pc()
1492  // points into the right code segment. It does not have to be the correct return pc.
1493  // We use the same pc/oopMap repeatedly when we call out
1494
1495  intptr_t the_pc = (intptr_t) __ pc();
1496  oop_maps->add_gc_map(the_pc - start, map);
1497
1498  __ set_last_Java_frame(thread, rsp, noreg, (address)the_pc);
1499
1500
1501  // We have all of the arguments set up at this point. We must not touch any argument
1502  // registers from here on (if we had to save/restore them there would be no oopMap for them).
1503
1504  {
1505    SkipIfEqual skip_if(masm, &DTraceMethodProbes, 0);
1506    __ movoop(rax, JNIHandles::make_local(method()));
1507    __ call_VM_leaf(
1508         CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
1509         thread, rax);
1510  }
1511
1512  // RedefineClasses() tracing support for obsolete method entry
1513  if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) {
1514    __ movoop(rax, JNIHandles::make_local(method()));
1515    __ call_VM_leaf(
1516         CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
1517         thread, rax);
1518  }
1519
1520  // These are register definitions we need for locking/unlocking
1521  const Register swap_reg = rax;  // Must use rax for cmpxchg instruction
1522  const Register obj_reg  = rcx;  // Will contain the oop
1523  const Register lock_reg = rdx;  // Address of compiler lock object (BasicLock)
1524
1525  Label slow_path_lock;
1526  Label lock_done;
1527
1528  // Lock a synchronized method
1529  if (method->is_synchronized()) {
1530
1531
1532    const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();
1533
1534    // Get the handle (the 2nd argument)
1535    __ movptr(oop_handle_reg, Address(rsp, wordSize));
1536
1537    // Get address of the box
1538
1539    __ lea(lock_reg, Address(rbp, lock_slot_rbp_offset));
1540
1541    // Load the oop from the handle
1542    __ movptr(obj_reg, Address(oop_handle_reg, 0));
1543
1544    if (UseBiasedLocking) {
1545      // Note that oop_handle_reg is trashed during this call
1546      __ biased_locking_enter(lock_reg, obj_reg, swap_reg, oop_handle_reg, false, lock_done, &slow_path_lock);
1547    }
1548
1549    // Load immediate 1 into swap_reg %rax
1550    __ movptr(swap_reg, 1);
1551
1552    // Load (object->mark() | 1) into swap_reg %rax
1553    __ orptr(swap_reg, Address(obj_reg, 0));
1554
1555    // Save (object->mark() | 1) into BasicLock's displaced header
1556    __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
1557
1558    if (os::is_MP()) {
1559      __ lock();
1560    }
1561
1562    // src -> dest iff dest == rax else rax <- dest
1563    // *obj_reg = lock_reg iff *obj_reg == rax else rax = *(obj_reg)
1564    __ cmpxchgptr(lock_reg, Address(obj_reg, 0));
1565    __ jcc(Assembler::equal, lock_done);
1566
1567    // Test if the oopMark is an obvious stack pointer, i.e.,
1568    //  1) (mark & 3) == 0, and
1569    //  2) rsp <= mark < rsp + os::vm_page_size()
1570    // These 3 tests can be done by evaluating the following
1571    // expression: ((mark - rsp) & (3 - os::vm_page_size())),
1572    // assuming both the stack pointer and page size have their
1573    // least significant 2 bits clear.
1574    // NOTE: the oopMark is in swap_reg %rax as the result of cmpxchg
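    // Worked example, assuming a 4096-byte page: 3 - 4096 == -4093 ==
    // 0xfffff003, so the mask keeps the two low bits plus everything at
    // page granularity and above:
    //   mark == rsp + 0x40 (our own frame, aligned): 0x40 & 0xfffff003 == 0
    //     -> recursive lock, a zero displaced header is stored
    //   any other mark: low bits set, or |mark - rsp| >= page size
    //     -> non-zero -> slow path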
1575
1576    __ subptr(swap_reg, rsp);
1577    __ andptr(swap_reg, 3 - os::vm_page_size());
1578
1579    // Save the test result; for the recursive case the result is zero
1580    __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
1581    __ jcc(Assembler::notEqual, slow_path_lock);
1582    // Slow path will re-enter here
1583    __ bind(lock_done);
1584
1585    if (UseBiasedLocking) {
1586      // Re-fetch oop_handle_reg as we trashed it above
1587      __ movptr(oop_handle_reg, Address(rsp, wordSize));
1588    }
1589  }
1590
1591
1592  // Finally just about ready to make the JNI call
1593
1594
1595  // get JNIEnv* which is first argument to native
1596
1597  __ lea(rdx, Address(thread, in_bytes(JavaThread::jni_environment_offset())));
1598  __ movptr(Address(rsp, 0), rdx);
1599
1600  // Now set thread in native
1601  __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native);
1602
1603  __ call(RuntimeAddress(method->native_function()));
1604
1605  // WARNING - on Windows Java Natives use pascal calling convention and pop the
1606  // arguments off the stack. We could just re-adjust the stack pointer here
1607  // and continue to do SP relative addressing but we instead switch to FP
1608  // relative addressing.
1609
1610  // Unpack native results.
1611  switch (ret_type) {
1612  case T_BOOLEAN: __ c2bool(rax);            break;
1613  case T_CHAR   : __ andptr(rax, 0xFFFF);    break;
1614  case T_BYTE   : __ sign_extend_byte (rax); break;
1615  case T_SHORT  : __ sign_extend_short(rax); break;
1616  case T_INT    : /* nothing to do */        break;
1617  case T_DOUBLE :
1618  case T_FLOAT  :
1619    // Result is in st0; we'll save it as needed
1620    break;
1621  case T_ARRAY:                 // Really a handle
1622  case T_OBJECT:                // Really a handle
1623      break; // can't de-handlize until after safepoint check
1624  case T_VOID: break;
1625  case T_LONG: break;
1626  default       : ShouldNotReachHere();
1627  }
1628
1629  // Switch thread to "native transition" state before reading the synchronization state.
1630  // This additional state is necessary because reading and testing the synchronization
1631  // state is not atomic w.r.t. GC, as this scenario demonstrates:
1632  //     Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
1633  //     VM thread changes sync state to synchronizing and suspends threads for GC.
1634  //     Thread A is resumed to finish this native method, but doesn't block here since it
1635  //     didn't see any synchronization in progress, and escapes.
1636  __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native_trans);
1637
1638  if (os::is_MP()) {
1639    if (UseMembar) {
1640      // Force this write out before the read below
1641      __ membar(Assembler::Membar_mask_bits(
1642           Assembler::LoadLoad | Assembler::LoadStore |
1643           Assembler::StoreLoad | Assembler::StoreStore));
1644    } else {
1645      // Write serialization page so VM thread can do a pseudo remote membar.
1646      // We use the current thread pointer to calculate a thread specific
1647      // offset to write to within the page. This minimizes bus traffic
1648      // due to cache line collision.
1649      __ serialize_memory(thread, rcx);
1650    }
1651  }
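  // The VM-side shape of this transition, as a sketch (real calls, but the
  // control flow here only illustrates what the assembly implements):
#if 0
  thread->set_thread_state(_thread_in_native_trans);
  OrderAccess::fence();                       // the membar/serialization above
  if (SafepointSynchronize::do_call_back() ||
      thread->has_special_runtime_exit_condition()) {
    JavaThread::check_special_condition_for_native_trans(thread);  // may block
  }
  thread->set_thread_state(_thread_in_Java);
#endif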
1652
1653  if (AlwaysRestoreFPU) {
1654    // Make sure the control word is correct.
1655    __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
1656  }
1657
1658  // check for safepoint operation in progress and/or pending suspend requests
1659  { Label Continue;
1660
1661    __ cmp32(ExternalAddress((address)SafepointSynchronize::address_of_state()),
1662             SafepointSynchronize::_not_synchronized);
1663
1664    Label L;
1665    __ jcc(Assembler::notEqual, L);
1666    __ cmpl(Address(thread, JavaThread::suspend_flags_offset()), 0);
1667    __ jcc(Assembler::equal, Continue);
1668    __ bind(L);
1669
1670    // Don't use call_VM as it will see a possible pending exception and forward it
1671    // and never return here, preventing us from clearing _last_native_pc down below.
1672    // We can't use call_VM_leaf either as it will check to see if rsi & rdi are
1673    // preserved and correspond to the bcp/locals pointers. So we do a runtime call
1674    // by hand.
1675    //
1676    save_native_result(masm, ret_type, stack_slots);
1677    __ push(thread);
1678    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address,
1679                                            JavaThread::check_special_condition_for_native_trans)));
1680    __ increment(rsp, wordSize);
1681    // Restore any method result value
1682    restore_native_result(masm, ret_type, stack_slots);
1683
1684    __ bind(Continue);
1685  }
1686
1687  // change thread state
1688  __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_Java);
1689
1690  Label reguard;
1691  Label reguard_done;
1692  __ cmpl(Address(thread, JavaThread::stack_guard_state_offset()), JavaThread::stack_guard_yellow_disabled);
1693  __ jcc(Assembler::equal, reguard);
1694
1695  // slow path reguard  re-enters here
1696  __ bind(reguard_done);
1697
1698  // Handle possible exception (will unlock if necessary)
1699
1700  // The native result, if any, is still live.
1701
1702  // Unlock
1703  Label slow_path_unlock;
1704  Label unlock_done;
1705  if (method->is_synchronized()) {
1706
1707    Label done;
1708
1709    // Get locked oop from the handle we passed to jni
1710    __ movptr(obj_reg, Address(oop_handle_reg, 0));
1711
1712    if (UseBiasedLocking) {
1713      __ biased_locking_exit(obj_reg, rbx, done);
1714    }
1715
1716    // Simple recursive lock?
1717
1718    __ cmpptr(Address(rbp, lock_slot_rbp_offset), (int32_t)NULL_WORD);
1719    __ jcc(Assembler::equal, done);
1720
1721    // Must save rax if it is live now because cmpxchg must use it
1722    if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
1723      save_native_result(masm, ret_type, stack_slots);
1724    }
1725
1726    //  get old displaced header
1727    __ movptr(rbx, Address(rbp, lock_slot_rbp_offset));
1728
1729    // get address of the stack lock
1730    __ lea(rax, Address(rbp, lock_slot_rbp_offset));
1731
1732    // Atomic swap old header if oop still contains the stack lock
1733    if (os::is_MP()) {
1734      __ lock();
1735    }
1736
1737    // src -> dest iff dest == rax else rax <- dest
1738    // *obj_reg = rbx iff *obj_reg == rax else rax = *(obj_reg)
1739    __ cmpxchgptr(rbx, Address(obj_reg, 0));
1740    __ jcc(Assembler::notEqual, slow_path_unlock);
1741
1742    // slow path re-enters here
1743    __ bind(unlock_done);
1744    if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
1745      restore_native_result(masm, ret_type, stack_slots);
1746    }
1747
1748    __ bind(done);
1749
1750  }
1751
1752  {
1753    SkipIfEqual skip_if(masm, &DTraceMethodProbes, 0);
1754    // Tell dtrace about this method exit
1755    save_native_result(masm, ret_type, stack_slots);
1756    __ movoop(rax, JNIHandles::make_local(method()));
1757    __ call_VM_leaf(
1758         CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
1759         thread, rax);
1760    restore_native_result(masm, ret_type, stack_slots);
1761  }
1762
1763  // We can finally stop using that last_Java_frame we set up ages ago
1764
1765  __ reset_last_Java_frame(thread, false, true);
1766
1767  // Unpack oop result
1768  if (ret_type == T_OBJECT || ret_type == T_ARRAY) {
1769      Label L;
1770      __ cmpptr(rax, (int32_t)NULL_WORD);
1771      __ jcc(Assembler::equal, L);
1772      __ movptr(rax, Address(rax, 0));
1773      __ bind(L);
1774      __ verify_oop(rax);
1775  }
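  // i.e. in C terms the three instructions above are:
#if 0
  oop result = (handle == NULL) ? (oop)NULL : *(oop*)handle;  // rax holds 'handle'
#endif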
1776
1777  // reset handle block
1778  __ movptr(rcx, Address(thread, JavaThread::active_handles_offset()));
1779
1780  __ movptr(Address(rcx, JNIHandleBlock::top_offset_in_bytes()), NULL_WORD);
1781
1782  // Any exception pending?
1783  __ cmpptr(Address(thread, in_bytes(Thread::pending_exception_offset())), (int32_t)NULL_WORD);
1784  __ jcc(Assembler::notEqual, exception_pending);
1785
1786
1787  // no exception, we're almost done
1788
1789  // check that only result value is on FPU stack
1790  __ verify_FPU(ret_type == T_FLOAT || ret_type == T_DOUBLE ? 1 : 0, "native_wrapper normal exit");
1791
1792  // Fixup floating point results so that the result looks like a return from a compiled method
1793  if (ret_type == T_FLOAT) {
1794    if (UseSSE >= 1) {
1795      // Pop st0 and store as float and reload into xmm register
1796      __ fstp_s(Address(rbp, -4));
1797      __ movflt(xmm0, Address(rbp, -4));
1798    }
1799  } else if (ret_type == T_DOUBLE) {
1800    if (UseSSE >= 2) {
1801      // Pop st0 and store as double and reload into xmm register
1802      __ fstp_d(Address(rbp, -8));
1803      __ movdbl(xmm0, Address(rbp, -8));
1804    }
1805  }
1806
1807  // Return
1808
1809  __ leave();
1810  __ ret(0);
1811
1812  // Unexpected paths are out of line and go here
1813
1814  // Slow path locking & unlocking
1815  if (method->is_synchronized()) {
1816
1817    // BEGIN Slow path lock
1818
1819    __ bind(slow_path_lock);
1820
1821    // last_Java_frame is already set up. No exceptions, so do a vanilla call, not call_VM
1822    // args are (oop obj, BasicLock* lock, JavaThread* thread)
1823    __ push(thread);
1824    __ push(lock_reg);
1825    __ push(obj_reg);
1826    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C)));
1827    __ addptr(rsp, 3*wordSize);
1828
1829#ifdef ASSERT
1830    { Label L;
1831      __ cmpptr(Address(thread, in_bytes(Thread::pending_exception_offset())), (int)NULL_WORD);
1832      __ jcc(Assembler::equal, L);
1833      __ stop("no pending exception allowed on exit from monitorenter");
1834      __ bind(L);
1835    }
1836#endif
1837    __ jmp(lock_done);
1838
1839    // END Slow path lock
1840
1841    // BEGIN Slow path unlock
1842    __ bind(slow_path_unlock);
1843
1844    // Slow path unlock
1845
1846    if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
1847      save_native_result(masm, ret_type, stack_slots);
1848    }
1849    // Save pending exception around call to VM (which contains an EXCEPTION_MARK)
1850
1851    __ pushptr(Address(thread, in_bytes(Thread::pending_exception_offset())));
1852    __ movptr(Address(thread, in_bytes(Thread::pending_exception_offset())), NULL_WORD);
1853
1854
1855    // get the address of the stack lock
1856    // +wordSize because of the push above
1857    __ lea(rax, Address(rbp, lock_slot_rbp_offset));
1858    __ push(rax);
1859
1860    __ push(obj_reg);
1861    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C)));
1862    __ addptr(rsp, 2*wordSize);
1863#ifdef ASSERT
1864    {
1865      Label L;
1866      __ cmpptr(Address(thread, in_bytes(Thread::pending_exception_offset())), (int32_t)NULL_WORD);
1867      __ jcc(Assembler::equal, L);
1868      __ stop("no pending exception allowed on exit complete_monitor_unlocking_C");
1869      __ bind(L);
1870    }
1871#endif /* ASSERT */
1872
1873    __ popptr(Address(thread, in_bytes(Thread::pending_exception_offset())));
1874
1875    if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
1876      restore_native_result(masm, ret_type, stack_slots);
1877    }
1878    __ jmp(unlock_done);
1879    // END Slow path unlock
1880
1881  }
1882
1883  // SLOW PATH Reguard the stack if needed
1884
1885  __ bind(reguard);
1886  save_native_result(masm, ret_type, stack_slots);
1887  {
1888    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
1889  }
1890  restore_native_result(masm, ret_type, stack_slots);
1891  __ jmp(reguard_done);
1892
1893
1894  // BEGIN EXCEPTION PROCESSING
1895
1896  // Forward the exception
1897  __ bind(exception_pending);
1898
1899  // remove possible return value from FPU register stack
1900  __ empty_FPU_stack();
1901
1902  // pop our frame
1903  __ leave();
1904  // and forward the exception
1905  __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
1906
1907  __ flush();
1908
1909  nmethod *nm = nmethod::new_native_nmethod(method,
1910                                            masm->code(),
1911                                            vep_offset,
1912                                            frame_complete,
1913                                            stack_slots / VMRegImpl::slots_per_word,
1914                                            (is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
1915                                            in_ByteSize(lock_slot_offset*VMRegImpl::stack_slot_size),
1916                                            oop_maps);
1917  return nm;
1918
1919}
1920
1921#ifdef HAVE_DTRACE_H
1922// ---------------------------------------------------------------------------
1923// Generate a dtrace nmethod for a given signature.  The method takes arguments
1924// in the Java compiled code convention, marshals them to the native
1925// abi and then leaves nops at the position you would expect to call a native
1926// function. When the probe is enabled the nops are replaced with a trap
1927// instruction that dtrace inserts and the trace will cause a notification
1928// to dtrace.
1929//
1930// The probes are only able to take primitive types and java/lang/String as
1931// arguments.  No other java types are allowed. Strings are converted to utf8
1932// strings so that from dtrace's point of view java strings are converted to C
1933// strings. There is an arbitrary fixed limit on the total space that a method
1934// can use for converting the strings (256 chars per string in the signature).
1935// So any java string larger than this is truncated.
1936
1937nmethod *SharedRuntime::generate_dtrace_nmethod(
1938    MacroAssembler *masm, methodHandle method) {
1939
1940  // generate_dtrace_nmethod is guarded by a mutex so we are sure to
1941  // be single threaded in this method.
1942  assert(AdapterHandlerLibrary_lock->owned_by_self(), "must be");
1943
1944  // Fill in the signature array, for the calling-convention call.
1945  int total_args_passed = method->size_of_parameters();
1946
1947  BasicType* in_sig_bt  = NEW_RESOURCE_ARRAY(BasicType, total_args_passed);
1948  VMRegPair  *in_regs   = NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed);
1949
1950  // The signature we are going to use for the trap that dtrace will see:
1951  // java/lang/String is converted, "this" is dropped, and any other object
1952  // is converted to NULL.  (A one-slot java/lang/Long object reference
1953  // is converted to a two-slot long, which is why we double the allocation).
1954  BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_args_passed * 2);
1955  VMRegPair* out_regs   = NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed * 2);
1956
1957  int i=0;
1958  int total_strings = 0;
1959  int first_arg_to_pass = 0;
1960  int total_c_args = 0;
1961
1962  if( !method->is_static() ) {  // Pass in receiver first
1963    in_sig_bt[i++] = T_OBJECT;
1964    first_arg_to_pass = 1;
1965  }
1966
1967  // We need to convert the java args to where a native (non-jni) function
1968  // would expect them. To figure out where they go we convert the java
1969  // signature to a C signature.
1970
1971  SignatureStream ss(method->signature());
1972  for ( ; !ss.at_return_type(); ss.next()) {
1973    BasicType bt = ss.type();
1974    in_sig_bt[i++] = bt;  // Collect remaining bits of signature
1975    out_sig_bt[total_c_args++] = bt;
1976    if( bt == T_OBJECT) {
1977      symbolOop s = ss.as_symbol_or_null();
1978      if (s == vmSymbols::java_lang_String()) {
1979        total_strings++;
1980        out_sig_bt[total_c_args-1] = T_ADDRESS;
1981      } else if (s == vmSymbols::java_lang_Boolean() ||
1982                 s == vmSymbols::java_lang_Character() ||
1983                 s == vmSymbols::java_lang_Byte() ||
1984                 s == vmSymbols::java_lang_Short() ||
1985                 s == vmSymbols::java_lang_Integer() ||
1986                 s == vmSymbols::java_lang_Float()) {
1987        out_sig_bt[total_c_args-1] = T_INT;
1988      } else if (s == vmSymbols::java_lang_Long() ||
1989                 s == vmSymbols::java_lang_Double()) {
1990        out_sig_bt[total_c_args-1] = T_LONG;
1991        out_sig_bt[total_c_args++] = T_VOID;
1992      }
1993    } else if ( bt == T_LONG || bt == T_DOUBLE ) {
1994      in_sig_bt[i++] = T_VOID;   // Longs & doubles take 2 Java slots
1995      out_sig_bt[total_c_args++] = T_VOID;
1996    }
1997  }
1998
1999  assert(i==total_args_passed, "validly parsed signature");
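  // Example of the conversion, assuming an instance method
  //   void m(String s, int i, Long l):
  //   in_sig_bt  = { T_OBJECT /*this*/, T_OBJECT, T_INT, T_OBJECT }
  //   out_sig_bt = {                    T_ADDRESS, T_INT, T_LONG, T_VOID }
  // ("this" is dropped, the String becomes a utf8 pointer, and the boxed
  // Long widens to a two-slot long).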
2000
2001  // Now get the compiled-Java layout as input arguments
2002  int comp_args_on_stack;
2003  comp_args_on_stack = SharedRuntime::java_calling_convention(
2004      in_sig_bt, in_regs, total_args_passed, false);
2005
2006  // Now figure out where the args must be stored and how much stack space
2007  // they require (neglecting out_preserve_stack_slots).
2008
2009  int out_arg_slots;
2010  out_arg_slots = c_calling_convention(out_sig_bt, out_regs, total_c_args);
2011
2012  // Calculate the total number of stack slots we will need.
2013
2014  // First count the abi requirement plus all of the outgoing args
2015  int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;
2016
2017  // Now space for the string(s) we must convert
2018
2019  int* string_locs   = NEW_RESOURCE_ARRAY(int, total_strings + 1);
2020  for (i = 0; i < total_strings ; i++) {
2021    string_locs[i] = stack_slots;
2022    stack_slots += max_dtrace_string_size / VMRegImpl::stack_slot_size;
2023  }
2024
2025  // + 2 for return address (which we own) and saved rbp
2026
2027  stack_slots += 2;
2028
2029  // OK, the space we have allocated will look like:
2030  //
2031  //
2032  // FP-> |                     |
2033  //      |---------------------|
2034  //      | string[n]           |
2035  //      |---------------------| <- string_locs[n]
2036  //      | string[n-1]         |
2037  //      |---------------------| <- string_locs[n-1]
2038  //      | ...                 |
2039  //      | ...                 |
2040  //      |---------------------| <- string_locs[1]
2041  //      | string[0]           |
2042  //      |---------------------| <- string_locs[0]
2043  //      | outbound memory     |
2044  //      | based arguments     |
2045  //      |                     |
2046  //      |---------------------|
2047  //      |                     |
2048  // SP-> | out_preserved_slots |
2049  //
2050  //
2051
2052  // Now compute the actual number of stack words we need, rounding up to keep
2053  // the stack properly aligned.
2054  stack_slots = round_to(stack_slots, 2 * VMRegImpl::slots_per_word);
2055
2056  int stack_size = stack_slots * VMRegImpl::stack_slot_size;
2057
2058  intptr_t start = (intptr_t)__ pc();
2059
2060  // First thing make an ic check to see if we should even be here
2061
2062  // We are free to use all registers as temps without saving them and
2063  // restoring them except rbp. rbp is the only callee save register
2064  // as far as the interpreter and the compiler(s) are concerned.
2065
2066  const Register ic_reg = rax;
2067  const Register receiver = rcx;
2068  Label hit;
2069  Label exception_pending;
2070
2071
2072  __ verify_oop(receiver);
2073  __ cmpl(ic_reg, Address(receiver, oopDesc::klass_offset_in_bytes()));
2074  __ jcc(Assembler::equal, hit);
2075
2076  __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
2077
2078  // The verified entry must be aligned for code patching, and the first
2079  // 5 bytes must be in the same cache line. Aligning at 8 guarantees
2080  // that the 5 bytes fall in the same line.
2081  __ align(8);
2082
2083  __ bind(hit);
2084
2085  int vep_offset = ((intptr_t)__ pc()) - start;
2086
2087
2088  // The instruction at the verified entry point must be 5 bytes or longer
2089  // because it can be patched on the fly by make_non_entrant. The stack bang
2090  // instruction fits that requirement.
2091
2092  // Generate stack overflow check
2093
2094
2095  if (UseStackBanging) {
2096    if (stack_size <= StackShadowPages*os::vm_page_size()) {
2097      __ bang_stack_with_offset(StackShadowPages*os::vm_page_size());
2098    } else {
2099      __ movl(rax, stack_size);
2100      __ bang_stack_size(rax, rbx);
2101    }
2102  } else {
2103    // need a 5 byte instruction to allow MT safe patching to non-entrant
2104    __ fat_nop();
2105  }
2106
2107  assert(((int)__ pc() - start - vep_offset) >= 5,
2108         "valid size for make_non_entrant");
2109
2110  // Generate a new frame for the wrapper.
2111  __ enter();
2112
2113  // -2 because return address is already present and so is the saved rbp
2114  if (stack_size - 2*wordSize != 0) {
2115    __ subl(rsp, stack_size - 2*wordSize);
2116  }
2117
2118  // Frame is now completed as far as size and linkage.
2119
2120  int frame_complete = ((intptr_t)__ pc()) - start;
2121
2122  // The first thing we do is store all the args as if we are making the call.
2123  // Since the C calling convention is stack based that ensures that
2124  // all the Java register args are stored before we need to convert any
2125  // string we might have.
2126
2127  int sid = 0;
2128  int c_arg, j_arg;
2129  int string_reg = 0;
2130
2131  for (j_arg = first_arg_to_pass, c_arg = 0 ;
2132       j_arg < total_args_passed ; j_arg++, c_arg++ ) {
2133
2134    VMRegPair src = in_regs[j_arg];
2135    VMRegPair dst = out_regs[c_arg];
2136    assert(dst.first()->is_stack() || in_sig_bt[j_arg] == T_VOID,
2137           "stack based abi assumed");
2138
2139    switch (in_sig_bt[j_arg]) {
2140
2141      case T_ARRAY:
2142      case T_OBJECT:
2143        if (out_sig_bt[c_arg] == T_ADDRESS) {
2144          // Any register based arg for a java string after the first
2145          // will be destroyed by the call to get_utf so we store
2146          // the original value in the location the utf string address
2147          // will eventually be stored.
2148          if (src.first()->is_reg()) {
2149            if (string_reg++ != 0) {
2150              simple_move32(masm, src, dst);
2151            }
2152          }
2153        } else if (out_sig_bt[c_arg] == T_INT || out_sig_bt[c_arg] == T_LONG) {
2154          // need to unbox a boxed primitive value
2155          Register in_reg = rax;
2156          if ( src.first()->is_reg() ) {
2157            in_reg = src.first()->as_Register();
2158          } else {
2159            simple_move32(masm, src, in_reg->as_VMReg());
2160          }
2161          Label skipUnbox;
2162          __ movl(Address(rsp, reg2offset_out(dst.first())), NULL_WORD);
2163          if ( out_sig_bt[c_arg] == T_LONG ) {
2164            __ movl(Address(rsp, reg2offset_out(dst.second())), NULL_WORD);
2165          }
2166          __ testl(in_reg, in_reg);
2167          __ jcc(Assembler::zero, skipUnbox);
2168          assert(dst.first()->is_stack() &&
2169                 (!dst.second()->is_valid() || dst.second()->is_stack()),
2170                 "value(s) must go into stack slots");
2171
2172          BasicType bt = out_sig_bt[c_arg];
2173          int box_offset = java_lang_boxing_object::value_offset_in_bytes(bt);
2174          if ( bt == T_LONG ) {
2175            __ movl(rbx, Address(in_reg,
2176                                 box_offset + VMRegImpl::stack_slot_size));
2177            __ movl(Address(rsp, reg2offset_out(dst.second())), rbx);
2178          }
2179          __ movl(in_reg,  Address(in_reg, box_offset));
2180          __ movl(Address(rsp, reg2offset_out(dst.first())), in_reg);
2181          __ bind(skipUnbox);
2182        } else {
2183          // Convert the arg to NULL
2184          __ movl(Address(rsp, reg2offset_out(dst.first())), NULL_WORD);
2185        }
2186        if (out_sig_bt[c_arg] == T_LONG) {
2187          assert(out_sig_bt[c_arg+1] == T_VOID, "must be");
2188          ++c_arg; // Move over the T_VOID to keep the loop indices in sync
2189        }
2190        break;
2191
2192      case T_VOID:
2193        break;
2194
2195      case T_FLOAT:
2196        float_move(masm, src, dst);
2197        break;
2198
2199      case T_DOUBLE:
2200        assert( j_arg + 1 < total_args_passed &&
2201                in_sig_bt[j_arg + 1] == T_VOID, "bad arg list");
2202        double_move(masm, src, dst);
2203        break;
2204
2205      case T_LONG :
2206        long_move(masm, src, dst);
2207        break;
2208
2209      case T_ADDRESS: assert(false, "found T_ADDRESS in java args");
2210
2211      default:
2212        simple_move32(masm, src, dst);
2213    }
2214  }
2215
2216  // Now we must convert any string we have to utf8
2217  //
2218
2219  for (sid = 0, j_arg = first_arg_to_pass, c_arg = 0 ;
2220       sid < total_strings ; j_arg++, c_arg++ ) {
2221
2222    if (out_sig_bt[c_arg] == T_ADDRESS) {
2223
2224      Address utf8_addr = Address(
2225          rsp, string_locs[sid++] * VMRegImpl::stack_slot_size);
2226      __ leal(rax, utf8_addr);
2227
2228      // The first string we find might still be in the original java arg
2229      // register
2230      VMReg orig_loc = in_regs[j_arg].first();
2231      Register string_oop;
2232
2233      // This is where the argument will eventually reside
2234      Address dest = Address(rsp, reg2offset_out(out_regs[c_arg].first()));
2235
2236      if (sid == 1 && orig_loc->is_reg()) {
2237        string_oop = orig_loc->as_Register();
2238        assert(string_oop != rax, "smashed arg");
2239      } else {
2240
2241        if (orig_loc->is_reg()) {
2242          // Get the copy of the java.lang.String (jls) object
2243          __ movl(rcx, dest);
2244        } else {
2245          // arg is still in the original location
2246          __ movl(rcx, Address(rbp, reg2offset_in(orig_loc)));
2247        }
2248        string_oop = rcx;
2249
2250      }
2251      Label nullString;
2252      __ movl(dest, NULL_WORD);
2253      __ testl(string_oop, string_oop);
2254      __ jcc(Assembler::zero, nullString);
2255
2256      // Now we can store the address of the utf string as the argument
2257      __ movl(dest, rax);
2258
2259      // And do the conversion
2260      __ call_VM_leaf(CAST_FROM_FN_PTR(
2261             address, SharedRuntime::get_utf), string_oop, rax);
2262      __ bind(nullString);
2263    }
2264
2265    if (in_sig_bt[j_arg] == T_OBJECT && out_sig_bt[c_arg] == T_LONG) {
2266      assert(out_sig_bt[c_arg+1] == T_VOID, "must be");
2267      ++c_arg; // Move over the T_VOID to keep the loop indices in sync
2268    }
2269  }
2270
2271
2272  // OK, now we are done. We need to place the nop that dtrace wants so it can
2273  // patch in the trap.
2274
2275  int patch_offset = ((intptr_t)__ pc()) - start;
2276
2277  __ nop();
2278
2279
2280  // Return
2281
2282  __ leave();
2283  __ ret(0);
2284
2285  __ flush();
2286
2287  nmethod *nm = nmethod::new_dtrace_nmethod(
2288      method, masm->code(), vep_offset, patch_offset, frame_complete,
2289      stack_slots / VMRegImpl::slots_per_word);
2290  return nm;
2291
2292}
2293
2294#endif // HAVE_DTRACE_H
2295
2296// This function returns the adjustment (in number of words) to a c2i adapter
2297// activation for use during deoptimization.
2298int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals ) {
2299  return (callee_locals - callee_parameters) * Interpreter::stackElementWords;
2300}
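// For example, a callee with 2 parameters and 5 locals needs room for
// three more stack elements when its activation is rebuilt:
#if 0
int adjust = Deoptimization::last_frame_adjust(2, 5);  // 3 * stackElementWords
#endif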
2301
2302
2303uint SharedRuntime::out_preserve_stack_slots() {
2304  return 0;
2305}
2306
2307
2308//------------------------------generate_deopt_blob----------------------------
2309void SharedRuntime::generate_deopt_blob() {
2310  // allocate space for the code
2311  ResourceMark rm;
2312  // setup code generation tools
2313  CodeBuffer   buffer("deopt_blob", 1024, 1024);
2314  MacroAssembler* masm = new MacroAssembler(&buffer);
2315  int frame_size_in_words;
2316  OopMap* map = NULL;
2317  // Account for the extra args we place on the stack
2318  // by the time we call fetch_unroll_info
2319  const int additional_words = 2; // deopt kind, thread
2320
2321  OopMapSet *oop_maps = new OopMapSet();
2322
2323  // -------------
2324  // This code enters when returning to a de-optimized nmethod.  A return
2325  // address has been pushed on the stack, and return values are in
2326  // registers.
2327  // If we are doing a normal deopt then we were called from the patched
2328  // nmethod from the point we returned to the nmethod. So the return
2329  // address on the stack is wrong by NativeCall::instruction_size
2330  // We will adjust the value to it looks like we have the original return
2331  // address on the stack (like when we eagerly deoptimized).
2332  // In the case of an exception pending with deoptimized then we enter
2333  // with a return address on the stack that points after the call we patched
2334  // into the exception handler. We have the following register state:
2335  //    rax: exception
2336  //    rbx: exception handler
2337  //    rdx: throwing pc
2338  // So in this case we simply jam rdx into the useless return address and
2339  // the stack looks just like we want.
2340  //
2341  // At this point we need to de-opt.  We save the argument return
2342  // registers.  We call the first C routine, fetch_unroll_info().  This
2343  // routine captures the return values and returns a structure which
2344  // describes the current frame size and the sizes of all replacement frames.
2345  // The current frame is compiled code and may contain many inlined
2346  // functions, each with their own JVM state.  We pop the current frame, then
2347  // push all the new frames.  Then we call the C routine unpack_frames() to
2348  // populate these frames.  Finally unpack_frames() returns us the new target
2349  // address.  Notice that callee-save registers are BLOWN here; they have
2350  // already been captured in the vframeArray at the time the return PC was
2351  // patched.
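  // Condensed control flow of the blob generated below (helper names in
  // this sketch are illustrative; the real work is the inlined assembly):
#if 0
  save_live_registers();                               // return values + oops
  UnrollBlock* info = Deoptimization::fetch_unroll_info(thread);
  pop_deoptimized_frame(info);
  for (int i = 0; i < info->number_of_frames(); i++) {
    push_skeletal_interpreter_frame(info->frame_sizes()[i],
                                    info->frame_pcs()[i]);
  }
  Deoptimization::unpack_frames(thread, unpack_kind);  // fill in the skeletons
  return_to_interpreter();
#endif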
2352  address start = __ pc();
2353  Label cont;
2354
2355  // Prolog for non exception case!
2356
2357  // Save everything in sight.
2358
2359  map = RegisterSaver::save_live_registers(masm, additional_words, &frame_size_in_words, false);
2360  // Normal deoptimization
2361  __ push(Deoptimization::Unpack_deopt);
2362  __ jmp(cont);
2363
2364  int reexecute_offset = __ pc() - start;
2365
2366  // Reexecute case
2367  // return address is the pc that describes what bci to re-execute at
2368
2369  // No need to update map as each call to save_live_registers will produce identical oopmap
2370  (void) RegisterSaver::save_live_registers(masm, additional_words, &frame_size_in_words, false);
2371
2372  __ push(Deoptimization::Unpack_reexecute);
2373  __ jmp(cont);
2374
2375  int exception_offset = __ pc() - start;
2376
2377  // Prolog for exception case
2378
2379  // all registers are dead at this entry point, except for rax, and
2380  // rdx which contain the exception oop and exception pc
2381  // respectively.  Set them in TLS and fall thru to the
2382  // unpack_with_exception_in_tls entry point.
2383
2384  __ get_thread(rdi);
2385  __ movptr(Address(rdi, JavaThread::exception_pc_offset()), rdx);
2386  __ movptr(Address(rdi, JavaThread::exception_oop_offset()), rax);
2387
2388  int exception_in_tls_offset = __ pc() - start;
2389
2390  // new implementation because exception oop is now passed in JavaThread
2391
2392  // Prolog for exception case
2393  // All registers must be preserved because they might be used by LinearScan
2394  // Exception oop and throwing PC are passed in JavaThread
2395  // tos: stack at point of call to method that threw the exception (i.e. only
2396  // args are on the stack, no return address)
2397
2398  // make room on stack for the return address
2399  // It will be patched later with the throwing pc. The correct value is not
2400  // available now because loading it from memory would destroy registers.
2401  __ push(0);
2402
2403  // Save everything in sight.
2404
2405  // No need to update map as each call to save_live_registers will produce identical oopmap
2406  (void) RegisterSaver::save_live_registers(masm, additional_words, &frame_size_in_words, false);
2407
2408  // Now it is safe to overwrite any register
2409
2410  // store the correct deoptimization type
2411  __ push(Deoptimization::Unpack_exception);
2412
2413  // load throwing pc from JavaThread and patch it as the return address
2414  // of the current frame. Then clear the field in JavaThread
2415  __ get_thread(rdi);
2416  __ movptr(rdx, Address(rdi, JavaThread::exception_pc_offset()));
2417  __ movptr(Address(rbp, wordSize), rdx);
2418  __ movptr(Address(rdi, JavaThread::exception_pc_offset()), NULL_WORD);
2419
2420#ifdef ASSERT
2421  // verify that there is really an exception oop in JavaThread
2422  __ movptr(rax, Address(rdi, JavaThread::exception_oop_offset()));
2423  __ verify_oop(rax);
2424
2425  // verify that there is no pending exception
2426  Label no_pending_exception;
2427  __ movptr(rax, Address(rdi, Thread::pending_exception_offset()));
2428  __ testptr(rax, rax);
2429  __ jcc(Assembler::zero, no_pending_exception);
2430  __ stop("must not have pending exception here");
2431  __ bind(no_pending_exception);
2432#endif
2433
2434  __ bind(cont);
2435
2436  // Compiled code leaves the floating point stack dirty, empty it.
2437  __ empty_FPU_stack();
2438
2439
2440  // Call C code.  Need thread and this frame, but NOT official VM entry
2441  // crud.  We cannot block on this call, no GC can happen.
2442  __ get_thread(rcx);
2443  __ push(rcx);
2444  // fetch_unroll_info needs to call last_java_frame()
2445  __ set_last_Java_frame(rcx, noreg, noreg, NULL);
2446
2447  __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info)));
2448
2449  // Need to have an oopmap that tells fetch_unroll_info where to
2450  // find any register it might need.
2451
2452  oop_maps->add_gc_map( __ pc()-start, map);
2453
2454  // Discard arg to fetch_unroll_info
2455  __ pop(rcx);
2456
2457  __ get_thread(rcx);
2458  __ reset_last_Java_frame(rcx, false, false);
2459
2460  // Load UnrollBlock into EDI
2461  __ mov(rdi, rax);
2462
2463  // Move the unpack kind to a safe place in the UnrollBlock because
2464  // we are very short of registers
2465
2466  Address unpack_kind(rdi, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes());
2467  // retrieve the deopt kind from where we left it.
2468  __ pop(rax);
2469  __ movl(unpack_kind, rax);                      // save the unpack_kind value
2470
2471  Label noException;
2472  __ cmpl(rax, Deoptimization::Unpack_exception);   // Was exception pending?
2473  __ jcc(Assembler::notEqual, noException);
2474  __ movptr(rax, Address(rcx, JavaThread::exception_oop_offset()));
2475  __ movptr(rdx, Address(rcx, JavaThread::exception_pc_offset()));
2476  __ movptr(Address(rcx, JavaThread::exception_oop_offset()), NULL_WORD);
2477  __ movptr(Address(rcx, JavaThread::exception_pc_offset()), NULL_WORD);
2478
2479  __ verify_oop(rax);
2480
2481  // Overwrite the result registers with the exception results.
2482  __ movptr(Address(rsp, RegisterSaver::raxOffset()*wordSize), rax);
2483  __ movptr(Address(rsp, RegisterSaver::rdxOffset()*wordSize), rdx);
2484
2485  __ bind(noException);
2486
2487  // Stack is back to only having register save data on the stack.
2488  // Now restore the result registers. Everything else is either dead or captured
2489  // in the vframeArray.
2490
2491  RegisterSaver::restore_result_registers(masm);
2492
2493  // A non-standard control word may be leaked out through a safepoint blob, and we can
2494  // deopt at a poll point with the non-standard control word. So we should make
2495  // sure the control word is correct after restore_result_registers.
2496  __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
2497
2498  // All of the register save area has been popped off the stack. Only the
2499  // return address remains.
2500
2501  // Pop all the frames we must move/replace.
2502  //
2503  // Frame picture (youngest to oldest)
2504  // 1: self-frame (no frame link)
2505  // 2: deopting frame  (no frame link)
2506  // 3: caller of deopting frame (could be compiled/interpreted).
2507  //
2508  // Note: by leaving the return address of self-frame on the stack
2509  // and using the size of frame 2 to adjust the stack,
2510  // the return address to frame 3 will still be on the stack when we are done.
2511
2512  // Pop deoptimized frame
2513  __ addptr(rsp, Address(rdi,Deoptimization::UnrollBlock::size_of_deoptimized_frame_offset_in_bytes()));
2514
2515  // sp should be pointing at the return address to the caller (3)
2516
2517  // Stack bang to make sure there's enough room for these interpreter frames.
2518  if (UseStackBanging) {
2519    __ movl(rbx, Address(rdi ,Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes()));
2520    __ bang_stack_size(rbx, rcx);
2521  }
2522
2523  // Load array of frame pcs into ECX
2524  __ movptr(rcx,Address(rdi,Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));
2525
2526  __ pop(rsi); // trash the old pc
2527
2528  // Load array of frame sizes into ESI
2529  __ movptr(rsi,Address(rdi,Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes()));
2530
2531  Address counter(rdi, Deoptimization::UnrollBlock::counter_temp_offset_in_bytes());
2532
2533  __ movl(rbx, Address(rdi, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes()));
2534  __ movl(counter, rbx);
2535
2536  // Pick up the initial fp we should save
2537  __ movptr(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_fp_offset_in_bytes()));
2538
2539  // Now adjust the caller's stack to make up for the extra locals,
2540  // but record the original sp so that we can save it in the skeletal
2541  // interpreter frame; the stack walking of interpreter_sender will then
2542  // get the unextended sp value and not the "real" sp value.
2543
2544  Address sp_temp(rdi, Deoptimization::UnrollBlock::sender_sp_temp_offset_in_bytes());
2545  __ movptr(sp_temp, rsp);
2546  __ movl2ptr(rbx, Address(rdi, Deoptimization::UnrollBlock::caller_adjustment_offset_in_bytes()));
2547  __ subptr(rsp, rbx);
2548
2549  // Push interpreter frames in a loop
2550  Label loop;
2551  __ bind(loop);
2552  __ movptr(rbx, Address(rsi, 0));      // Load frame size
2553#ifdef CC_INTERP
2554  __ subptr(rbx, 4*wordSize);           // we'll push pc and ebp by hand and
2555#ifdef ASSERT
2556  __ push(0xDEADDEAD);                  // Make a recognizable pattern
2557  __ push(0xDEADDEAD);
2558#else /* ASSERT */
2559  __ subptr(rsp, 2*wordSize);           // skip the "static long no_param"
2560#endif /* ASSERT */
2561#else /* CC_INTERP */
2562  __ subptr(rbx, 2*wordSize);           // we'll push pc and rbp, by hand
2563#endif /* CC_INTERP */
2564  __ pushptr(Address(rcx, 0));          // save return address
2565  __ enter();                           // save old & set new rbp,
2566  __ subptr(rsp, rbx);                  // Prolog!
2567  __ movptr(rbx, sp_temp);              // sender's sp
2568#ifdef CC_INTERP
2569  __ movptr(Address(rbp,
2570                  -(sizeof(BytecodeInterpreter)) + in_bytes(byte_offset_of(BytecodeInterpreter, _sender_sp))),
2571          rbx); // Make it walkable
2572#else /* CC_INTERP */
2573  // This value is corrected by layout_activation_impl
2574  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
2575  __ movptr(Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize), rbx); // Make it walkable
2576#endif /* CC_INTERP */
2577  __ movptr(sp_temp, rsp);              // pass to next frame
2578  __ addptr(rsi, wordSize);             // Bump array pointer (sizes)
2579  __ addptr(rcx, wordSize);             // Bump array pointer (pcs)
2580  __ decrementl(counter);             // decrement counter
2581  __ jcc(Assembler::notZero, loop);
2582  __ pushptr(Address(rcx, 0));          // save final return address
2583
2584  // Re-push self-frame
2585  __ enter();                           // save old & set new rbp,
2586
2587  // Return address and rbp are in place
2588  // We'll push additional args later. Just allocate a full sized
2589  // register save area
2590  __ subptr(rsp, (frame_size_in_words-additional_words - 2) * wordSize);
2591
2592  // Restore frame locals after moving the frame
2593  __ movptr(Address(rsp, RegisterSaver::raxOffset()*wordSize), rax);
2594  __ movptr(Address(rsp, RegisterSaver::rdxOffset()*wordSize), rdx);
2595  __ fstp_d(Address(rsp, RegisterSaver::fpResultOffset()*wordSize));   // Pop float stack and store in local
2596  if( UseSSE>=2 ) __ movdbl(Address(rsp, RegisterSaver::xmm0Offset()*wordSize), xmm0);
2597  if( UseSSE==1 ) __ movflt(Address(rsp, RegisterSaver::xmm0Offset()*wordSize), xmm0);
2598
2599  // Set up the args to unpack_frame
2600
2601  __ pushl(unpack_kind);                     // push the unpack_kind value
2602  __ get_thread(rcx);
2603  __ push(rcx);
2604
2605  // set last_Java_sp, last_Java_fp
2606  __ set_last_Java_frame(rcx, noreg, rbp, NULL);
2607
2608  // Call C code.  Need thread but NOT official VM entry
2609  // crud.  We cannot block on this call, no GC can happen.  Call should
2610  // restore return values to their stack-slots with the new SP.
2611  __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames)));
2612  // Set an oopmap for the call site
2613  oop_maps->add_gc_map( __ pc()-start, new OopMap( frame_size_in_words, 0 ));
2614
2615  // rax contains the return result type
2616  __ push(rax);
2617
2618  __ get_thread(rcx);
2619  __ reset_last_Java_frame(rcx, false, false);
2620
2621  // Collect return values
2622  __ movptr(rax,Address(rsp, (RegisterSaver::raxOffset() + additional_words + 1)*wordSize));
2623  __ movptr(rdx,Address(rsp, (RegisterSaver::rdxOffset() + additional_words + 1)*wordSize));
2624
2625  // Clear floating point stack before returning to interpreter
2626  __ empty_FPU_stack();
2627
2628  // Check if we should push the float or double return value.
2629  Label results_done, yes_double_value;
2630  __ cmpl(Address(rsp, 0), T_DOUBLE);
2631  __ jcc (Assembler::zero, yes_double_value);
2632  __ cmpl(Address(rsp, 0), T_FLOAT);
2633  __ jcc (Assembler::notZero, results_done);
2634
2635  // return float value as expected by interpreter
2636  if( UseSSE>=1 ) __ movflt(xmm0, Address(rsp, (RegisterSaver::xmm0Offset() + additional_words + 1)*wordSize));
2637  else            __ fld_d(Address(rsp, (RegisterSaver::fpResultOffset() + additional_words + 1)*wordSize));
2638  __ jmp(results_done);
2639
2640  // return double value as expected by interpreter
2641  __ bind(yes_double_value);
2642  if( UseSSE>=2 ) __ movdbl(xmm0, Address(rsp, (RegisterSaver::xmm0Offset() + additional_words + 1)*wordSize));
2643  else            __ fld_d(Address(rsp, (RegisterSaver::fpResultOffset() + additional_words + 1)*wordSize));
2644
2645  __ bind(results_done);
2646
2647  // Pop self-frame.
2648  __ leave();                              // Epilog!
2649
2650  // Jump to interpreter
2651  __ ret(0);
2652
2653  // -------------
2654  // make sure all code is generated
2655  masm->flush();
2656
2657  _deopt_blob = DeoptimizationBlob::create( &buffer, oop_maps, 0, exception_offset, reexecute_offset, frame_size_in_words);
2658  _deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset);
2659}
2660
2661
2662#ifdef COMPILER2
2663//------------------------------generate_uncommon_trap_blob--------------------
2664void SharedRuntime::generate_uncommon_trap_blob() {
2665  // allocate space for the code
2666  ResourceMark rm;
2667  // setup code generation tools
2668  CodeBuffer   buffer("uncommon_trap_blob", 512, 512);
2669  MacroAssembler* masm = new MacroAssembler(&buffer);
2670
2671  enum frame_layout {
2672    arg0_off,      // thread                     sp + 0 // Arg location for
2673    arg1_off,      // unloaded_class_index       sp + 1 // calling C
2674    // The frame sender code expects that rbp will be in the "natural" place and
2675    // will override any oopMap setting for it. We must therefore force the layout
2676    // so that it agrees with the frame sender code.
2677    rbp_off,       // callee saved register      sp + 2
2678    return_off,    // slot for return address    sp + 3
2679    framesize
2680  };
2681
2682  address start = __ pc();
2683  // Push self-frame.
2684  __ subptr(rsp, return_off*wordSize);     // Prolog!
2685
2686  // rbp is an implicitly saved callee saved register (i.e. the calling
2687  // convention will save/restore it in the prolog/epilog). Other than that
2688  // there are no callee save registers now that adapter frames are gone.
2689  __ movptr(Address(rsp, rbp_off*wordSize), rbp);
2690
2691  // Clear the floating point exception stack
2692  __ empty_FPU_stack();
2693
2694  // set last_Java_sp
2695  __ get_thread(rdx);
2696  __ set_last_Java_frame(rdx, noreg, noreg, NULL);
2697
2698  // Call C code.  Need thread but NOT official VM entry
2699  // crud.  We cannot block on this call, no GC can happen.  Call should
2700  // capture callee-saved registers as well as return values.
2701  __ movptr(Address(rsp, arg0_off*wordSize), rdx);
2702  // argument already in ECX
2703  __ movl(Address(rsp, arg1_off*wordSize),rcx);
2704  __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap)));
2705
2706  // Set an oopmap for the call site
2707  OopMapSet *oop_maps = new OopMapSet();
2708  OopMap* map =  new OopMap( framesize, 0 );
2709  // No oopMap for rbp, it is known implicitly
2710
2711  oop_maps->add_gc_map( __ pc()-start, map);
2712
2713  __ get_thread(rcx);
2714
2715  __ reset_last_Java_frame(rcx, false, false);
2716
2717  // Load UnrollBlock into EDI
2718  __ movptr(rdi, rax);
2719
2720  // Pop all the frames we must move/replace.
2721  //
2722  // Frame picture (youngest to oldest)
2723  // 1: self-frame (no frame link)
2724  // 2: deopting frame  (no frame link)
2725  // 3: caller of deopting frame (could be compiled/interpreted).
2726
2727  // Pop self-frame.  We have no frame, and must rely only on EAX and ESP.
2728  __ addptr(rsp,(framesize-1)*wordSize);     // Epilog!
2729
2730  // Pop deoptimized frame
2731  __ movl2ptr(rcx, Address(rdi,Deoptimization::UnrollBlock::size_of_deoptimized_frame_offset_in_bytes()));
2732  __ addptr(rsp, rcx);
2733
2734  // sp should be pointing at the return address to the caller (3)
2735
2736  // Stack bang to make sure there's enough room for these interpreter frames.
2737  if (UseStackBanging) {
2738    __ movl(rbx, Address(rdi ,Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes()));
2739    __ bang_stack_size(rbx, rcx);
2740  }
2741
2742
2743  // Load array of frame pcs into ECX
2744  __ movl(rcx,Address(rdi,Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));
2745
2746  __ pop(rsi); // trash the pc
2747
2748  // Load array of frame sizes into ESI
2749  __ movptr(rsi,Address(rdi,Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes()));
2750
2751  Address counter(rdi, Deoptimization::UnrollBlock::counter_temp_offset_in_bytes());
2752
2753  __ movl(rbx, Address(rdi, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes()));
2754  __ movl(counter, rbx);
2755
2756  // Pick up the initial fp we should save
2757  __ movptr(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_fp_offset_in_bytes()));
2758
2759  // Now adjust the caller's stack to make up for the extra locals,
2760  // but record the original sp so that we can save it in the skeletal
2761  // interpreter frame; the stack walking of interpreter_sender will then
2762  // get the unextended sp value and not the "real" sp value.
2763
2764  Address sp_temp(rdi, Deoptimization::UnrollBlock::sender_sp_temp_offset_in_bytes());
2765  __ movptr(sp_temp, rsp);
2766  __ movl(rbx, Address(rdi, Deoptimization::UnrollBlock::caller_adjustment_offset_in_bytes()));
2767  __ subptr(rsp, rbx);

  // Push interpreter frames in a loop
  Label loop;
  __ bind(loop);
  __ movptr(rbx, Address(rsi, 0));      // Load frame size
#ifdef CC_INTERP
  __ subptr(rbx, 4*wordSize);           // we'll push pc, ebp, and two parm words by hand
#ifdef ASSERT
  __ push(0xDEADDEAD);                  // Make a recognizable pattern
  __ push(0xDEADDEAD);                  // (parm to RecursiveInterpreter...)
#else /* ASSERT */
  __ subptr(rsp, 2*wordSize);           // skip the "static long no_param"
#endif /* ASSERT */
#else /* CC_INTERP */
  __ subptr(rbx, 2*wordSize);           // we'll push pc and rbp by hand
#endif /* CC_INTERP */
  __ pushptr(Address(rcx, 0));          // save return address
  __ enter();                           // save old & set new rbp
  __ subptr(rsp, rbx);                  // Prolog!
  __ movptr(rbx, sp_temp);              // sender's sp
#ifdef CC_INTERP
  __ movptr(Address(rbp,
                  -(sizeof(BytecodeInterpreter)) + in_bytes(byte_offset_of(BytecodeInterpreter, _sender_sp))),
          rbx); // Make it walkable
#else /* CC_INTERP */
  // This value is corrected by layout_activation_impl
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
  __ movptr(Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize), rbx); // Make it walkable
#endif /* CC_INTERP */
  __ movptr(sp_temp, rsp);              // pass to next frame
  __ addptr(rsi, wordSize);             // Bump array pointer (sizes)
  __ addptr(rcx, wordSize);             // Bump array pointer (pcs)
  __ decrementl(counter);               // decrement counter
  __ jcc(Assembler::notZero, loop);
  __ pushptr(Address(rcx, 0));          // save final return address
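
  // The loop above, restated as a C-level sketch (illustrative only;
  // the names mirror the UnrollBlock accessors used above):
  //
  //   for (n = number_of_frames; n != 0; n--) {
  //     size = *sizes++;             // rbx, via rsi
  //     push(*pcs++);                // return address for this frame (rcx)
  //     push(rbp); rbp = rsp;        // enter()
  //     rsp -= size - 2*wordSize;    // room for locals/expression stack
  //     frame->sender_sp = sp_temp;  // make the skeletal frame walkable
  //     sp_temp = rsp;
  //   }
  //   push(*pcs);                    // final return address (self-frame pc)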

  // Re-push self-frame
  __ enter();                                 // save old & set new rbp
  __ subptr(rsp, (framesize-2) * wordSize);   // Prolog!

  // set last_Java_sp, last_Java_fp
  __ get_thread(rdi);
  __ set_last_Java_frame(rdi, noreg, rbp, NULL);

  // Call C code.  Need thread but NOT official VM entry
  // crud.  We cannot block on this call, no GC can happen.  Call should
  // restore return values to their stack-slots with the new SP.
  __ movptr(Address(rsp, arg0_off*wordSize), rdi);
  __ movl(Address(rsp, arg1_off*wordSize), Deoptimization::Unpack_uncommon_trap);
  __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames)));
  // Set an oopmap for the call site
  oop_maps->add_gc_map( __ pc()-start, new OopMap( framesize, 0 ) );

  __ get_thread(rdi);
  __ reset_last_Java_frame(rdi, true, false);

  // Pop self-frame.
  __ leave();     // Epilog!

  // Jump to interpreter
  __ ret(0);

  // -------------
  // make sure all code is generated
  masm->flush();

  _uncommon_trap_blob = UncommonTrapBlob::create(&buffer, oop_maps, framesize);
}
#endif // COMPILER2

//------------------------------generate_handler_blob------
//
// Generate a special Compile2Runtime blob that saves all registers,
// sets up an oopmap, and calls the safepoint code to stop the compiled
// code at a safepoint.
//
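// How we typically get here (a sketch rather than a specification):
// compiled code reads the polling page at a safepoint poll; when the VM
// arms the page, that read faults, and the signal handler records the
// faulting pc in JavaThread::saved_exception_pc before redirecting
// execution to this blob, which saves all registers and calls call_ptr.
//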
static SafepointBlob* generate_handler_blob(address call_ptr, bool cause_return) {

  // Account for thread arg in our frame
  const int additional_words = 1;
  int frame_size_in_words;

  assert(StubRoutines::forward_exception_entry() != NULL, "must be generated before");

  ResourceMark rm;
  OopMapSet *oop_maps = new OopMapSet();
  OopMap* map;

  // allocate space for the code
  // set up code generation tools
  CodeBuffer   buffer("handler_blob", 1024, 512);
  MacroAssembler* masm = new MacroAssembler(&buffer);

  const Register java_thread = rdi; // callee-saved for VC++
  address start   = __ pc();
  address call_pc = NULL;

  // If cause_return is true we are at a poll_return and the return
  // address back into the nmethod's caller is already on the stack.
  // We can leave this return address in place and effectively complete
  // the return and safepoint in the caller. Otherwise we push space for
  // a return address that the safepoint handler will install later to
  // make the stack walking sensible.
  if( !cause_return )
    __ push(rbx);                // Make room for return address (or push it again)

  map = RegisterSaver::save_live_registers(masm, additional_words, &frame_size_in_words, false);

  // The following is basically a call_VM. However, we need the precise
  // address of the call in order to generate an oopmap. Hence, we do all the
  // work ourselves.

  // Push thread argument and set up last_Java_sp
  __ get_thread(java_thread);
  __ push(java_thread);
  __ set_last_Java_frame(java_thread, noreg, noreg, NULL);

  // If this was not a poll_return then we need to correct the return address now.
  if( !cause_return ) {
    __ movptr(rax, Address(java_thread, JavaThread::saved_exception_pc_offset()));
    __ movptr(Address(rbp, wordSize), rax);
  }
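
  // The store above patches the return-address slot at Address(rbp, wordSize)
  // (assuming the standard frame linkage set up by save_live_registers), so
  // the frame appears to return to the instruction that was interrupted
  // at the poll.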

  // do the call
  __ call(RuntimeAddress(call_ptr));

  // Set an oopmap for the call site.  This oopmap will map all
  // oop-registers and debug-info registers as callee-saved.  This
  // will allow deoptimization at this safepoint to find all possible
  // debug-info recordings, as well as let GC find all oops.

  oop_maps->add_gc_map( __ pc() - start, map);

  // Discard thread arg
  __ pop(rcx);

  Label noException;

  // Clear last_Java_sp again
  __ get_thread(java_thread);
  __ reset_last_Java_frame(java_thread, false, false);

  __ cmpptr(Address(java_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
  __ jcc(Assembler::equal, noException);

  // Exception pending

  RegisterSaver::restore_live_registers(masm);

  __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));

  __ bind(noException);

  // Normal exit: restore registers and return
  RegisterSaver::restore_live_registers(masm);

  __ ret(0);

  // make sure all code is generated
  masm->flush();

  // Fill out other meta info
  return SafepointBlob::create(&buffer, oop_maps, frame_size_in_words);
}

//
// generate_resolve_blob - call resolution (static/virtual/opt-virtual/ic-miss)
//
// Generate a stub that calls into the VM to find out the proper destination
// of a Java call. All the argument registers are live at this point
// but since this is generic code we don't know what they are and the caller
// must do any gc of the args.
//
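// Flow sketch (illustrative): a compiled call site initially targets one
// of these stubs. The stub saves all registers and calls `destination`
// (e.g. SharedRuntime::resolve_static_call_C), which resolves the real
// callee, typically patching the call site for next time, and leaves the
// entry point in rax and the methodOop in the thread's vm_result. The
// stub then restores the argument registers and jumps to rax, so the
// callee sees the original arguments.
//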
static RuntimeStub* generate_resolve_blob(address destination, const char* name) {
  assert(StubRoutines::forward_exception_entry() != NULL, "must be generated before");

  // allocate space for the code
  ResourceMark rm;

  CodeBuffer buffer(name, 1000, 512);
  MacroAssembler* masm = new MacroAssembler(&buffer);

  int frame_size_words;
  enum frame_layout {
                thread_off,
                extra_words };

  OopMapSet *oop_maps = new OopMapSet();
  OopMap* map = NULL;

  int start = __ offset();

  map = RegisterSaver::save_live_registers(masm, extra_words, &frame_size_words);

  int frame_complete = __ offset();

  const Register thread = rdi;
  __ get_thread(thread);

  __ push(thread);
  __ set_last_Java_frame(thread, noreg, rbp, NULL);

  __ call(RuntimeAddress(destination));

  // Set an oopmap for the call site.
  // We need this not only for callee-saved registers, but also for volatile
  // registers that the compiler might be keeping live across a safepoint.

  oop_maps->add_gc_map( __ offset() - start, map);

  // rax contains the address we are going to jump to assuming no exception got installed

  __ addptr(rsp, wordSize);    // discard the pushed thread argument

  // clear last_Java_sp
  __ reset_last_Java_frame(thread, true, false);
  // check for pending exceptions
  Label pending;
  __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
  __ jcc(Assembler::notEqual, pending);

  // get the returned methodOop
  __ movptr(rbx, Address(thread, JavaThread::vm_result_offset()));
  __ movptr(Address(rsp, RegisterSaver::rbx_offset() * wordSize), rbx);

  __ movptr(Address(rsp, RegisterSaver::rax_offset() * wordSize), rax);
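
  // Storing rbx (the methodOop) and rax (the target entry point) back
  // into their save slots means restore_live_registers below reloads
  // them with these new values, while every other register is restored
  // to its state at the call site.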

  RegisterSaver::restore_live_registers(masm);

  // We are back to the original state on entry and ready to go.

  __ jmp(rax);
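
  // Jumping rather than calling keeps the original caller as the caller
  // of the resolved method: the register-save frame has already been
  // torn down by restore_live_registers, so this behaves like a tail call.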

  // Pending exception after the safepoint

  __ bind(pending);

  RegisterSaver::restore_live_registers(masm);

  // exception pending => remove activation and forward to exception handler

  __ get_thread(thread);
  __ movptr(Address(thread, JavaThread::vm_result_offset()), NULL_WORD);
  __ movptr(rax, Address(thread, Thread::pending_exception_offset()));
  __ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));

  // -------------
  // make sure all code is generated
  masm->flush();

  // return the blob
  // frame_size_words or bytes??
  return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_words, oop_maps, true);
}

void SharedRuntime::generate_stubs() {

  _wrong_method_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method),
                                        "wrong_method_stub");

  _ic_miss_blob      = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method_ic_miss),
                                        "ic_miss_stub");

  _resolve_opt_virtual_call_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_opt_virtual_call_C),
                                        "resolve_opt_virtual_call");

  _resolve_virtual_call_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_virtual_call_C),
                                        "resolve_virtual_call");

  _resolve_static_call_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_static_call_C),
                                        "resolve_static_call");

  _polling_page_safepoint_handler_blob =
    generate_handler_blob(CAST_FROM_FN_PTR(address,
                   SafepointSynchronize::handle_polling_page_exception), false);

  _polling_page_return_handler_blob =
    generate_handler_blob(CAST_FROM_FN_PTR(address,
                   SafepointSynchronize::handle_polling_page_exception), true);

  generate_deopt_blob();
#ifdef COMPILER2
  generate_uncommon_trap_blob();
#endif // COMPILER2
}