// sharedRuntime_sparc.cpp revision 7837:9c3b4e28183c
/*
 * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/debugInfoRec.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
#include "oops/compiledICHolder.hpp"
#include "prims/jvmtiRedefineClassesTrace.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/vframeArray.hpp"
#include "vmreg_sparc.inline.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif
#ifdef SHARK
#include "compiler/compileBroker.hpp"
#include "shark/sharkCompiler.hpp"
#endif

#define __ masm->


class RegisterSaver {

  // Used for saving volatile registers. These are the Gregs, Fregs and I/L/Os.
  // The Oregs are problematic. In the 32-bit build the compiler can
  // have O registers live with 64-bit quantities. A window save will
  // cut the heads off of the registers. We have to do a very extensive
  // stack dance to save and restore these properly.

  // Note that the Oregs problem only exists if we block at either a polling
  // page exception or a compiled-code safepoint that was not originally a call,
  // or if we deoptimize following one of these kinds of safepoints.

  // Lots of registers to save.  For all builds, a window save will preserve
  // the %i and %l registers.  For the 32-bit longs-in-two-entries and 64-bit
  // builds a window-save will preserve the %o registers.  In the LION build
  // we need to save the 64-bit %o registers, which requires we save them
  // before the window-save (as then they become %i registers and get their
  // heads chopped off on interrupt).  We have to save some %g registers here
  // as well.
  enum {
    // This frame's save area.  Includes extra space for the native call:
    // vararg's layout space and the like.  Briefly holds the caller's
    // register save area.
    call_args_area = frame::register_save_words_sp_offset +
                     frame::memory_parameter_word_sp_offset*wordSize,
    // Make sure save locations are always 8-byte aligned.
    // We can't use round_to because it doesn't produce a compile-time constant.
    start_of_extra_save_area = ((call_args_area + 7) & ~7),
    g1_offset = start_of_extra_save_area, // g-regs needing saving
    g3_offset = g1_offset+8,
    g4_offset = g3_offset+8,
    g5_offset = g4_offset+8,
    o0_offset = g5_offset+8,
    o1_offset = o0_offset+8,
    o2_offset = o1_offset+8,
    o3_offset = o2_offset+8,
    o4_offset = o3_offset+8,
    o5_offset = o4_offset+8,
    start_of_flags_save_area = o5_offset+8,
    ccr_offset = start_of_flags_save_area,
    fsr_offset = ccr_offset + 8,
    d00_offset = fsr_offset+8,  // Start of float save area
    register_save_size = d00_offset+8*32
  };
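  // For reference, the arithmetic behind register_save_size: the four %g and
  // six %o save slots plus the two flag slots are 8 bytes each, and the float
  // save area holds 32 doubles, so register_save_size = d00_offset + 8*32
  // = d00_offset + 256 bytes.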


  public:

  static int Oexception_offset() { return o0_offset; };
  static int G3_offset() { return g3_offset; };
  static int G5_offset() { return g5_offset; };
  static OopMap* save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words);
  static void restore_live_registers(MacroAssembler* masm);

  // During deoptimization only the result registers need to be restored;
  // all the other values have already been extracted.

  static void restore_result_registers(MacroAssembler* masm);
};

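// Typical usage, as a sketch only (the concrete callers are the runtime stubs
// and blobs generated elsewhere in this file):
//
//   int frame_size_words;
//   OopMap* map = RegisterSaver::save_live_registers(masm, 0, &frame_size_words);
//   // ... set up the last Java frame and call into the runtime ...
//   RegisterSaver::restore_live_registers(masm);
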
OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words) {
  // Record volatile registers as callee-save values in an OopMap so their save locations will be
  // propagated to the caller frame's RegisterMap during StackFrameStream construction (needed for
  // deoptimization; see compiledVFrame::create_stack_value).  The caller's I, L and O registers
  // are saved in register windows - I's and L's in the caller's frame and O's in the stub frame
  // (as the stub's I's) when the runtime routine called by the stub creates its frame.
  int i;
  // Always make the frame size 16-byte aligned.
  int frame_size = round_to(additional_frame_words + register_save_size, 16);
  // OopMap frame size is in c2 stack slots (sizeof(jint)) not bytes or words
  int frame_size_in_slots = frame_size / sizeof(jint);
  // CodeBlob frame size is in words.
  *total_frame_words = frame_size / wordSize;
  // OopMap* map = new OopMap(*total_frame_words, 0);
  OopMap* map = new OopMap(frame_size_in_slots, 0);

#if !defined(_LP64)

  // Save 64-bit O registers; they will get their heads chopped off on a 'save'.
  __ stx(O0, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8);
  __ stx(O1, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8);
  __ stx(O2, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+2*8);
  __ stx(O3, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+3*8);
  __ stx(O4, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+4*8);
  __ stx(O5, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+5*8);
#endif /* _LP64 */

  __ save(SP, -frame_size, SP);

#ifndef _LP64
  // Reload the 64-bit Oregs. Although they are now Iregs we load them
  // into Oregs here to avoid interrupts cutting off their heads.

  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8, O0);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8, O1);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+2*8, O2);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+3*8, O3);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+4*8, O4);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+5*8, O5);

  __ stx(O0, SP, o0_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((o0_offset + 4)>>2), O0->as_VMReg());

  __ stx(O1, SP, o1_offset+STACK_BIAS);

  map->set_callee_saved(VMRegImpl::stack2reg((o1_offset + 4)>>2), O1->as_VMReg());

  __ stx(O2, SP, o2_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((o2_offset + 4)>>2), O2->as_VMReg());

  __ stx(O3, SP, o3_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((o3_offset + 4)>>2), O3->as_VMReg());

  __ stx(O4, SP, o4_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((o4_offset + 4)>>2), O4->as_VMReg());

  __ stx(O5, SP, o5_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((o5_offset + 4)>>2), O5->as_VMReg());
#endif /* _LP64 */


#ifdef _LP64
  int debug_offset = 0;
#else
  int debug_offset = 4;
#endif
  // Save the G's
  __ stx(G1, SP, g1_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((g1_offset + debug_offset)>>2), G1->as_VMReg());

  __ stx(G3, SP, g3_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((g3_offset + debug_offset)>>2), G3->as_VMReg());

  __ stx(G4, SP, g4_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((g4_offset + debug_offset)>>2), G4->as_VMReg());

  __ stx(G5, SP, g5_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((g5_offset + debug_offset)>>2), G5->as_VMReg());

  // This is really a waste but we'll keep things as they were for now
  if (true) {
#ifndef _LP64
    map->set_callee_saved(VMRegImpl::stack2reg((o0_offset)>>2), O0->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((o1_offset)>>2), O1->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((o2_offset)>>2), O2->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((o3_offset)>>2), O3->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((o4_offset)>>2), O4->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((o5_offset)>>2), O5->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((g1_offset)>>2), G1->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((g3_offset)>>2), G3->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((g4_offset)>>2), G4->as_VMReg()->next());
    map->set_callee_saved(VMRegImpl::stack2reg((g5_offset)>>2), G5->as_VMReg()->next());
#endif /* _LP64 */
  }


  // Save the flags
  __ rdccr( G5 );
  __ stx(G5, SP, ccr_offset+STACK_BIAS);
  __ stxfsr(SP, fsr_offset+STACK_BIAS);

  // Save all the FP registers: 32 doubles (32 floats correspond to the 2 halves of the first 16 doubles)
  int offset = d00_offset;
  for( int i=0; i<FloatRegisterImpl::number_of_registers; i+=2 ) {
    FloatRegister f = as_FloatRegister(i);
    __ stf(FloatRegisterImpl::D,  f, SP, offset+STACK_BIAS);
    // Record as callee saved both halves of double registers (2 float registers).
    map->set_callee_saved(VMRegImpl::stack2reg(offset>>2), f->as_VMReg());
    map->set_callee_saved(VMRegImpl::stack2reg((offset + sizeof(float))>>2), f->as_VMReg()->next());
    offset += sizeof(double);
  }

  // And we're done.

  return map;
}


// Pop the current frame and restore all the registers that we
// saved.
void RegisterSaver::restore_live_registers(MacroAssembler* masm) {

  // Restore all the FP registers
  for( int i=0; i<FloatRegisterImpl::number_of_registers; i+=2 ) {
    __ ldf(FloatRegisterImpl::D, SP, d00_offset+i*sizeof(float)+STACK_BIAS, as_FloatRegister(i));
  }

  __ ldx(SP, ccr_offset+STACK_BIAS, G1);
  __ wrccr(G1);

  // Restore the G's
  // Note that G2 (AKA GThread) must be saved and restored separately.
  // TODO-FIXME: save and restore some of the other ASRs, viz., %asi and %gsr.

  __ ldx(SP, g1_offset+STACK_BIAS, G1);
  __ ldx(SP, g3_offset+STACK_BIAS, G3);
  __ ldx(SP, g4_offset+STACK_BIAS, G4);
  __ ldx(SP, g5_offset+STACK_BIAS, G5);


#if !defined(_LP64)
  // Restore the 64-bit O's.
  __ ldx(SP, o0_offset+STACK_BIAS, O0);
  __ ldx(SP, o1_offset+STACK_BIAS, O1);
  __ ldx(SP, o2_offset+STACK_BIAS, O2);
  __ ldx(SP, o3_offset+STACK_BIAS, O3);
  __ ldx(SP, o4_offset+STACK_BIAS, O4);
  __ ldx(SP, o5_offset+STACK_BIAS, O5);

  // And temporarily place them in TLS

  __ stx(O0, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8);
  __ stx(O1, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8);
  __ stx(O2, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+2*8);
  __ stx(O3, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+3*8);
  __ stx(O4, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+4*8);
  __ stx(O5, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+5*8);
#endif /* _LP64 */

  // Restore flags

  __ ldxfsr(SP, fsr_offset+STACK_BIAS);

  __ restore();

#if !defined(_LP64)
  // Now reload the 64-bit Oregs after we've restored the window.
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8, O0);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8, O1);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+2*8, O2);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+3*8, O3);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+4*8, O4);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+5*8, O5);
#endif /* _LP64 */

}

// Pop the current frame and restore the registers that might be holding
// a result.
void RegisterSaver::restore_result_registers(MacroAssembler* masm) {

#if !defined(_LP64)
  // The 32-bit build returns longs in G1.
  __ ldx(SP, g1_offset+STACK_BIAS, G1);

  // Retrieve the 64-bit O's.
  __ ldx(SP, o0_offset+STACK_BIAS, O0);
  __ ldx(SP, o1_offset+STACK_BIAS, O1);
  // and save to TLS
  __ stx(O0, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8);
  __ stx(O1, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8);
#endif /* _LP64 */

  __ ldf(FloatRegisterImpl::D, SP, d00_offset+STACK_BIAS, as_FloatRegister(0));

  __ restore();

#if !defined(_LP64)
  // Now reload the 64-bit Oregs after we've restored the window.
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8, O0);
  __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8, O1);
#endif /* _LP64 */

}

// Is the vector's size (in bytes) bigger than a size saved by default?
// 8-byte FP registers are saved by default on SPARC.
bool SharedRuntime::is_wide_vector(int size) {
  // Note, MaxVectorSize == 8 on SPARC.
  assert(size <= 8, err_msg_res("%d bytes vectors are not supported", size));
  return size > 8;
}

// The java_calling_convention describes stack locations as ideal slots on
// a frame with no abi restrictions. Since we must observe abi restrictions
// (like the placement of the register window) the slots must be biased by
// the following value.
static int reg2offset(VMReg r) {
  return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
}
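
// For example (illustrative only): a value the calling convention assigns to
// VMReg stack slot 0 ends up at byte offset
// out_preserve_stack_slots() * VMRegImpl::stack_slot_size above the window
// top, and callers additionally add STACK_BIAS when forming a V9 address.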

static VMRegPair reg64_to_VMRegPair(Register r) {
  VMRegPair ret;
  if (wordSize == 8) {
    ret.set2(r->as_VMReg());
  } else {
    ret.set_pair(r->successor()->as_VMReg(), r->as_VMReg());
  }
  return ret;
}

// ---------------------------------------------------------------------------
// Read the array of BasicTypes from a signature, and compute where the
// arguments should go.  Values in the VMRegPair regs array refer to 4-byte (VMRegImpl::stack_slot_size)
// quantities.  Values less than VMRegImpl::stack0 are registers, those above
// refer to 4-byte stack slots.  All stack slots are based off of the window
// top.  VMRegImpl::stack0 refers to the first slot past the 16-word window,
// and VMRegImpl::stack0+1 refers to the memory word 4 bytes higher.  Register
// values 0-63 (up to RegisterImpl::number_of_registers) are the 64-bit
// integer registers.  Values 64-95 are the (32-bit only) float registers.
// Each 32-bit quantity is given its own number, so the integer registers
// (in either 32- or 64-bit builds) use 2 numbers.  For example, there is
// an O0-low and an O0-high.  Essentially, all int register numbers are doubled.

// Register results are passed in O0-O5, for outgoing call arguments.  To
// convert to incoming arguments, convert all O's to I's.  The regs array
// refers to the low and hi 32-bit words of 64-bit registers or stack slots.
// If the regs[].second() field is set to VMRegImpl::Bad(), it means it's unused (a
// 32-bit value was passed).  If both are VMRegImpl::Bad(), it means no value was
// passed (used as a placeholder for the other half of longs and doubles in
// the 64-bit build).  regs[].second() is either VMRegImpl::Bad() or regs[].second() is
// regs[].first()+1 (regs[].first() may be misaligned in the C calling convention).
// Sparc never passes a value in regs[].second() but not regs[].first() (regs[].first()
// == VMRegImpl::Bad() && regs[].second() != VMRegImpl::Bad()) nor unrelated values in the
// same VMRegPair.

// Note: the INPUTS in sig_bt are in units of Java argument words, which are
// either 32-bit or 64-bit depending on the build.  The OUTPUTS are in 32-bit
// units regardless of build.
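
// Illustration of the numbering (this follows from the doubling rule above):
// integer register number k occupies VMReg numbers 2k and 2k+1, so O0
// (integer register 8) is described by the pair 16/17, while a single
// float register gets exactly one number in the 64-95 range.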


// ---------------------------------------------------------------------------
// The compiled Java calling convention.  The Java convention always passes
// 64-bit values in adjacent aligned locations (either registers or stack),
// floats in float registers and doubles in aligned float pairs.  There is
// no backing varargs store for values in registers.
// In the 32-bit build, longs are passed on the stack (they cannot be
// passed in I's, because longs in I's get their heads chopped off at
// interrupt).
int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
                                           VMRegPair *regs,
                                           int total_args_passed,
                                           int is_outgoing) {
  assert(F31->as_VMReg()->is_reg(), "overlapping stack/register numbers");

  const int int_reg_max = SPARC_ARGS_IN_REGS_NUM;
  const int flt_reg_max = 8;

  int int_reg = 0;
  int flt_reg = 0;
  int slot = 0;

  for (int i = 0; i < total_args_passed; i++) {
    switch (sig_bt[i]) {
    case T_INT:
    case T_SHORT:
    case T_CHAR:
    case T_BYTE:
    case T_BOOLEAN:
#ifndef _LP64
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS: // Used, e.g., in slow-path locking for the lock's stack address
#endif // _LP64
      if (int_reg < int_reg_max) {
        Register r = is_outgoing ? as_oRegister(int_reg++) : as_iRegister(int_reg++);
        regs[i].set1(r->as_VMReg());
      } else {
        regs[i].set1(VMRegImpl::stack2reg(slot++));
      }
      break;

#ifdef _LP64
    case T_LONG:
      assert(sig_bt[i+1] == T_VOID, "expecting VOID in other half");
      // fall-through
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS: // Used, e.g., in slow-path locking for the lock's stack address
      if (int_reg < int_reg_max) {
        Register r = is_outgoing ? as_oRegister(int_reg++) : as_iRegister(int_reg++);
        regs[i].set2(r->as_VMReg());
      } else {
        slot = round_to(slot, 2);  // align
        regs[i].set2(VMRegImpl::stack2reg(slot));
        slot += 2;
      }
      break;
#else
    case T_LONG:
      assert(sig_bt[i+1] == T_VOID, "expecting VOID in other half");
      // On 32-bit SPARC always put longs on the stack to keep the pressure off
      // the integer argument registers.  They should be used for oops.
      slot = round_to(slot, 2);  // align
      regs[i].set2(VMRegImpl::stack2reg(slot));
      slot += 2;
#endif
      break;

    case T_FLOAT:
      if (flt_reg < flt_reg_max) {
        FloatRegister r = as_FloatRegister(flt_reg++);
        regs[i].set1(r->as_VMReg());
      } else {
        regs[i].set1(VMRegImpl::stack2reg(slot++));
      }
      break;

    case T_DOUBLE:
      assert(sig_bt[i+1] == T_VOID, "expecting half");
      if (round_to(flt_reg, 2) + 1 < flt_reg_max) {
        flt_reg = round_to(flt_reg, 2);  // align
        FloatRegister r = as_FloatRegister(flt_reg);
        regs[i].set2(r->as_VMReg());
        flt_reg += 2;
      } else {
        slot = round_to(slot, 2);  // align
        regs[i].set2(VMRegImpl::stack2reg(slot));
        slot += 2;
      }
      break;

    case T_VOID:
      regs[i].set_bad();   // Halves of longs & doubles
      break;

    default:
      fatal(err_msg_res("unknown basic type %d", sig_bt[i]));
      break;
    }
  }

  // Return the amount of stack space these arguments will need.
  return slot;
}
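
// Worked example (illustrative only): for sig_bt = { T_INT, T_LONG, T_VOID,
// T_DOUBLE, T_VOID } on a 64-bit build with is_outgoing != 0, the loop above
// yields
//   regs[0].set1(O0)   -- the int
//   regs[1].set2(O1)   -- the long, one aligned 64-bit register
//   regs[2].set_bad()  -- placeholder for the long's other half
//   regs[3].set2(F0)   -- the double in the aligned pair F0:F1
//   regs[4].set_bad()  -- placeholder for the double's other half
// and returns 0, since no stack slots were needed.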

// Helper class mostly to avoid passing masm everywhere, and handle
// store displacement overflow logic.
class AdapterGenerator {
  MacroAssembler *masm;
  Register Rdisp;
  void set_Rdisp(Register r)  { Rdisp = r; }

  void patch_callers_callsite();

  // base+st_off points to top of argument
  int arg_offset(const int st_off) { return st_off; }
  int next_arg_offset(const int st_off) {
    return st_off - Interpreter::stackElementSize;
  }

  // Argument slot values may be loaded first into a register because
  // they might not fit into the displacement field.
  RegisterOrConstant arg_slot(const int st_off);
  RegisterOrConstant next_arg_slot(const int st_off);

  // Stores long into offset pointed to by base
  void store_c2i_long(Register r, Register base,
                      const int st_off, bool is_stack);
  void store_c2i_object(Register r, Register base,
                        const int st_off);
  void store_c2i_int(Register r, Register base,
                     const int st_off);
  void store_c2i_double(VMReg r_2,
                        VMReg r_1, Register base, const int st_off);
  void store_c2i_float(FloatRegister f, Register base,
                       const int st_off);

 public:
  void gen_c2i_adapter(int total_args_passed,
                              // VMReg max_arg,
                              int comp_args_on_stack, // VMRegStackSlots
                              const BasicType *sig_bt,
                              const VMRegPair *regs,
                              Label& skip_fixup);
  void gen_i2c_adapter(int total_args_passed,
                              // VMReg max_arg,
                              int comp_args_on_stack, // VMRegStackSlots
                              const BasicType *sig_bt,
                              const VMRegPair *regs);

  AdapterGenerator(MacroAssembler *_masm) : masm(_masm) {}
};


// Patch the caller's callsite with entry to compiled code if it exists.
void AdapterGenerator::patch_callers_callsite() {
  Label L;
  __ ld_ptr(G5_method, in_bytes(Method::code_offset()), G3_scratch);
  __ br_null(G3_scratch, false, Assembler::pt, L);
  __ delayed()->nop();
  // Call into the VM to patch the caller, then jump to compiled callee
  __ save_frame(4);     // Args in compiled layout; do not blow them

  // Must save all the live Gregs; the list is:
  // G1: 1st Long arg (32-bit build)
  // G2: global allocated to TLS
  // G3: used in inline cache check (scratch)
  // G4: 2nd Long arg (32-bit build);
  // G5: used in inline cache check (Method*)

  // The longs must go to the stack by hand since in the 32-bit build they can be trashed by window ops.

#ifdef _LP64
  // mov(s,d)
  __ mov(G1, L1);
  __ mov(G4, L4);
  __ mov(G5_method, L5);
  __ mov(G5_method, O0);         // VM needs target method
  __ mov(I7, O1);                // VM needs caller's callsite
  // Must be a leaf call...
  // can be very far once the blob has been relocated
  AddressLiteral dest(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite));
  __ relocate(relocInfo::runtime_call_type);
  __ jumpl_to(dest, O7, O7);
  __ delayed()->mov(G2_thread, L7_thread_cache);
  __ mov(L7_thread_cache, G2_thread);
  __ mov(L1, G1);
  __ mov(L4, G4);
  __ mov(L5, G5_method);
#else
  __ stx(G1, FP, -8 + STACK_BIAS);
  __ stx(G4, FP, -16 + STACK_BIAS);
  __ mov(G5_method, L5);
  __ mov(G5_method, O0);         // VM needs target method
  __ mov(I7, O1);                // VM needs caller's callsite
  // Must be a leaf call...
  __ call(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite), relocInfo::runtime_call_type);
  __ delayed()->mov(G2_thread, L7_thread_cache);
  __ mov(L7_thread_cache, G2_thread);
  __ ldx(FP, -8 + STACK_BIAS, G1);
  __ ldx(FP, -16 + STACK_BIAS, G4);
  __ mov(L5, G5_method);
#endif /* _LP64 */

  __ restore();      // Restore args
  __ bind(L);
}


RegisterOrConstant AdapterGenerator::arg_slot(const int st_off) {
  RegisterOrConstant roc(arg_offset(st_off));
  return __ ensure_simm13_or_reg(roc, Rdisp);
}

RegisterOrConstant AdapterGenerator::next_arg_slot(const int st_off) {
  RegisterOrConstant roc(next_arg_offset(st_off));
  return __ ensure_simm13_or_reg(roc, Rdisp);
}
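
// Note on the helpers above (informal): a SPARC load/store displacement is a
// signed 13-bit immediate (-4096..4095).  An interpreter-frame offset such as
// 0x2000 does not fit, so ensure_simm13_or_reg materializes it into Rdisp and
// the memory operand becomes [base + Rdisp] instead of [base + imm13].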


// Stores long into offset pointed to by base
void AdapterGenerator::store_c2i_long(Register r, Register base,
                                      const int st_off, bool is_stack) {
#ifdef _LP64
  // In V9, longs are given 2 64-bit slots in the interpreter, but the
  // data is passed in only 1 slot.
  __ stx(r, base, next_arg_slot(st_off));
#else
#ifdef COMPILER2
  // Misaligned store of 64-bit data
  __ stw(r, base, arg_slot(st_off));    // lo bits
  __ srlx(r, 32, r);
  __ stw(r, base, next_arg_slot(st_off));  // hi bits
#else
  if (is_stack) {
    // Misaligned store of 64-bit data
    __ stw(r, base, arg_slot(st_off));    // lo bits
    __ srlx(r, 32, r);
    __ stw(r, base, next_arg_slot(st_off));  // hi bits
  } else {
    __ stw(r->successor(), base, arg_slot(st_off)     ); // lo bits
    __ stw(r             , base, next_arg_slot(st_off)); // hi bits
  }
#endif // COMPILER2
#endif // _LP64
}

void AdapterGenerator::store_c2i_object(Register r, Register base,
                      const int st_off) {
  __ st_ptr (r, base, arg_slot(st_off));
}

void AdapterGenerator::store_c2i_int(Register r, Register base,
                   const int st_off) {
  __ st (r, base, arg_slot(st_off));
}

// Stores into offset pointed to by base
void AdapterGenerator::store_c2i_double(VMReg r_2,
                      VMReg r_1, Register base, const int st_off) {
#ifdef _LP64
  // In V9, doubles are given 2 64-bit slots in the interpreter, but the
  // data is passed in only 1 slot.
  __ stf(FloatRegisterImpl::D, r_1->as_FloatRegister(), base, next_arg_slot(st_off));
#else
  // Need to marshal 64-bit value from misaligned Lesp loads
  __ stf(FloatRegisterImpl::S, r_1->as_FloatRegister(), base, next_arg_slot(st_off));
  __ stf(FloatRegisterImpl::S, r_2->as_FloatRegister(), base, arg_slot(st_off) );
#endif
}

void AdapterGenerator::store_c2i_float(FloatRegister f, Register base,
                                       const int st_off) {
  __ stf(FloatRegisterImpl::S, f, base, arg_slot(st_off));
}

void AdapterGenerator::gen_c2i_adapter(
                            int total_args_passed,
                            // VMReg max_arg,
                            int comp_args_on_stack, // VMRegStackSlots
                            const BasicType *sig_bt,
                            const VMRegPair *regs,
                            Label& L_skip_fixup) {

  // Before we get into the guts of the C2I adapter, see if we should be here
  // at all.  We've come from compiled code and are attempting to jump to the
  // interpreter, which means the caller made a static call to get here
  // (vcalls always get a compiled target if there is one).  Check for a
  // compiled target.  If there is one, we need to patch the caller's call.
  // However we will run interpreted if we come thru here. The next pass
  // thru the call site will run compiled. If we ran compiled here then
  // we can (theoretically) do endless i2c->c2i->i2c transitions during
  // deopt/uncommon trap cycles. If we always go interpreted here then
  // we can have at most one and don't need to play any tricks to keep
  // from endlessly growing the stack.
  //
  // Actually if we detected that we had an i2c->c2i transition here we
  // ought to be able to reset the world back to the state of the interpreted
  // call and not bother building another interpreter arg area. We don't
  // do that at this point.

  patch_callers_callsite();

  __ bind(L_skip_fixup);

  // Since all args are passed on the stack, total_args_passed*wordSize is the
  // space we need.  Add in the varargs area needed by the interpreter. Round up
  // to stack alignment.
  const int arg_size = total_args_passed * Interpreter::stackElementSize;
  const int varargs_area =
                 (frame::varargs_offset - frame::register_save_words)*wordSize;
  const int extraspace = round_to(arg_size + varargs_area, 2*wordSize);

  const int bias = STACK_BIAS;
  const int interp_arg_offset = frame::varargs_offset*wordSize +
                        (total_args_passed-1)*Interpreter::stackElementSize;

  const Register base = SP;

  // Make some extra space on the stack.
  __ sub(SP, __ ensure_simm13_or_reg(extraspace, G3_scratch), SP);
  set_Rdisp(G3_scratch);

  // Write the args into the outgoing interpreter space.
  for (int i = 0; i < total_args_passed; i++) {
    const int st_off = interp_arg_offset - (i*Interpreter::stackElementSize) + bias;
    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_stack()) {        // Pretend stack targets are loaded into G1
      RegisterOrConstant ld_off = reg2offset(r_1) + extraspace + bias;
      ld_off = __ ensure_simm13_or_reg(ld_off, Rdisp);
      r_1 = G1_scratch->as_VMReg();// as part of the load/store shuffle
      if (!r_2->is_valid()) __ ld (base, ld_off, G1_scratch);
      else                  __ ldx(base, ld_off, G1_scratch);
    }

    if (r_1->is_Register()) {
      Register r = r_1->as_Register()->after_restore();
      if (sig_bt[i] == T_OBJECT || sig_bt[i] == T_ARRAY) {
        store_c2i_object(r, base, st_off);
      } else if (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
        store_c2i_long(r, base, st_off, r_2->is_stack());
      } else {
        store_c2i_int(r, base, st_off);
      }
    } else {
      assert(r_1->is_FloatRegister(), "");
      if (sig_bt[i] == T_FLOAT) {
        store_c2i_float(r_1->as_FloatRegister(), base, st_off);
      } else {
        assert(sig_bt[i] == T_DOUBLE, "wrong type");
        store_c2i_double(r_2, r_1, base, st_off);
      }
    }
  }

  // Load the interpreter entry point.
  __ ld_ptr(G5_method, in_bytes(Method::interpreter_entry_offset()), G3_scratch);

  // Pass O5_savedSP as an argument to the interpreter.
  // The interpreter will restore SP to this value before returning.
  __ add(SP, __ ensure_simm13_or_reg(extraspace, G1), O5_savedSP);

  __ mov((frame::varargs_offset)*wordSize -
         1*Interpreter::stackElementSize+bias+BytesPerWord, G1);
  // Jump to the interpreter just as if the interpreter was doing it.
  __ jmpl(G3_scratch, 0, G0);
  // Setup Lesp for the call.  Cannot actually set Lesp as the current Lesp
  // (really L0) is in use by the compiled frame as a generic temp.  However,
  // the interpreter does not know where its args are without some kind of
  // arg pointer being passed in.  Pass it in Gargs.
  __ delayed()->add(SP, G1, Gargs);
}

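// Emit a check that pc_reg lies strictly within (code_start, code_end):
// the generated code branches to L_ok when the pc is inside the range and
// falls through (via the local L_fail) when it is not.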
static void range_check(MacroAssembler* masm, Register pc_reg, Register temp_reg, Register temp2_reg,
                        address code_start, address code_end,
                        Label& L_ok) {
  Label L_fail;
  __ set(ExternalAddress(code_start), temp_reg);
  __ set(pointer_delta(code_end, code_start, 1), temp2_reg);
  __ cmp(pc_reg, temp_reg);
  __ brx(Assembler::lessEqualUnsigned, false, Assembler::pn, L_fail);
  __ delayed()->add(temp_reg, temp2_reg, temp_reg);
  __ cmp(pc_reg, temp_reg);
  __ cmp_and_brx_short(pc_reg, temp_reg, Assembler::lessUnsigned, Assembler::pt, L_ok);
  __ bind(L_fail);
}

void AdapterGenerator::gen_i2c_adapter(
                            int total_args_passed,
                            // VMReg max_arg,
                            int comp_args_on_stack, // VMRegStackSlots
                            const BasicType *sig_bt,
                            const VMRegPair *regs) {

  // Generate an I2C adapter: adjust the I-frame to make space for the C-frame
  // layout.  Lesp was saved by the calling I-frame and will be restored on
  // return.  Meanwhile, outgoing arg space is all owned by the callee
  // C-frame, so we can mangle it at will.  After adjusting the frame size,
  // hoist register arguments and repack other args according to the compiled
  // code convention.  Finally, end in a jump to the compiled code.  The entry
  // point address is the start of the buffer.

  // We will only enter here from an interpreted frame and never from after
  // passing thru a c2i. Azul allowed this but we do not. If we lose the
  // race and use a c2i we will remain interpreted for the race loser(s).
  // This removes all sorts of headaches on the x86 side and also eliminates
  // the possibility of having c2i -> i2c -> c2i -> ... endless transitions.

  // More detail:
  // Adapters can be frameless because they do not require the caller
  // to perform additional cleanup work, such as correcting the stack pointer.
  // An i2c adapter is frameless because the *caller* frame, which is interpreted,
  // routinely repairs its own stack pointer (from interpreter_frame_last_sp),
  // even if a callee has modified the stack pointer.
  // A c2i adapter is frameless because the *callee* frame, which is interpreted,
  // routinely repairs its caller's stack pointer (from sender_sp, which is set
  // up via the senderSP register).
  // In other words, if *either* the caller or callee is interpreted, we can
  // get the stack pointer repaired after a call.
  // This is why c2i and i2c adapters cannot be indefinitely composed.
  // In particular, if a c2i adapter were to somehow call an i2c adapter,
  // both caller and callee would be compiled methods, and neither would
  // clean up the stack pointer changes performed by the two adapters.
  // If this happens, control eventually transfers back to the compiled
  // caller, but with an uncorrected stack, causing delayed havoc.

  if (VerifyAdapterCalls &&
      (Interpreter::code() != NULL || StubRoutines::code1() != NULL)) {
    // So, let's test for cascading c2i/i2c adapters right now.
    //  assert(Interpreter::contains($return_addr) ||
    //         StubRoutines::contains($return_addr),
    //         "i2c adapter must return to an interpreter frame");
    __ block_comment("verify_i2c { ");
    Label L_ok;
    if (Interpreter::code() != NULL)
      range_check(masm, O7, O0, O1,
                  Interpreter::code()->code_start(), Interpreter::code()->code_end(),
                  L_ok);
    if (StubRoutines::code1() != NULL)
      range_check(masm, O7, O0, O1,
                  StubRoutines::code1()->code_begin(), StubRoutines::code1()->code_end(),
                  L_ok);
    if (StubRoutines::code2() != NULL)
      range_check(masm, O7, O0, O1,
                  StubRoutines::code2()->code_begin(), StubRoutines::code2()->code_end(),
                  L_ok);
    const char* msg = "i2c adapter must return to an interpreter frame";
    __ block_comment(msg);
    __ stop(msg);
    __ bind(L_ok);
    __ block_comment("} verify_i2ce ");
  }

  // As you can see from the list of inputs & outputs there are not a lot
  // of temp registers to work with: mostly G1, G3 & G4.

  // Inputs:
  // G2_thread      - TLS
  // G5_method      - Method oop
  // G4 (Gargs)     - Pointer to interpreter's args
  // O0..O4         - free for scratch
  // O5_savedSP     - Caller's saved SP, to be restored if needed
  // O6             - Current SP!
  // O7             - Valid return address
  // L0-L7, I0-I7   - Caller's temps (no frame pushed yet)

  // Outputs:
  // G2_thread      - TLS
  // O0-O5          - Outgoing args in compiled layout
  // O6             - Adjusted or restored SP
  // O7             - Valid return address
  // L0-L7, I0-I7   - Caller's temps (no frame pushed yet)
  // F0-F7          - more outgoing args


  // Gargs is the incoming argument base, and also an outgoing argument.
  __ sub(Gargs, BytesPerWord, Gargs);

  // ON ENTRY TO THE CODE WE ARE MAKING, WE HAVE AN INTERPRETED FRAME
  // WITH O7 HOLDING A VALID RETURN PC
  //
  // |              |
  // :  java stack  :
  // |              |
  // +--------------+ <--- start of outgoing args
  // |   receiver   |   |
  // : rest of args :   |---size is java-arg-words
  // |              |   |
  // +--------------+ <--- O4_args (misaligned) and Lesp if prior is not C2I
  // |              |   |
  // :    unused    :   |---Space for max Java stack, plus stack alignment
  // |              |   |
  // +--------------+ <--- SP + 16*wordsize
  // |              |
  // :    window    :
  // |              |
  // +--------------+ <--- SP

  // WE REPACK THE STACK.  We use the common calling convention layout as
  // discovered by calling SharedRuntime::calling_convention.  We assume it
  // causes an arbitrary shuffle of memory, which may require some register
  // temps to do the shuffle.  We hope for (and optimize for) the case where
  // temps are not needed.  We may have to resize the stack slightly, in case
  // we need alignment padding (32-bit interpreter can pass longs & doubles
  // misaligned, but the compilers expect them aligned).
  //
  // |              |
  // :  java stack  :
  // |              |
  // +--------------+ <--- start of outgoing args
  // |  pad, align  |   |
  // +--------------+   |
  // | ints, longs, |   |
  // |    floats,   |   |---Outgoing stack args.
  // :    doubles   :   |   First few args in registers.
  // |              |   |
  // +--------------+ <--- SP' + 16*wordsize
  // |              |
  // :    window    :
  // |              |
  // +--------------+ <--- SP'

  // ON EXIT FROM THE CODE WE ARE MAKING, WE STILL HAVE AN INTERPRETED FRAME
  // WITH O7 HOLDING A VALID RETURN PC - IT'S JUST THAT THE ARGS ARE NOW SET UP
  // FOR COMPILED CODE AND THE FRAME SLIGHTLY GROWN.

  // Cut-out for having no stack args.  Since up to 6 args are passed
  // in registers, we will commonly have no stack args.
  if (comp_args_on_stack > 0) {
    // Convert VMReg stack slots to words.
    int comp_words_on_stack = round_to(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
    // Round up to minimum stack alignment, in wordSize
    comp_words_on_stack = round_to(comp_words_on_stack, 2);
    // Now compute the distance from Lesp to SP.  This calculation does not
    // include the space for total_args_passed because Lesp has not yet popped
    // the arguments.
    __ sub(SP, (comp_words_on_stack)*wordSize, SP);
  }

  // Now generate the shuffle code.  Pick up all register args and move the
  // rest through G1_scratch.
  for (int i = 0; i < total_args_passed; i++) {
    if (sig_bt[i] == T_VOID) {
      // Longs and doubles are passed in native word order, but misaligned
      // in the 32-bit build.
      assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
      continue;
    }

    // Pick up 0, 1 or 2 words from Lesp+offset.  Assume mis-aligned in the
    // 32-bit build and aligned in the 64-bit build.  Look for the obvious
    // ldx/lddf optimizations.

    // Load in argument order going down.
    const int ld_off = (total_args_passed-i)*Interpreter::stackElementSize;
    set_Rdisp(G1_scratch);

    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_stack()) {        // Pretend stack targets are loaded into F8/F9
      r_1 = F8->as_VMReg();        // as part of the load/store shuffle
      if (r_2->is_valid()) r_2 = r_1->next();
    }
    if (r_1->is_Register()) {  // Register argument
      Register r = r_1->as_Register()->after_restore();
      if (!r_2->is_valid()) {
        __ ld(Gargs, arg_slot(ld_off), r);
      } else {
#ifdef _LP64
        // In V9, longs are given 2 64-bit slots in the interpreter, but the
        // data is passed in only 1 slot.
        RegisterOrConstant slot = (sig_bt[i] == T_LONG) ?
              next_arg_slot(ld_off) : arg_slot(ld_off);
        __ ldx(Gargs, slot, r);
#else
        fatal("longs should be on stack");
#endif
      }
    } else {
      assert(r_1->is_FloatRegister(), "");
      if (!r_2->is_valid()) {
        __ ldf(FloatRegisterImpl::S, Gargs,      arg_slot(ld_off), r_1->as_FloatRegister());
      } else {
#ifdef _LP64
        // In V9, doubles are given 2 64-bit slots in the interpreter, but the
        // data is passed in only 1 slot.  This code also handles longs that
        // are passed on the stack, but need a stack-to-stack move through a
        // spare float register.
        RegisterOrConstant slot = (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) ?
              next_arg_slot(ld_off) : arg_slot(ld_off);
        __ ldf(FloatRegisterImpl::D, Gargs,                  slot, r_1->as_FloatRegister());
#else
        // Need to marshal 64-bit value from misaligned Lesp loads
        __ ldf(FloatRegisterImpl::S, Gargs, next_arg_slot(ld_off), r_1->as_FloatRegister());
        __ ldf(FloatRegisterImpl::S, Gargs,      arg_slot(ld_off), r_2->as_FloatRegister());
#endif
      }
    }
    // Was the argument really intended to be on the stack, but was loaded
    // into F8/F9?
    if (regs[i].first()->is_stack()) {
      assert(r_1->as_FloatRegister() == F8, "fix this code");
      // Convert stack slot to an SP offset
      int st_off = reg2offset(regs[i].first()) + STACK_BIAS;
      // Store down the shuffled stack word.  Target address _is_ aligned.
      RegisterOrConstant slot = __ ensure_simm13_or_reg(st_off, Rdisp);
      if (!r_2->is_valid()) __ stf(FloatRegisterImpl::S, r_1->as_FloatRegister(), SP, slot);
      else                  __ stf(FloatRegisterImpl::D, r_1->as_FloatRegister(), SP, slot);
    }
  }

  // Jump to the compiled code just as if compiled code was doing it.
  __ ld_ptr(G5_method, in_bytes(Method::from_compiled_offset()), G3);

  // 6243940 We might end up in handle_wrong_method if
  // the callee is deoptimized as we race thru here. If that
  // happens we don't want to take a safepoint because the
  // caller frame will look interpreted and arguments are now
  // "compiled" so it is much better to make this transition
  // invisible to the stack walking code. Unfortunately if
  // we try and find the callee by normal means a safepoint
  // is possible. So we stash the desired callee in the thread
  // and the vm will find it there should this case occur.
  Address callee_target_addr(G2_thread, JavaThread::callee_target_offset());
  __ st_ptr(G5_method, callee_target_addr);
  __ jmpl(G3, 0, G0);
  __ delayed()->nop();
}

// ---------------------------------------------------------------
AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
                                                            int total_args_passed,
                                                            // VMReg max_arg,
                                                            int comp_args_on_stack, // VMRegStackSlots
                                                            const BasicType *sig_bt,
                                                            const VMRegPair *regs,
                                                            AdapterFingerPrint* fingerprint) {
  address i2c_entry = __ pc();

  AdapterGenerator agen(masm);

  agen.gen_i2c_adapter(total_args_passed, comp_args_on_stack, sig_bt, regs);


  // -------------------------------------------------------------------------
  // Generate a C2I adapter.  On entry we know G5 holds the Method*.  The
  // args start out packed in the compiled layout.  They need to be unpacked
  // into the interpreter layout.  This will almost always require some stack
  // space.  We grow the current (compiled) stack, then repack the args.  We
  // finally end in a jump to the generic interpreter entry point.  On exit
  // from the interpreter, the interpreter will restore our SP (lest the
  // compiled code, which relies solely on SP and not FP, get sick).

  address c2i_unverified_entry = __ pc();
  Label L_skip_fixup;
  {
    Register R_temp = G1;  // another scratch register

    AddressLiteral ic_miss(SharedRuntime::get_ic_miss_stub());

    __ verify_oop(O0);
    __ load_klass(O0, G3_scratch);

    __ ld_ptr(G5_method, CompiledICHolder::holder_klass_offset(), R_temp);
    __ cmp(G3_scratch, R_temp);

    Label ok, ok2;
    __ brx(Assembler::equal, false, Assembler::pt, ok);
    __ delayed()->ld_ptr(G5_method, CompiledICHolder::holder_method_offset(), G5_method);
    __ jump_to(ic_miss, G3_scratch);
    __ delayed()->nop();

    __ bind(ok);
    // The method might have been compiled since the call site was patched to
    // interpreted; if that is the case treat it as a miss so we can get
    // the call site corrected.
    __ ld_ptr(G5_method, in_bytes(Method::code_offset()), G3_scratch);
    __ bind(ok2);
    __ br_null(G3_scratch, false, Assembler::pt, L_skip_fixup);
    __ delayed()->nop();
    __ jump_to(ic_miss, G3_scratch);
    __ delayed()->nop();

  }

  address c2i_entry = __ pc();

  agen.gen_c2i_adapter(total_args_passed, comp_args_on_stack, sig_bt, regs, L_skip_fixup);

  __ flush();
  return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);

}

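// (For context: generate_i2c2i_adapters is reached from shared runtime code --
// AdapterHandlerLibrary -- which caches one adapter blob per distinct argument
// fingerprint, so the three entry points above are generated once per signature
// shape rather than once per method.)
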
// Helper function for native calling conventions
static VMReg int_stk_helper( int i ) {
  // Bias any stack based VMReg we get by ignoring the window area
  // but not the register parameter save area.
  //
  // This is strange for the following reasons. We'd normally expect
  // the calling convention to return a VMReg for a stack slot
  // completely ignoring any abi reserved area. C2 thinks of that
  // abi area as only out_preserve_stack_slots. This does not include
  // the area allocated by the C abi to store down integer arguments
  // because the java calling convention does not use it. So
  // since c2 assumes that there are only out_preserve_stack_slots
  // to bias the optoregs (which impacts VMRegs), when referencing any
  // actual stack location the c calling convention must add in this
  // bias amount to make up for the fact that out_preserve_stack_slots
  // is insufficient for C calls. What a mess. I sure hope those 6
  // stack words were worth it on every java call!

  // Another way of cleaning this up would be for out_preserve_stack_slots
  // to take a parameter to say whether it was C or java calling conventions.
  // Then things might look a little better (but not much).

  int mem_parm_offset = i - SPARC_ARGS_IN_REGS_NUM;
  if( mem_parm_offset < 0 ) {
    return as_oRegister(i)->as_VMReg();
  } else {
    int actual_offset = (mem_parm_offset + frame::memory_parameter_word_sp_offset) * VMRegImpl::slots_per_word;
    // Now return a biased offset that will be correct when out_preserve_slots is added back in
    return VMRegImpl::stack2reg(actual_offset - SharedRuntime::out_preserve_stack_slots());
  }
}
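
// Worked example (illustrative only): int_stk_helper(0) through
// int_stk_helper(5) return O0..O5.  For i == 6, mem_parm_offset is 0 and the
// result is the stack slot at frame::memory_parameter_word_sp_offset words,
// minus out_preserve_stack_slots() -- i.e. a biased slot number that lands on
// the first memory-parameter word once callers add the preserve area back in.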


int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
                                         VMRegPair *regs,
                                         VMRegPair *regs2,
                                         int total_args_passed) {
    assert(regs2 == NULL, "not needed on sparc");

    // Return the number of VMReg stack_slots needed for the args.
    // This value does not include an abi space (like register window
    // save area).

    // The native convention is V8 if !LP64
    // The LP64 convention is the V9 convention which is slightly more sane.

    // We return the amount of VMReg stack slots we need to reserve for all
    // the arguments NOT counting out_preserve_stack_slots. Since we always
    // have space for storing at least 6 registers to memory we start with that.
    // See int_stk_helper for a further discussion.
    int max_stack_slots = (frame::varargs_offset * VMRegImpl::slots_per_word) - SharedRuntime::out_preserve_stack_slots();

#ifdef _LP64
    // V9 convention: All things "as-if" on double-wide stack slots.
    // Hoist any int/ptr/long's in the first 6 to int regs.
    // Hoist any flt/dbl's in the first 16 dbl regs.
    int j = 0;                  // Count of actual args, not HALVES
    VMRegPair param_array_reg;  // location of the argument in the parameter array
    for (int i = 0; i < total_args_passed; i++, j++) {
      param_array_reg.set_bad();
      switch (sig_bt[i]) {
      case T_BOOLEAN:
      case T_BYTE:
      case T_CHAR:
      case T_INT:
      case T_SHORT:
        regs[i].set1(int_stk_helper(j));
        break;
      case T_LONG:
        assert(sig_bt[i+1] == T_VOID, "expecting half");
      case T_ADDRESS: // raw pointers, like current thread, for VM calls
      case T_ARRAY:
      case T_OBJECT:
      case T_METADATA:
        regs[i].set2(int_stk_helper(j));
        break;
      case T_FLOAT:
        // Per SPARC Compliance Definition 2.4.1, page 3P-12 available here
        // http://www.sparc.org/wp-content/uploads/2014/01/SCD.2.4.1.pdf.gz
        //
        // "When a callee prototype exists, and does not indicate variable arguments,
        // floating-point values assigned to locations %sp+BIAS+128 through %sp+BIAS+248
        // will be promoted to floating-point registers"
        //
        // By "promoted" it means that the argument is located in two places, an unused
        // spill slot in the "parameter array" (starts at %sp+BIAS+128), and a live
        // float register.  In most cases, there are 6 or fewer arguments of any type,
        // and the standard parameter array slots (%sp+BIAS+128 to %sp+BIAS+176 exclusive)
        // serve as shadow slots.  Per the spec floating point registers %d6 to %d16
        // require slots beyond that (up to %sp+BIAS+248).
        //
        {
          // V9ism: floats go in ODD registers and stack slots
          int float_index = 1 + (j << 1);
          param_array_reg.set1(VMRegImpl::stack2reg(float_index));
          if (j < 16) {
            regs[i].set1(as_FloatRegister(float_index)->as_VMReg());
          } else {
            regs[i] = param_array_reg;
          }
        }
        break;
      case T_DOUBLE:
        {
          assert(sig_bt[i + 1] == T_VOID, "expecting half");
          // V9ism: doubles go in EVEN/ODD regs and stack slots
          int double_index = (j << 1);
          param_array_reg.set2(VMRegImpl::stack2reg(double_index));
          if (j < 16) {
            regs[i].set2(as_FloatRegister(double_index)->as_VMReg());
          } else {
            // V9ism: doubles go in EVEN/ODD stack slots
            regs[i] = param_array_reg;
          }
        }
        break;
      case T_VOID:
        regs[i].set_bad();
        j--;
        break; // Do not count HALVES
      default:
        ShouldNotReachHere();
      }
      // Keep track of the deepest parameter array slot.
      if (!param_array_reg.first()->is_valid()) {
        param_array_reg = regs[i];
      }
      if (param_array_reg.first()->is_stack()) {
        int off = param_array_reg.first()->reg2stack();
        if (off > max_stack_slots) max_stack_slots = off;
      }
      if (param_array_reg.second()->is_stack()) {
        int off = param_array_reg.second()->reg2stack();
        if (off > max_stack_slots) max_stack_slots = off;
      }
    }

#else // _LP64
    // V8 convention: first 6 things in O-regs, rest on stack.
    // Alignment is willy-nilly.
    for (int i = 0; i < total_args_passed; i++) {
      switch (sig_bt[i]) {
      case T_ADDRESS: // raw pointers, like current thread, for VM calls
      case T_ARRAY:
      case T_BOOLEAN:
      case T_BYTE:
      case T_CHAR:
      case T_FLOAT:
      case T_INT:
      case T_OBJECT:
      case T_METADATA:
      case T_SHORT:
        regs[i].set1(int_stk_helper(i));
        break;
      case T_DOUBLE:
      case T_LONG:
        assert(sig_bt[i + 1] == T_VOID, "expecting half");
        regs[i].set_pair(int_stk_helper(i + 1), int_stk_helper(i));
        break;
      case T_VOID: regs[i].set_bad(); break;
      default:
        ShouldNotReachHere();
      }
      if (regs[i].first()->is_stack()) {
        int off = regs[i].first()->reg2stack();
        if (off > max_stack_slots) max_stack_slots = off;
      }
      if (regs[i].second()->is_stack()) {
        int off = regs[i].second()->reg2stack();
        if (off > max_stack_slots) max_stack_slots = off;
      }
    }
#endif // _LP64

  return round_to(max_stack_slots + 1, 2);

}
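
// Worked example (illustrative only, LP64): for sig_bt = { T_INT, T_LONG,
// T_VOID, T_FLOAT, T_DOUBLE, T_VOID } the actual-argument index j runs
// 0, 1, 2, 3 over the four real values, giving
//   regs[0] = O0      (int,    int_stk_helper(0))
//   regs[1] = O1      (long,   int_stk_helper(1))
//   regs[3] = F5      (float,  odd single register at 1 + (2 << 1))
//   regs[4] = F6:F7   (double, even/odd pair at 3 << 1)
// with regs[2] and regs[5] set_bad() for the T_VOID halves.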
1251
1252
1253// ---------------------------------------------------------------------------
1254void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
1255  switch (ret_type) {
1256  case T_FLOAT:
1257    __ stf(FloatRegisterImpl::S, F0, SP, frame_slots*VMRegImpl::stack_slot_size - 4+STACK_BIAS);
1258    break;
1259  case T_DOUBLE:
1260    __ stf(FloatRegisterImpl::D, F0, SP, frame_slots*VMRegImpl::stack_slot_size - 8+STACK_BIAS);
1261    break;
1262  }
1263}
1264
1265void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
1266  switch (ret_type) {
1267  case T_FLOAT:
1268    __ ldf(FloatRegisterImpl::S, SP, frame_slots*VMRegImpl::stack_slot_size - 4+STACK_BIAS, F0);
1269    break;
1270  case T_DOUBLE:
1271    __ ldf(FloatRegisterImpl::D, SP, frame_slots*VMRegImpl::stack_slot_size - 8+STACK_BIAS, F0);
1272    break;
1273  }
1274}
1275
1276// Check and forward and pending exception.  Thread is stored in
1277// L7_thread_cache and possibly NOT in G2_thread.  Since this is a native call, there
1278// is no exception handler.  We merely pop this frame off and throw the
1279// exception in the caller's frame.
1280static void check_forward_pending_exception(MacroAssembler *masm, Register Rex_oop) {
1281  Label L;
1282  __ br_null(Rex_oop, false, Assembler::pt, L);
1283  __ delayed()->mov(L7_thread_cache, G2_thread); // restore in case we have exception
1284  // Since this is a native call, we *know* the proper exception handler
1285  // without calling into the VM: it's the empty function.  Just pop this
1286  // frame and then jump to forward_exception_entry; O7 will contain the
1287  // native caller's return PC.
1288 AddressLiteral exception_entry(StubRoutines::forward_exception_entry());
1289  __ jump_to(exception_entry, G3_scratch);
1290  __ delayed()->restore();      // Pop this frame off.
1291  __ bind(L);
1292}
1293
1294// A simple move of an integer-like type
1295static void simple_move32(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1296  if (src.first()->is_stack()) {
1297    if (dst.first()->is_stack()) {
1298      // stack to stack
1299      __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5);
1300      __ st(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
1301    } else {
1302      // stack to reg
1303      __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
1304    }
1305  } else if (dst.first()->is_stack()) {
1306    // reg to stack
1307    __ st(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
1308  } else {
1309    __ mov(src.first()->as_Register(), dst.first()->as_Register());
1310  }
1311}
1312
1313// On 64 bit we will store integer-like items to the stack as
1314// 64-bit items (SPARC ABI) even though Java would only store
1315// 32 bits for a parameter. On 32 bit it will simply be 32 bits.
1316// So this routine will do 32->32 on 32 bit and 32->64 on 64 bit.
1317static void move32_64(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1318  if (src.first()->is_stack()) {
1319    if (dst.first()->is_stack()) {
1320      // stack to stack
1321      __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5);
1322      __ st_ptr(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
1323    } else {
1324      // stack to reg
1325      __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
1326    }
1327  } else if (dst.first()->is_stack()) {
1328    // reg to stack
1329    __ st_ptr(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
1330  } else {
1331    __ mov(src.first()->as_Register(), dst.first()->as_Register());
1332  }
1333}
1334
1335
1336static void move_ptr(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1337  if (src.first()->is_stack()) {
1338    if (dst.first()->is_stack()) {
1339      // stack to stack
1340      __ ld_ptr(FP, reg2offset(src.first()) + STACK_BIAS, L5);
1341      __ st_ptr(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
1342    } else {
1343      // stack to reg
1344      __ ld_ptr(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
1345    }
1346  } else if (dst.first()->is_stack()) {
1347    // reg to stack
1348    __ st_ptr(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
1349  } else {
1350    __ mov(src.first()->as_Register(), dst.first()->as_Register());
1351  }
1352}
1353
1354
1355// An oop arg. Must pass a handle, not the oop itself.
1356static void object_move(MacroAssembler* masm,
1357                        OopMap* map,
1358                        int oop_handle_offset,
1359                        int framesize_in_slots,
1360                        VMRegPair src,
1361                        VMRegPair dst,
1362                        bool is_receiver,
1363                        int* receiver_offset) {
1364
1365  // must pass a handle. First figure out the location we use as a handle
1366
1367  if (src.first()->is_stack()) {
1368    // Oop is already on the stack
1369    Register rHandle = dst.first()->is_stack() ? L5 : dst.first()->as_Register();
1370    __ add(FP, reg2offset(src.first()) + STACK_BIAS, rHandle);
1371    __ ld_ptr(rHandle, 0, L4);
1372#ifdef _LP64
1373    __ movr( Assembler::rc_z, L4, G0, rHandle );
1374#else
1375    __ tst( L4 );
1376    __ movcc( Assembler::zero, false, Assembler::icc, G0, rHandle );
1377#endif
1378    if (dst.first()->is_stack()) {
1379      __ st_ptr(rHandle, SP, reg2offset(dst.first()) + STACK_BIAS);
1380    }
1381    int offset_in_older_frame = src.first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
1382    if (is_receiver) {
1383      *receiver_offset = (offset_in_older_frame + framesize_in_slots) * VMRegImpl::stack_slot_size;
1384    }
1385    map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots));
1386  } else {
1387    // Oop is in an input register; we must flush it to the stack
1388    const Register rOop = src.first()->as_Register();
1389    const Register rHandle = L5;
1390    int oop_slot = rOop->input_number() * VMRegImpl::slots_per_word + oop_handle_offset;
1391    int offset = oop_slot * VMRegImpl::stack_slot_size;
1392    __ st_ptr(rOop, SP, offset + STACK_BIAS);
1393    if (is_receiver) {
1394       *receiver_offset = offset;
1395    }
1396    map->set_oop(VMRegImpl::stack2reg(oop_slot));
1397    __ add(SP, offset + STACK_BIAS, rHandle);
1398#ifdef _LP64
1399    __ movr( Assembler::rc_z, rOop, G0, rHandle );
1400#else
1401    __ tst( rOop );
1402    __ movcc( Assembler::zero, false, Assembler::icc, G0, rHandle );
1403#endif
1404
1405    if (dst.first()->is_stack()) {
1406      __ st_ptr(rHandle, SP, reg2offset(dst.first()) + STACK_BIAS);
1407    } else {
1408      __ mov(rHandle, dst.first()->as_Register());
1409    }
1410  }
1411}
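// In effect the code above does the following (a C-level sketch of the
// handlizing, not code that exists anywhere in the VM):
//
//   oop  v    = <the incoming oop argument>;
//   oop* slot = <the SP- or FP-relative slot recorded in the OopMap>;
//   *slot = v;
//   jobject arg = (v == NULL) ? NULL : (jobject)slot;
//
// The native callee therefore receives either NULL or a pointer to a slot
// that the GC knows about and may update.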
1412
1413// A float arg may have to move between a float reg and an int reg
1414static void float_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1415  assert(!src.second()->is_valid() && !dst.second()->is_valid(), "bad float_move");
1416
1417  if (src.first()->is_stack()) {
1418    if (dst.first()->is_stack()) {
1419      // stack to stack the easiest of the bunch
1420      __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5);
1421      __ st(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
1422    } else {
1423      // stack to reg
1424      if (dst.first()->is_Register()) {
1425        __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
1426      } else {
1427        __ ldf(FloatRegisterImpl::S, FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_FloatRegister());
1428      }
1429    }
1430  } else if (dst.first()->is_stack()) {
1431    // reg to stack
1432    if (src.first()->is_Register()) {
1433      __ st(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
1434    } else {
1435      __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(), SP, reg2offset(dst.first()) + STACK_BIAS);
1436    }
1437  } else {
1438    // reg to reg
1439    if (src.first()->is_Register()) {
1440      if (dst.first()->is_Register()) {
1441        // gpr -> gpr
1442        __ mov(src.first()->as_Register(), dst.first()->as_Register());
1443      } else {
1444        // gpr -> fpr
1445        __ st(src.first()->as_Register(), FP, -4 + STACK_BIAS);
1446        __ ldf(FloatRegisterImpl::S, FP, -4 + STACK_BIAS, dst.first()->as_FloatRegister());
1447      }
1448    } else if (dst.first()->is_Register()) {
1449      // fpr -> gpr
1450      __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(), FP, -4 + STACK_BIAS);
1451      __ ld(FP, -4 + STACK_BIAS, dst.first()->as_Register());
1452    } else {
1453      // fpr -> fpr
1454      // In theory these overlap but the ordering is such that this is likely a nop
1455      if ( src.first() != dst.first()) {
1456        __ fmov(FloatRegisterImpl::S, src.first()->as_FloatRegister(), dst.first()->as_FloatRegister());
1457      }
1458    }
1459  }
1460}
1461
1462static void split_long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1463  VMRegPair src_lo(src.first());
1464  VMRegPair src_hi(src.second());
1465  VMRegPair dst_lo(dst.first());
1466  VMRegPair dst_hi(dst.second());
1467  simple_move32(masm, src_lo, dst_lo);
1468  simple_move32(masm, src_hi, dst_hi);
1469}
1470
1471// A long move
1472static void long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1473
1474  // Do the simple ones here else do two int moves
1475  if (src.is_single_phys_reg() ) {
1476    if (dst.is_single_phys_reg()) {
1477      __ mov(src.first()->as_Register(), dst.first()->as_Register());
1478    } else {
1479      // split src into two separate registers
1480      // Remember hi means high address, which is the lsw on (big-endian) sparc
1481      // Move msw to lsw
1482      if (dst.second()->is_reg()) {
1483        // MSW -> MSW
1484        __ srax(src.first()->as_Register(), 32, dst.first()->as_Register());
1485        // Now LSW -> LSW
1486        // this will only move lo -> lo and ignore hi
1487        VMRegPair split(dst.second());
1488        simple_move32(masm, src, split);
1489      } else {
1490        VMRegPair split(src.first(), L4->as_VMReg());
1491        // MSW -> MSW (lo ie. first word)
1492        __ srax(src.first()->as_Register(), 32, L4);
1493        split_long_move(masm, split, dst);
1494      }
1495    }
1496  } else if (dst.is_single_phys_reg()) {
1497    if (src.is_adjacent_aligned_on_stack(2)) {
1498      __ ldx(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
1499    } else {
1500      // dst is a single reg.
1501      // Remember lo is low address not msb for stack slots
1502      // and lo is the "real" register for registers
1503      // src is split: two regs, a reg and a stack slot, or two stack slots
1504
1505      VMRegPair split;
1506
1507      if (src.first()->is_reg()) {
1508        // src.lo (msw) is a reg, src.hi is stk/reg
1509        // we will move: src.hi (LSW) -> dst.lo, src.lo (MSW) -> src.lo [the MSW is in the LSW of the reg]
1510        split.set_pair(dst.first(), src.first());
1511      } else {
1512        // msw is stack move to L5
1513        // lsw is stack move to dst.lo (real reg)
1514        // we will move: src.hi (LSW) -> dst.lo, src.lo (MSW) -> L5
1515        split.set_pair(dst.first(), L5->as_VMReg());
1516      }
1517
1518      // src.lo -> src.lo/L5, src.hi -> dst.lo (the real reg)
1519      // msw   -> src.lo/L5,  lsw -> dst.lo
1520      split_long_move(masm, src, split);
1521
1522      // dst.lo now holds the LSW in its low 32 bits; shift the MSW
1523      // half into the high 32 bits and OR it in.
1524      __ sllx(split.first()->as_Register(), 32, L5);
1525
1526      const Register d = dst.first()->as_Register();
1527      __ or3(L5, d, d);
1528    }
1529  } else {
1530    // For LP64 we can probably do better.
1531    split_long_move(masm, src, dst);
1532  }
1533}
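// For example, splitting a 64-bit value held in a single register into a
// (first = MSW, second = LSW) register pair boils down to this pattern (a
// sketch of the instructions emitted above):
//   srax src, 32, dst_first    ! arithmetic shift brings the MSW down
//   mov  src, dst_second       ! the LSW already sits in the low 32 bits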
1534
1535// A double move
1536static void double_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1537
1538  // The painful thing here is that like long_move a VMRegPair might be
1539  // 1: a single physical register
1540  // 2: two physical registers (v8)
1541  // 3: a physical reg [lo] and a stack slot [hi] (v8)
1542  // 4: two stack slots
1543
1544  // Since src always comes from the java calling convention we know that
1545  // the src pair is always either all registers or all stack (and aligned?)
1546
1547  // First the cases where src is on the stack.
1548  if (src.first()->is_stack()) {
1549    if (dst.first()->is_stack()) {
1550      // stack to stack the easiest of the bunch
1551      // ought to be a way to use ldd/std here when alignment is ok
1552      __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5);
1553      __ ld(FP, reg2offset(src.second()) + STACK_BIAS, L4);
1554      __ st(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
1555      __ st(L4, SP, reg2offset(dst.second()) + STACK_BIAS);
1556    } else {
1557      // stack to reg
1558      if (dst.second()->is_stack()) {
1559        // stack -> reg, stack -> stack
1560        __ ld(FP, reg2offset(src.second()) + STACK_BIAS, L4);
1561        if (dst.first()->is_Register()) {
1562          __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
1563        } else {
1564          __ ldf(FloatRegisterImpl::S, FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_FloatRegister());
1565        }
1566        // This was missing. (very rare case)
1567        __ st(L4, SP, reg2offset(dst.second()) + STACK_BIAS);
1568      } else {
1569        // stack -> reg
1570        // Eventually optimize for alignment QQQ
1571        if (dst.first()->is_Register()) {
1572          __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
1573          __ ld(FP, reg2offset(src.second()) + STACK_BIAS, dst.second()->as_Register());
1574        } else {
1575          __ ldf(FloatRegisterImpl::S, FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_FloatRegister());
1576          __ ldf(FloatRegisterImpl::S, FP, reg2offset(src.second()) + STACK_BIAS, dst.second()->as_FloatRegister());
1577        }
1578      }
1579    }
1580  } else if (dst.first()->is_stack()) {
1581    // reg to stack
1582    if (src.first()->is_Register()) {
1583      // Eventually optimize for alignment QQQ
1584      __ st(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
1585      if (src.second()->is_stack()) {
1586        __ ld(FP, reg2offset(src.second()) + STACK_BIAS, L4);
1587        __ st(L4, SP, reg2offset(dst.second()) + STACK_BIAS);
1588      } else {
1589        __ st(src.second()->as_Register(), SP, reg2offset(dst.second()) + STACK_BIAS);
1590      }
1591    } else {
1592      // fpr to stack
1593      if (src.second()->is_stack()) {
1594        ShouldNotReachHere();
1595      } else {
1596        // Is the stack aligned?
1597        if (reg2offset(dst.first()) & 0x7) {
1598          // No; do as pairs
1599          __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(), SP, reg2offset(dst.first()) + STACK_BIAS);
1600          __ stf(FloatRegisterImpl::S, src.second()->as_FloatRegister(), SP, reg2offset(dst.second()) + STACK_BIAS);
1601        } else {
1602          __ stf(FloatRegisterImpl::D, src.first()->as_FloatRegister(), SP, reg2offset(dst.first()) + STACK_BIAS);
1603        }
1604      }
1605    }
1606  } else {
1607    // reg to reg
1608    if (src.first()->is_Register()) {
1609      if (dst.first()->is_Register()) {
1610        // gpr -> gpr
1611        __ mov(src.first()->as_Register(), dst.first()->as_Register());
1612        __ mov(src.second()->as_Register(), dst.second()->as_Register());
1613      } else {
1614        // gpr -> fpr
1615        // ought to be able to do a single store
1616        __ stx(src.first()->as_Register(), FP, -8 + STACK_BIAS);
1617        __ stx(src.second()->as_Register(), FP, -4 + STACK_BIAS);
1618        // ought to be able to do a single load
1619        __ ldf(FloatRegisterImpl::S, FP, -8 + STACK_BIAS, dst.first()->as_FloatRegister());
1620        __ ldf(FloatRegisterImpl::S, FP, -4 + STACK_BIAS, dst.second()->as_FloatRegister());
1621      }
1622    } else if (dst.first()->is_Register()) {
1623      // fpr -> gpr
1624      // ought to be able to do a single store
1625      __ stf(FloatRegisterImpl::D, src.first()->as_FloatRegister(), FP, -8 + STACK_BIAS);
1626      // ought to be able to do a single load
1627      // REMEMBER first() is low address not LSB
1628      __ ld(FP, -8 + STACK_BIAS, dst.first()->as_Register());
1629      if (dst.second()->is_Register()) {
1630        __ ld(FP, -4 + STACK_BIAS, dst.second()->as_Register());
1631      } else {
1632        __ ld(FP, -4 + STACK_BIAS, L4);
1633        __ st(L4, SP, reg2offset(dst.second()) + STACK_BIAS);
1634      }
1635    } else {
1636      // fpr -> fpr
1637      // In theory these overlap but the ordering is such that this is likely a nop
1638      if ( src.first() != dst.first()) {
1639        __ fmov(FloatRegisterImpl::D, src.first()->as_FloatRegister(), dst.first()->as_FloatRegister());
1640      }
1641    }
1642  }
1643}
1644
1645// Creates an inner frame if one hasn't already been created, and
1646// saves a copy of the thread in L7_thread_cache
1647static void create_inner_frame(MacroAssembler* masm, bool* already_created) {
1648  if (!*already_created) {
1649    __ save_frame(0);
1650    // Save thread in L7 (INNER FRAME); it crosses a bunch of VM calls below
1651    // Don't use save_thread because it smashes G2 and we merely want to save a
1652    // copy
1653    __ mov(G2_thread, L7_thread_cache);
1654    *already_created = true;
1655  }
1656}
1657
1658
1659static void save_or_restore_arguments(MacroAssembler* masm,
1660                                      const int stack_slots,
1661                                      const int total_in_args,
1662                                      const int arg_save_area,
1663                                      OopMap* map,
1664                                      VMRegPair* in_regs,
1665                                      BasicType* in_sig_bt) {
1666  // if map is non-NULL then the code should store the values,
1667  // otherwise it should load them.
1668  if (map != NULL) {
1669    // Fill in the map
1670    for (int i = 0; i < total_in_args; i++) {
1671      if (in_sig_bt[i] == T_ARRAY) {
1672        if (in_regs[i].first()->is_stack()) {
1673          int offset_in_older_frame = in_regs[i].first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
1674          map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + stack_slots));
1675        } else if (in_regs[i].first()->is_Register()) {
1676          map->set_oop(in_regs[i].first());
1677        } else {
1678          ShouldNotReachHere();
1679        }
1680      }
1681    }
1682  }
1683
1684  // Save or restore double word values
1685  int handle_index = 0;
1686  for (int i = 0; i < total_in_args; i++) {
1687    int slot = handle_index + arg_save_area;
1688    int offset = slot * VMRegImpl::stack_slot_size;
1689    if (in_sig_bt[i] == T_LONG && in_regs[i].first()->is_Register()) {
1690      const Register reg = in_regs[i].first()->as_Register();
1691      if (reg->is_global()) {
1692        handle_index += 2;
1693        assert(handle_index <= stack_slots, "overflow");
1694        if (map != NULL) {
1695          __ stx(reg, SP, offset + STACK_BIAS);
1696        } else {
1697          __ ldx(SP, offset + STACK_BIAS, reg);
1698        }
1699      }
1700    } else if (in_sig_bt[i] == T_DOUBLE && in_regs[i].first()->is_FloatRegister()) {
1701      handle_index += 2;
1702      assert(handle_index <= stack_slots, "overflow");
1703      if (map != NULL) {
1704        __ stf(FloatRegisterImpl::D, in_regs[i].first()->as_FloatRegister(), SP, offset + STACK_BIAS);
1705      } else {
1706        __ ldf(FloatRegisterImpl::D, SP, offset + STACK_BIAS, in_regs[i].first()->as_FloatRegister());
1707      }
1708    }
1709  }
1710  // Save floats
1711  for (int i = 0; i < total_in_args; i++) {
1712    int slot = handle_index + arg_save_area;
1713    int offset = slot * VMRegImpl::stack_slot_size;
1714    if (in_sig_bt[i] == T_FLOAT && in_regs[i].first()->is_FloatRegister()) {
1715      handle_index++;
1716      assert(handle_index <= stack_slots, "overflow");
1717      if (map != NULL) {
1718        __ stf(FloatRegisterImpl::S, in_regs[i].first()->as_FloatRegister(), SP, offset + STACK_BIAS);
1719      } else {
1720        __ ldf(FloatRegisterImpl::S, SP, offset + STACK_BIAS, in_regs[i].first()->as_FloatRegister());
1721      }
1722    }
1723  }
1724
1725}
1726
1727
1728// Check GC_locker::needs_gc and enter the runtime if it's true.  This
1729// keeps a new JNI critical region from starting until a GC has been
1730// forced.  Save down any oops in registers and describe them in an
1731// OopMap.
1732static void check_needs_gc_for_critical_native(MacroAssembler* masm,
1733                                               const int stack_slots,
1734                                               const int total_in_args,
1735                                               const int arg_save_area,
1736                                               OopMapSet* oop_maps,
1737                                               VMRegPair* in_regs,
1738                                               BasicType* in_sig_bt) {
1739  __ block_comment("check GC_locker::needs_gc");
1740  Label cont;
1741  AddressLiteral sync_state(GC_locker::needs_gc_address());
1742  __ load_bool_contents(sync_state, G3_scratch);
1743  __ cmp_zero_and_br(Assembler::equal, G3_scratch, cont);
1744  __ delayed()->nop();
1745
1746  // Save down any values that are live in registers and call into the
1747  // runtime to halt for a GC
1748  OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
1749  save_or_restore_arguments(masm, stack_slots, total_in_args,
1750                            arg_save_area, map, in_regs, in_sig_bt);
1751
1752  __ mov(G2_thread, L7_thread_cache);
1753
1754  __ set_last_Java_frame(SP, noreg);
1755
1756  __ block_comment("block_for_jni_critical");
1757  __ call(CAST_FROM_FN_PTR(address, SharedRuntime::block_for_jni_critical), relocInfo::runtime_call_type);
1758  __ delayed()->mov(L7_thread_cache, O0);
1759  oop_maps->add_gc_map( __ offset(), map);
1760
1761  __ restore_thread(L7_thread_cache); // restore G2_thread
1762  __ reset_last_Java_frame();
1763
1764  // Reload all the register arguments
1765  save_or_restore_arguments(masm, stack_slots, total_in_args,
1766                            arg_save_area, NULL, in_regs, in_sig_bt);
1767
1768  __ bind(cont);
1769#ifdef ASSERT
1770  if (StressCriticalJNINatives) {
1771    // Stress register saving
1772    OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
1773    save_or_restore_arguments(masm, stack_slots, total_in_args,
1774                              arg_save_area, map, in_regs, in_sig_bt);
1775    // Destroy argument registers
1776    for (int i = 0; i < total_in_args; i++) {
1777      if (in_regs[i].first()->is_Register()) {
1778        const Register reg = in_regs[i].first()->as_Register();
1779        if (reg->is_global()) {
1780          __ mov(G0, reg);
1781        }
1782      } else if (in_regs[i].first()->is_FloatRegister()) {
1783        __ fneg(FloatRegisterImpl::D, in_regs[i].first()->as_FloatRegister(), in_regs[i].first()->as_FloatRegister());
1784      }
1785    }
1786
1787    save_or_restore_arguments(masm, stack_slots, total_in_args,
1788                              arg_save_area, NULL, in_regs, in_sig_bt);
1789  }
1790#endif
1791}
1792
1793// Unpack an array argument into a pointer to the body and the length
1794// if the array is non-null, otherwise pass 0 for both.
1795static void unpack_array_argument(MacroAssembler* masm, VMRegPair reg, BasicType in_elem_type, VMRegPair body_arg, VMRegPair length_arg) {
1796  // Pass the length, ptr pair
1797  Label is_null, done;
1798  if (reg.first()->is_stack()) {
1799    VMRegPair tmp  = reg64_to_VMRegPair(L2);
1800    // Load the arg up from the stack
1801    move_ptr(masm, reg, tmp);
1802    reg = tmp;
1803  }
1804  __ cmp(reg.first()->as_Register(), G0);
1805  __ brx(Assembler::equal, false, Assembler::pt, is_null);
1806  __ delayed()->add(reg.first()->as_Register(), arrayOopDesc::base_offset_in_bytes(in_elem_type), L4);
1807  move_ptr(masm, reg64_to_VMRegPair(L4), body_arg);
1808  __ ld(reg.first()->as_Register(), arrayOopDesc::length_offset_in_bytes(), L4);
1809  move32_64(masm, reg64_to_VMRegPair(L4), length_arg);
1810  __ ba_short(done);
1811  __ bind(is_null);
1812  // Pass zeros
1813  move_ptr(masm, reg64_to_VMRegPair(G0), body_arg);
1814  move32_64(masm, reg64_to_VMRegPair(G0), length_arg);
1815  __ bind(done);
1816}
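// Net effect for a critical native: an array argument such as a byte[]
// reaches the native entry point as a (length, body) pair, roughly as if
// the C function were declared (hypothetical name and signature):
//   void JavaCritical_foo(jint length, jbyte* body);
// with (0, NULL) passed when the array is null.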
1817
1818static void verify_oop_args(MacroAssembler* masm,
1819                            methodHandle method,
1820                            const BasicType* sig_bt,
1821                            const VMRegPair* regs) {
1822  Register temp_reg = G5_method;  // not part of any compiled calling seq
1823  if (VerifyOops) {
1824    for (int i = 0; i < method->size_of_parameters(); i++) {
1825      if (sig_bt[i] == T_OBJECT ||
1826          sig_bt[i] == T_ARRAY) {
1827        VMReg r = regs[i].first();
1828        assert(r->is_valid(), "bad oop arg");
1829        if (r->is_stack()) {
1830          RegisterOrConstant ld_off = reg2offset(r) + STACK_BIAS;
1831          ld_off = __ ensure_simm13_or_reg(ld_off, temp_reg);
1832          __ ld_ptr(SP, ld_off, temp_reg);
1833          __ verify_oop(temp_reg);
1834        } else {
1835          __ verify_oop(r->as_Register());
1836        }
1837      }
1838    }
1839  }
1840}
1841
1842static void gen_special_dispatch(MacroAssembler* masm,
1843                                 methodHandle method,
1844                                 const BasicType* sig_bt,
1845                                 const VMRegPair* regs) {
1846  verify_oop_args(masm, method, sig_bt, regs);
1847  vmIntrinsics::ID iid = method->intrinsic_id();
1848
1849  // Now write the args into the outgoing interpreter space
1850  bool     has_receiver   = false;
1851  Register receiver_reg   = noreg;
1852  int      member_arg_pos = -1;
1853  Register member_reg     = noreg;
1854  int      ref_kind       = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid);
1855  if (ref_kind != 0) {
1856    member_arg_pos = method->size_of_parameters() - 1;  // trailing MemberName argument
1857    member_reg = G5_method;  // known to be free at this point
1858    has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
1859  } else if (iid == vmIntrinsics::_invokeBasic) {
1860    has_receiver = true;
1861  } else {
1862    fatal(err_msg_res("unexpected intrinsic id %d", iid));
1863  }
1864
1865  if (member_reg != noreg) {
1866    // Load the member_arg into register, if necessary.
1867    SharedRuntime::check_member_name_argument_is_last_argument(method, sig_bt, regs);
1868    VMReg r = regs[member_arg_pos].first();
1869    if (r->is_stack()) {
1870      RegisterOrConstant ld_off = reg2offset(r) + STACK_BIAS;
1871      ld_off = __ ensure_simm13_or_reg(ld_off, member_reg);
1872      __ ld_ptr(SP, ld_off, member_reg);
1873    } else {
1874      // no data motion is needed
1875      member_reg = r->as_Register();
1876    }
1877  }
1878
1879  if (has_receiver) {
1880    // Make sure the receiver is loaded into a register.
1881    assert(method->size_of_parameters() > 0, "oob");
1882    assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object");
1883    VMReg r = regs[0].first();
1884    assert(r->is_valid(), "bad receiver arg");
1885    if (r->is_stack()) {
1886      // Porting note:  This assumes that compiled calling conventions always
1887      // pass the receiver oop in a register.  If this is not true on some
1888      // platform, pick a temp and load the receiver from stack.
1889      fatal("receiver always in a register");
1890      receiver_reg = G3_scratch;  // known to be free at this point
1891      RegisterOrConstant ld_off = reg2offset(r) + STACK_BIAS;
1892      ld_off = __ ensure_simm13_or_reg(ld_off, member_reg);
1893      __ ld_ptr(SP, ld_off, receiver_reg);
1894    } else {
1895      // no data motion is needed
1896      receiver_reg = r->as_Register();
1897    }
1898  }
1899
1900  // Figure out which address we are really jumping to:
1901  MethodHandles::generate_method_handle_dispatch(masm, iid,
1902                                                 receiver_reg, member_reg, /*for_compiler_entry:*/ true);
1903}
1904
1905// ---------------------------------------------------------------------------
1906// Generate a native wrapper for a given method.  The method takes arguments
1907// in the Java compiled code convention, marshals them to the native
1908// convention (handlizes oops, etc), transitions to native, makes the call,
1909// returns to java state (possibly blocking), unhandlizes any result and
1910// returns.
1911//
1912// Critical native functions are a shorthand for the use of
1913// GetPrimitiveArrayCritical and disallow the use of any other JNI
1914// functions.  The wrapper is expected to unpack the arguments before
1915// passing them to the callee and perform checks before and after the
1916// native call to ensure that the GC_locker
1917// lock_critical/unlock_critical semantics are followed.  Some other
1918// parts of JNI setup are skipped, like the tear-down of the JNI handle
1919// block and the check for pending exceptions, since it's impossible for
1920// them to be thrown.
1921//
1922// They are roughly structured like this:
1923//    if (GC_locker::needs_gc())
1924//      SharedRuntime::block_for_jni_critical();
1925//    transition to thread_in_native
1926//    unpack array arguments and call native entry point
1927//    check for safepoint in progress
1928//    check if any thread suspend flags are set
1929//      call into JVM and possibly unlock the JNI critical
1930//      if a GC was suppressed while in the critical native.
1931//    transition back to thread_in_Java
1932//    return to caller
1933//
1934nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
1935                                                methodHandle method,
1936                                                int compile_id,
1937                                                BasicType* in_sig_bt,
1938                                                VMRegPair* in_regs,
1939                                                BasicType ret_type) {
1940  if (method->is_method_handle_intrinsic()) {
1941    vmIntrinsics::ID iid = method->intrinsic_id();
1942    intptr_t start = (intptr_t)__ pc();
1943    int vep_offset = ((intptr_t)__ pc()) - start;
1944    gen_special_dispatch(masm,
1945                         method,
1946                         in_sig_bt,
1947                         in_regs);
1948    int frame_complete = ((intptr_t)__ pc()) - start;  // not complete, period
1949    __ flush();
1950    int stack_slots = SharedRuntime::out_preserve_stack_slots();  // no out slots at all, actually
1951    return nmethod::new_native_nmethod(method,
1952                                       compile_id,
1953                                       masm->code(),
1954                                       vep_offset,
1955                                       frame_complete,
1956                                       stack_slots / VMRegImpl::slots_per_word,
1957                                       in_ByteSize(-1),
1958                                       in_ByteSize(-1),
1959                                       (OopMapSet*)NULL);
1960  }
1961  bool is_critical_native = true;
1962  address native_func = method->critical_native_function();
1963  if (native_func == NULL) {
1964    native_func = method->native_function();
1965    is_critical_native = false;
1966  }
1967  assert(native_func != NULL, "must have function");
1968
1969  // Native nmethod wrappers never take possession of the oop arguments.
1970  // So the caller will gc the arguments.  The only thing we need an
1971  // oopMap for is if the call is static.
1972  //
1973  // An OopMap for lock (and class if static), and one for the VM call itself
1974  OopMapSet *oop_maps = new OopMapSet();
1975  intptr_t start = (intptr_t)__ pc();
1976
1977  // First thing make an ic check to see if we should even be here
1978  {
1979    Label L;
1980    const Register temp_reg = G3_scratch;
1981    AddressLiteral ic_miss(SharedRuntime::get_ic_miss_stub());
1982    __ verify_oop(O0);
1983    __ load_klass(O0, temp_reg);
1984    __ cmp_and_brx_short(temp_reg, G5_inline_cache_reg, Assembler::equal, Assembler::pt, L);
1985
1986    __ jump_to(ic_miss, temp_reg);
1987    __ delayed()->nop();
1988    __ align(CodeEntryAlignment);
1989    __ bind(L);
1990  }
1991
1992  int vep_offset = ((intptr_t)__ pc()) - start;
1993
1994#ifdef COMPILER1
1995  if (InlineObjectHash && method->intrinsic_id() == vmIntrinsics::_hashCode) {
1996    // Object.hashCode can pull the hashCode from the header word
1997    // instead of doing a full VM transition once it's been computed.
1998    // Since hashCode is usually polymorphic at call sites we can't do
1999    // this optimization at the call site without a lot of work.
2000    Label slowCase;
2001    Register receiver             = O0;
2002    Register result               = O0;
2003    Register header               = G3_scratch;
2004    Register hash                 = G3_scratch; // overwrite header value with hash value
2005    Register mask                 = G1;         // to get hash field from header
2006
2007    // Read the header and build a mask to get its hash field.  Give up if the object is not unlocked.
2008    // We depend on hash_mask being at most 32 bits and avoid the use of
2009    // hash_mask_in_place because it could be larger than 32 bits in a 64-bit
2010    // vm: see markOop.hpp.
2011    __ ld_ptr(receiver, oopDesc::mark_offset_in_bytes(), header);
2012    __ sethi(markOopDesc::hash_mask, mask);
2013    __ btst(markOopDesc::unlocked_value, header);
2014    __ br(Assembler::zero, false, Assembler::pn, slowCase);
2015    if (UseBiasedLocking) {
2016      // Check if biased and fall through to runtime if so
2017      __ delayed()->nop();
2018      __ btst(markOopDesc::biased_lock_bit_in_place, header);
2019      __ br(Assembler::notZero, false, Assembler::pn, slowCase);
2020    }
2021    __ delayed()->or3(mask, markOopDesc::hash_mask & 0x3ff, mask);
2022
2023    // Check for a valid (non-zero) hash code and get its value.
2024#ifdef _LP64
2025    __ srlx(header, markOopDesc::hash_shift, hash);
2026#else
2027    __ srl(header, markOopDesc::hash_shift, hash);
2028#endif
2029    __ andcc(hash, mask, hash);
2030    __ br(Assembler::equal, false, Assembler::pn, slowCase);
2031    __ delayed()->nop();
2032
2033    // leaf return.
2034    __ retl();
2035    __ delayed()->mov(hash, result);
2036    __ bind(slowCase);
2037  }
2038#endif // COMPILER1
2039
2040
2041  // We have received a description of where all the java args are located
2042  // on entry to the wrapper. We need to convert these args to where
2043  // the jni function will expect them. To figure out where they go
2044  // we convert the java signature to a C signature by inserting
2045  // the hidden arguments as arg[0] and possibly arg[1] (static method)
2046
2047  const int total_in_args = method->size_of_parameters();
2048  int total_c_args = total_in_args;
2049  int total_save_slots = 6 * VMRegImpl::slots_per_word;
2050  if (!is_critical_native) {
2051    total_c_args += 1;
2052    if (method->is_static()) {
2053      total_c_args++;
2054    }
2055  } else {
2056    for (int i = 0; i < total_in_args; i++) {
2057      if (in_sig_bt[i] == T_ARRAY) {
2058        // These have to be saved and restored across the safepoint
2059        total_c_args++;
2060      }
2061    }
2062  }
2063
2064  BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
2065  VMRegPair* out_regs   = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
2066  BasicType* in_elem_bt = NULL;
2067
2068  int argc = 0;
2069  if (!is_critical_native) {
2070    out_sig_bt[argc++] = T_ADDRESS;
2071    if (method->is_static()) {
2072      out_sig_bt[argc++] = T_OBJECT;
2073    }
2074
2075    for (int i = 0; i < total_in_args ; i++ ) {
2076      out_sig_bt[argc++] = in_sig_bt[i];
2077    }
2078  } else {
2079    Thread* THREAD = Thread::current();
2080    in_elem_bt = NEW_RESOURCE_ARRAY(BasicType, total_in_args);
2081    SignatureStream ss(method->signature());
2082    for (int i = 0; i < total_in_args ; i++ ) {
2083      if (in_sig_bt[i] == T_ARRAY) {
2084        // Arrays are passed as int, elem* pair
2085        out_sig_bt[argc++] = T_INT;
2086        out_sig_bt[argc++] = T_ADDRESS;
2087        Symbol* atype = ss.as_symbol(CHECK_NULL);
2088        const char* at = atype->as_C_string();
2089        if (strlen(at) == 2) {
2090          assert(at[0] == '[', "must be");
2091          switch (at[1]) {
2092            case 'B': in_elem_bt[i]  = T_BYTE; break;
2093            case 'C': in_elem_bt[i]  = T_CHAR; break;
2094            case 'D': in_elem_bt[i]  = T_DOUBLE; break;
2095            case 'F': in_elem_bt[i]  = T_FLOAT; break;
2096            case 'I': in_elem_bt[i]  = T_INT; break;
2097            case 'J': in_elem_bt[i]  = T_LONG; break;
2098            case 'S': in_elem_bt[i]  = T_SHORT; break;
2099            case 'Z': in_elem_bt[i]  = T_BOOLEAN; break;
2100            default: ShouldNotReachHere();
2101          }
2102        }
2103      } else {
2104        out_sig_bt[argc++] = in_sig_bt[i];
2105        in_elem_bt[i] = T_VOID;
2106      }
2107      if (in_sig_bt[i] != T_VOID) {
2108        assert(in_sig_bt[i] == ss.type(), "must match");
2109        ss.next();
2110      }
2111    }
2112  }
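  // For example, a critical native with Java signature (byte[], int) ends up
  // with the out signature [T_INT, T_ADDRESS, T_INT]: the array becomes a
  // (length, body) pair and no JNIEnv* or class mirror is prepended.  (An
  // illustration only; the registers are assigned by c_calling_convention
  // below.)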
2113
2114  // Now figure out where the args must be stored and how much stack space
2115  // they require (neglecting out_preserve_stack_slots but including space
2116  // for storing the 1st six register arguments). It's weird; see int_stk_helper.
2117  //
2118  int out_arg_slots;
2119  out_arg_slots = c_calling_convention(out_sig_bt, out_regs, NULL, total_c_args);
2120
2121  if (is_critical_native) {
2122    // Critical natives may have to call out so they need a save area
2123    // for register arguments.
2124    int double_slots = 0;
2125    int single_slots = 0;
2126    for ( int i = 0; i < total_in_args; i++) {
2127      if (in_regs[i].first()->is_Register()) {
2128        const Register reg = in_regs[i].first()->as_Register();
2129        switch (in_sig_bt[i]) {
2130          case T_ARRAY:
2131          case T_BOOLEAN:
2132          case T_BYTE:
2133          case T_SHORT:
2134          case T_CHAR:
2135          case T_INT:  assert(reg->is_in(), "don't need to save these"); break;
2136          case T_LONG: if (reg->is_global()) double_slots++; break;
2137          default:  ShouldNotReachHere();
2138        }
2139      } else if (in_regs[i].first()->is_FloatRegister()) {
2140        switch (in_sig_bt[i]) {
2141          case T_FLOAT:  single_slots++; break;
2142          case T_DOUBLE: double_slots++; break;
2143          default:  ShouldNotReachHere();
2144        }
2145      }
2146    }
2147    total_save_slots = double_slots * 2 + single_slots;
2148  }
2149
2150  // Compute framesize for the wrapper.  We need to handlize all oops in
2151  // registers. We must create space for them here that is disjoint from
2152  // the windowed save area because we have no control over when we might
2153  // flush the window again and overwrite values that gc has since modified.
2154  // (The live window race)
2155  //
2156  // We always just allocate 6 words for storing down these objects. This allows
2157  // us to simply record the base and use the Ireg number to decide which
2158  // slot to use. (Note that the reg number is the inbound number not the
2159  // outbound number).
2160  // We must shuffle args to match the native convention, and include var-args space.
2161
2162  // Calculate the total number of stack slots we will need.
2163
2164  // First count the abi requirement plus all of the outgoing args
2165  int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;
2166
2167  // Now the space for the inbound oop handle area
2168
2169  int oop_handle_offset = round_to(stack_slots, 2);
2170  stack_slots += total_save_slots;
2171
2172  // Now any space we need for handlizing a klass if static method
2173
2174  int klass_slot_offset = 0;
2175  int klass_offset = -1;
2176  int lock_slot_offset = 0;
2177  bool is_static = false;
2178
2179  if (method->is_static()) {
2180    klass_slot_offset = stack_slots;
2181    stack_slots += VMRegImpl::slots_per_word;
2182    klass_offset = klass_slot_offset * VMRegImpl::stack_slot_size;
2183    is_static = true;
2184  }
2185
2186  // Plus a lock if needed
2187
2188  if (method->is_synchronized()) {
2189    lock_slot_offset = stack_slots;
2190    stack_slots += VMRegImpl::slots_per_word;
2191  }
2192
2193  // Now a place to save the return value or a temporary for any gpr -> fpr moves
2194  stack_slots += 2;
2195
2196  // Ok The space we have allocated will look like:
2197  //
2198  //
2199  // FP-> |                     |
2200  //      |---------------------|
2201  //      | 2 slots for moves   |
2202  //      |---------------------|
2203  //      | lock box (if sync)  |
2204  //      |---------------------| <- lock_slot_offset
2205  //      | klass (if static)   |
2206  //      |---------------------| <- klass_slot_offset
2207  //      | oopHandle area      |
2208  //      |---------------------| <- oop_handle_offset
2209  //      | outbound memory     |
2210  //      | based arguments     |
2211  //      |                     |
2212  //      |---------------------|
2213  //      | vararg area         |
2214  //      |---------------------|
2215  //      |                     |
2216  // SP-> | out_preserved_slots |
2217  //
2218  //
2219
2220
2221  // Now compute actual number of stack words we need rounding to make
2222  // stack properly aligned.
2223  stack_slots = round_to(stack_slots, 2 * VMRegImpl::slots_per_word);
2224
2225  int stack_size = stack_slots * VMRegImpl::stack_slot_size;
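  // For example (illustrative numbers only): on LP64, where stack slots are
  // 4 bytes and slots_per_word == 2, an accumulated total of 39 slots rounds
  // up to 40, i.e. a 160-byte, 16-byte-aligned frame body.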
2226
2227  // Generate stack overflow check before creating frame
2228  __ generate_stack_overflow_check(stack_size);
2229
2230  // Generate a new frame for the wrapper.
2231  __ save(SP, -stack_size, SP);
2232
2233  int frame_complete = ((intptr_t)__ pc()) - start;
2234
2235  __ verify_thread();
2236
2237  if (is_critical_native) {
2238    check_needs_gc_for_critical_native(masm, stack_slots,  total_in_args,
2239                                       oop_handle_offset, oop_maps, in_regs, in_sig_bt);
2240  }
2241
2242  //
2243  // We immediately shuffle the arguments so that any vm call we have to
2244  // make from here on out (sync slow path, jvmti, etc.) we will have
2245  // captured the oops from our caller and have a valid oopMap for
2246  // them.
2247
2248  // -----------------
2249  // The Grand Shuffle
2250  //
2251  // Natives require 1 or 2 extra arguments over the normal ones: the JNIEnv*
2252  // (derived from JavaThread* which is in L7_thread_cache) and, if static,
2253  // the class mirror instead of a receiver.  This pretty much guarantees that
2254  // register layout will not match.  We ignore these extra arguments during
2255  // the shuffle. The shuffle is described by the two calling convention
2256  // vectors we have in our possession. We simply walk the java vector to
2257  // get the source locations and the c vector to get the destinations.
2258  // Because we have a new window and the argument registers are completely
2259  // disjoint ( I0 -> O1, I1 -> O2, ...) we have nothing to worry about
2260  // here.
2261
2262  // This is a trick. We double the stack slots so we can claim
2263  // the oops in the caller's frame. Since we are sure to have
2264  // more args than the caller, doubling is enough to make
2265  // sure we can capture all the incoming oop args from the
2266  // caller.
2267  //
2268  OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
2269  // Record sp-based slot for receiver on stack for non-static methods
2270  int receiver_offset = -1;
2271
2272  // We move the arguments backward because a floating point argument's
2273  // destination will always be a register with a greater or equal register
2274  // number, or the stack.
2275
2276#ifdef ASSERT
2277  bool reg_destroyed[RegisterImpl::number_of_registers];
2278  bool freg_destroyed[FloatRegisterImpl::number_of_registers];
2279  for ( int r = 0 ; r < RegisterImpl::number_of_registers ; r++ ) {
2280    reg_destroyed[r] = false;
2281  }
2282  for ( int f = 0 ; f < FloatRegisterImpl::number_of_registers ; f++ ) {
2283    freg_destroyed[f] = false;
2284  }
2285
2286#endif /* ASSERT */
2287
2288  for ( int i = total_in_args - 1, c_arg = total_c_args - 1; i >= 0 ; i--, c_arg-- ) {
2289
2290#ifdef ASSERT
2291    if (in_regs[i].first()->is_Register()) {
2292      assert(!reg_destroyed[in_regs[i].first()->as_Register()->encoding()], "ack!");
2293    } else if (in_regs[i].first()->is_FloatRegister()) {
2294      assert(!freg_destroyed[in_regs[i].first()->as_FloatRegister()->encoding(FloatRegisterImpl::S)], "ack!");
2295    }
2296    if (out_regs[c_arg].first()->is_Register()) {
2297      reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
2298    } else if (out_regs[c_arg].first()->is_FloatRegister()) {
2299      freg_destroyed[out_regs[c_arg].first()->as_FloatRegister()->encoding(FloatRegisterImpl::S)] = true;
2300    }
2301#endif /* ASSERT */
2302
2303    switch (in_sig_bt[i]) {
2304      case T_ARRAY:
2305        if (is_critical_native) {
2306          unpack_array_argument(masm, in_regs[i], in_elem_bt[i], out_regs[c_arg], out_regs[c_arg - 1]);
2307          c_arg--;
2308          break;
2309        }
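        // else fall through: for a regular native an array is just an oop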
2310      case T_OBJECT:
2311        assert(!is_critical_native, "no oop arguments");
2312        object_move(masm, map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg],
2313                    ((i == 0) && (!is_static)),
2314                    &receiver_offset);
2315        break;
2316      case T_VOID:
2317        break;
2318
2319      case T_FLOAT:
2320        float_move(masm, in_regs[i], out_regs[c_arg]);
2321        break;
2322
2323      case T_DOUBLE:
2324        assert( i + 1 < total_in_args &&
2325                in_sig_bt[i + 1] == T_VOID &&
2326                out_sig_bt[c_arg+1] == T_VOID, "bad arg list");
2327        double_move(masm, in_regs[i], out_regs[c_arg]);
2328        break;
2329
2330      case T_LONG :
2331        long_move(masm, in_regs[i], out_regs[c_arg]);
2332        break;
2333
2334      case T_ADDRESS: assert(false, "found T_ADDRESS in java args"); // falls through to the default move in product builds
2335
2336      default:
2337        move32_64(masm, in_regs[i], out_regs[c_arg]);
2338    }
2339  }
2340
2341  // Pre-load a static method's oop into O1.  Used both by locking code and
2342  // the normal JNI call code.
2343  if (method->is_static() && !is_critical_native) {
2344    __ set_oop_constant(JNIHandles::make_local(method->method_holder()->java_mirror()), O1);
2345
2346    // Now handlize the static class mirror in O1.  It's known not-null.
2347    __ st_ptr(O1, SP, klass_offset + STACK_BIAS);
2348    map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));
2349    __ add(SP, klass_offset + STACK_BIAS, O1);
2350  }
2351
2352
2353  const Register L6_handle = L6;
2354
2355  if (method->is_synchronized()) {
2356    assert(!is_critical_native, "unhandled");
2357    __ mov(O1, L6_handle);
2358  }
2359
2360  // We have all of the arguments set up at this point. We MUST NOT touch any Oregs
2361  // except O6/O7. So if we must call out we must push a new frame. We immediately
2362  // push a new frame and flush the windows.
2363#ifdef _LP64
2364  intptr_t thepc = (intptr_t) __ pc();
2365  {
2366    address here = __ pc();
2367    // Call the next instruction
2368    __ call(here + 8, relocInfo::none);
2369    __ delayed()->nop();
2370  }
2371#else
2372  intptr_t thepc = __ load_pc_address(O7, 0);
2373#endif /* _LP64 */
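  // Note on the trick above: a SPARC call instruction writes its own address
  // into O7, so calling the immediately following instruction (pc + 8 skips
  // the call and its delay slot) materializes the current pc in O7 without
  // really transferring control anywhere else.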
2374
2375  // We use the same pc/oopMap repeatedly when we call out
2376  oop_maps->add_gc_map(thepc - start, map);
2377
2378  // O7 now has the pc loaded that we will use when we finally call to native.
2379
2380  // Save thread in L7; it crosses a bunch of VM calls below
2381  // Don't use save_thread because it smashes G2 and we merely
2382  // want to save a copy
2383  __ mov(G2_thread, L7_thread_cache);
2384
2385
2386  // If we create an inner frame, once is plenty;
2387  // when we create it we must also save G2_thread.
2388  bool inner_frame_created = false;
2389
2390  // dtrace method entry support
2391  {
2392    SkipIfEqual skip_if(
2393      masm, G3_scratch, &DTraceMethodProbes, Assembler::zero);
2394    // create inner frame
2395    __ save_frame(0);
2396    __ mov(G2_thread, L7_thread_cache);
2397    __ set_metadata_constant(method(), O1);
2398    __ call_VM_leaf(L7_thread_cache,
2399         CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
2400         G2_thread, O1);
2401    __ restore();
2402  }
2403
2404  // RedefineClasses() tracing support for obsolete method entry
2405  if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) {
2406    // create inner frame
2407    __ save_frame(0);
2408    __ mov(G2_thread, L7_thread_cache);
2409    __ set_metadata_constant(method(), O1);
2410    __ call_VM_leaf(L7_thread_cache,
2411         CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
2412         G2_thread, O1);
2413    __ restore();
2414  }
2415
2416  // We are in the jni frame unless an inner frame was created, in which
2417  // case we are one frame deeper (the "inner" frame). In the "inner"
2418  // frame the args are in the Iregs; in the jni frame they are in
2419  // the Oregs.
2420  // If we ever need to go to the VM (for locking, jvmti) then
2421  // we will always be in the "inner" frame.
2422
2423  // Lock a synchronized method
2424  int lock_offset = -1;         // Set if locked
2425  if (method->is_synchronized()) {
2426    Register Roop = O1;
2427    const Register L3_box = L3;
2428
2429    create_inner_frame(masm, &inner_frame_created);
2430
2431    __ ld_ptr(I1, 0, O1); // the handle (outer O1, now I1) -> the locked oop
2432    Label done;
2433
2434    lock_offset = (lock_slot_offset * VMRegImpl::stack_slot_size);
2435    __ add(FP, lock_offset+STACK_BIAS, L3_box);
2436#ifdef ASSERT
2437    if (UseBiasedLocking) {
2438      // making the box point to itself will make it clear it went unused
2439      // but also be obviously invalid
2440      __ st_ptr(L3_box, L3_box, 0);
2441    }
2442#endif // ASSERT
2443    //
2444    // Compiler_lock_object (Roop, Rmark, Rbox, Rscratch) -- kills Rmark, Rbox, Rscratch
2445    //
2446    __ compiler_lock_object(Roop, L1,    L3_box, L2);
2447    __ br(Assembler::equal, false, Assembler::pt, done);
2448    __ delayed() -> add(FP, lock_offset+STACK_BIAS, L3_box);
2449
2450
2451    // None of the above fast optimizations worked so we have to get into the
2452    // slow case of monitor enter.  Inline a special case of call_VM that
2453    // disallows any pending_exception.
2454    __ mov(Roop, O0);            // Need oop in O0
2455    __ mov(L3_box, O1);
2456
2457    // Record last_Java_sp, in case the VM code releases the JVM lock.
2458
2459    __ set_last_Java_frame(FP, I7);
2460
2461    // do the call
2462    __ call(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), relocInfo::runtime_call_type);
2463    __ delayed()->mov(L7_thread_cache, O2);
2464
2465    __ restore_thread(L7_thread_cache); // restore G2_thread
2466    __ reset_last_Java_frame();
2467
2468#ifdef ASSERT
2469    { Label L;
2470    __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O0);
2471    __ br_null_short(O0, Assembler::pt, L);
2472    __ stop("no pending exception allowed on exit from IR::monitorenter");
2473    __ bind(L);
2474    }
2475#endif
2476    __ bind(done);
2477  }
2478
2479
2480  // Finally just about ready to make the JNI call
2481
2482  __ flushw();
2483  if (inner_frame_created) {
2484    __ restore();
2485  } else {
2486    // Store only what we need from this frame
2487    // QQQ I think that on non-v9 (like we care) we don't need these saves
2488    // either, as the flush traps and the current window goes too.
2489    __ st_ptr(FP, SP, FP->sp_offset_in_saved_window()*wordSize + STACK_BIAS);
2490    __ st_ptr(I7, SP, I7->sp_offset_in_saved_window()*wordSize + STACK_BIAS);
2491  }
2492
2493  // get JNIEnv* which is first argument to native
2494  if (!is_critical_native) {
2495    __ add(G2_thread, in_bytes(JavaThread::jni_environment_offset()), O0);
2496  }
2497
2498  // Use that pc we placed in O7 a while back as the current frame anchor
2499  __ set_last_Java_frame(SP, O7);
2500
2501  // We flushed the windows ages ago; now mark them as flushed before transitioning.
2502  __ set(JavaFrameAnchor::flushed, G3_scratch);
2503  __ st(G3_scratch, G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::flags_offset());
2504
2505  // Transition from _thread_in_Java to _thread_in_native.
2506  __ set(_thread_in_native, G3_scratch);
2507
2508#ifdef _LP64
2509  AddressLiteral dest(native_func);
2510  __ relocate(relocInfo::runtime_call_type);
2511  __ jumpl_to(dest, O7, O7);
2512#else
2513  __ call(native_func, relocInfo::runtime_call_type);
2514#endif
2515  __ delayed()->st(G3_scratch, G2_thread, JavaThread::thread_state_offset());
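  // The store in the delay slot above executes before control reaches the
  // native function, so the thread is already marked _thread_in_native on
  // entry to the callee.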
2516
2517  __ restore_thread(L7_thread_cache); // restore G2_thread
2518
2519  // Unpack native results.  For int-types, we do any needed sign-extension
2520  // and move things into I0.  The return value there will survive any VM
2521  // calls for blocking or unlocking.  An FP or OOP result (handle) is done
2522  // specially in the slow-path code.
2523  switch (ret_type) {
2524  case T_VOID:    break;        // Nothing to do!
2525  case T_FLOAT:   break;        // Got it where we want it (unless slow-path)
2526  case T_DOUBLE:  break;        // Got it where we want it (unless slow-path)
2527  // In a 64-bit build the result is in O0; in a 32-bit build it is in O0,O1.
2528  case T_LONG:
2529#ifndef _LP64
2530                  __ mov(O1, I1);
2531#endif
2532                  // Fall thru
2533  case T_OBJECT:                // Really a handle; cannot de-handlize until after reclaiming jvm_lock
2534  case T_ARRAY:
2535  case T_INT:
2536                  __ mov(O0, I0);
2537                  break;
2538  case T_BOOLEAN: __ subcc(G0, O0, G0); __ addc(G0, 0, I0); break; // !0 => true; 0 => false
2539  case T_BYTE   : __ sll(O0, 24, O0); __ sra(O0, 24, I0);   break;
2540  case T_CHAR   : __ sll(O0, 16, O0); __ srl(O0, 16, I0);   break; // cannot use and3, 0xFFFF too big as immediate value!
2541  case T_SHORT  : __ sll(O0, 16, O0); __ sra(O0, 16, I0);   break;
2543  default:
2544    ShouldNotReachHere();
2545  }
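  // The T_BOOLEAN case above normalizes the result without a branch:
  // subcc(G0, O0, G0) computes 0 - O0 and sets the carry (borrow) flag
  // iff O0 != 0; addc(G0, 0, I0) then materializes just that carry bit.
  // E.g. O0 == 7 gives I0 == 1, O0 == 0 gives I0 == 0.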
2546
2547  Label after_transition;
2548  // must we block?
2549
2550  // Block, if necessary, before resuming in _thread_in_Java state.
2551  // In order for GC to work, don't clear the last_Java_sp until after blocking.
2552  { Label no_block;
2553    AddressLiteral sync_state(SafepointSynchronize::address_of_state());
2554
2555    // Switch thread to "native transition" state before reading the synchronization state.
2556    // This additional state is necessary because reading and testing the synchronization
2557    // state is not atomic w.r.t. GC, as this scenario demonstrates:
2558    //     Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
2559    //     VM thread changes sync state to synchronizing and suspends threads for GC.
2560    //     Thread A is resumed to finish this native method, but doesn't block here since it
2561    //     didn't see any synchronization in progress, and escapes.
2562    __ set(_thread_in_native_trans, G3_scratch);
2563    __ st(G3_scratch, G2_thread, JavaThread::thread_state_offset());
2564    if (os::is_MP()) {
2565      if (UseMembar) {
2566        // Force this write out before the read below
2567        __ membar(Assembler::StoreLoad);
2568      } else {
2569        // Write serialization page so VM thread can do a pseudo remote membar.
2570        // We use the current thread pointer to calculate a thread specific
2571        // offset to write to within the page. This minimizes bus traffic
2572        // due to cache line collision.
2573        __ serialize_memory(G2_thread, G1_scratch, G3_scratch);
2574      }
2575    }
2576    __ load_contents(sync_state, G3_scratch);
2577    __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);
2578
2579    Label L;
2580    Address suspend_state(G2_thread, JavaThread::suspend_flags_offset());
2581    __ br(Assembler::notEqual, false, Assembler::pn, L);
2582    __ delayed()->ld(suspend_state, G3_scratch);
2583    __ cmp_and_br_short(G3_scratch, 0, Assembler::equal, Assembler::pt, no_block);
2584    __ bind(L);
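    // The branch dance above implements, in effect:
    //   if (sync_state != _not_synchronized || suspend_flags != 0) goto block;
    //   goto no_block;
    // with the suspend-flag load hoisted into the branch delay slot.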
2585
2586    // Block.  Save any potential method result value before the operation and
2587    // use a leaf call to leave the last_Java_frame setup undisturbed. Doing this
2588    // lets us share the oopMap we used when we went native rather than create
2589    // a distinct one for this pc.
2590    //
2591    save_native_result(masm, ret_type, stack_slots);
2592    if (!is_critical_native) {
2593      __ call_VM_leaf(L7_thread_cache,
2594                      CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans),
2595                      G2_thread);
2596    } else {
2597      __ call_VM_leaf(L7_thread_cache,
2598                      CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans_and_transition),
2599                      G2_thread);
2600    }
2601
2602    // Restore any method result value
2603    restore_native_result(masm, ret_type, stack_slots);
2604
2605    if (is_critical_native) {
2606      // The call above performed the transition to thread_in_Java so
2607      // skip the transition logic below.
2608      __ ba(after_transition);
2609      __ delayed()->nop();
2610    }
2611
2612    __ bind(no_block);
2613  }
2614
2615  // thread state is thread_in_native_trans. Any safepoint blocking has already
2616  // happened so we can now change state to _thread_in_Java.
2617  __ set(_thread_in_Java, G3_scratch);
2618  __ st(G3_scratch, G2_thread, JavaThread::thread_state_offset());
2619  __ bind(after_transition);
2620
2621  Label no_reguard;
2622  __ ld(G2_thread, JavaThread::stack_guard_state_offset(), G3_scratch);
2623  __ cmp_and_br_short(G3_scratch, JavaThread::stack_guard_yellow_disabled, Assembler::notEqual, Assembler::pt, no_reguard);
2624
2625  save_native_result(masm, ret_type, stack_slots);
2626  __ call(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages));
2627  __ delayed()->nop();
2628
2629  __ restore_thread(L7_thread_cache); // restore G2_thread
2630  restore_native_result(masm, ret_type, stack_slots);
2631
2632  __ bind(no_reguard);
2633
2634  // Handle possible exception (will unlock if necessary)
2635
2636  // The native result, if any, is live in a float register or I0 (and I1 if it is a long in the 32-bit VM).
2637
2638  // Unlock
2639  if (method->is_synchronized()) {
2640    Label done;
2641    Register I2_ex_oop = I2;
2642    const Register L3_box = L3;
2643    // Get locked oop from the handle we passed to jni
2644    __ ld_ptr(L6_handle, 0, L4);
2645    __ add(SP, lock_offset+STACK_BIAS, L3_box);
2646    // Must save pending exception around the slow-path VM call.  Since it's a
2647    // leaf call, the pending exception (if any) can be kept in a register.
2648    __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), I2_ex_oop);
2649    // Now unlock
2650    //                       (Roop, Rmark, Rbox,   Rscratch)
2651    __ compiler_unlock_object(L4,   L1,    L3_box, L2);
2652    __ br(Assembler::equal, false, Assembler::pt, done);
2653    __ delayed()->add(SP, lock_offset+STACK_BIAS, L3_box);
2654
2655    // save and restore any potential method result value around the unlocking
2656    // operation.  Will save in I0 (or stack for FP returns).
2657    save_native_result(masm, ret_type, stack_slots);
2658
2659    // Must clear pending-exception before re-entering the VM.  Since this is
2660    // a leaf call, pending-exception-oop can be safely kept in a register.
2661    __ st_ptr(G0, G2_thread, in_bytes(Thread::pending_exception_offset()));
2662
2663    // slow case of monitor exit.  Inline a special case of call_VM that
2664    // disallows any pending_exception.
2665    __ mov(L3_box, O1);
2666
2667    __ call(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C), relocInfo::runtime_call_type);
2668    __ delayed()->mov(L4, O0);              // Need oop in O0
2669
2670    __ restore_thread(L7_thread_cache); // restore G2_thread
2671
2672#ifdef ASSERT
2673    { Label L;
2674      __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O0);
2675      __ br_null_short(O0, Assembler::pt, L);
2676      __ stop("no pending exception allowed on exit from IR::monitorexit");
2677      __ bind(L);
2678    }
2679#endif
2680    restore_native_result(masm, ret_type, stack_slots);
2681    // check_forward_pending_exception jumps to forward_exception if any pending
2682    // exception is set.  The forward_exception routine expects to see the
2683    // exception in pending_exception and not in a register.  Kind of clumsy,
2684    // since all folks who branch to forward_exception must have tested
2685    // pending_exception first and hence have it in a register already.
2686    __ st_ptr(I2_ex_oop, G2_thread, in_bytes(Thread::pending_exception_offset()));
2687    __ bind(done);
2688  }
2689
2690  // Tell dtrace about this method exit
2691  {
2692    SkipIfEqual skip_if(
2693      masm, G3_scratch, &DTraceMethodProbes, Assembler::zero);
2694    save_native_result(masm, ret_type, stack_slots);
2695    __ set_metadata_constant(method(), O1);
2696    __ call_VM_leaf(L7_thread_cache,
2697       CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
2698       G2_thread, O1);
2699    restore_native_result(masm, ret_type, stack_slots);
2700  }
2701
2702  // Clear "last Java frame" SP and PC.
2703  __ verify_thread(); // G2_thread must be correct
2704  __ reset_last_Java_frame();
2705
2706  // Unpack oop result
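2707  // In effect (a C-like sketch of the block below):
2707  //   I0 = (I0 == NULL) ? NULL : *(oop*)I0;
2707  // i.e. the JNI handle returned by the native method is dereferenced back to
2707  // a raw oop, with a NULL handle mapping to a NULL oop.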
2707  if (ret_type == T_OBJECT || ret_type == T_ARRAY) {
2708      Label L;
2709      __ addcc(G0, I0, G0);
2710      __ brx(Assembler::notZero, true, Assembler::pt, L);
2711      __ delayed()->ld_ptr(I0, 0, I0);
2712      __ mov(G0, I0);
2713      __ bind(L);
2714      __ verify_oop(I0);
2715  }
2716
2717  if (!is_critical_native) {
2718    // reset handle block
2719    __ ld_ptr(G2_thread, in_bytes(JavaThread::active_handles_offset()), L5);
2720    __ st(G0, L5, JNIHandleBlock::top_offset_in_bytes());
2721
2722    __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), G3_scratch);
2723    check_forward_pending_exception(masm, G3_scratch);
2724  }
2725
2726
2727  // Return
2728
2729#ifndef _LP64
2730  if (ret_type == T_LONG) {
2731
2732    // Must leave proper result in O0,O1 and G1 (c2/tiered only)
2733    __ sllx(I0, 32, G1);          // Shift bits into high G1
2734    __ srl (I1, 0, I1);           // Zero extend O1 (harmless?)
2735    __ or3 (I1, G1, G1);          // OR 64 bits into G1
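2736    // Worked example with hypothetical values: I0 = 0x00000001 and
2736    // I1 = 0x00000002 yield G1 = 0x0000000100000002, the full 64-bit
2736    // long reassembled in a single register.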
2736  }
2737#endif
2738
2739  __ ret();
2740  __ delayed()->restore();
2741
2742  __ flush();
2743
2744  nmethod *nm = nmethod::new_native_nmethod(method,
2745                                            compile_id,
2746                                            masm->code(),
2747                                            vep_offset,
2748                                            frame_complete,
2749                                            stack_slots / VMRegImpl::slots_per_word,
2750                                            (is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
2751                                            in_ByteSize(lock_offset),
2752                                            oop_maps);
2753
2754  if (is_critical_native) {
2755    nm->set_lazy_critical_native(true);
2756  }
2757  return nm;
2758
2759}
2760
2761// This function returns the size adjustment (in number of words) applied to a
2762// c2i adapter activation for use during deoptimization.
2763int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals) {
2764  assert(callee_locals >= callee_parameters,
2765          "test and remove; got more parms than locals");
2766  if (callee_locals < callee_parameters)
2767    return 0;                   // No adjustment if the difference would be negative
2768  int diff = (callee_locals - callee_parameters) * Interpreter::stackElementWords;
2769  return round_to(diff, WordsPerLong);
2770}
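2771// Worked example with hypothetical values: callee_parameters == 2 and
2771// callee_locals == 5 with Interpreter::stackElementWords == 1 give diff == 3
2771// words; assuming WordsPerLong == 2, round_to pads this up to 4 so the
2771// adjustment stays long-aligned.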
2771
2772// "Top of Stack" slots that may be unused by the calling convention but must
2773// otherwise be preserved.
2774// On Intel these are not necessary and the value can be zero.
2775// On Sparc this describes the words reserved for storing a register window
2776// when an interrupt occurs.
2777uint SharedRuntime::out_preserve_stack_slots() {
2778  return frame::register_save_words * VMRegImpl::slots_per_word;
2779}
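2780// For example: frame::register_save_words is 16 on SPARC (the eight %i and
2780// eight %l registers of a window), so in the LP64 build, where
2780// VMRegImpl::slots_per_word is 2, this reserves 32 stack slots.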
2780
2781static void gen_new_frame(MacroAssembler* masm, bool deopt) {
2782  //
2783  // Factor out the new frame generation common to deopt and uncommon trap.
2784  //
2785  Register        G3pcs              = G3_scratch; // Array of new pcs (input)
2786  Register        Oreturn0           = O0;
2787  Register        Oreturn1           = O1;
2788  Register        O2UnrollBlock      = O2;
2789  Register        O3array            = O3;         // Array of frame sizes (input)
2790  Register        O4array_size       = O4;         // number of frames (input)
2791  Register        O7frame_size       = O7;         // size of current frame (loaded from O3array)
2792
2793  __ ld_ptr(O3array, 0, O7frame_size);
2794  __ sub(G0, O7frame_size, O7frame_size);
2795  __ save(SP, O7frame_size, SP);
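2796  // Note on the idiom above: 'save' adds its second operand to SP, so the
2796  // frame size is negated first; in effect new_SP = old_SP + (-frame_size),
2796  // allocating the new frame as the register window rotates.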
2796  __ ld_ptr(G3pcs, 0, I7);                      // load frame's new pc
2797
2798#ifdef ASSERT
2799  // make sure that the frames are aligned properly
2800#ifndef _LP64
2801  __ btst(wordSize*2-1, SP);
2802  __ breakpoint_trap(Assembler::notZero, Assembler::ptr_cc);
2803#endif
2804#endif
2805
2806  // Deopt needs to pass some extra live values from frame to frame
2807
2808  if (deopt) {
2809    __ mov(Oreturn0->after_save(), Oreturn0);
2810    __ mov(Oreturn1->after_save(), Oreturn1);
2811  }
2812
2813  __ mov(O4array_size->after_save(), O4array_size);
2814  __ sub(O4array_size, 1, O4array_size);
2815  __ mov(O3array->after_save(), O3array);
2816  __ mov(O2UnrollBlock->after_save(), O2UnrollBlock);
2817  __ add(G3pcs, wordSize, G3pcs);               // point to next pc value
2818
2819#ifdef ASSERT
2820  // trash registers to show a clear pattern in backtraces
2821  __ set(0xDEAD0000, I0);
2822  __ add(I0,  2, I1);
2823  __ add(I0,  4, I2);
2824  __ add(I0,  6, I3);
2825  __ add(I0,  8, I4);
2826  // Don't touch I5; it could hold the valuable savedSP
2827  __ set(0xDEADBEEF, L0);
2828  __ mov(L0, L1);
2829  __ mov(L0, L2);
2830  __ mov(L0, L3);
2831  __ mov(L0, L4);
2832  __ mov(L0, L5);
2833
2834  // trash the return value as there is nothing to return yet
2835  __ set(0xDEAD0001, O7);
2836#endif
2837
2838  __ mov(SP, O5_savedSP);
2839}
2840
2841
2842static void make_new_frames(MacroAssembler* masm, bool deopt) {
2843  //
2844  // loop through the UnrollBlock info and create new frames
2845  //
2846  Register        G3pcs              = G3_scratch;
2847  Register        Oreturn0           = O0;
2848  Register        Oreturn1           = O1;
2849  Register        O2UnrollBlock      = O2;
2850  Register        O3array            = O3;
2851  Register        O4array_size       = O4;
2852  Label           loop;
2853
2854#ifdef ASSERT
2855  // Compilers generate code that bangs the stack by as much as the
2856  // interpreter would need, so this stack banging should never
2857  // trigger a fault. Verify that it does not on non-product builds.
2858  if (UseStackBanging) {
2859    // Get total frame size for interpreted frames
2860    __ ld(O2UnrollBlock, Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes(), O4);
2861    __ bang_stack_size(O4, O3, G3_scratch);
2862  }
2863#endif
2864
2865  __ ld(O2UnrollBlock, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes(), O4array_size);
2866  __ ld_ptr(O2UnrollBlock, Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes(), G3pcs);
2867  __ ld_ptr(O2UnrollBlock, Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes(), O3array);
2868
2869  // Adjust old interpreter frame to make space for new frame's extra java locals
2870  //
2871  // We capture the original sp for the transition frame only because it is needed
2872  // in order to properly calculate interpreter_sp_adjustment. Even though in real
2873  // life every interpreter frame captures a savedSP, it is (fortunately) only
2874  // needed at the transition. If we had to have it correct everywhere then we
2875  // would need to be told the sp_adjustment for each frame we create. If the
2876  // frame size array had twice the frame count entries then we could keep pairs
2877  // [sp_adjustment, frame_size] for each frame we create and keep up the illusion everywhere.
2878  //
2879
2880  __ ld(O2UnrollBlock, Deoptimization::UnrollBlock::caller_adjustment_offset_in_bytes(), O7);
2881  __ mov(SP, O5_savedSP);       // remember initial sender's original sp before adjustment
2882  __ sub(SP, O7, SP);
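2883  // In effect (sketch): SP -= UnrollBlock->caller_adjustment(), growing the
2883  // sender's frame by the bytes needed for the callee's extra java locals,
2883  // while O5_savedSP still names the unadjusted SP for the transition frame.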
2883
2884#ifdef ASSERT
2885  // make sure that there is at least one entry in the array
2886  __ tst(O4array_size);
2887  __ breakpoint_trap(Assembler::zero, Assembler::icc);
2888#endif
2889
2890  // Now push the new interpreter frames
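2891  // In C-like terms the loop below is (a sketch; names are illustrative):
2891  //   do { push_new_frame(*frame_sizes++, *frame_pcs++); } while (--count != 0);
2891  // with count, frame_sizes and frame_pcs all taken from the UnrollBlock.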
2891  __ bind(loop);
2892
2893  // allocate a new frame, filling the registers
2894
2895  gen_new_frame(masm, deopt);        // allocate an interpreter frame
2896
2897  __ cmp_zero_and_br(Assembler::notZero, O4array_size, loop);
2898  __ delayed()->add(O3array, wordSize, O3array);
2899  __ ld_ptr(G3pcs, 0, O7);                      // load final frame new pc
2900
2901}
2902
2903//------------------------------generate_deopt_blob----------------------------
2904// Ought to generate an ideal graph & compile, but here's some SPARC ASM
2905// instead.
2906void SharedRuntime::generate_deopt_blob() {
2907  // allocate space for the code
2908  ResourceMark rm;
2909  // setup code generation tools
2910  int pad = VerifyThread ? 512 : 0; // Extra slop space for more verify code
2911#ifdef ASSERT
2912  if (UseStackBanging) {
2913    pad += StackShadowPages*16 + 32;
2914  }
2915#endif
2916#ifdef _LP64
2917  CodeBuffer buffer("deopt_blob", 2100+pad, 512);
2918#else
2919  // Measured 8/7/03 at 1212 in 32bit debug build (no VerifyThread)
2920  // Measured 8/7/03 at 1396 in 32bit debug build (VerifyThread)
2921  CodeBuffer buffer("deopt_blob", 1600+pad, 512);
2922#endif /* _LP64 */
2923  MacroAssembler* masm               = new MacroAssembler(&buffer);
2924  FloatRegister   Freturn0           = F0;
2925  Register        Greturn1           = G1;
2926  Register        Oreturn0           = O0;
2927  Register        Oreturn1           = O1;
2928  Register        O2UnrollBlock      = O2;
2929  Register        L0deopt_mode       = L0;
2930  Register        G4deopt_mode       = G4_scratch;
2931  int             frame_size_words;
2932  Address         saved_Freturn0_addr(FP, -sizeof(double) + STACK_BIAS);
2933#if !defined(_LP64) && defined(COMPILER2)
2934  Address         saved_Greturn1_addr(FP, -sizeof(double) -sizeof(jlong) + STACK_BIAS);
2935#endif
2936  Label           cont;
2937
2938  OopMapSet *oop_maps = new OopMapSet();
2939
2940  //
2941  // This is the entry point for code which is returning to a de-optimized
2942  // frame.
2943  // The steps taken by this frame are as follows:
2944  //   - push a dummy "register_save" and save the return values (O0, O1, F0/F1, G1)
2945  //     and all potentially live registers (at a pollpoint many registers can be live).
2946  //
2947  //   - call the C routine: Deoptimization::fetch_unroll_info (this function
2948  //     returns information about the number and size of interpreter frames
2949  //     which are equivalent to the frame which is being deoptimized)
2950  //   - deallocate the unpack frame, restoring only result values. Other
2951  //     volatile registers will now be captured in the vframeArray as needed.
2952  //   - deallocate the deoptimization frame
2953  //   - in a loop using the information returned in the previous step
2954  //     push new interpreter frames (take care to propagate the return
2955  //     values through each new frame pushed)
2956  //   - create a dummy "unpack_frame" and save the return values (O0, O1, F0)
2957  //   - call the C routine: Deoptimization::unpack_frames (this function
2958  //     lays out values on the interpreter frame which was just created)
2959  //   - deallocate the dummy unpack_frame
2960  //   - ensure that all the return values are correctly set and then do
2961  //     a return to the interpreter entry point
2962  //
2963  // Refer to the following methods for more information:
2964  //   - Deoptimization::fetch_unroll_info
2965  //   - Deoptimization::unpack_frames
2966
2967  OopMap* map = NULL;
2968
2969  int start = __ offset();
2970
2971  // restore G2, the trampoline destroyed it
2972  __ get_thread();
2973
2974  // On entry we have been called by the deoptimized nmethod with a call that
2975  // replaced the original call (or safepoint polling location) so the deoptimizing
2976  // pc is now in O7. Return values are still in the expected places.
2977
2978  map = RegisterSaver::save_live_registers(masm, 0, &frame_size_words);
2979  __ ba(cont);
2980  __ delayed()->mov(Deoptimization::Unpack_deopt, L0deopt_mode);
2981
2982  int exception_offset = __ offset() - start;
2983
2984  // restore G2, the trampoline destroyed it
2985  __ get_thread();
2986
2987  // On entry we have been jumped to by the exception handler (or exception_blob
2988  // for server).  O0 contains the exception oop and O7 contains the original
2989  // exception pc.  So if we push a frame here it will look to the
2990  // stack walking code (fetch_unroll_info) just like a normal call so
2991  // state will be extracted normally.
2992
2993  // save exception oop in JavaThread and fall through into the
2994  // exception_in_tls case since they are handled in the same way except
2995  // for where the pending exception is kept.
2996  __ st_ptr(Oexception, G2_thread, JavaThread::exception_oop_offset());
2997
2998  //
2999  // Vanilla deoptimization with an exception pending in exception_oop
3000  //
3001  int exception_in_tls_offset = __ offset() - start;
3002
3003  // No need to update oop_map as each call to save_live_registers will produce an identical oopmap
3004  (void) RegisterSaver::save_live_registers(masm, 0, &frame_size_words);
3005
3006  // Restore G2_thread
3007  __ get_thread();
3008
3009#ifdef ASSERT
3010  {
3011    // verify that there is really an exception oop in exception_oop
3012    Label has_exception;
3013    __ ld_ptr(G2_thread, JavaThread::exception_oop_offset(), Oexception);
3014    __ br_notnull_short(Oexception, Assembler::pt, has_exception);
3015    __ stop("no exception in thread");
3016    __ bind(has_exception);
3017
3018    // verify that there is no pending exception
3019    Label no_pending_exception;
3020    Address exception_addr(G2_thread, Thread::pending_exception_offset());
3021    __ ld_ptr(exception_addr, Oexception);
3022    __ br_null_short(Oexception, Assembler::pt, no_pending_exception);
3023    __ stop("must not have pending exception here");
3024    __ bind(no_pending_exception);
3025  }
3026#endif
3027
3028  __ ba(cont);
3029  __ delayed()->mov(Deoptimization::Unpack_exception, L0deopt_mode);
3030
3031  //
3032  // Reexecute entry, similar to c2 uncommon trap
3033  //
3034  int reexecute_offset = __ offset() - start;
3035
3036  // No need to update oop_map as each call to save_live_registers will produce an identical oopmap
3037  (void) RegisterSaver::save_live_registers(masm, 0, &frame_size_words);
3038
3039  __ mov(Deoptimization::Unpack_reexecute, L0deopt_mode);
3040
3041  __ bind(cont);
3042
3043  __ set_last_Java_frame(SP, noreg);
3044
3045  // do the call by hand so we can get the oopmap
3046
3047  __ mov(G2_thread, L7_thread_cache);
3048  __ call(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info), relocInfo::runtime_call_type);
3049  __ delayed()->mov(G2_thread, O0);
3050
3051  // Set an oopmap for the call site; this describes all our saved volatile registers.
3052
3053  oop_maps->add_gc_map( __ offset()-start, map);
3054
3055  __ mov(L7_thread_cache, G2_thread);
3056
3057  __ reset_last_Java_frame();
3058
3059  // NOTE: we know that only O0/O1 will be reloaded by restore_result_registers
3060  // so this move will survive
3061
3062  __ mov(L0deopt_mode, G4deopt_mode);
3063
3064  __ mov(O0, O2UnrollBlock->after_save());
3065
3066  RegisterSaver::restore_result_registers(masm);
3067
3068  Label noException;
3069  __ cmp_and_br_short(G4deopt_mode, Deoptimization::Unpack_exception, Assembler::notEqual, Assembler::pt, noException);
3070
3071  // Move the pending exception from exception_oop to Oexception so
3072  // the pending exception will be picked up by the interpreter.
3073  __ ld_ptr(G2_thread, in_bytes(JavaThread::exception_oop_offset()), Oexception);
3074  __ st_ptr(G0, G2_thread, in_bytes(JavaThread::exception_oop_offset()));
3075  __ st_ptr(G0, G2_thread, in_bytes(JavaThread::exception_pc_offset()));
3076  __ bind(noException);
3077
3078  // deallocate the deoptimization frame taking care to preserve the return values
3079  __ mov(Oreturn0,     Oreturn0->after_save());
3080  __ mov(Oreturn1,     Oreturn1->after_save());
3081  __ mov(O2UnrollBlock, O2UnrollBlock->after_save());
3082  __ restore();
3083
3084  // Allocate new interpreter frame(s) and possible c2i adapter frame
3085
3086  make_new_frames(masm, true);
3087
3088  // push a dummy "unpack_frame" taking care of float return values and
3089  // call Deoptimization::unpack_frames to have the unpacker layout
3090  // information in the interpreter frames just created and then return
3091  // to the interpreter entry point
3092  __ save(SP, -frame_size_words*wordSize, SP);
3093  __ stf(FloatRegisterImpl::D, Freturn0, saved_Freturn0_addr);
3094#if !defined(_LP64)
3095#if defined(COMPILER2)
3096  // In the 32-bit build, C2 returns longs in the single register G1
3097  __ stx(Greturn1, saved_Greturn1_addr);
3098#endif
3099  __ set_last_Java_frame(SP, noreg);
3100  __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames), G2_thread, G4deopt_mode);
3101#else
3102  // LP64 uses g4 in set_last_Java_frame
3103  __ mov(G4deopt_mode, O1);
3104  __ set_last_Java_frame(SP, G0);
3105  __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames), G2_thread, O1);
3106#endif
3107  __ reset_last_Java_frame();
3108  __ ldf(FloatRegisterImpl::D, saved_Freturn0_addr, Freturn0);
3109
3110#if !defined(_LP64) && defined(COMPILER2)
3111  // In 32 bit, C2 returns longs in G1 so restore the saved G1 into
3112  // I0/I1 if the return value is long.
3113  Label not_long;
3114  __ cmp_and_br_short(O0, T_LONG, Assembler::notEqual, Assembler::pt, not_long);
3115  __ ldd(saved_Greturn1_addr, I0);
3116  __ bind(not_long);
3117#endif
3118  __ ret();
3119  __ delayed()->restore();
3120
3121  masm->flush();
3122  _deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset, reexecute_offset, frame_size_words);
3123  _deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset);
3124}
3125
3126#ifdef COMPILER2
3127
3128//------------------------------generate_uncommon_trap_blob--------------------
3129// Ought to generate an ideal graph & compile, but here's some SPARC ASM
3130// instead.
3131void SharedRuntime::generate_uncommon_trap_blob() {
3132  // allocate space for the code
3133  ResourceMark rm;
3134  // setup code generation tools
3135  int pad = VerifyThread ? 512 : 0;
3136#ifdef ASSERT
3137  if (UseStackBanging) {
3138    pad += StackShadowPages*16 + 32;
3139  }
3140#endif
3141#ifdef _LP64
3142  CodeBuffer buffer("uncommon_trap_blob", 2700+pad, 512);
3143#else
3144  // Measured 8/7/03 at 660 in 32bit debug build (no VerifyThread)
3145  // Measured 8/7/03 at 1028 in 32bit debug build (VerifyThread)
3146  CodeBuffer buffer("uncommon_trap_blob", 2000+pad, 512);
3147#endif
3148  MacroAssembler* masm               = new MacroAssembler(&buffer);
3149  Register        O2UnrollBlock      = O2;
3150  Register        O2klass_index      = O2;
3151
3152  //
3153  // This is the entry point for all traps the compiler takes when it thinks
3154  // it cannot handle further execution of compiled code. The frame is
3155  // deoptimized in these cases and converted into interpreter frames for
3156  // execution.
3157  // The steps taken by this frame are as follows:
3158  //   - push a fake "unpack_frame"
3159  //   - call the C routine Deoptimization::uncommon_trap (this function
3160  //     packs the current compiled frame into vframe arrays and returns
3161  //     information about the number and size of interpreter frames which
3162  //     are equivalent to the frame which is being deoptimized)
3163  //   - deallocate the "unpack_frame"
3164  //   - deallocate the deoptimization frame
3165  //   - in a loop using the information returned in the previous step
3166  //     push interpreter frames;
3167  //   - create a dummy "unpack_frame"
3168  //   - call the C routine: Deoptimization::unpack_frames (this function
3169  //     lays out values on the interpreter frame which was just created)
3170  //   - deallocate the dummy unpack_frame
3171  //   - return to the interpreter entry point
3172  //
3173  //  Refer to the following methods for more information:
3174  //   - Deoptimization::uncommon_trap
3175  //   - Deoptimization::unpack_frames
3176
3177  // the unloaded class index is in O0 (first parameter to this blob)
3178
3179  // push a dummy "unpack_frame"
3180  // and call Deoptimization::uncommon_trap to pack the compiled frame into
3181  // vframe array and return the UnrollBlock information
3182  __ save_frame(0);
3183  __ set_last_Java_frame(SP, noreg);
3184  __ mov(I0, O2klass_index);
3185  __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap), G2_thread, O2klass_index);
3186  __ reset_last_Java_frame();
3187  __ mov(O0, O2UnrollBlock->after_save());
3188  __ restore();
3189
3190  // deallocate the deoptimized frame taking care to preserve the return values
3191  __ mov(O2UnrollBlock, O2UnrollBlock->after_save());
3192  __ restore();
3193
3194  // Allocate new interpreter frame(s) and possible c2i adapter frame
3195
3196  make_new_frames(masm, false);
3197
3198  // push a dummy "unpack_frame" taking care of float return values and
3199  // call Deoptimization::unpack_frames to have the unpacker layout
3200  // information in the interpreter frames just created and then return
3201  // to the interpreter entry point
3202  __ save_frame(0);
3203  __ set_last_Java_frame(SP, noreg);
3204  __ mov(Deoptimization::Unpack_uncommon_trap, O3); // indicate it is the uncommon trap case
3205  __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames), G2_thread, O3);
3206  __ reset_last_Java_frame();
3207  __ ret();
3208  __ delayed()->restore();
3209
3210  masm->flush();
3211  _uncommon_trap_blob = UncommonTrapBlob::create(&buffer, NULL, __ total_frame_size_in_bytes(0)/wordSize);
3212}
3213
3214#endif // COMPILER2
3215
3216//------------------------------generate_handler_blob-------------------
3217//
3218// Generate a special Compile2Runtime blob that saves all registers, and sets
3219// up an OopMap.
3220//
3221// This blob is jumped to (via a breakpoint and the signal handler) from a
3222// safepoint in compiled code.  On entry to this blob, O7 contains the
3223// address in the original nmethod at which we should resume normal execution.
3224// Thus, this blob looks like a subroutine which must preserve lots of
3225// registers and return normally.  Note that O7 is never register-allocated,
3226// so it is guaranteed to be free here.
3227//
3228
3229// The hardest part of what this blob must do is to save the 64-bit %o
3230  // registers in the 32-bit build.  A simple 'save' turns the %o's into %i's and
3231// an interrupt will chop off their heads.  Making space in the caller's frame
3232// first will let us save the 64-bit %o's before save'ing, but we cannot hand
3233// the adjusted FP off to the GC stack-crawler: this will modify the caller's
3234// SP and mess up HIS OopMaps.  So we first adjust the caller's SP, then save
3235// the 64-bit %o's, then do a save, then fixup the caller's SP (our FP).
3236// Tricky, tricky, tricky...
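3237// In outline (a sketch of the sequence just described): (1) lower the
3237// caller's SP to make room; (2) store the 64-bit %o registers there; (3) do
3237// the 'save'; (4) add the adjustment back to FP (the caller's SP) so the GC
3237// stack-crawler sees the caller's frame unchanged.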
3237
3238SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_type) {
3239  assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before");
3240
3241  // allocate space for the code
3242  ResourceMark rm;
3243  // setup code generation tools
3244  // Measured 8/7/03 at 896 in 32bit debug build (no VerifyThread)
3245  // Measured 8/7/03 at 1080 in 32bit debug build (VerifyThread)
3246  // even larger with TraceJumps
3247  int pad = TraceJumps ? 512 : 0;
3248  CodeBuffer buffer("handler_blob", 1600 + pad, 512);
3249  MacroAssembler* masm                = new MacroAssembler(&buffer);
3250  int             frame_size_words;
3251  OopMapSet *oop_maps = new OopMapSet();
3252  OopMap* map = NULL;
3253
3254  int start = __ offset();
3255
3256  bool cause_return = (poll_type == POLL_AT_RETURN);
3257  // If the safepoint poll was taken at a return, do a "restore" first
3258  if (cause_return) {
3259    __ restore();
3260  } else {
3261    // Make it look like we were called via the poll
3262    // so that frame constructor always sees a valid return address
3263    __ ld_ptr(G2_thread, in_bytes(JavaThread::saved_exception_pc_offset()), O7);
3264    __ sub(O7, frame::pc_return_offset, O7);
3265  }
3266
3267  map = RegisterSaver::save_live_registers(masm, 0, &frame_size_words);
3268
3269  // setup last_Java_sp (blows G4)
3270  __ set_last_Java_frame(SP, noreg);
3271
3272  // call into the runtime to handle the safepoint poll
3273  // Do not use call_VM_leaf, because we need to make a GC map at this call site.
3274  __ mov(G2_thread, O0);
3275  __ save_thread(L7_thread_cache);
3276  __ call(call_ptr);
3277  __ delayed()->nop();
3278
3279  // Set an oopmap for the call site.
3280  // We need this not only for callee-saved registers, but also for volatile
3281  // registers that the compiler might be keeping live across a safepoint.
3282
3283  oop_maps->add_gc_map( __ offset() - start, map);
3284
3285  __ restore_thread(L7_thread_cache);
3286  // clear last_Java_sp
3287  __ reset_last_Java_frame();
3288
3289  // Check for exceptions
3290  Label pending;
3291
3292  __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O1);
3293  __ br_notnull_short(O1, Assembler::pn, pending);
3294
3295  RegisterSaver::restore_live_registers(masm);
3296
3297  // We are back to the original state on entry and ready to go.
3298
3299  __ retl();
3300  __ delayed()->nop();
3301
3302  // Pending exception after the safepoint
3303
3304  __ bind(pending);
3305
3306  RegisterSaver::restore_live_registers(masm);
3307
3308  // We are back to the original state on entry.
3309
3310  // Tail-call forward_exception_entry, with the issuing PC in O7,
3311  // so it looks like the original nmethod called forward_exception_entry.
3312  __ set((intptr_t)StubRoutines::forward_exception_entry(), O0);
3313  __ JMP(O0, 0);
3314  __ delayed()->nop();
3315
3316  // -------------
3317  // make sure all code is generated
3318  masm->flush();
3319
3320  // return exception blob
3321  return SafepointBlob::create(&buffer, oop_maps, frame_size_words);
3322}
3323
3324//
3325// generate_resolve_blob - call resolution (static/virtual/opt-virtual/ic-miss)
3326//
3327// Generate a stub that calls into vm to find out the proper destination
3328// of a java call. All the argument registers are live at this point
3329// but since this is generic code we don't know what they are and the caller
3330// must do any gc of the args.
3331//
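3332// A sketch of the contract (for orientation): 'destination' is a resolver
3332// such as SharedRuntime::resolve_static_call_C, which returns the code entry
3332// to continue at; the blob reloads the resolved Method* into G5_method via
3332// get_vm_result_2 and jumps to that entry with all argument registers
3332// restored.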
3332RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const char* name) {
3333  assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before");
3334
3335  // allocate space for the code
3336  ResourceMark rm;
3337  // setup code generation tools
3338  // Measured 8/7/03 at 896 in 32bit debug build (no VerifyThread)
3339  // Measured 8/7/03 at 1080 in 32bit debug build (VerifyThread)
3340  // even larger with TraceJumps
3341  int pad = TraceJumps ? 512 : 0;
3342  CodeBuffer buffer(name, 1600 + pad, 512);
3343  MacroAssembler* masm                = new MacroAssembler(&buffer);
3344  int             frame_size_words;
3345  OopMapSet *oop_maps = new OopMapSet();
3346  OopMap* map = NULL;
3347
3348  int start = __ offset();
3349
3350  map = RegisterSaver::save_live_registers(masm, 0, &frame_size_words);
3351
3352  int frame_complete = __ offset();
3353
3354  // setup last_Java_sp (blows G4)
3355  __ set_last_Java_frame(SP, noreg);
3356
3357  // call into the runtime to resolve the call target
3358  // Do not use call_VM_leaf, because we need to make a GC map at this call site.
3359  __ mov(G2_thread, O0);
3360  __ save_thread(L7_thread_cache);
3361  __ call(destination, relocInfo::runtime_call_type);
3362  __ delayed()->nop();
3363
3364  // O0 contains the address we are going to jump to, assuming no exception was installed
3365
3366  // Set an oopmap for the call site.
3367  // We need this not only for callee-saved registers, but also for volatile
3368  // registers that the compiler might be keeping live across a safepoint.
3369
3370  oop_maps->add_gc_map( __ offset() - start, map);
3371
3372  __ restore_thread(L7_thread_cache);
3373  // clear last_Java_sp
3374  __ reset_last_Java_frame();
3375
3376  // Check for exceptions
3377  Label pending;
3378
3379  __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O1);
3380  __ br_notnull_short(O1, Assembler::pn, pending);
3381
3382  // get the returned Method*
3383
3384  __ get_vm_result_2(G5_method);
3385  __ stx(G5_method, SP, RegisterSaver::G5_offset()+STACK_BIAS);
3386
3387  // O0 is where we want to jump; overwrite G3, which is saved and scratch
3388
3389  __ stx(O0, SP, RegisterSaver::G3_offset()+STACK_BIAS);
3390
3391  RegisterSaver::restore_live_registers(masm);
3392
3393  // We are back to the original state on entry and ready to go.
3394
3395  __ JMP(G3, 0);
3396  __ delayed()->nop();
3397
3398  // Pending exception after the safepoint
3399
3400  __ bind(pending);
3401
3402  RegisterSaver::restore_live_registers(masm);
3403
3404  // We are back to the original state on entry.
3405
3406  // Tail-call forward_exception_entry, with the issuing PC in O7,
3407  // so it looks like the original nmethod called forward_exception_entry.
3408  __ set((intptr_t)StubRoutines::forward_exception_entry(), O0);
3409  __ JMP(O0, 0);
3410  __ delayed()->nop();
3411
3412  // -------------
3413  // make sure all code is generated
3414  masm->flush();
3415
3416  // return the blob
3417  // frame_size_words or bytes??
3418  return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_words, oop_maps, true);
3419}
3420