/*
 * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/debugInfoRec.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "oops/compiledICHolder.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/align.hpp"
#include "vmreg_sparc.inline.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif
#ifdef SHARK
#include "compiler/compileBroker.hpp"
#include "shark/sharkCompiler.hpp"
#endif
#if INCLUDE_JVMCI
#include "jvmci/jvmciJavaClasses.hpp"
#endif

#define __ masm->

class RegisterSaver {

  // Used for saving volatile registers. This is Gregs, Fregs, I/L/O.
  // The Oregs are problematic. In the 32bit build the compiler can
  // have O registers live with 64 bit quantities. A window save will
  // cut the heads off of the registers. We have to do a very extensive
  // stack dance to save and restore these properly.

  // Note that the Oregs problem only exists if we block at either a polling
  // page exception or a compiled-code safepoint that was not originally a
  // call, or if we deoptimize following one of these kinds of safepoints.

  // Lots of registers to save.  For all builds, a window save will preserve
  // the %i and %l registers.  For the 32-bit longs-in-two entries and 64-bit
  // builds a window-save will preserve the %o registers.  In the LION build
  // we need to save the 64-bit %o registers which requires we save them
  // before the window-save (as then they become %i registers and get their
  // heads chopped off on interrupt).  We have to save some %g registers here
  // as well.
  enum {
    // This frame's save area.  Includes extra space for the native call:
    // vararg's layout space and the like.  Briefly holds the caller's
    // register save area.
    call_args_area = frame::register_save_words_sp_offset +
                     frame::memory_parameter_word_sp_offset*wordSize,
    // Make sure save locations are always 8 byte aligned.
    // Can't use align_up because it doesn't produce a compile-time constant.
    start_of_extra_save_area = ((call_args_area + 7) & ~7),
    g1_offset = start_of_extra_save_area, // g-regs needing saving
    g3_offset = g1_offset+8,
    g4_offset = g3_offset+8,
    g5_offset = g4_offset+8,
    o0_offset = g5_offset+8,
    o1_offset = o0_offset+8,
    o2_offset = o1_offset+8,
    o3_offset = o2_offset+8,
    o4_offset = o3_offset+8,
    o5_offset = o4_offset+8,
    start_of_flags_save_area = o5_offset+8,
    ccr_offset = start_of_flags_save_area,
    fsr_offset = ccr_offset + 8,
    d00_offset = fsr_offset+8,  // Start of float save area
    register_save_size = d00_offset+8*32
  };
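
  // Worked example of the layout arithmetic above (illustrative values only;
  // call_args_area really depends on the frame constants): if call_args_area
  // came out to 92 bytes, then start_of_extra_save_area = (92 + 7) & ~7 = 96,
  // the ten 8-byte g/o save slots occupy [96, 176), the ccr/fsr flag slots
  // [176, 192), and the 32 double registers [192, 448), for a
  // register_save_size of 448 bytes.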


  public:

  static int Oexception_offset() { return o0_offset; };
  static int G3_offset() { return g3_offset; };
  static int G5_offset() { return g5_offset; };
  static OopMap* save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words);
  static void restore_live_registers(MacroAssembler* masm);

  // During deoptimization only the result registers need to be restored;
  // all the other values have already been extracted.

  static void restore_result_registers(MacroAssembler* masm);
};

OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_frame_words, int* total_frame_words) {
  // Record volatile registers as callee-save values in an OopMap so their save locations will be
  // propagated to the caller frame's RegisterMap during StackFrameStream construction (needed for
  // deoptimization; see compiledVFrame::create_stack_value).  The caller's I, L and O registers
  // are saved in register windows - I's and L's in the caller's frame and O's in the stub frame
  // (as the stub's I's) when the runtime routine called by the stub creates its frame.
  // Always make the frame size 16 byte aligned.
  int frame_size = align_up(additional_frame_words + register_save_size, 16);
  // OopMap frame size is in c2 stack slots (sizeof(jint)) not bytes or words
  int frame_size_in_slots = frame_size / sizeof(jint);
  // CodeBlob frame size is in words.
  *total_frame_words = frame_size / wordSize;
  // OopMap* map = new OopMap(*total_frame_words, 0);
  OopMap* map = new OopMap(frame_size_in_slots, 0);

  __ save(SP, -frame_size, SP);


  int debug_offset = 0;
  // Save the G's
  __ stx(G1, SP, g1_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((g1_offset + debug_offset)>>2), G1->as_VMReg());

  __ stx(G3, SP, g3_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((g3_offset + debug_offset)>>2), G3->as_VMReg());

  __ stx(G4, SP, g4_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((g4_offset + debug_offset)>>2), G4->as_VMReg());

  __ stx(G5, SP, g5_offset+STACK_BIAS);
  map->set_callee_saved(VMRegImpl::stack2reg((g5_offset + debug_offset)>>2), G5->as_VMReg());


  // Save the flags
  __ rdccr( G5 );
  __ stx(G5, SP, ccr_offset+STACK_BIAS);
  __ stxfsr(SP, fsr_offset+STACK_BIAS);

  // Save all the FP registers: 32 doubles (32 floats correspond to the 2 halves of the first 16 doubles)
  int offset = d00_offset;
  for( int i=0; i<FloatRegisterImpl::number_of_registers; i+=2 ) {
    FloatRegister f = as_FloatRegister(i);
    __ stf(FloatRegisterImpl::D,  f, SP, offset+STACK_BIAS);
    // Record as callee saved both halves of double registers (2 float registers).
    map->set_callee_saved(VMRegImpl::stack2reg(offset>>2), f->as_VMReg());
    map->set_callee_saved(VMRegImpl::stack2reg((offset + sizeof(float))>>2), f->as_VMReg()->next());
    offset += sizeof(double);
  }

  // And we're done.

  return map;
}


// Pop the current frame and restore all the registers that we
// saved.
void RegisterSaver::restore_live_registers(MacroAssembler* masm) {

  // Restore all the FP registers
  for( int i=0; i<FloatRegisterImpl::number_of_registers; i+=2 ) {
    __ ldf(FloatRegisterImpl::D, SP, d00_offset+i*sizeof(float)+STACK_BIAS, as_FloatRegister(i));
  }

  __ ldx(SP, ccr_offset+STACK_BIAS, G1);
  __ wrccr(G1);

  // Restore the G's
  // Note that G2 (AKA GThread) must be saved and restored separately.
  // TODO-FIXME: save and restore some of the other ASRs, viz., %asi and %gsr.

  __ ldx(SP, g1_offset+STACK_BIAS, G1);
  __ ldx(SP, g3_offset+STACK_BIAS, G3);
  __ ldx(SP, g4_offset+STACK_BIAS, G4);
  __ ldx(SP, g5_offset+STACK_BIAS, G5);

  // Restore flags

  __ ldxfsr(SP, fsr_offset+STACK_BIAS);

  __ restore();

}

// Pop the current frame and restore the registers that might be holding
// a result.
void RegisterSaver::restore_result_registers(MacroAssembler* masm) {

  __ ldf(FloatRegisterImpl::D, SP, d00_offset+STACK_BIAS, as_FloatRegister(0));

  __ restore();

}

// Is the vector's size (in bytes) bigger than the size saved by default?
// 8-byte FP registers are saved by default on SPARC.
bool SharedRuntime::is_wide_vector(int size) {
  // Note, MaxVectorSize == 8 on SPARC.
  assert(size <= 8, "%d byte vectors are not supported", size);
  return size > 8;
}

size_t SharedRuntime::trampoline_size() {
  return 40;
}

void SharedRuntime::generate_trampoline(MacroAssembler *masm, address destination) {
  __ set((intptr_t)destination, G3_scratch);
  __ JMP(G3_scratch, 0);
  __ delayed()->nop();
}

// The java_calling_convention describes stack locations as ideal slots on
// a frame with no abi restrictions. Since we must observe abi restrictions
// (like the placement of the register window) the slots must be biased by
// the following value.
static int reg2offset(VMReg r) {
  return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
}
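
// For example (illustrative, using the 16-word register window described in
// the calling-convention comments below): if out_preserve_stack_slots()
// covers the window's 32 4-byte slots, the first outgoing stack argument
// (reg2stack() == 0) would map to byte offset 32 * 4 = 128 above SP.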

static VMRegPair reg64_to_VMRegPair(Register r) {
  VMRegPair ret;
  if (wordSize == 8) {
    ret.set2(r->as_VMReg());
  } else {
    ret.set_pair(r->successor()->as_VMReg(), r->as_VMReg());
  }
  return ret;
}

// ---------------------------------------------------------------------------
// Read the array of BasicTypes from a signature, and compute where the
// arguments should go.  Values in the VMRegPair regs array refer to 4-byte (VMRegImpl::stack_slot_size)
// quantities.  Values less than VMRegImpl::stack0 are registers, those above
// refer to 4-byte stack slots.  All stack slots are based off of the window
// top.  VMRegImpl::stack0 refers to the first slot past the 16-word window,
// and VMRegImpl::stack0+1 refers to the memory word 4 bytes higher.  Register
// values 0-63 (up to RegisterImpl::number_of_registers) are the 64-bit
// integer registers.  Values 64-95 are the (32-bit only) float registers.
// Each 32-bit quantity is given its own number, so the integer registers
// (in either 32- or 64-bit builds) use 2 numbers.  For example, there is
// an O0-low and an O0-high.  Essentially, all int register numbers are doubled.

// Register results are passed in O0-O5, for outgoing call arguments.  To
// convert to incoming arguments, convert all O's to I's.  The regs array
// refers to the low and hi 32-bit words of 64-bit registers or stack slots.
// If the regs[].second() field is set to VMRegImpl::Bad(), it means it's unused (a
// 32-bit value was passed).  If both are VMRegImpl::Bad(), it means no value was
// passed (used as a placeholder for the other half of longs and doubles in
// the 64-bit build).  regs[].second() is either VMRegImpl::Bad() or regs[].second() is
// regs[].first()+1 (regs[].first() may be misaligned in the C calling convention).
// Sparc never passes a value in regs[].second() but not regs[].first() (regs[].first()
// == VMRegImpl::Bad() && regs[].second() != VMRegImpl::Bad()) nor unrelated values in the
// same VMRegPair.

// Note: the INPUTS in sig_bt are in units of Java argument words, which are
// either 32-bit or 64-bit depending on the build.  The OUTPUTS are in 32-bit
// units regardless of build.
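
// A concrete reading of the numbering scheme (illustrative): a 64-bit value
// in O0 occupies the two 32-bit numbers O0-low and O0-high, so the assignment
// records regs[i].set2(O0->as_VMReg()) and second() == first()+1; a float in
// a single float register takes one number from the 64-95 range and second()
// stays VMRegImpl::Bad().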


// ---------------------------------------------------------------------------
// The compiled Java calling convention.  The Java convention always passes
// 64-bit values in adjacent aligned locations (either registers or stack),
// floats in float registers and doubles in aligned float pairs.  There is
// no backing varargs store for values in registers.
// In the 32-bit build, longs are passed on the stack (cannot be
// passed in I's, because longs in I's get their heads chopped off at
// interrupt).
int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
                                           VMRegPair *regs,
                                           int total_args_passed,
                                           int is_outgoing) {
  assert(F31->as_VMReg()->is_reg(), "overlapping stack/register numbers");

  const int int_reg_max = SPARC_ARGS_IN_REGS_NUM;
  const int flt_reg_max = 8;

  int int_reg = 0;
  int flt_reg = 0;
  int slot = 0;

  for (int i = 0; i < total_args_passed; i++) {
    switch (sig_bt[i]) {
    case T_INT:
    case T_SHORT:
    case T_CHAR:
    case T_BYTE:
    case T_BOOLEAN:
      if (int_reg < int_reg_max) {
        Register r = is_outgoing ? as_oRegister(int_reg++) : as_iRegister(int_reg++);
        regs[i].set1(r->as_VMReg());
      } else {
        regs[i].set1(VMRegImpl::stack2reg(slot++));
      }
      break;

    case T_LONG:
      assert((i + 1) < total_args_passed && sig_bt[i+1] == T_VOID, "expecting VOID in other half");
      // fall-through
    case T_OBJECT:
    case T_ARRAY:
    case T_ADDRESS: // Used, e.g., in slow-path locking for the lock's stack address
      if (int_reg < int_reg_max) {
        Register r = is_outgoing ? as_oRegister(int_reg++) : as_iRegister(int_reg++);
        regs[i].set2(r->as_VMReg());
      } else {
        slot = align_up(slot, 2);  // align
        regs[i].set2(VMRegImpl::stack2reg(slot));
        slot += 2;
      }
      break;

    case T_FLOAT:
      if (flt_reg < flt_reg_max) {
        FloatRegister r = as_FloatRegister(flt_reg++);
        regs[i].set1(r->as_VMReg());
      } else {
        regs[i].set1(VMRegImpl::stack2reg(slot++));
      }
      break;

    case T_DOUBLE:
      assert((i + 1) < total_args_passed && sig_bt[i+1] == T_VOID, "expecting half");
      if (align_up(flt_reg, 2) + 1 < flt_reg_max) {
        flt_reg = align_up(flt_reg, 2);  // align
        FloatRegister r = as_FloatRegister(flt_reg);
        regs[i].set2(r->as_VMReg());
        flt_reg += 2;
      } else {
        slot = align_up(slot, 2);  // align
        regs[i].set2(VMRegImpl::stack2reg(slot));
        slot += 2;
      }
      break;

    case T_VOID:
      regs[i].set_bad();   // Halves of longs & doubles
      break;

    default:
      fatal("unknown basic type %d", sig_bt[i]);
      break;
    }
  }

  // Return the amount of stack space these arguments will need.
  return slot;
}
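
// Worked trace (illustrative): for an outgoing signature
// (T_INT, T_LONG, T_VOID, T_FLOAT, T_DOUBLE, T_VOID) the loop assigns
//   T_INT    -> O0            (int_reg 0)
//   T_LONG   -> O1 as a pair  (int_reg 1; the T_VOID half is set_bad())
//   T_FLOAT  -> F0            (flt_reg 0)
//   T_DOUBLE -> F2:F3         (flt_reg aligned from 1 up to 2)
// and returns 0 because no argument spilled to a stack slot.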

// Helper class mostly to avoid passing masm everywhere, and handle
// store displacement overflow logic.
class AdapterGenerator {
  MacroAssembler *masm;
  Register Rdisp;
  void set_Rdisp(Register r)  { Rdisp = r; }

  void patch_callers_callsite();

  // base+st_off points to top of argument
  int arg_offset(const int st_off) { return st_off; }
  int next_arg_offset(const int st_off) {
    return st_off - Interpreter::stackElementSize;
  }

  // Argument slot values may be loaded first into a register because
  // they might not fit into a simm13 displacement.
  RegisterOrConstant arg_slot(const int st_off);
  RegisterOrConstant next_arg_slot(const int st_off);

  // Stores long into offset pointed to by base
  void store_c2i_long(Register r, Register base,
                      const int st_off, bool is_stack);
  void store_c2i_object(Register r, Register base,
                        const int st_off);
  void store_c2i_int(Register r, Register base,
                     const int st_off);
  void store_c2i_double(VMReg r_2,
                        VMReg r_1, Register base, const int st_off);
  void store_c2i_float(FloatRegister f, Register base,
                       const int st_off);

 public:
  void gen_c2i_adapter(int total_args_passed,
                              // VMReg max_arg,
                              int comp_args_on_stack, // VMRegStackSlots
                              const BasicType *sig_bt,
                              const VMRegPair *regs,
                              Label& skip_fixup);
  void gen_i2c_adapter(int total_args_passed,
                       // VMReg max_arg,
                       int comp_args_on_stack, // VMRegStackSlots
                       const BasicType *sig_bt,
                       const VMRegPair *regs);

  AdapterGenerator(MacroAssembler *_masm) : masm(_masm) {}
};


// Patch the caller's callsite with entry to compiled code if it exists.
void AdapterGenerator::patch_callers_callsite() {
  Label L;
  __ ld_ptr(G5_method, in_bytes(Method::code_offset()), G3_scratch);
  __ br_null(G3_scratch, false, Assembler::pt, L);
  __ delayed()->nop();
  // Call into the VM to patch the caller, then jump to compiled callee
  __ save_frame(4);     // Args in compiled layout; do not blow them

  // Must save all the live G-regs; the list is:
  // G1: 1st Long arg (32bit build)
  // G2: global allocated to TLS
  // G3: used in inline cache check (scratch)
  // G4: 2nd Long arg (32bit build);
  // G5: used in inline cache check (Method*)

  // The longs must go to the stack by hand since in the 32 bit build they can be trashed by window ops.

  // mov(s,d)
  __ mov(G1, L1);
  __ mov(G4, L4);
  __ mov(G5_method, L5);
  __ mov(G5_method, O0);         // VM needs target method
  __ mov(I7, O1);                // VM needs caller's callsite
  // Must be a leaf call...
  // can be very far once the blob has been relocated
  AddressLiteral dest(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite));
  __ relocate(relocInfo::runtime_call_type);
  __ jumpl_to(dest, O7, O7);
  __ delayed()->mov(G2_thread, L7_thread_cache);
  __ mov(L7_thread_cache, G2_thread);
  __ mov(L1, G1);
  __ mov(L4, G4);
  __ mov(L5, G5_method);

  __ restore();      // Restore args
  __ bind(L);
}


RegisterOrConstant AdapterGenerator::arg_slot(const int st_off) {
  RegisterOrConstant roc(arg_offset(st_off));
  return __ ensure_simm13_or_reg(roc, Rdisp);
}

RegisterOrConstant AdapterGenerator::next_arg_slot(const int st_off) {
  RegisterOrConstant roc(next_arg_offset(st_off));
  return __ ensure_simm13_or_reg(roc, Rdisp);
}
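
// A note on the overflow handling (sketch of the mechanism): a SPARC simm13
// displacement must fit in 13 signed bits, i.e. the range [-4096, 4095].
// For a large enough argument area st_off can fall outside that range, in
// which case ensure_simm13_or_reg materializes the offset into Rdisp and the
// subsequent load/store uses a register+register address instead.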


// Stores long into offset pointed to by base
void AdapterGenerator::store_c2i_long(Register r, Register base,
                                      const int st_off, bool is_stack) {
  // In V9, longs are given 2 64-bit slots in the interpreter, but the
  // data is passed in only 1 slot.
  __ stx(r, base, next_arg_slot(st_off));
}

void AdapterGenerator::store_c2i_object(Register r, Register base,
                      const int st_off) {
  __ st_ptr (r, base, arg_slot(st_off));
}

void AdapterGenerator::store_c2i_int(Register r, Register base,
                   const int st_off) {
  __ st (r, base, arg_slot(st_off));
}

// Stores into offset pointed to by base
void AdapterGenerator::store_c2i_double(VMReg r_2,
                      VMReg r_1, Register base, const int st_off) {
  // In V9, doubles are given 2 64-bit slots in the interpreter, but the
  // data is passed in only 1 slot.
  __ stf(FloatRegisterImpl::D, r_1->as_FloatRegister(), base, next_arg_slot(st_off));
}

void AdapterGenerator::store_c2i_float(FloatRegister f, Register base,
                                       const int st_off) {
  __ stf(FloatRegisterImpl::S, f, base, arg_slot(st_off));
}
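
// Slot-layout sketch for the long/double cases above (assuming the 64-bit
// Interpreter::stackElementSize of 8 bytes): a long whose st_off is, say,
// 0x60 owns the two interpreter slots at 0x60 and 0x58; the 64-bit datum is
// written once, at next_arg_slot (0x58), and the higher slot is left as the
// unused half.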

void AdapterGenerator::gen_c2i_adapter(
                            int total_args_passed,
                            // VMReg max_arg,
                            int comp_args_on_stack, // VMRegStackSlots
                            const BasicType *sig_bt,
                            const VMRegPair *regs,
                            Label& L_skip_fixup) {

  // Before we get into the guts of the C2I adapter, see if we should be here
  // at all.  We've come from compiled code and are attempting to jump to the
  // interpreter, which means the caller made a static call to get here
  // (vcalls always get a compiled target if there is one).  Check for a
  // compiled target.  If there is one, we need to patch the caller's call.
  // However we will run interpreted if we come thru here. The next pass
  // thru the call site will run compiled. If we ran compiled here then
  // we can (theoretically) do endless i2c->c2i->i2c transitions during
  // deopt/uncommon trap cycles. If we always go interpreted here then
  // we can have at most one and don't need to play any tricks to keep
  // from endlessly growing the stack.
  //
  // Actually if we detected that we had an i2c->c2i transition here we
  // ought to be able to reset the world back to the state of the interpreted
  // call and not bother building another interpreter arg area. We don't
  // do that at this point.

  patch_callers_callsite();

  __ bind(L_skip_fixup);

  // Since all args are passed on the stack, total_args_passed*wordSize is the
  // space we need.  Add in varargs area needed by the interpreter. Round up
  // to stack alignment.
  const int arg_size = total_args_passed * Interpreter::stackElementSize;
  const int varargs_area =
                 (frame::varargs_offset - frame::register_save_words)*wordSize;
  const int extraspace = align_up(arg_size + varargs_area, 2*wordSize);

  const int bias = STACK_BIAS;
  const int interp_arg_offset = frame::varargs_offset*wordSize +
                        (total_args_passed-1)*Interpreter::stackElementSize;

  const Register base = SP;

  // Make some extra space on the stack.
  __ sub(SP, __ ensure_simm13_or_reg(extraspace, G3_scratch), SP);
  set_Rdisp(G3_scratch);

  // Write the args into the outgoing interpreter space.
  for (int i = 0; i < total_args_passed; i++) {
    const int st_off = interp_arg_offset - (i*Interpreter::stackElementSize) + bias;
    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_stack()) {        // Pretend stack targets are loaded into G1
      RegisterOrConstant ld_off = reg2offset(r_1) + extraspace + bias;
      ld_off = __ ensure_simm13_or_reg(ld_off, Rdisp);
      r_1 = G1_scratch->as_VMReg();// as part of the load/store shuffle
      if (!r_2->is_valid()) __ ld (base, ld_off, G1_scratch);
      else                  __ ldx(base, ld_off, G1_scratch);
    }

    if (r_1->is_Register()) {
      Register r = r_1->as_Register()->after_restore();
      if (sig_bt[i] == T_OBJECT || sig_bt[i] == T_ARRAY) {
        store_c2i_object(r, base, st_off);
      } else if (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
        store_c2i_long(r, base, st_off, r_2->is_stack());
      } else {
        store_c2i_int(r, base, st_off);
      }
    } else {
      assert(r_1->is_FloatRegister(), "");
      if (sig_bt[i] == T_FLOAT) {
        store_c2i_float(r_1->as_FloatRegister(), base, st_off);
      } else {
        assert(sig_bt[i] == T_DOUBLE, "wrong type");
        store_c2i_double(r_2, r_1, base, st_off);
      }
    }
  }

  // Load the interpreter entry point.
  __ ld_ptr(G5_method, in_bytes(Method::interpreter_entry_offset()), G3_scratch);

  // Pass O5_savedSP as an argument to the interpreter.
  // The interpreter will restore SP to this value before returning.
  __ add(SP, __ ensure_simm13_or_reg(extraspace, G1), O5_savedSP);

  __ mov((frame::varargs_offset)*wordSize -
         1*Interpreter::stackElementSize+bias+BytesPerWord, G1);
  // Jump to the interpreter just as if interpreter was doing it.
  __ jmpl(G3_scratch, 0, G0);
  // Setup Lesp for the call.  Cannot actually set Lesp as the current Lesp
  // (really L0) is in use by the compiled frame as a generic temp.  However,
  // the interpreter does not know where its args are without some kind of
  // arg pointer being passed in.  Pass it in Gargs.
  __ delayed()->add(SP, G1, Gargs);
}

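// Emit an unsigned PC range check: branches to L_ok iff
// code_start < pc_reg < code_start + (code_end - code_start); otherwise
// execution falls through into L_fail.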
static void range_check(MacroAssembler* masm, Register pc_reg, Register temp_reg, Register temp2_reg,
                        address code_start, address code_end,
                        Label& L_ok) {
  Label L_fail;
  __ set(ExternalAddress(code_start), temp_reg);
  __ set(pointer_delta(code_end, code_start, 1), temp2_reg);
  __ cmp(pc_reg, temp_reg);
  __ brx(Assembler::lessEqualUnsigned, false, Assembler::pn, L_fail);
  __ delayed()->add(temp_reg, temp2_reg, temp_reg);
  __ cmp_and_brx_short(pc_reg, temp_reg, Assembler::lessUnsigned, Assembler::pt, L_ok);
  __ bind(L_fail);
}

void AdapterGenerator::gen_i2c_adapter(int total_args_passed,
                                       // VMReg max_arg,
                                       int comp_args_on_stack, // VMRegStackSlots
                                       const BasicType *sig_bt,
                                       const VMRegPair *regs) {
  // Generate an I2C adapter: adjust the I-frame to make space for the C-frame
  // layout.  Lesp was saved by the calling I-frame and will be restored on
  // return.  Meanwhile, outgoing arg space is all owned by the callee
  // C-frame, so we can mangle it at will.  After adjusting the frame size,
  // hoist register arguments and repack other args according to the compiled
  // code convention.  Finally, end in a jump to the compiled code.  The entry
  // point address is the start of the buffer.

  // We will only enter here from an interpreted frame and never from after
  // passing thru a c2i. Azul allowed this but we do not. If we lose the
  // race and use a c2i we will remain interpreted for the race loser(s).
  // This removes all sorts of headaches on the x86 side and also eliminates
  // the possibility of having c2i -> i2c -> c2i -> ... endless transitions.

  // More detail:
  // Adapters can be frameless because they do not require the caller
  // to perform additional cleanup work, such as correcting the stack pointer.
  // An i2c adapter is frameless because the *caller* frame, which is interpreted,
  // routinely repairs its own stack pointer (from interpreter_frame_last_sp),
  // even if a callee has modified the stack pointer.
  // A c2i adapter is frameless because the *callee* frame, which is interpreted,
  // routinely repairs its caller's stack pointer (from sender_sp, which is set
  // up via the senderSP register).
  // In other words, if *either* the caller or callee is interpreted, we can
  // get the stack pointer repaired after a call.
  // This is why c2i and i2c adapters cannot be indefinitely composed.
  // In particular, if a c2i adapter were to somehow call an i2c adapter,
  // both caller and callee would be compiled methods, and neither would
  // clean up the stack pointer changes performed by the two adapters.
  // If this happens, control eventually transfers back to the compiled
  // caller, but with an uncorrected stack, causing delayed havoc.

  if (VerifyAdapterCalls &&
      (Interpreter::code() != NULL || StubRoutines::code1() != NULL)) {
    // So, let's test for cascading c2i/i2c adapters right now.
    //  assert(Interpreter::contains($return_addr) ||
    //         StubRoutines::contains($return_addr),
    //         "i2c adapter must return to an interpreter frame");
    __ block_comment("verify_i2c { ");
    Label L_ok;
    if (Interpreter::code() != NULL)
      range_check(masm, O7, O0, O1,
                  Interpreter::code()->code_start(), Interpreter::code()->code_end(),
                  L_ok);
    if (StubRoutines::code1() != NULL)
      range_check(masm, O7, O0, O1,
                  StubRoutines::code1()->code_begin(), StubRoutines::code1()->code_end(),
                  L_ok);
    if (StubRoutines::code2() != NULL)
      range_check(masm, O7, O0, O1,
                  StubRoutines::code2()->code_begin(), StubRoutines::code2()->code_end(),
                  L_ok);
    const char* msg = "i2c adapter must return to an interpreter frame";
    __ block_comment(msg);
    __ stop(msg);
    __ bind(L_ok);
    __ block_comment("} verify_i2ce ");
  }

  // As you can see from the list of inputs & outputs there are not a lot
  // of temp registers to work with: mostly G1, G3 & G4.

  // Inputs:
  // G2_thread      - TLS
  // G5_method      - Method oop
  // G4 (Gargs)     - Pointer to interpreter's args
  // O0..O4         - free for scratch
  // O5_savedSP     - Caller's saved SP, to be restored if needed
  // O6             - Current SP!
  // O7             - Valid return address
  // L0-L7, I0-I7   - Caller's temps (no frame pushed yet)

  // Outputs:
  // G2_thread      - TLS
  // O0-O5          - Outgoing args in compiled layout
  // O6             - Adjusted or restored SP
  // O7             - Valid return address
  // L0-L7, I0-I7   - Caller's temps (no frame pushed yet)
  // F0-F7          - more outgoing args


  // Gargs is the incoming argument base, and also an outgoing argument.
  __ sub(Gargs, BytesPerWord, Gargs);

  // ON ENTRY TO THE CODE WE ARE MAKING, WE HAVE AN INTERPRETED FRAME
  // WITH O7 HOLDING A VALID RETURN PC
  //
  // |              |
  // :  java stack  :
  // |              |
  // +--------------+ <--- start of outgoing args
  // |   receiver   |   |
  // : rest of args :   |---size is java-arg-words
  // |              |   |
  // +--------------+ <--- O4_args (misaligned) and Lesp if prior is not C2I
  // |              |   |
  // :    unused    :   |---Space for max Java stack, plus stack alignment
  // |              |   |
  // +--------------+ <--- SP + 16*wordsize
  // |              |
  // :    window    :
  // |              |
  // +--------------+ <--- SP

  // WE REPACK THE STACK.  We use the common calling convention layout as
  // discovered by calling SharedRuntime::calling_convention.  We assume it
  // causes an arbitrary shuffle of memory, which may require some register
  // temps to do the shuffle.  We hope for (and optimize for) the case where
  // temps are not needed.  We may have to resize the stack slightly, in case
  // we need alignment padding (32-bit interpreter can pass longs & doubles
  // misaligned, but the compilers expect them aligned).
  //
  // |              |
  // :  java stack  :
  // |              |
  // +--------------+ <--- start of outgoing args
  // |  pad, align  |   |
  // +--------------+   |
  // | ints, longs, |   |
  // |    floats,   |   |---Outgoing stack args.
  // :    doubles   :   |   First few args in registers.
  // |              |   |
  // +--------------+ <--- SP' + 16*wordsize
  // |              |
  // :    window    :
  // |              |
  // +--------------+ <--- SP'

  // ON EXIT FROM THE CODE WE ARE MAKING, WE STILL HAVE AN INTERPRETED FRAME
  // WITH O7 HOLDING A VALID RETURN PC - IT'S JUST THAT THE ARGS ARE NOW SET UP
  // FOR COMPILED CODE AND THE FRAME SLIGHTLY GROWN.

  // Cut-out for having no stack args.  Since up to 6 args are passed
  // in registers, we will commonly have no stack args.
  if (comp_args_on_stack > 0) {
    // Convert VMReg stack slots to words.
    int comp_words_on_stack = align_up(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
    // Round up to minimum stack alignment, in wordSize
    comp_words_on_stack = align_up(comp_words_on_stack, 2);
    // Now compute the distance from Lesp to SP.  This calculation does not
    // include the space for total_args_passed because Lesp has not yet popped
    // the arguments.
    __ sub(SP, (comp_words_on_stack)*wordSize, SP);
  }

  // Now generate the shuffle code.  Pick up all register args and move the
  // rest through G1_scratch.
  for (int i = 0; i < total_args_passed; i++) {
    if (sig_bt[i] == T_VOID) {
      // Longs and doubles are passed in native word order, but misaligned
      // in the 32-bit build.
      assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
      continue;
    }

    // Pick up 0, 1 or 2 words from Lesp+offset.  Assume mis-aligned in the
    // 32-bit build and aligned in the 64-bit build.  Look for the obvious
    // ldx/lddf optimizations.

    // Load in argument order going down.
    const int ld_off = (total_args_passed-i)*Interpreter::stackElementSize;
    set_Rdisp(G1_scratch);

    VMReg r_1 = regs[i].first();
    VMReg r_2 = regs[i].second();
    if (!r_1->is_valid()) {
      assert(!r_2->is_valid(), "");
      continue;
    }
    if (r_1->is_stack()) {        // Pretend stack targets are loaded into F8/F9
      r_1 = F8->as_VMReg();        // as part of the load/store shuffle
      if (r_2->is_valid()) r_2 = r_1->next();
    }
    if (r_1->is_Register()) {  // Register argument
      Register r = r_1->as_Register()->after_restore();
      if (!r_2->is_valid()) {
        __ ld(Gargs, arg_slot(ld_off), r);
      } else {
        // In V9, longs are given 2 64-bit slots in the interpreter, but the
        // data is passed in only 1 slot.
        RegisterOrConstant slot = (sig_bt[i] == T_LONG) ?
              next_arg_slot(ld_off) : arg_slot(ld_off);
        __ ldx(Gargs, slot, r);
      }
    } else {
      assert(r_1->is_FloatRegister(), "");
      if (!r_2->is_valid()) {
        __ ldf(FloatRegisterImpl::S, Gargs,      arg_slot(ld_off), r_1->as_FloatRegister());
      } else {
        // In V9, doubles are given 2 64-bit slots in the interpreter, but the
        // data is passed in only 1 slot.  This code also handles longs that
        // are passed on the stack, but need a stack-to-stack move through a
        // spare float register.
        RegisterOrConstant slot = (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) ?
              next_arg_slot(ld_off) : arg_slot(ld_off);
        __ ldf(FloatRegisterImpl::D, Gargs,                  slot, r_1->as_FloatRegister());
      }
    }
    // Was the argument really intended to be on the stack, but was loaded
    // into F8/F9?
    if (regs[i].first()->is_stack()) {
      assert(r_1->as_FloatRegister() == F8, "fix this code");
      // Convert stack slot to an SP offset
      int st_off = reg2offset(regs[i].first()) + STACK_BIAS;
      // Store down the shuffled stack word.  Target address _is_ aligned.
      RegisterOrConstant slot = __ ensure_simm13_or_reg(st_off, Rdisp);
      if (!r_2->is_valid()) __ stf(FloatRegisterImpl::S, r_1->as_FloatRegister(), SP, slot);
      else                  __ stf(FloatRegisterImpl::D, r_1->as_FloatRegister(), SP, slot);
    }
  }

  // Jump to the compiled code just as if compiled code was doing it.
  __ ld_ptr(G5_method, in_bytes(Method::from_compiled_offset()), G3);
#if INCLUDE_JVMCI
  if (EnableJVMCI) {
    // check if this call should be routed towards a specific entry point
    __ ld(Address(G2_thread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())), G1);
    __ cmp(G0, G1);
    Label no_alternative_target;
    __ br(Assembler::equal, false, Assembler::pn, no_alternative_target);
    __ delayed()->nop();

    __ ld_ptr(G2_thread, in_bytes(JavaThread::jvmci_alternate_call_target_offset()), G3);
    __ st_ptr(G0, Address(G2_thread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));

    __ bind(no_alternative_target);
  }
#endif // INCLUDE_JVMCI

  // 6243940 We might end up in handle_wrong_method if
  // the callee is deoptimized as we race thru here. If that
  // happens we don't want to take a safepoint because the
  // caller frame will look interpreted and arguments are now
  // "compiled" so it is much better to make this transition
  // invisible to the stack walking code. Unfortunately if
  // we try and find the callee by normal means a safepoint
  // is possible. So we stash the desired callee in the thread
  // and the vm will find it there should this case occur.
  Address callee_target_addr(G2_thread, JavaThread::callee_target_offset());
  __ st_ptr(G5_method, callee_target_addr);
  __ jmpl(G3, 0, G0);
  __ delayed()->nop();
}

void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,
                                    int total_args_passed,
                                    int comp_args_on_stack,
                                    const BasicType *sig_bt,
                                    const VMRegPair *regs) {
  AdapterGenerator agen(masm);
  agen.gen_i2c_adapter(total_args_passed, comp_args_on_stack, sig_bt, regs);
}

// ---------------------------------------------------------------
AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
                                                            int total_args_passed,
                                                            // VMReg max_arg,
                                                            int comp_args_on_stack, // VMRegStackSlots
                                                            const BasicType *sig_bt,
                                                            const VMRegPair *regs,
                                                            AdapterFingerPrint* fingerprint) {
  address i2c_entry = __ pc();

  gen_i2c_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs);


  // -------------------------------------------------------------------------
  // Generate a C2I adapter.  On entry we know G5 holds the Method*.  The
  // args start out packed in the compiled layout.  They need to be unpacked
  // into the interpreter layout.  This will almost always require some stack
  // space.  We grow the current (compiled) stack, then repack the args.  We
  // finally end in a jump to the generic interpreter entry point.  On exit
  // from the interpreter, the interpreter will restore our SP (lest the
  // compiled code, which relies solely on SP and not FP, get sick).

  address c2i_unverified_entry = __ pc();
  Label L_skip_fixup;
  {
    Register R_temp = G1;  // another scratch register

    AddressLiteral ic_miss(SharedRuntime::get_ic_miss_stub());

    __ verify_oop(O0);
    __ load_klass(O0, G3_scratch);

    __ ld_ptr(G5_method, CompiledICHolder::holder_klass_offset(), R_temp);
    __ cmp(G3_scratch, R_temp);

    Label ok, ok2;
    __ brx(Assembler::equal, false, Assembler::pt, ok);
    __ delayed()->ld_ptr(G5_method, CompiledICHolder::holder_method_offset(), G5_method);
    __ jump_to(ic_miss, G3_scratch);
    __ delayed()->nop();

    __ bind(ok);
    // Method might have been compiled since the call site was patched to
    // interpreted; if that is the case treat it as a miss so we can get
    // the call site corrected.
    __ ld_ptr(G5_method, in_bytes(Method::code_offset()), G3_scratch);
    __ bind(ok2);
    __ br_null(G3_scratch, false, Assembler::pt, L_skip_fixup);
    __ delayed()->nop();
    __ jump_to(ic_miss, G3_scratch);
    __ delayed()->nop();

  }

  address c2i_entry = __ pc();
  AdapterGenerator agen(masm);
  agen.gen_c2i_adapter(total_args_passed, comp_args_on_stack, sig_bt, regs, L_skip_fixup);

  __ flush();
  return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);

}
936
937// Helper function for native calling conventions
938static VMReg int_stk_helper( int i ) {
939  // Bias any stack based VMReg we get by ignoring the window area
940  // but not the register parameter save area.
941  //
942  // This is strange for the following reasons. We'd normally expect
943  // the calling convention to return an VMReg for a stack slot
944  // completely ignoring any abi reserved area. C2 thinks of that
945  // abi area as only out_preserve_stack_slots. This does not include
946  // the area allocated by the C abi to store down integer arguments
947  // because the java calling convention does not use it. So
948  // since c2 assumes that there are only out_preserve_stack_slots
949  // to bias the optoregs (which impacts VMRegs) when actually referencing any actual stack
950  // location the c calling convention must add in this bias amount
951  // to make up for the fact that the out_preserve_stack_slots is
952  // insufficient for C calls. What a mess. I sure hope those 6
953  // stack words were worth it on every java call!
954
955  // Another way of cleaning this up would be for out_preserve_stack_slots
956  // to take a parameter to say whether it was C or java calling conventions.
957  // Then things might look a little better (but not much).
958
959  int mem_parm_offset = i - SPARC_ARGS_IN_REGS_NUM;
960  if( mem_parm_offset < 0 ) {
961    return as_oRegister(i)->as_VMReg();
962  } else {
963    int actual_offset = (mem_parm_offset + frame::memory_parameter_word_sp_offset) * VMRegImpl::slots_per_word;
964    // Now return a biased offset that will be correct when out_preserve_slots is added back in
965    return VMRegImpl::stack2reg(actual_offset - SharedRuntime::out_preserve_stack_slots());
966  }
967}
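
// Worked example (illustrative): with SPARC_ARGS_IN_REGS_NUM == 6,
// int_stk_helper(0..5) returns O0..O5; int_stk_helper(6) computes
// mem_parm_offset == 0 and returns the first memory-parameter slot, biased
// down by out_preserve_stack_slots() so that adding the bias back in (as
// reg2offset does) yields the real SP-relative slot.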


int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
                                         VMRegPair *regs,
                                         VMRegPair *regs2,
                                         int total_args_passed) {
    assert(regs2 == NULL, "not needed on sparc");

    // Return the number of VMReg stack_slots needed for the args.
    // This value does not include an abi space (like register window
    // save area).

    // The native convention is V8 if !LP64
    // The LP64 convention is the V9 convention which is slightly more sane.

    // We return the amount of VMReg stack slots we need to reserve for all
    // the arguments NOT counting out_preserve_stack_slots. Since we always
    // have space for storing at least 6 registers to memory we start with that.
    // See int_stk_helper for a further discussion.
    int max_stack_slots = (frame::varargs_offset * VMRegImpl::slots_per_word) - SharedRuntime::out_preserve_stack_slots();

    // V9 convention: All things "as-if" on double-wide stack slots.
    // Hoist any int/ptr/long's in the first 6 to int regs.
    // Hoist any flt/dbl's in the first 16 dbl regs.
    int j = 0;                  // Count of actual args, not HALVES
    VMRegPair param_array_reg;  // location of the argument in the parameter array
    for (int i = 0; i < total_args_passed; i++, j++) {
      param_array_reg.set_bad();
      switch (sig_bt[i]) {
      case T_BOOLEAN:
      case T_BYTE:
      case T_CHAR:
      case T_INT:
      case T_SHORT:
        regs[i].set1(int_stk_helper(j));
        break;
      case T_LONG:
        assert((i + 1) < total_args_passed && sig_bt[i+1] == T_VOID, "expecting half");
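        // fall-through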
      case T_ADDRESS: // raw pointers, like current thread, for VM calls
      case T_ARRAY:
      case T_OBJECT:
      case T_METADATA:
        regs[i].set2(int_stk_helper(j));
        break;
      case T_FLOAT:
        // Per SPARC Compliance Definition 2.4.1, page 3P-12 available here
        // http://www.sparc.org/wp-content/uploads/2014/01/SCD.2.4.1.pdf.gz
        //
        // "When a callee prototype exists, and does not indicate variable arguments,
        // floating-point values assigned to locations %sp+BIAS+128 through %sp+BIAS+248
        // will be promoted to floating-point registers"
        //
        // By "promoted" it means that the argument is located in two places, an unused
        // spill slot in the "parameter array" (starts at %sp+BIAS+128), and a live
        // float register.  In most cases, there are 6 or fewer arguments of any type,
        // and the standard parameter array slots (%sp+BIAS+128 to %sp+BIAS+176 exclusive)
        // serve as shadow slots.  Per the spec floating point registers %d6 to %d16
        // require slots beyond that (up to %sp+BIAS+248).
        //
        {
          // V9ism: floats go in ODD registers and stack slots
          int float_index = 1 + (j << 1);
          param_array_reg.set1(VMRegImpl::stack2reg(float_index));
          if (j < 16) {
            regs[i].set1(as_FloatRegister(float_index)->as_VMReg());
          } else {
            regs[i] = param_array_reg;
          }
        }
        break;
      case T_DOUBLE:
        {
          assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half");
          // V9ism: doubles go in EVEN/ODD regs and stack slots
          int double_index = (j << 1);
          param_array_reg.set2(VMRegImpl::stack2reg(double_index));
          if (j < 16) {
            regs[i].set2(as_FloatRegister(double_index)->as_VMReg());
          } else {
            // V9ism: doubles go in EVEN/ODD stack slots
            regs[i] = param_array_reg;
          }
        }
        break;
      case T_VOID:
        regs[i].set_bad();
        j--;
        break; // Do not count HALVES
      default:
        ShouldNotReachHere();
      }
      // Keep track of the deepest parameter array slot.
      if (!param_array_reg.first()->is_valid()) {
        param_array_reg = regs[i];
      }
      if (param_array_reg.first()->is_stack()) {
        int off = param_array_reg.first()->reg2stack();
        if (off > max_stack_slots) max_stack_slots = off;
      }
      if (param_array_reg.second()->is_stack()) {
        int off = param_array_reg.second()->reg2stack();
        if (off > max_stack_slots) max_stack_slots = off;
      }
    }
  return align_up(max_stack_slots + 1, 2);

}
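
// Worked trace of the V9 float rules above (illustrative): for a signature
// (jint, jfloat, jdouble plus its T_VOID half) the loop assigns
//   i=0 T_INT    j=0 -> O0   via int_stk_helper(0)
//   i=1 T_FLOAT  j=1 -> %f3  (float_index  = 1 + (1 << 1) = 3, an ODD reg)
//   i=2 T_DOUBLE j=2 -> %d4  (double_index = 2 << 1 = 4, an EVEN/ODD pair)
// while param_array_reg records the matching shadow slots in the parameter
// array for the deepest-slot bookkeeping at the bottom of the loop.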


// ---------------------------------------------------------------------------
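// Note (a sketch of the reasoning): only floating-point results need an
// explicit spill here. An integer or pointer result lives in O0, which
// becomes I0 after the save instruction and is preserved by the register
// window across the runtime call, whereas F0/D0 is volatile.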
void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  switch (ret_type) {
  case T_FLOAT:
    __ stf(FloatRegisterImpl::S, F0, SP, frame_slots*VMRegImpl::stack_slot_size - 4+STACK_BIAS);
    break;
  case T_DOUBLE:
    __ stf(FloatRegisterImpl::D, F0, SP, frame_slots*VMRegImpl::stack_slot_size - 8+STACK_BIAS);
    break;
  }
}

void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
  switch (ret_type) {
  case T_FLOAT:
    __ ldf(FloatRegisterImpl::S, SP, frame_slots*VMRegImpl::stack_slot_size - 4+STACK_BIAS, F0);
    break;
  case T_DOUBLE:
    __ ldf(FloatRegisterImpl::D, SP, frame_slots*VMRegImpl::stack_slot_size - 8+STACK_BIAS, F0);
    break;
  }
}

// Check and forward any pending exception.  Thread is stored in
// L7_thread_cache and possibly NOT in G2_thread.  Since this is a native call, there
// is no exception handler.  We merely pop this frame off and throw the
// exception in the caller's frame.
static void check_forward_pending_exception(MacroAssembler *masm, Register Rex_oop) {
  Label L;
  __ br_null(Rex_oop, false, Assembler::pt, L);
  __ delayed()->mov(L7_thread_cache, G2_thread); // restore in case we have exception
  // Since this is a native call, we *know* the proper exception handler
  // without calling into the VM: it's the empty function.  Just pop this
  // frame and then jump to forward_exception_entry; O7 will contain the
  // native caller's return PC.
  AddressLiteral exception_entry(StubRoutines::forward_exception_entry());
  __ jump_to(exception_entry, G3_scratch);
  __ delayed()->restore();      // Pop this frame off.
  __ bind(L);
}

// A simple move of integer-like type
static void simple_move32(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5);
      __ st(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
    } else {
      // stack to reg
      __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    __ st(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
  } else {
    __ mov(src.first()->as_Register(), dst.first()->as_Register());
  }
}

// On 64-bit we store integer-like items to the stack as 64-bit items
// (SPARC ABI) even though Java would only store 32 bits for a parameter.
// On 32-bit it is simply 32 bits.  So this routine does 32->32 on 32-bit
// and 32->64 on 64-bit.
static void move32_64(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5);
      __ st_ptr(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
    } else {
      // stack to reg
      __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    // Some compilers (gcc) expect a clean 32 bit value on function entry
    __ signx(src.first()->as_Register(), L5);
    __ st_ptr(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
  } else {
    // Some compilers (gcc) expect a clean 32 bit value on function entry
    __ signx(src.first()->as_Register(), dst.first()->as_Register());
  }
}


static void move_ptr(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      __ ld_ptr(FP, reg2offset(src.first()) + STACK_BIAS, L5);
      __ st_ptr(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
    } else {
      // stack to reg
      __ ld_ptr(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    __ st_ptr(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
  } else {
    __ mov(src.first()->as_Register(), dst.first()->as_Register());
  }
}


// An oop arg. Must pass a handle not the oop itself
static void object_move(MacroAssembler* masm,
                        OopMap* map,
                        int oop_handle_offset,
                        int framesize_in_slots,
                        VMRegPair src,
                        VMRegPair dst,
                        bool is_receiver,
                        int* receiver_offset) {

  // must pass a handle. First figure out the location we use as a handle

  if (src.first()->is_stack()) {
    // Oop is already on the stack
    Register rHandle = dst.first()->is_stack() ? L5 : dst.first()->as_Register();
    __ add(FP, reg2offset(src.first()) + STACK_BIAS, rHandle);
    __ ld_ptr(rHandle, 0, L4);
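    // Pass a NULL handle for a NULL oop: movr conditionally writes G0 over
    // rHandle when the loaded oop (L4) is zero.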
    __ movr( Assembler::rc_z, L4, G0, rHandle );
    if (dst.first()->is_stack()) {
      __ st_ptr(rHandle, SP, reg2offset(dst.first()) + STACK_BIAS);
    }
    int offset_in_older_frame = src.first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
    if (is_receiver) {
      *receiver_offset = (offset_in_older_frame + framesize_in_slots) * VMRegImpl::stack_slot_size;
    }
    map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots));
  } else {
    // Oop is in an input register; we must flush it to the stack
    const Register rOop = src.first()->as_Register();
    const Register rHandle = L5;
    int oop_slot = rOop->input_number() * VMRegImpl::slots_per_word + oop_handle_offset;
    int offset = oop_slot * VMRegImpl::stack_slot_size;
    __ st_ptr(rOop, SP, offset + STACK_BIAS);
    if (is_receiver) {
       *receiver_offset = offset;
    }
    map->set_oop(VMRegImpl::stack2reg(oop_slot));
    __ add(SP, offset + STACK_BIAS, rHandle);
    __ movr( Assembler::rc_z, rOop, G0, rHandle );

    if (dst.first()->is_stack()) {
      __ st_ptr(rHandle, SP, reg2offset(dst.first()) + STACK_BIAS);
    } else {
      __ mov(rHandle, dst.first()->as_Register());
    }
  }
}

// A float arg may have to do float-reg to int-reg conversion
static void float_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
  assert(!src.second()->is_valid() && !dst.second()->is_valid(), "bad float_move");

  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack the easiest of the bunch
      __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5);
      __ st(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
    } else {
      // stack to reg
      if (dst.first()->is_Register()) {
        __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
      } else {
        __ ldf(FloatRegisterImpl::S, FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_FloatRegister());
      }
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    if (src.first()->is_Register()) {
      __ st(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
    } else {
      __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(), SP, reg2offset(dst.first()) + STACK_BIAS);
    }
  } else {
    // reg to reg
    if (src.first()->is_Register()) {
      if (dst.first()->is_Register()) {
        // gpr -> gpr
        __ mov(src.first()->as_Register(), dst.first()->as_Register());
      } else {
        // gpr -> fpr
        __ st(src.first()->as_Register(), FP, -4 + STACK_BIAS);
        __ ldf(FloatRegisterImpl::S, FP, -4 + STACK_BIAS, dst.first()->as_FloatRegister());
      }
    } else if (dst.first()->is_Register()) {
      // fpr -> gpr
      __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(), FP, -4 + STACK_BIAS);
      __ ld(FP, -4 + STACK_BIAS, dst.first()->as_Register());
    } else {
      // fpr -> fpr
      // In theory these overlap but the ordering is such that this is likely a nop
      if ( src.first() != dst.first()) {
        __ fmov(FloatRegisterImpl::S, src.first()->as_FloatRegister(), dst.first()->as_FloatRegister());
      }
    }
  }
}
1278
1279static void split_long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1280  VMRegPair src_lo(src.first());
1281  VMRegPair src_hi(src.second());
1282  VMRegPair dst_lo(dst.first());
1283  VMRegPair dst_hi(dst.second());
1284  simple_move32(masm, src_lo, dst_lo);
1285  simple_move32(masm, src_hi, dst_hi);
1286}
1287
1288// A long move
1289static void long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1290
1291  // Do the simple ones here; otherwise do two int moves
1292  if (src.is_single_phys_reg()) {
1293    if (dst.is_single_phys_reg()) {
1294      __ mov(src.first()->as_Register(), dst.first()->as_Register());
1295    } else {
1296      // split src into two separate registers
1297      // Remember hi means high address, i.e. the LSW, on sparc
1298      // Move msw to lsw
1299      if (dst.second()->is_reg()) {
1300        // MSW -> MSW
1301        __ srax(src.first()->as_Register(), 32, dst.first()->as_Register());
1302        // Now LSW -> LSW
1303        // this will only move lo -> lo and ignore hi
1304        VMRegPair split(dst.second());
1305        simple_move32(masm, src, split);
1306      } else {
1307        VMRegPair split(src.first(), L4->as_VMReg());
1308        // MSW -> MSW (lo, i.e. the first word)
1309        __ srax(src.first()->as_Register(), 32, L4);
1310        split_long_move(masm, split, dst);
1311      }
1312    }
1313  } else if (dst.is_single_phys_reg()) {
1314    if (src.is_adjacent_aligned_on_stack(2)) {
1315      __ ldx(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
1316    } else {
1317      // dst is a single reg.
1318      // Remember lo is the low address (not the MSW) for stack slots,
1319      // and lo is the "real" register for registers.
1320      // src is a split pair whose halves may live in registers and/or on the stack.
1321
1322      VMRegPair split;
1323
1324      if (src.first()->is_reg()) {
1325        // src.lo (msw) is a reg, src.hi is stk/reg
1326        // we will move: src.hi (LSW) -> dst.lo, src.lo (MSW) -> src.lo [the MSW is in the LSW of the reg]
1327        split.set_pair(dst.first(), src.first());
1328      } else {
1329        // msw is stack move to L5
1330        // lsw is stack move to dst.lo (real reg)
1331        // we will move: src.hi (LSW) -> dst.lo, src.lo (MSW) -> L5
1332        split.set_pair(dst.first(), L5->as_VMReg());
1333      }
1334
1335      // src.lo -> src.lo/L5, src.hi -> dst.lo (the real reg)
1336      // msw   -> src.lo/L5,  lsw -> dst.lo
1337      split_long_move(masm, src, split);
1338
1339      // dst.lo now holds the LSW in the correct (low) position; shift the
1340      // MSW half into the high 32 bits and OR it in below.
1341      __ sllx(split.first()->as_Register(), 32, L5);
1342
1343      const Register d = dst.first()->as_Register();
1344      __ or3(L5, d, d);
1345    }
1346  } else {
1347    // For LP64 we can probably do better.
1348    split_long_move(masm, src, dst);
1349  }
1350}
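
// Example (illustrative sketch only): a long held in a single 64-bit register,
// say %g1, moving to a register pair dst.first()=%o0 (MSW) / dst.second()=%o1
// (LSW) would emit roughly
//   srax %g1, 32, %o0   ! MSW -> dst.first()
//   mov  %g1, %o1       ! low 32 bits (LSW) -> dst.second()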
1351
1352// A double move
1353static void double_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
1354
1355  // The painful thing here is that like long_move a VMRegPair might be
1356  // 1: a single physical register
1357  // 2: two physical registers (v8)
1358  // 3: a physical reg [lo] and a stack slot [hi] (v8)
1359  // 4: two stack slots
1360
1361  // Since src always follows the java calling convention we know that the src
1362  // pair is always either all registers or all stack slots (and aligned?)
1363
1365  if (src.first()->is_stack()) {
1366    if (dst.first()->is_stack()) {
1367      // stack to stack -- the easiest of the bunch
1368      // ought to be a way to do this with ldd/std when the alignment is ok
1369      __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5);
1370      __ ld(FP, reg2offset(src.second()) + STACK_BIAS, L4);
1371      __ st(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
1372      __ st(L4, SP, reg2offset(dst.second()) + STACK_BIAS);
1373    } else {
1374      // stack to reg
1375      if (dst.second()->is_stack()) {
1376        // stack -> reg, stack -> stack
1377        __ ld(FP, reg2offset(src.second()) + STACK_BIAS, L4);
1378        if (dst.first()->is_Register()) {
1379          __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
1380        } else {
1381          __ ldf(FloatRegisterImpl::S, FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_FloatRegister());
1382        }
1383        // Don't forget the second half (very rare case; this store was once missing)
1384        __ st(L4, SP, reg2offset(dst.second()) + STACK_BIAS);
1385      } else {
1386        // stack -> reg
1387        // Eventually optimize for alignment QQQ
1388        if (dst.first()->is_Register()) {
1389          __ ld(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
1390          __ ld(FP, reg2offset(src.second()) + STACK_BIAS, dst.second()->as_Register());
1391        } else {
1392          __ ldf(FloatRegisterImpl::S, FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_FloatRegister());
1393          __ ldf(FloatRegisterImpl::S, FP, reg2offset(src.second()) + STACK_BIAS, dst.second()->as_FloatRegister());
1394        }
1395      }
1396    }
1397  } else if (dst.first()->is_stack()) {
1398    // reg to stack
1399    if (src.first()->is_Register()) {
1400      // Eventually optimize for alignment QQQ
1401      __ st(src.first()->as_Register(), SP, reg2offset(dst.first()) + STACK_BIAS);
1402      if (src.second()->is_stack()) {
1403        __ ld(FP, reg2offset(src.second()) + STACK_BIAS, L4);
1404        __ st(L4, SP, reg2offset(dst.second()) + STACK_BIAS);
1405      } else {
1406        __ st(src.second()->as_Register(), SP, reg2offset(dst.second()) + STACK_BIAS);
1407      }
1408    } else {
1409      // fpr to stack
1410      if (src.second()->is_stack()) {
1411        ShouldNotReachHere();
1412      } else {
1413        // Is the stack aligned?
1414        if (reg2offset(dst.first()) & 0x7) {
1415          // No, store as two singles
1416          __ stf(FloatRegisterImpl::S, src.first()->as_FloatRegister(), SP, reg2offset(dst.first()) + STACK_BIAS);
1417          __ stf(FloatRegisterImpl::S, src.second()->as_FloatRegister(), SP, reg2offset(dst.second()) + STACK_BIAS);
1418        } else {
1419          __ stf(FloatRegisterImpl::D, src.first()->as_FloatRegister(), SP, reg2offset(dst.first()) + STACK_BIAS);
1420        }
1421      }
1422    }
1423  } else {
1424    // reg to reg
1425    if (src.first()->is_Register()) {
1426      if (dst.first()->is_Register()) {
1427        // gpr -> gpr
1428        __ mov(src.first()->as_Register(), dst.first()->as_Register());
1429        __ mov(src.second()->as_Register(), dst.second()->as_Register());
1430      } else {
1431        // gpr -> fpr
1432        // ought to be able to do a single store
1433        __ st(src.first()->as_Register(), FP, -8 + STACK_BIAS);
1434        __ st(src.second()->as_Register(), FP, -4 + STACK_BIAS);
1435        // ought to be able to do a single load
1436        __ ldf(FloatRegisterImpl::S, FP, -8 + STACK_BIAS, dst.first()->as_FloatRegister());
1437        __ ldf(FloatRegisterImpl::S, FP, -4 + STACK_BIAS, dst.second()->as_FloatRegister());
1438      }
1439    } else if (dst.first()->is_Register()) {
1440      // fpr -> gpr
1441      // ought to be able to do a single store
1442      __ stf(FloatRegisterImpl::D, src.first()->as_FloatRegister(), FP, -8 + STACK_BIAS);
1443      // ought to be able to do a single load
1444      // REMEMBER first() is low address not LSB
1445      __ ld(FP, -8 + STACK_BIAS, dst.first()->as_Register());
1446      if (dst.second()->is_Register()) {
1447        __ ld(FP, -4 + STACK_BIAS, dst.second()->as_Register());
1448      } else {
1449        __ ld(FP, -4 + STACK_BIAS, L4);
1450        __ st(L4, SP, reg2offset(dst.second()) + STACK_BIAS);
1451      }
1452    } else {
1453      // fpr -> fpr
1454      // In theory these overlap but the ordering is such that this is likely a nop
1455      if (src.first() != dst.first()) {
1456        __ fmov(FloatRegisterImpl::D, src.first()->as_FloatRegister(), dst.first()->as_FloatRegister());
1457      }
1458    }
1459  }
1460}
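
// Example (illustrative sketch only): a double split across two gprs, say
// %i0/%i1, moving to the float pair %f0/%f1 bounces through the scratch area
// just below FP:
//   st  %i0, [%fp + STACK_BIAS - 8]
//   st  %i1, [%fp + STACK_BIAS - 4]
//   ldf [%fp + STACK_BIAS - 8], %f0
//   ldf [%fp + STACK_BIAS - 4], %f1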
1461
1462// Creates an inner frame if one hasn't already been created, and
1463// saves a copy of the thread in L7_thread_cache
1464static void create_inner_frame(MacroAssembler* masm, bool* already_created) {
1465  if (!*already_created) {
1466    __ save_frame(0);
1467    // Save thread in L7 (INNER FRAME); it crosses a bunch of VM calls below
1468    // Don't use save_thread because it smashes G2 and we merely want to save a
1469    // copy
1470    __ mov(G2_thread, L7_thread_cache);
1471    *already_created = true;
1472  }
1473}
1474
1475
1476static void save_or_restore_arguments(MacroAssembler* masm,
1477                                      const int stack_slots,
1478                                      const int total_in_args,
1479                                      const int arg_save_area,
1480                                      OopMap* map,
1481                                      VMRegPair* in_regs,
1482                                      BasicType* in_sig_bt) {
1483  // if map is non-NULL then the code should store the values,
1484  // otherwise it should load them.
1485  if (map != NULL) {
1486    // Fill in the map
1487    for (int i = 0; i < total_in_args; i++) {
1488      if (in_sig_bt[i] == T_ARRAY) {
1489        if (in_regs[i].first()->is_stack()) {
1490          int offset_in_older_frame = in_regs[i].first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
1491          map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + stack_slots));
1492        } else if (in_regs[i].first()->is_Register()) {
1493          map->set_oop(in_regs[i].first());
1494        } else {
1495          ShouldNotReachHere();
1496        }
1497      }
1498    }
1499  }
1500
1501  // Save or restore double word values
1502  int handle_index = 0;
1503  for (int i = 0; i < total_in_args; i++) {
1504    int slot = handle_index + arg_save_area;
1505    int offset = slot * VMRegImpl::stack_slot_size;
1506    if (in_sig_bt[i] == T_LONG && in_regs[i].first()->is_Register()) {
1507      const Register reg = in_regs[i].first()->as_Register();
1508      if (reg->is_global()) {
1509        handle_index += 2;
1510        assert(handle_index <= stack_slots, "overflow");
1511        if (map != NULL) {
1512          __ stx(reg, SP, offset + STACK_BIAS);
1513        } else {
1514          __ ldx(SP, offset + STACK_BIAS, reg);
1515        }
1516      }
1517    } else if (in_sig_bt[i] == T_DOUBLE && in_regs[i].first()->is_FloatRegister()) {
1518      handle_index += 2;
1519      assert(handle_index <= stack_slots, "overflow");
1520      if (map != NULL) {
1521        __ stf(FloatRegisterImpl::D, in_regs[i].first()->as_FloatRegister(), SP, offset + STACK_BIAS);
1522      } else {
1523        __ ldf(FloatRegisterImpl::D, SP, offset + STACK_BIAS, in_regs[i].first()->as_FloatRegister());
1524      }
1525    }
1526  }
1527  // Save or restore single-precision floats
1528  for (int i = 0; i < total_in_args; i++) {
1529    int slot = handle_index + arg_save_area;
1530    int offset = slot * VMRegImpl::stack_slot_size;
1531    if (in_sig_bt[i] == T_FLOAT && in_regs[i].first()->is_FloatRegister()) {
1532      handle_index++;
1533      assert(handle_index <= stack_slots, "overflow");
1534      if (map != NULL) {
1535        __ stf(FloatRegisterImpl::S, in_regs[i].first()->as_FloatRegister(), SP, offset + STACK_BIAS);
1536      } else {
1537        __ ldf(FloatRegisterImpl::S, SP, offset + STACK_BIAS, in_regs[i].first()->as_FloatRegister());
1538      }
1539    }
1540  }
1541
1542}
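
// Typical usage (sketch, mirroring the calls below): pass a fresh OopMap to
// save the arguments before a runtime call, then pass NULL to reload them:
//   OopMap* map = new OopMap(stack_slots * 2, 0);
//   save_or_restore_arguments(masm, stack_slots, total_in_args,
//                             arg_save_area, map,  in_regs, in_sig_bt); // save
//   ... call into the runtime ...
//   save_or_restore_arguments(masm, stack_slots, total_in_args,
//                             arg_save_area, NULL, in_regs, in_sig_bt); // restore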
1543
1544
1545// Check GCLocker::needs_gc and enter the runtime if it's true.  This
1546// keeps a new JNI critical region from starting until a GC has been
1547// forced.  Save down any oops in registers and describe them in an
1548// OopMap.
1549static void check_needs_gc_for_critical_native(MacroAssembler* masm,
1550                                               const int stack_slots,
1551                                               const int total_in_args,
1552                                               const int arg_save_area,
1553                                               OopMapSet* oop_maps,
1554                                               VMRegPair* in_regs,
1555                                               BasicType* in_sig_bt) {
1556  __ block_comment("check GCLocker::needs_gc");
1557  Label cont;
1558  AddressLiteral sync_state(GCLocker::needs_gc_address());
1559  __ load_bool_contents(sync_state, G3_scratch);
1560  __ cmp_zero_and_br(Assembler::equal, G3_scratch, cont);
1561  __ delayed()->nop();
1562
1563  // Save down any values that are live in registers and call into the
1564  // runtime to halt for a GC
1565  OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
1566  save_or_restore_arguments(masm, stack_slots, total_in_args,
1567                            arg_save_area, map, in_regs, in_sig_bt);
1568
1569  __ mov(G2_thread, L7_thread_cache);
1570
1571  __ set_last_Java_frame(SP, noreg);
1572
1573  __ block_comment("block_for_jni_critical");
1574  __ call(CAST_FROM_FN_PTR(address, SharedRuntime::block_for_jni_critical), relocInfo::runtime_call_type);
1575  __ delayed()->mov(L7_thread_cache, O0);
1576  oop_maps->add_gc_map( __ offset(), map);
1577
1578  __ restore_thread(L7_thread_cache); // restore G2_thread
1579  __ reset_last_Java_frame();
1580
1581  // Reload all the register arguments
1582  save_or_restore_arguments(masm, stack_slots, total_in_args,
1583                            arg_save_area, NULL, in_regs, in_sig_bt);
1584
1585  __ bind(cont);
1586#ifdef ASSERT
1587  if (StressCriticalJNINatives) {
1588    // Stress register saving
1589    OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
1590    save_or_restore_arguments(masm, stack_slots, total_in_args,
1591                              arg_save_area, map, in_regs, in_sig_bt);
1592    // Destroy argument registers
1593    for (int i = 0; i < total_in_args; i++) {
1594      if (in_regs[i].first()->is_Register()) {
1595        const Register reg = in_regs[i].first()->as_Register();
1596        if (reg->is_global()) {
1597          __ mov(G0, reg);
1598        }
1599      } else if (in_regs[i].first()->is_FloatRegister()) {
1600        __ fneg(FloatRegisterImpl::D, in_regs[i].first()->as_FloatRegister(), in_regs[i].first()->as_FloatRegister());
1601      }
1602    }
1603
1604    save_or_restore_arguments(masm, stack_slots, total_in_args,
1605                              arg_save_area, NULL, in_regs, in_sig_bt);
1606  }
1607#endif
1608}
1609
1610// Unpack an array argument into a pointer to the body and the length
1611// if the array is non-null, otherwise pass 0 for both.
1612static void unpack_array_argument(MacroAssembler* masm, VMRegPair reg, BasicType in_elem_type, VMRegPair body_arg, VMRegPair length_arg) {
1613  // Pass the length, ptr pair
1614  Label is_null, done;
1615  if (reg.first()->is_stack()) {
1616    VMRegPair tmp  = reg64_to_VMRegPair(L2);
1617    // Load the arg up from the stack
1618    move_ptr(masm, reg, tmp);
1619    reg = tmp;
1620  }
1621  __ cmp(reg.first()->as_Register(), G0);
1622  __ brx(Assembler::equal, false, Assembler::pt, is_null);
1623  __ delayed()->add(reg.first()->as_Register(), arrayOopDesc::base_offset_in_bytes(in_elem_type), L4);
1624  move_ptr(masm, reg64_to_VMRegPair(L4), body_arg);
1625  __ ld(reg.first()->as_Register(), arrayOopDesc::length_offset_in_bytes(), L4);
1626  move32_64(masm, reg64_to_VMRegPair(L4), length_arg);
1627  __ ba_short(done);
1628  __ bind(is_null);
1629  // Pass zeros
1630  move_ptr(masm, reg64_to_VMRegPair(G0), body_arg);
1631  move32_64(masm, reg64_to_VMRegPair(G0), length_arg);
1632  __ bind(done);
1633}
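
// Example (illustrative): for a non-null Java byte[] whose oop is in %i1 this
// passes roughly
//   body_arg   <- %i1 + arrayOopDesc::base_offset_in_bytes(T_BYTE)
//   length_arg <- [%i1 + arrayOopDesc::length_offset_in_bytes()]
// while a null array passes (NULL, 0) for the (body, length) pair.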
1634
1635static void verify_oop_args(MacroAssembler* masm,
1636                            methodHandle method,
1637                            const BasicType* sig_bt,
1638                            const VMRegPair* regs) {
1639  Register temp_reg = G5_method;  // not part of any compiled calling seq
1640  if (VerifyOops) {
1641    for (int i = 0; i < method->size_of_parameters(); i++) {
1642      if (sig_bt[i] == T_OBJECT ||
1643          sig_bt[i] == T_ARRAY) {
1644        VMReg r = regs[i].first();
1645        assert(r->is_valid(), "bad oop arg");
1646        if (r->is_stack()) {
1647          RegisterOrConstant ld_off = reg2offset(r) + STACK_BIAS;
1648          ld_off = __ ensure_simm13_or_reg(ld_off, temp_reg);
1649          __ ld_ptr(SP, ld_off, temp_reg);
1650          __ verify_oop(temp_reg);
1651        } else {
1652          __ verify_oop(r->as_Register());
1653        }
1654      }
1655    }
1656  }
1657}
1658
1659static void gen_special_dispatch(MacroAssembler* masm,
1660                                 methodHandle method,
1661                                 const BasicType* sig_bt,
1662                                 const VMRegPair* regs) {
1663  verify_oop_args(masm, method, sig_bt, regs);
1664  vmIntrinsics::ID iid = method->intrinsic_id();
1665
1666  // Now write the args into the outgoing interpreter space
1667  bool     has_receiver   = false;
1668  Register receiver_reg   = noreg;
1669  int      member_arg_pos = -1;
1670  Register member_reg     = noreg;
1671  int      ref_kind       = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid);
1672  if (ref_kind != 0) {
1673    member_arg_pos = method->size_of_parameters() - 1;  // trailing MemberName argument
1674    member_reg = G5_method;  // known to be free at this point
1675    has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
1676  } else if (iid == vmIntrinsics::_invokeBasic) {
1677    has_receiver = true;
1678  } else {
1679    fatal("unexpected intrinsic id %d", iid);
1680  }
1681
1682  if (member_reg != noreg) {
1683    // Load the member_arg into register, if necessary.
1684    SharedRuntime::check_member_name_argument_is_last_argument(method, sig_bt, regs);
1685    VMReg r = regs[member_arg_pos].first();
1686    if (r->is_stack()) {
1687      RegisterOrConstant ld_off = reg2offset(r) + STACK_BIAS;
1688      ld_off = __ ensure_simm13_or_reg(ld_off, member_reg);
1689      __ ld_ptr(SP, ld_off, member_reg);
1690    } else {
1691      // no data motion is needed
1692      member_reg = r->as_Register();
1693    }
1694  }
1695
1696  if (has_receiver) {
1697    // Make sure the receiver is loaded into a register.
1698    assert(method->size_of_parameters() > 0, "oob");
1699    assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object");
1700    VMReg r = regs[0].first();
1701    assert(r->is_valid(), "bad receiver arg");
1702    if (r->is_stack()) {
1703      // Porting note:  This assumes that compiled calling conventions always
1704      // pass the receiver oop in a register.  If this is not true on some
1705      // platform, pick a temp and load the receiver from stack.
1706      fatal("receiver always in a register");
1707      receiver_reg = G3_scratch;  // known to be free at this point
1708      RegisterOrConstant ld_off = reg2offset(r) + STACK_BIAS;
1709      ld_off = __ ensure_simm13_or_reg(ld_off, member_reg);
1710      __ ld_ptr(SP, ld_off, receiver_reg);
1711    } else {
1712      // no data motion is needed
1713      receiver_reg = r->as_Register();
1714    }
1715  }
1716
1717  // Figure out which address we are really jumping to:
1718  MethodHandles::generate_method_handle_dispatch(masm, iid,
1719                                                 receiver_reg, member_reg, /*for_compiler_entry:*/ true);
1720}
1721
1722// ---------------------------------------------------------------------------
1723// Generate a native wrapper for a given method.  The method takes arguments
1724// in the Java compiled code convention, marshals them to the native
1725// convention (handlizes oops, etc), transitions to native, makes the call,
1726// returns to java state (possibly blocking), unhandlizes any result and
1727// returns.
1728//
1729// Critical native functions are a shorthand for the use of
1730  // GetPrimitiveArrayCritical and disallow the use of any other JNI
1731  // functions.  The wrapper is expected to unpack the arguments before
1732  // passing them to the callee and to perform checks before and after the
1733  // native call to ensure that the GCLocker
1734  // lock_critical/unlock_critical semantics are followed.  Some other
1735  // parts of JNI setup are skipped, like the tear down of the JNI handle
1736  // block and the check for pending exceptions, since it's impossible for
1737  // them to be thrown.
1738//
1739// They are roughly structured like this:
1740//    if (GCLocker::needs_gc())
1741//      SharedRuntime::block_for_jni_critical();
1742  //    transition to thread_in_native
1743  //    unpack array arguments and call native entry point
1744  //    check for safepoint in progress
1745  //    check if any thread suspend flags are set
1746  //      call into JVM and possibly unlock the JNI critical
1747//      if a GC was suppressed while in the critical native.
1748//    transition back to thread_in_Java
1749//    return to caller
1750//
1751nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
1752                                                const methodHandle& method,
1753                                                int compile_id,
1754                                                BasicType* in_sig_bt,
1755                                                VMRegPair* in_regs,
1756                                                BasicType ret_type) {
1757  if (method->is_method_handle_intrinsic()) {
1758    vmIntrinsics::ID iid = method->intrinsic_id();
1759    intptr_t start = (intptr_t)__ pc();
1760    int vep_offset = ((intptr_t)__ pc()) - start;
1761    gen_special_dispatch(masm,
1762                         method,
1763                         in_sig_bt,
1764                         in_regs);
1765    int frame_complete = ((intptr_t)__ pc()) - start;  // not complete, period
1766    __ flush();
1767    int stack_slots = SharedRuntime::out_preserve_stack_slots();  // no out slots at all, actually
1768    return nmethod::new_native_nmethod(method,
1769                                       compile_id,
1770                                       masm->code(),
1771                                       vep_offset,
1772                                       frame_complete,
1773                                       stack_slots / VMRegImpl::slots_per_word,
1774                                       in_ByteSize(-1),
1775                                       in_ByteSize(-1),
1776                                       (OopMapSet*)NULL);
1777  }
1778  bool is_critical_native = true;
1779  address native_func = method->critical_native_function();
1780  if (native_func == NULL) {
1781    native_func = method->native_function();
1782    is_critical_native = false;
1783  }
1784  assert(native_func != NULL, "must have function");
1785
1786  // Native nmethod wrappers never take possession of the oop arguments.
1787  // So the caller will gc the arguments. The only thing we need an
1788  // oopMap for is if the call is static.
1789  //
1790  // An OopMap for lock (and class if static), and one for the VM call itself
1791  OopMapSet *oop_maps = new OopMapSet();
1792  intptr_t start = (intptr_t)__ pc();
1793
1794  // First thing: make an ic check to see if we should even be here
1795  {
1796    Label L;
1797    const Register temp_reg = G3_scratch;
1798    AddressLiteral ic_miss(SharedRuntime::get_ic_miss_stub());
1799    __ verify_oop(O0);
1800    __ load_klass(O0, temp_reg);
1801    __ cmp_and_brx_short(temp_reg, G5_inline_cache_reg, Assembler::equal, Assembler::pt, L);
1802
1803    __ jump_to(ic_miss, temp_reg);
1804    __ delayed()->nop();
1805    __ align(CodeEntryAlignment);
1806    __ bind(L);
1807  }
1808
1809  int vep_offset = ((intptr_t)__ pc()) - start;
1810
1811#ifdef COMPILER1
1812  if ((InlineObjectHash && method->intrinsic_id() == vmIntrinsics::_hashCode) || (method->intrinsic_id() == vmIntrinsics::_identityHashCode)) {
1813    // Object.hashCode, System.identityHashCode can pull the hashCode from the
1814    // header word instead of doing a full VM transition once it's been computed.
1815    // Since hashCode is usually polymorphic at call sites we can't do this
1816    // optimization at the call site without a lot of work.
1817    Label slowCase;
1818    Label done;
1819    Register obj_reg              = O0;
1820    Register result               = O0;
1821    Register header               = G3_scratch;
1822    Register hash                 = G3_scratch; // overwrite header value with hash value
1823    Register mask                 = G1;         // to get hash field from header
1824
1825    // Unlike Object.hashCode, System.identityHashCode is a static method and
1826    // gets the object as an argument instead of as the receiver.
1827    if (method->intrinsic_id() == vmIntrinsics::_identityHashCode) {
1828      assert(method->is_static(), "method should be static");
1829      // return 0 for null reference input
1830      __ br_null(obj_reg, false, Assembler::pn, done);
1831      __ delayed()->mov(obj_reg, hash);
1832    }
1833
1834    // Read the header and build a mask to get its hash field.  Give up if the object is not unlocked.
1835    // We depend on hash_mask being at most 32 bits and avoid the use of
1836    // hash_mask_in_place because it could be larger than 32 bits in a 64-bit
1837    // vm: see markOop.hpp.
1838    __ ld_ptr(obj_reg, oopDesc::mark_offset_in_bytes(), header);
1839    __ sethi(markOopDesc::hash_mask, mask);
1840    __ btst(markOopDesc::unlocked_value, header);
1841    __ br(Assembler::zero, false, Assembler::pn, slowCase);
1842    if (UseBiasedLocking) {
1843      // Check if biased and fall through to runtime if so
1844      __ delayed()->nop();
1845      __ btst(markOopDesc::biased_lock_bit_in_place, header);
1846      __ br(Assembler::notZero, false, Assembler::pn, slowCase);
1847    }
1848    __ delayed()->or3(mask, markOopDesc::hash_mask & 0x3ff, mask);
1849
1850    // Check for a valid (non-zero) hash code and get its value.
1851    __ srlx(header, markOopDesc::hash_shift, hash);
1852    __ andcc(hash, mask, hash);
1853    __ br(Assembler::equal, false, Assembler::pn, slowCase);
1854    __ delayed()->nop();
1855
1856    // leaf return.
1857    __ bind(done);
1858    __ retl();
1859    __ delayed()->mov(hash, result);
1860    __ bind(slowCase);
1861  }
1862#endif // COMPILER1
1863
1864
1865  // We have received a description of where all the java args are located
1866  // on entry to the wrapper. We need to convert these args to where
1867  // the jni function will expect them. To figure out where they go
1868  // we convert the java signature to a C signature by inserting
1869  // the hidden arguments as arg[0] and possibly arg[1] (static method)
1870
1871  const int total_in_args = method->size_of_parameters();
1872  int total_c_args = total_in_args;
1873  int total_save_slots = 6 * VMRegImpl::slots_per_word;
1874  if (!is_critical_native) {
1875    total_c_args += 1;
1876    if (method->is_static()) {
1877      total_c_args++;
1878    }
1879  } else {
1880    for (int i = 0; i < total_in_args; i++) {
1881      if (in_sig_bt[i] == T_ARRAY) {
1882        // These have to be saved and restored across the safepoint
1883        total_c_args++;
1884      }
1885    }
1886  }
1887
1888  BasicType* out_sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_c_args);
1889  VMRegPair* out_regs   = NEW_RESOURCE_ARRAY(VMRegPair, total_c_args);
1890  BasicType* in_elem_bt = NULL;
1891
1892  int argc = 0;
1893  if (!is_critical_native) {
1894    out_sig_bt[argc++] = T_ADDRESS;
1895    if (method->is_static()) {
1896      out_sig_bt[argc++] = T_OBJECT;
1897    }
1898
1899    for (int i = 0; i < total_in_args ; i++ ) {
1900      out_sig_bt[argc++] = in_sig_bt[i];
1901    }
1902  } else {
1903    Thread* THREAD = Thread::current();
1904    in_elem_bt = NEW_RESOURCE_ARRAY(BasicType, total_in_args);
1905    SignatureStream ss(method->signature());
1906    for (int i = 0; i < total_in_args ; i++ ) {
1907      if (in_sig_bt[i] == T_ARRAY) {
1908        // Arrays are passed as int, elem* pair
1909        out_sig_bt[argc++] = T_INT;
1910        out_sig_bt[argc++] = T_ADDRESS;
1911        Symbol* atype = ss.as_symbol(CHECK_NULL);
1912        const char* at = atype->as_C_string();
1913        if (strlen(at) == 2) {
1914          assert(at[0] == '[', "must be");
1915          switch (at[1]) {
1916            case 'B': in_elem_bt[i]  = T_BYTE; break;
1917            case 'C': in_elem_bt[i]  = T_CHAR; break;
1918            case 'D': in_elem_bt[i]  = T_DOUBLE; break;
1919            case 'F': in_elem_bt[i]  = T_FLOAT; break;
1920            case 'I': in_elem_bt[i]  = T_INT; break;
1921            case 'J': in_elem_bt[i]  = T_LONG; break;
1922            case 'S': in_elem_bt[i]  = T_SHORT; break;
1923            case 'Z': in_elem_bt[i]  = T_BOOLEAN; break;
1924            default: ShouldNotReachHere();
1925          }
1926        }
1927      } else {
1928        out_sig_bt[argc++] = in_sig_bt[i];
1929        in_elem_bt[i] = T_VOID;
1930      }
1931      if (in_sig_bt[i] != T_VOID) {
1932        assert(in_sig_bt[i] == ss.type(), "must match");
1933        ss.next();
1934      }
1935    }
1936  }
1937
1938  // Now figure out where the args must be stored and how much stack space
1939  // they require (neglecting out_preserve_stack_slots but including space for
1940  // storing the 1st six register arguments). It's weird; see int_stk_helper.
1941  //
1942  int out_arg_slots;
1943  out_arg_slots = c_calling_convention(out_sig_bt, out_regs, NULL, total_c_args);
1944
1945  if (is_critical_native) {
1946    // Critical natives may have to call out so they need a save area
1947    // for register arguments.
1948    int double_slots = 0;
1949    int single_slots = 0;
1950    for ( int i = 0; i < total_in_args; i++) {
1951      if (in_regs[i].first()->is_Register()) {
1952        const Register reg = in_regs[i].first()->as_Register();
1953        switch (in_sig_bt[i]) {
1954          case T_ARRAY:
1955          case T_BOOLEAN:
1956          case T_BYTE:
1957          case T_SHORT:
1958          case T_CHAR:
1959          case T_INT:  assert(reg->is_in(), "don't need to save these"); break;
1960          case T_LONG: if (reg->is_global()) double_slots++; break;
1961          default:  ShouldNotReachHere();
1962        }
1963      } else if (in_regs[i].first()->is_FloatRegister()) {
1964        switch (in_sig_bt[i]) {
1965          case T_FLOAT:  single_slots++; break;
1966          case T_DOUBLE: double_slots++; break;
1967          default:  ShouldNotReachHere();
1968        }
1969      }
1970    }
1971    total_save_slots = double_slots * 2 + single_slots;
1972  }
1973
1974  // Compute framesize for the wrapper.  We need to handlize all oops in
1975  // registers. We must create space for them here that is disjoint from
1976  // the windowed save area because we have no control over when we might
1977  // flush the window again and overwrite values that gc has since modified.
1978  // (The live window race)
1979  //
1980  // We always just allocate 6 words for storing down these objects. This allows
1981  // us to simply record the base and use the Ireg number to decide which
1982  // slot to use. (Note that the reg number is the inbound number, not the
1983  // outbound number.)
1984  // We must shuffle args to match the native convention, and include var-args space.
1985
1986  // Calculate the total number of stack slots we will need.
1987
1988  // First count the abi requirement plus all of the outgoing args
1989  int stack_slots = SharedRuntime::out_preserve_stack_slots() + out_arg_slots;
1990
1991  // Now the space for the inbound oop handle area
1992
1993  int oop_handle_offset = align_up(stack_slots, 2);
1994  stack_slots += total_save_slots;
1995
1996  // Now any space we need for handlizing a klass if static method
1997
1998  int klass_slot_offset = 0;
1999  int klass_offset = -1;
2000  int lock_slot_offset = 0;
2001  bool is_static = false;
2002
2003  if (method->is_static()) {
2004    klass_slot_offset = stack_slots;
2005    stack_slots += VMRegImpl::slots_per_word;
2006    klass_offset = klass_slot_offset * VMRegImpl::stack_slot_size;
2007    is_static = true;
2008  }
2009
2010  // Plus a lock if needed
2011
2012  if (method->is_synchronized()) {
2013    lock_slot_offset = stack_slots;
2014    stack_slots += VMRegImpl::slots_per_word;
2015  }
2016
2017  // Now a place to save return value or as a temporary for any gpr -> fpr moves
2018  stack_slots += 2;
2019
2020  // OK, the space we have allocated will look like:
2021  //
2022  //
2023  // FP-> |                     |
2024  //      |---------------------|
2025  //      | 2 slots for moves   |
2026  //      |---------------------|
2027  //      | lock box (if sync)  |
2028  //      |---------------------| <- lock_slot_offset
2029  //      | klass (if static)   |
2030  //      |---------------------| <- klass_slot_offset
2031  //      | oopHandle area      |
2032  //      |---------------------| <- oop_handle_offset
2033  //      | outbound memory     |
2034  //      | based arguments     |
2035  //      |                     |
2036  //      |---------------------|
2037  //      | vararg area         |
2038  //      |---------------------|
2039  //      |                     |
2040  // SP-> | out_preserved_slots |
2041  //
2042  //
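//
// Worked example (assumed values, for illustration only): suppose
// out_preserve_stack_slots() == 32, out_arg_slots == 12 and
// total_save_slots == 12.  For a static synchronized method the computation
// here works out as:
//   stack_slots = 32 + 12 = 44    -> oop_handle_offset = 44
//   stack_slots = 44 + 12 = 56    -> klass_slot_offset = 56 (klass_offset = 224)
//   stack_slots = 56 + 2  = 58    -> lock_slot_offset  = 58
//   stack_slots = 58 + 2 + 2 = 62 -> align_up(62, 4) = 64 slots, stack_size = 256 bytes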
2043
2044
2045  // Now compute the actual number of stack words we need, rounding to keep
2046  // the stack properly aligned.
2047  stack_slots = align_up(stack_slots, 2 * VMRegImpl::slots_per_word);
2048
2049  int stack_size = stack_slots * VMRegImpl::stack_slot_size;
2050
2051  // Generate stack overflow check before creating frame
2052  __ generate_stack_overflow_check(stack_size);
2053
2054  // Generate a new frame for the wrapper.
2055  __ save(SP, -stack_size, SP);
2056
2057  int frame_complete = ((intptr_t)__ pc()) - start;
2058
2059  __ verify_thread();
2060
2061  if (is_critical_native) {
2062    check_needs_gc_for_critical_native(masm, stack_slots,  total_in_args,
2063                                       oop_handle_offset, oop_maps, in_regs, in_sig_bt);
2064  }
2065
2066  //
2067  // We immediately shuffle the arguments so that for any vm call we have to
2068  // make from here on out (sync slow path, jvmti, etc.) we will have
2069  // captured the oops from our caller and have a valid oopMap for
2070  // them.
2071
2072  // -----------------
2073  // The Grand Shuffle
2074  //
2075  // Natives require 1 or 2 extra arguments over the normal ones: the JNIEnv*
2076  // (derived from JavaThread* which is in L7_thread_cache) and, if static,
2077  // the class mirror instead of a receiver.  This pretty much guarantees that
2078  // register layout will not match.  We ignore these extra arguments during
2079  // the shuffle. The shuffle is described by the two calling convention
2080  // vectors we have in our possession. We simply walk the java vector to
2081  // get the source locations and the c vector to get the destinations.
2082  // Because we have a new window and the argument registers are completely
2083  // disjoint ( I0 -> O1, I1 -> O2, ...) we have nothing to worry about
2084  // here.
2085
2086  // This is a trick. We double the stack slots so we can claim
2087  // the oops in the caller's frame. Since we are sure to have
2088  // more args than the caller, doubling is enough to make
2089  // sure we can capture all the incoming oop args from the
2090  // caller.
2091  //
2092  OopMap* map = new OopMap(stack_slots * 2, 0 /* arg_slots*/);
2093  // Record sp-based slot for receiver on stack for non-static methods
2094  int receiver_offset = -1;
2095
2096  // We move the arguments backward because the floating point registers
2097  // destination will always be to a register with a greater or equal register
2098  // number or the stack.
2099
2100#ifdef ASSERT
2101  bool reg_destroyed[RegisterImpl::number_of_registers];
2102  bool freg_destroyed[FloatRegisterImpl::number_of_registers];
2103  for ( int r = 0 ; r < RegisterImpl::number_of_registers ; r++ ) {
2104    reg_destroyed[r] = false;
2105  }
2106  for ( int f = 0 ; f < FloatRegisterImpl::number_of_registers ; f++ ) {
2107    freg_destroyed[f] = false;
2108  }
2109
2110#endif /* ASSERT */
2111
2112  for ( int i = total_in_args - 1, c_arg = total_c_args - 1; i >= 0 ; i--, c_arg-- ) {
2113
2114#ifdef ASSERT
2115    if (in_regs[i].first()->is_Register()) {
2116      assert(!reg_destroyed[in_regs[i].first()->as_Register()->encoding()], "ack!");
2117    } else if (in_regs[i].first()->is_FloatRegister()) {
2118      assert(!freg_destroyed[in_regs[i].first()->as_FloatRegister()->encoding(FloatRegisterImpl::S)], "ack!");
2119    }
2120    if (out_regs[c_arg].first()->is_Register()) {
2121      reg_destroyed[out_regs[c_arg].first()->as_Register()->encoding()] = true;
2122    } else if (out_regs[c_arg].first()->is_FloatRegister()) {
2123      freg_destroyed[out_regs[c_arg].first()->as_FloatRegister()->encoding(FloatRegisterImpl::S)] = true;
2124    }
2125#endif /* ASSERT */
2126
2127    switch (in_sig_bt[i]) {
2128      case T_ARRAY:
2129        if (is_critical_native) {
2130          unpack_array_argument(masm, in_regs[i], in_elem_bt[i], out_regs[c_arg], out_regs[c_arg - 1]);
2131          c_arg--;
2132          break;
2133        }
2134      case T_OBJECT:
2135        assert(!is_critical_native, "no oop arguments");
2136        object_move(masm, map, oop_handle_offset, stack_slots, in_regs[i], out_regs[c_arg],
2137                    ((i == 0) && (!is_static)),
2138                    &receiver_offset);
2139        break;
2140      case T_VOID:
2141        break;
2142
2143      case T_FLOAT:
2144        float_move(masm, in_regs[i], out_regs[c_arg]);
2145        break;
2146
2147      case T_DOUBLE:
2148        assert( i + 1 < total_in_args &&
2149                in_sig_bt[i + 1] == T_VOID &&
2150                out_sig_bt[c_arg+1] == T_VOID, "bad arg list");
2151        double_move(masm, in_regs[i], out_regs[c_arg]);
2152        break;
2153
2154      case T_LONG :
2155        long_move(masm, in_regs[i], out_regs[c_arg]);
2156        break;
2157
2158      case T_ADDRESS: assert(false, "found T_ADDRESS in java args"); // fall through
2159
2160      default:
2161        move32_64(masm, in_regs[i], out_regs[c_arg]);
2162    }
2163  }
2164
2165  // Pre-load a static method's oop into O1.  Used both by locking code and
2166  // the normal JNI call code.
2167  if (method->is_static() && !is_critical_native) {
2168    __ set_oop_constant(JNIHandles::make_local(method->method_holder()->java_mirror()), O1);
2169
2170    // Now handlize the static class mirror in O1.  It's known not-null.
2171    __ st_ptr(O1, SP, klass_offset + STACK_BIAS);
2172    map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));
2173    __ add(SP, klass_offset + STACK_BIAS, O1);
2174  }
2175
2176
2177  const Register L6_handle = L6;
2178
2179  if (method->is_synchronized()) {
2180    assert(!is_critical_native, "unhandled");
2181    __ mov(O1, L6_handle);
2182  }
2183
2184  // We have all of the arguments set up at this point. We MUST NOT touch any Oregs
2185  // except O6/O7. Any call out requires pushing a new frame, so we immediately
2186  // push a new frame and flush the windows.
2187  intptr_t thepc = (intptr_t) __ pc();
2188  {
2189    address here = __ pc();
2190    // Call the next instruction
2191    __ call(here + 8, relocInfo::none);
2192    __ delayed()->nop();
2193  }
2194
2195  // We use the same pc/oopMap repeatedly when we call out
2196  oop_maps->add_gc_map(thepc - start, map);
2197
2198  // O7 now has the pc loaded that we will use when we finally call to native.
2199
2200  // Save thread in L7; it crosses a bunch of VM calls below
2201  // Don't use save_thread because it smashes G2 and we merely
2202  // want to save a copy
2203  __ mov(G2_thread, L7_thread_cache);
2204
2205
2206  // If we create an inner frame, once is plenty;
2207  // when we create it we must also save G2_thread
2208  bool inner_frame_created = false;
2209
2210  // dtrace method entry support
2211  {
2212    SkipIfEqual skip_if(
2213      masm, G3_scratch, &DTraceMethodProbes, Assembler::zero);
2214    // create inner frame
2215    __ save_frame(0);
2216    __ mov(G2_thread, L7_thread_cache);
2217    __ set_metadata_constant(method(), O1);
2218    __ call_VM_leaf(L7_thread_cache,
2219         CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
2220         G2_thread, O1);
2221    __ restore();
2222  }
2223
2224  // RedefineClasses() tracing support for obsolete method entry
2225  if (log_is_enabled(Trace, redefine, class, obsolete)) {
2226    // create inner frame
2227    __ save_frame(0);
2228    __ mov(G2_thread, L7_thread_cache);
2229    __ set_metadata_constant(method(), O1);
2230    __ call_VM_leaf(L7_thread_cache,
2231         CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
2232         G2_thread, O1);
2233    __ restore();
2234  }
2235
2236  // We are in the jni frame unless inner_frame_created is true, in which case
2237  // we are one frame deeper (the "inner" frame). If we are in the
2238  // "inner" frame the args are in the Iregs; if in the jni frame then
2239  // they are in the Oregs.
2240  // If we ever need to go to the VM (for locking, jvmti) then
2241  // we will always be in the "inner" frame.
2242
2243  // Lock a synchronized method
2244  int lock_offset = -1;         // Set if locked
2245  if (method->is_synchronized()) {
2246    Register Roop = O1;
2247    const Register L3_box = L3;
2248
2249    create_inner_frame(masm, &inner_frame_created);
2250
2251    __ ld_ptr(I1, 0, O1); // load the locked oop out of the handle (caller's O1, now I1)
2252    Label done;
2253
2254    lock_offset = (lock_slot_offset * VMRegImpl::stack_slot_size);
2255    __ add(FP, lock_offset+STACK_BIAS, L3_box);
2256#ifdef ASSERT
2257    if (UseBiasedLocking) {
2258      // making the box point to itself will make it clear it went unused
2259      // but also be obviously invalid
2260      __ st_ptr(L3_box, L3_box, 0);
2261    }
2262#endif // ASSERT
2263    //
2264    // Compiler_lock_object (Roop, Rmark, Rbox, Rscratch) -- kills Rmark, Rbox, Rscratch
2265    //
2266    __ compiler_lock_object(Roop, L1,    L3_box, L2);
2267    __ br(Assembler::equal, false, Assembler::pt, done);
2268    __ delayed()->add(FP, lock_offset+STACK_BIAS, L3_box);
2269
2270
2271    // None of the above fast optimizations worked so we have to get into the
2272    // slow case of monitor enter.  Inline a special case of call_VM that
2273    // disallows any pending_exception.
2274    __ mov(Roop, O0);            // Need oop in O0
2275    __ mov(L3_box, O1);
2276
2277    // Record last_Java_sp, in case the VM code releases the JVM lock.
2278
2279    __ set_last_Java_frame(FP, I7);
2280
2281    // do the call
2282    __ call(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), relocInfo::runtime_call_type);
2283    __ delayed()->mov(L7_thread_cache, O2);
2284
2285    __ restore_thread(L7_thread_cache); // restore G2_thread
2286    __ reset_last_Java_frame();
2287
2288#ifdef ASSERT
2289    { Label L;
2290    __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O0);
2291    __ br_null_short(O0, Assembler::pt, L);
2292    __ stop("no pending exception allowed on exit from IR::monitorenter");
2293    __ bind(L);
2294    }
2295#endif
2296    __ bind(done);
2297  }
2298
2299
2300  // Finally just about ready to make the JNI call
2301
2302  __ flushw();
2303  if (inner_frame_created) {
2304    __ restore();
2305  } else {
2306    // Store only what we need from this frame
2307    // QQQ I think that on non-v9 (like we care) we don't need these saves
2308    // either, as the flush traps and the current window goes too.
2309    __ st_ptr(FP, SP, FP->sp_offset_in_saved_window()*wordSize + STACK_BIAS);
2310    __ st_ptr(I7, SP, I7->sp_offset_in_saved_window()*wordSize + STACK_BIAS);
2311  }
2312
2313  // get JNIEnv* which is first argument to native
2314  if (!is_critical_native) {
2315    __ add(G2_thread, in_bytes(JavaThread::jni_environment_offset()), O0);
2316  }
2317
2318  // Use that pc we placed in O7 a while back as the current frame anchor
2319  __ set_last_Java_frame(SP, O7);
2320
2321  // We flushed the windows ages ago; now mark them as flushed before transitioning.
2322  __ set(JavaFrameAnchor::flushed, G3_scratch);
2323  __ st(G3_scratch, G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::flags_offset());
2324
2325  // Transition from _thread_in_Java to _thread_in_native.
2326  __ set(_thread_in_native, G3_scratch);
2327
2328  AddressLiteral dest(native_func);
2329  __ relocate(relocInfo::runtime_call_type);
2330  __ jumpl_to(dest, O7, O7);
2331  __ delayed()->st(G3_scratch, G2_thread, JavaThread::thread_state_offset());
2332
2333  __ restore_thread(L7_thread_cache); // restore G2_thread
2334
2335  // Unpack native results.  For int-types, we do any needed sign-extension
2336  // and move things into I0.  The return value there will survive any VM
2337  // calls for blocking or unlocking.  An FP or OOP result (handle) is done
2338  // specially in the slow-path code.
2339  switch (ret_type) {
2340  case T_VOID:    break;        // Nothing to do!
2341  case T_FLOAT:   break;        // Got it where we want it (unless slow-path)
2342  case T_DOUBLE:  break;        // Got it where we want it (unless slow-path)
2343  // In a 64-bit build the result is in O0 (in O0/O1 in a 32-bit build)
2344  case T_LONG:
2345                  // Fall thru
2346  case T_OBJECT:                // Really a handle; cannot de-handlize until after reclaiming jvm_lock
2347  case T_ARRAY:
2348  case T_INT:
2349                  __ mov(O0, I0);
2350                  break;
2351  case T_BOOLEAN: __ subcc(G0, O0, G0); __ addc(G0, 0, I0); break; // !0 => true; 0 => false
2352  case T_BYTE   : __ sll(O0, 24, O0); __ sra(O0, 24, I0);   break;
2353  case T_CHAR   : __ sll(O0, 16, O0); __ srl(O0, 16, I0);   break; // cannot use and3, 0xFFFF too big as immediate value!
2354  case T_SHORT  : __ sll(O0, 16, O0); __ sra(O0, 16, I0);   break;
2356  default:
2357    ShouldNotReachHere();
2358  }
2359
2360  Label after_transition;
2361  // must we block?
2362
2363  // Block, if necessary, before resuming in _thread_in_Java state.
2364  // In order for GC to work, don't clear the last_Java_sp until after blocking.
2365  { Label no_block;
2366    AddressLiteral sync_state(SafepointSynchronize::address_of_state());
2367
2368    // Switch thread to "native transition" state before reading the synchronization state.
2369    // This additional state is necessary because reading and testing the synchronization
2370    // state is not atomic w.r.t. GC, as this scenario demonstrates:
2371    //     Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
2372    //     VM thread changes sync state to synchronizing and suspends threads for GC.
2373    //     Thread A is resumed to finish this native method, but doesn't block here since it
2374    //     didn't see any synchronization in progress, and escapes.
2375    __ set(_thread_in_native_trans, G3_scratch);
2376    __ st(G3_scratch, G2_thread, JavaThread::thread_state_offset());
2377    if (os::is_MP()) {
2378      if (UseMembar) {
2379        // Force this write out before the read below
2380        __ membar(Assembler::StoreLoad);
2381      } else {
2382        // Write serialization page so VM thread can do a pseudo remote membar.
2383        // We use the current thread pointer to calculate a thread specific
2384        // offset to write to within the page. This minimizes bus traffic
2385        // due to cache line collision.
2386        __ serialize_memory(G2_thread, G1_scratch, G3_scratch);
2387      }
2388    }
2389    __ load_contents(sync_state, G3_scratch);
2390    __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);
2391
2392    Label L;
2393    Address suspend_state(G2_thread, JavaThread::suspend_flags_offset());
2394    __ br(Assembler::notEqual, false, Assembler::pn, L);
2395    __ delayed()->ld(suspend_state, G3_scratch);
2396    __ cmp_and_br_short(G3_scratch, 0, Assembler::equal, Assembler::pt, no_block);
2397    __ bind(L);
2398
2399    // Block.  Save any potential method result value before the operation and
2400    // use a leaf call to leave the last_Java_frame setup undisturbed. Doing this
2401    // lets us share the oopMap we used when we went native rather than create
2402    // a distinct one for this pc.
2403    //
2404    save_native_result(masm, ret_type, stack_slots);
2405    if (!is_critical_native) {
2406      __ call_VM_leaf(L7_thread_cache,
2407                      CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans),
2408                      G2_thread);
2409    } else {
2410      __ call_VM_leaf(L7_thread_cache,
2411                      CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans_and_transition),
2412                      G2_thread);
2413    }
2414
2415    // Restore any method result value
2416    restore_native_result(masm, ret_type, stack_slots);
2417
2418    if (is_critical_native) {
2419      // The call above performed the transition to thread_in_Java so
2420      // skip the transition logic below.
2421      __ ba(after_transition);
2422      __ delayed()->nop();
2423    }
2424
2425    __ bind(no_block);
2426  }
2427
2428  // thread state is thread_in_native_trans. Any safepoint blocking has already
2429  // happened so we can now change state to _thread_in_Java.
2430  __ set(_thread_in_Java, G3_scratch);
2431  __ st(G3_scratch, G2_thread, JavaThread::thread_state_offset());
2432  __ bind(after_transition);
2433
2434  Label no_reguard;
2435  __ ld(G2_thread, JavaThread::stack_guard_state_offset(), G3_scratch);
2436  __ cmp_and_br_short(G3_scratch, JavaThread::stack_guard_yellow_reserved_disabled, Assembler::notEqual, Assembler::pt, no_reguard);
2437
2438  save_native_result(masm, ret_type, stack_slots);
2439  __ call(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages));
2440  __ delayed()->nop();
2441
2442  __ restore_thread(L7_thread_cache); // restore G2_thread
2443  restore_native_result(masm, ret_type, stack_slots);
2444
2445  __ bind(no_reguard);
2446
2447  // Handle possible exception (will unlock if necessary)
2448
2449  // native result if any is live in freg or I0 (and I1 if long and 32bit vm)
2450
2451  // Unlock
2452  if (method->is_synchronized()) {
2453    Label done;
2454    Register I2_ex_oop = I2;
2455    const Register L3_box = L3;
2456    // Get locked oop from the handle we passed to jni
2457    __ ld_ptr(L6_handle, 0, L4);
2458    __ add(SP, lock_offset+STACK_BIAS, L3_box);
2459    // Must save pending exception around the slow-path VM call.  Since it's a
2460    // leaf call, the pending exception (if any) can be kept in a register.
2461    __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), I2_ex_oop);
2462    // Now unlock
2463    //                       (Roop, Rmark, Rbox,   Rscratch)
2464    __ compiler_unlock_object(L4,   L1,    L3_box, L2);
2465    __ br(Assembler::equal, false, Assembler::pt, done);
2466    __ delayed()->add(SP, lock_offset+STACK_BIAS, L3_box);
2467
2468    // save and restore any potential method result value around the unlocking
2469    // operation.  Will save in I0 (or stack for FP returns).
2470    save_native_result(masm, ret_type, stack_slots);
2471
2472    // Must clear pending-exception before re-entering the VM.  Since this is
2473    // a leaf call, pending-exception-oop can be safely kept in a register.
2474    __ st_ptr(G0, G2_thread, in_bytes(Thread::pending_exception_offset()));
2475
2476    // slow case of monitor exit.  Inline a special case of call_VM that
2477    // disallows any pending_exception.
2478    __ mov(L3_box, O1);
2479
2480    // Pass in current thread pointer
2481    __ mov(G2_thread, O2);
2482
2483    __ call(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C), relocInfo::runtime_call_type);
2484    __ delayed()->mov(L4, O0);              // Need oop in O0
2485
2486    __ restore_thread(L7_thread_cache); // restore G2_thread
2487
2488#ifdef ASSERT
2489    { Label L;
2490    __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O0);
2491    __ br_null_short(O0, Assembler::pt, L);
2492    __ stop("no pending exception allowed on exit from IR::monitorexit");
2493    __ bind(L);
2494    }
2495#endif
2496    restore_native_result(masm, ret_type, stack_slots);
2497    // check_forward_pending_exception jumps to forward_exception if any pending
2498    // exception is set.  The forward_exception routine expects to see the
2499    // exception in pending_exception and not in a register.  Kind of clumsy,
2500    // since all folks who branch to forward_exception must have tested
2501    // pending_exception first and hence have it in a register already.
2502    __ st_ptr(I2_ex_oop, G2_thread, in_bytes(Thread::pending_exception_offset()));
2503    __ bind(done);
2504  }
2505
2506  // Tell dtrace about this method exit
2507  {
2508    SkipIfEqual skip_if(
2509      masm, G3_scratch, &DTraceMethodProbes, Assembler::zero);
2510    save_native_result(masm, ret_type, stack_slots);
2511    __ set_metadata_constant(method(), O1);
2512    __ call_VM_leaf(L7_thread_cache,
2513       CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
2514       G2_thread, O1);
2515    restore_native_result(masm, ret_type, stack_slots);
2516  }
2517
2518  // Clear "last Java frame" SP and PC.
2519  __ verify_thread(); // G2_thread must be correct
2520  __ reset_last_Java_frame();
2521
2522  // Unbox oop result, e.g. JNIHandles::resolve value in I0.
2523  if (ret_type == T_OBJECT || ret_type == T_ARRAY) {
2524    Label done, not_weak;
2525    __ br_null(I0, false, Assembler::pn, done); // Use NULL as-is.
2526    __ delayed()->andcc(I0, JNIHandles::weak_tag_mask, G0); // Test for jweak
2527    __ brx(Assembler::zero, true, Assembler::pt, not_weak);
2528    __ delayed()->ld_ptr(I0, 0, I0); // Maybe resolve (untagged) jobject.
2529    // Resolve jweak.
2530    __ ld_ptr(I0, -JNIHandles::weak_tag_value, I0);
2531#if INCLUDE_ALL_GCS
2532    if (UseG1GC) {
2533      // Copy to O0 because macro doesn't allow pre_val in input reg.
2534      __ mov(I0, O0);
2535      __ g1_write_barrier_pre(noreg /* obj */,
2536                              noreg /* index */,
2537                              0 /* offset */,
2538                              O0 /* pre_val */,
2539                              G3_scratch /* tmp */,
2540                              true /* preserve_o_regs */);
2541    }
2542#endif // INCLUDE_ALL_GCS
2543    __ bind(not_weak);
2544    __ verify_oop(I0);
2545    __ bind(done);
2546  }
2547
2548  if (CheckJNICalls) {
2549    // clear_pending_jni_exception_check
2550    __ st_ptr(G0, G2_thread, JavaThread::pending_jni_exception_check_fn_offset());
2551  }
2552
2553  if (!is_critical_native) {
2554    // reset handle block
2555    __ ld_ptr(G2_thread, in_bytes(JavaThread::active_handles_offset()), L5);
2556    __ st(G0, L5, JNIHandleBlock::top_offset_in_bytes());
2557
2558    __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), G3_scratch);
2559    check_forward_pending_exception(masm, G3_scratch);
2560  }
2561
2562
2563  // Return
2564
2565  __ ret();
2566  __ delayed()->restore();
2567
2568  __ flush();
2569
2570  nmethod *nm = nmethod::new_native_nmethod(method,
2571                                            compile_id,
2572                                            masm->code(),
2573                                            vep_offset,
2574                                            frame_complete,
2575                                            stack_slots / VMRegImpl::slots_per_word,
2576                                            (is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
2577                                            in_ByteSize(lock_offset),
2578                                            oop_maps);
2579
2580  if (is_critical_native) {
2581    nm->set_lazy_critical_native(true);
2582  }
2583  return nm;
2584
2585}
2586
2587// this function returns the adjustment size (in number of words) to a c2i adapter
2588// activation for use during deoptimization
2589int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals) {
2590  assert(callee_locals >= callee_parameters,
2591          "test and remove; got more parms than locals");
2592  if (callee_locals < callee_parameters)
2593    return 0;                   // No adjustment for negative locals
2594  int diff = (callee_locals - callee_parameters) * Interpreter::stackElementWords;
2595  return align_up(diff, WordsPerLong);
2596}
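
// Example (illustrative): a callee with 2 parameters and 5 locals needs
// (5 - 2) * Interpreter::stackElementWords extra words; assuming one word per
// stack element that is 3 words, rounded up to 4 since WordsPerLong == 2.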

// "Top of Stack" slots that may be unused by the calling convention but must
// otherwise be preserved.
// On Intel these are not necessary and the value can be zero.
// On Sparc this describes the words reserved for storing a register window
// when an interrupt occurs.
uint SharedRuntime::out_preserve_stack_slots() {
  return frame::register_save_words * VMRegImpl::slots_per_word;
}
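
// Illustrative numbers only: with frame::register_save_words == 16 (eight
// %i plus eight %l registers) and two 32-bit slots per word, this would
// come to 32 stack slots; the authoritative values live in frame_sparc.hpp
// and vmreg_sparc.hpp.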

static void gen_new_frame(MacroAssembler* masm, bool deopt) {
//
// Factor out the new-frame generation common to deopt and uncommon trap
//
  Register        G3pcs              = G3_scratch; // Array of new pcs (input)
  Register        Oreturn0           = O0;
  Register        Oreturn1           = O1;
  Register        O2UnrollBlock      = O2;
  Register        O3array            = O3;         // Array of frame sizes (input)
  Register        O4array_size       = O4;         // number of frames (input)
  Register        O7frame_size       = O7;         // current frame size (scratch)

  __ ld_ptr(O3array, 0, O7frame_size);
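  // 'save' adds its middle operand to SP, so negate the frame size first;
  // the save below then both allocates the new frame and rotates to a
  // fresh register window.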
  __ sub(G0, O7frame_size, O7frame_size);
  __ save(SP, O7frame_size, SP);
  __ ld_ptr(G3pcs, 0, I7);                      // load frame's new pc

  #ifdef ASSERT
  // make sure that the frames are aligned properly
  #endif

  // Deopt needs to pass some extra live values from frame to frame

  if (deopt) {
    __ mov(Oreturn0->after_save(), Oreturn0);
    __ mov(Oreturn1->after_save(), Oreturn1);
  }

  __ mov(O4array_size->after_save(), O4array_size);
  __ sub(O4array_size, 1, O4array_size);
  __ mov(O3array->after_save(), O3array);
  __ mov(O2UnrollBlock->after_save(), O2UnrollBlock);
  __ add(G3pcs, wordSize, G3pcs);               // point to next pc value

  #ifdef ASSERT
  // trash registers to show a clear pattern in backtraces
  __ set(0xDEAD0000, I0);
  __ add(I0,  2, I1);
  __ add(I0,  4, I2);
  __ add(I0,  6, I3);
  __ add(I0,  8, I4);
  // Don't touch I5: it could hold a valuable savedSP
  __ set(0xDEADBEEF, L0);
  __ mov(L0, L1);
  __ mov(L0, L2);
  __ mov(L0, L3);
  __ mov(L0, L4);
  __ mov(L0, L5);

  // trash the return value as there is nothing to return yet
  __ set(0xDEAD0001, O7);
  #endif

  __ mov(SP, O5_savedSP);
}


static void make_new_frames(MacroAssembler* masm, bool deopt) {
  //
  // loop through the UnrollBlock info and create new frames
  //
  Register        G3pcs              = G3_scratch;
  Register        Oreturn0           = O0;
  Register        Oreturn1           = O1;
  Register        O2UnrollBlock      = O2;
  Register        O3array            = O3;
  Register        O4array_size       = O4;
  Label           loop;

#ifdef ASSERT
  // Compilers generate code that bangs the stack by as much as the
  // interpreter would need. So this stack banging should never
  // trigger a fault. Verify that it does not on non-product builds.
  if (UseStackBanging) {
    // Get total frame size for interpreted frames
    __ ld(O2UnrollBlock, Deoptimization::UnrollBlock::total_frame_sizes_offset_in_bytes(), O4);
    __ bang_stack_size(O4, O3, G3_scratch);
  }
#endif

  __ ld(O2UnrollBlock, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes(), O4array_size);
  __ ld_ptr(O2UnrollBlock, Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes(), G3pcs);
  __ ld_ptr(O2UnrollBlock, Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes(), O3array);

  // Adjust old interpreter frame to make space for new frame's extra java locals
  //
  // We capture the original sp for the transition frame only because it is needed in
  // order to properly calculate interpreter_sp_adjustment. Even though in real life
  // every interpreter frame captures a savedSP it is only needed at the transition
  // (fortunately). If we had to have it correct everywhere then we would need to
  // be told the sp_adjustment for each frame we create. If the frame size array
  // were to have twice the frame count entries then we could have pairs [sp_adjustment, frame_size]
  // for each frame we create and keep up the illusion everywhere.
  //

  __ ld(O2UnrollBlock, Deoptimization::UnrollBlock::caller_adjustment_offset_in_bytes(), O7);
  __ mov(SP, O5_savedSP);       // remember initial sender's original sp before adjustment
  __ sub(SP, O7, SP);

#ifdef ASSERT
  // make sure that there is at least one entry in the array
  __ tst(O4array_size);
  __ breakpoint_trap(Assembler::zero, Assembler::icc);
#endif

  // Now push the new interpreter frames
  __ bind(loop);

  // allocate a new frame, filling the registers

  gen_new_frame(masm, deopt);        // allocate an interpreter frame

  __ cmp_zero_and_br(Assembler::notZero, O4array_size, loop);
  __ delayed()->add(O3array, wordSize, O3array);
  __ ld_ptr(G3pcs, 0, O7);                      // load final frame's new pc

}
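
// In C-like pseudocode the loop above walks the UnrollBlock roughly as
// follows (a sketch of intent, not the exact register choreography):
//
//   sp -= caller_adjustment;                // make room for the extra locals
//   for (int i = 0; i < number_of_frames; i++) {
//     sp -= frame_sizes[i];                 // 'save' pushes one interpreter frame
//     set_return_pc(frame_pcs[i]);          // each frame resumes at its own pc
//   }
//   next_pc = frame_pcs[number_of_frames];  // where the last frame continues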

//------------------------------generate_deopt_blob----------------------------
// Ought to generate an ideal graph & compile, but here's some SPARC ASM
// instead.
void SharedRuntime::generate_deopt_blob() {
  // allocate space for the code
  ResourceMark rm;
  // setup code generation tools
  int pad = VerifyThread ? 512 : 0; // Extra slop space for more verify code
#ifdef ASSERT
  if (UseStackBanging) {
    pad += (JavaThread::stack_shadow_zone_size() / os::vm_page_size())*16 + 32;
  }
#endif
#if INCLUDE_JVMCI
  if (EnableJVMCI) {
    pad += 1000; // Increase the buffer size when compiling for JVMCI
  }
#endif
  CodeBuffer buffer("deopt_blob", 2100+pad, 512);
  MacroAssembler* masm               = new MacroAssembler(&buffer);
  FloatRegister   Freturn0           = F0;
  Register        Greturn1           = G1;
  Register        Oreturn0           = O0;
  Register        Oreturn1           = O1;
  Register        O2UnrollBlock      = O2;
  Register        L0deopt_mode       = L0;
  Register        G4deopt_mode       = G4_scratch;
  int             frame_size_words;
  Address         saved_Freturn0_addr(FP, -sizeof(double) + STACK_BIAS);
  Label           cont;

  OopMapSet *oop_maps = new OopMapSet();

  //
  // This is the entry point for code which is returning to a de-optimized
  // frame.
  // The steps taken by this frame are as follows:
  //   - push a dummy "register_save" and save the return values (O0, O1, F0/F1, G1)
  //     and all potentially live registers (at a pollpoint many registers can be live).
  //
  //   - call the C routine: Deoptimization::fetch_unroll_info (this function
  //     returns information about the number and size of interpreter frames
  //     which are equivalent to the frame which is being deoptimized)
  //   - deallocate the unpack frame, restoring only result values. Other
  //     volatile registers will now be captured in the vframeArray as needed.
  //   - deallocate the deoptimization frame
  //   - in a loop using the information returned in the previous step
  //     push new interpreter frames (take care to propagate the return
  //     values through each new frame pushed)
  //   - create a dummy "unpack_frame" and save the return values (O0, O1, F0)
  //   - call the C routine: Deoptimization::unpack_frames (this function
  //     lays out values on the interpreter frame which was just created)
  //   - deallocate the dummy unpack_frame
  //   - ensure that all the return values are correctly set and then do
  //     a return to the interpreter entry point
  //
  // Refer to the following methods for more information:
  //   - Deoptimization::fetch_unroll_info
  //   - Deoptimization::unpack_frames

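  // Note: several entry points are carved out of this single blob and
  // recorded as byte offsets from 'start' below -- the plain deopt entry,
  // exception_offset, exception_in_tls_offset and reexecute_offset (plus
  // the JVMCI uncommon-trap entries when EnableJVMCI is set).
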
  OopMap* map = NULL;

  int start = __ offset();

  // restore G2, the trampoline destroyed it
  __ get_thread();

  // On entry we have been called by the deoptimized nmethod with a call that
  // replaced the original call (or safepoint polling location) so the deoptimizing
  // pc is now in O7. Return values are still in the expected places

  map = RegisterSaver::save_live_registers(masm, 0, &frame_size_words);
  __ ba(cont);
  __ delayed()->mov(Deoptimization::Unpack_deopt, L0deopt_mode);


#if INCLUDE_JVMCI
  Label after_fetch_unroll_info_call;
  int implicit_exception_uncommon_trap_offset = 0;
  int uncommon_trap_offset = 0;

  if (EnableJVMCI) {
    masm->block_comment("BEGIN implicit_exception_uncommon_trap");
    implicit_exception_uncommon_trap_offset = __ offset() - start;

    __ ld_ptr(G2_thread, in_bytes(JavaThread::jvmci_implicit_exception_pc_offset()), O7);
    __ st_ptr(G0, Address(G2_thread, in_bytes(JavaThread::jvmci_implicit_exception_pc_offset())));
    __ add(O7, -8, O7);

    uncommon_trap_offset = __ offset() - start;

    // Save everything in sight.
    (void) RegisterSaver::save_live_registers(masm, 0, &frame_size_words);
    __ set_last_Java_frame(SP, NULL);

    __ ld(G2_thread, in_bytes(JavaThread::pending_deoptimization_offset()), O1);
    __ sub(G0, 1, L1);
    __ st(L1, G2_thread, in_bytes(JavaThread::pending_deoptimization_offset()));

    __ mov((int32_t)Deoptimization::Unpack_reexecute, L0deopt_mode);
    __ mov(G2_thread, O0);
    __ mov(L0deopt_mode, O2);
    __ call(CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap));
    __ delayed()->nop();
    oop_maps->add_gc_map( __ offset()-start, map->deep_copy());
    __ get_thread();
    __ add(O7, 8, O7);
    __ reset_last_Java_frame();

    __ ba(after_fetch_unroll_info_call);
    __ delayed()->nop(); // Delay slot
    masm->block_comment("END implicit_exception_uncommon_trap");
  } // EnableJVMCI
#endif // INCLUDE_JVMCI

  int exception_offset = __ offset() - start;

  // restore G2, the trampoline destroyed it
  __ get_thread();

  // On entry we have been jumped to by the exception handler (or exception_blob
  // for server).  O0 contains the exception oop and O7 contains the original
  // exception pc.  So if we push a frame here it will look to the
  // stack walking code (fetch_unroll_info) just like a normal call so
  // state will be extracted normally.

  // save exception oop in JavaThread and fall through into the
  // exception_in_tls case since they are handled in same way except
  // for where the pending exception is kept.
  __ st_ptr(Oexception, G2_thread, JavaThread::exception_oop_offset());

  //
  // Vanilla deoptimization with an exception pending in exception_oop
  //
  int exception_in_tls_offset = __ offset() - start;

  // No need to update oop_map as each call to save_live_registers will produce an identical oopmap
  // Opens a new stack frame
  (void) RegisterSaver::save_live_registers(masm, 0, &frame_size_words);

  // Restore G2_thread
  __ get_thread();

#ifdef ASSERT
  {
    // verify that there is really an exception oop in exception_oop
    Label has_exception;
    __ ld_ptr(G2_thread, JavaThread::exception_oop_offset(), Oexception);
    __ br_notnull_short(Oexception, Assembler::pt, has_exception);
    __ stop("no exception in thread");
    __ bind(has_exception);

    // verify that there is no pending exception
    Label no_pending_exception;
    Address exception_addr(G2_thread, Thread::pending_exception_offset());
    __ ld_ptr(exception_addr, Oexception);
    __ br_null_short(Oexception, Assembler::pt, no_pending_exception);
    __ stop("must not have pending exception here");
    __ bind(no_pending_exception);
  }
#endif

  __ ba(cont);
  __ delayed()->mov(Deoptimization::Unpack_exception, L0deopt_mode);

  //
  // Reexecute entry, similar to c2 uncommon trap
  //
  int reexecute_offset = __ offset() - start;
#if INCLUDE_JVMCI && !defined(COMPILER1)
  if (EnableJVMCI && UseJVMCICompiler) {
    // JVMCI does not use this kind of deoptimization
    __ should_not_reach_here();
  }
#endif
  // No need to update oop_map as each call to save_live_registers will produce an identical oopmap
  (void) RegisterSaver::save_live_registers(masm, 0, &frame_size_words);

  __ mov(Deoptimization::Unpack_reexecute, L0deopt_mode);

  __ bind(cont);

  __ set_last_Java_frame(SP, noreg);

  // do the call by hand so we can get the oopmap

  __ mov(G2_thread, L7_thread_cache);
  __ mov(L0deopt_mode, O1);
  __ call(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info), relocInfo::runtime_call_type);
  __ delayed()->mov(G2_thread, O0);

  // Set an oopmap for the call site; this describes all our saved volatile registers

  oop_maps->add_gc_map( __ offset()-start, map);

  __ mov(L7_thread_cache, G2_thread);

  __ reset_last_Java_frame();

#if INCLUDE_JVMCI
  if (EnableJVMCI) {
    __ bind(after_fetch_unroll_info_call);
  }
#endif
  // NOTE: we know that only O0/O1 will be reloaded by restore_result_registers
  // so this move will survive

  __ mov(L0deopt_mode, G4deopt_mode);

  __ mov(O0, O2UnrollBlock->after_save());

  RegisterSaver::restore_result_registers(masm);

  __ ld(O2UnrollBlock, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes(), G4deopt_mode);
  Label noException;
  __ cmp_and_br_short(G4deopt_mode, Deoptimization::Unpack_exception, Assembler::notEqual, Assembler::pt, noException);

  // Move the pending exception from exception_oop to Oexception so
  // the pending exception will be picked up by the interpreter.
  __ ld_ptr(G2_thread, in_bytes(JavaThread::exception_oop_offset()), Oexception);
  __ st_ptr(G0, G2_thread, in_bytes(JavaThread::exception_oop_offset()));
  __ st_ptr(G0, G2_thread, in_bytes(JavaThread::exception_pc_offset()));
  __ bind(noException);

  // deallocate the deoptimization frame taking care to preserve the return values
  __ mov(Oreturn0,     Oreturn0->after_save());
  __ mov(Oreturn1,     Oreturn1->after_save());
  __ mov(O2UnrollBlock, O2UnrollBlock->after_save());
  __ restore();

  // Allocate new interpreter frame(s) and possible c2i adapter frame

  make_new_frames(masm, true);

  // push a dummy "unpack_frame" taking care of float return values and
  // call Deoptimization::unpack_frames to have the unpacker layout
  // information in the interpreter frames just created and then return
  // to the interpreter entry point
  __ save(SP, -frame_size_words*wordSize, SP);
  __ stf(FloatRegisterImpl::D, Freturn0, saved_Freturn0_addr);
  // LP64 uses g4 in set_last_Java_frame
  __ mov(G4deopt_mode, O1);
  __ set_last_Java_frame(SP, G0);
  __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames), G2_thread, O1);
  __ reset_last_Java_frame();
  __ ldf(FloatRegisterImpl::D, saved_Freturn0_addr, Freturn0);

  __ ret();
  __ delayed()->restore();

  masm->flush();
  _deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset, reexecute_offset, frame_size_words);
  _deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset);
#if INCLUDE_JVMCI
  if (EnableJVMCI) {
    _deopt_blob->set_uncommon_trap_offset(uncommon_trap_offset);
    _deopt_blob->set_implicit_exception_uncommon_trap_offset(implicit_exception_uncommon_trap_offset);
  }
#endif
}

#ifdef COMPILER2

//------------------------------generate_uncommon_trap_blob--------------------
// Ought to generate an ideal graph & compile, but here's some SPARC ASM
// instead.
void SharedRuntime::generate_uncommon_trap_blob() {
  // allocate space for the code
  ResourceMark rm;
  // setup code generation tools
  int pad = VerifyThread ? 512 : 0;
#ifdef ASSERT
  if (UseStackBanging) {
    pad += (JavaThread::stack_shadow_zone_size() / os::vm_page_size())*16 + 32;
  }
#endif
  CodeBuffer buffer("uncommon_trap_blob", 2700+pad, 512);
  MacroAssembler* masm               = new MacroAssembler(&buffer);
  Register        O2UnrollBlock      = O2;
  Register        O2klass_index      = O2;

  //
  // This is the entry point for all traps the compiler takes when it thinks
  // it cannot handle further execution of compiled code. The frame is
  // deoptimized in these cases and converted into interpreter frames for
  // execution
  // The steps taken by this frame are as follows:
  //   - push a fake "unpack_frame"
  //   - call the C routine Deoptimization::uncommon_trap (this function
  //     packs the current compiled frame into vframe arrays and returns
  //     information about the number and size of interpreter frames which
  //     are equivalent to the frame which is being deoptimized)
  //   - deallocate the "unpack_frame"
  //   - deallocate the deoptimization frame
  //   - in a loop using the information returned in the previous step
  //     push interpreter frames;
  //   - create a dummy "unpack_frame"
  //   - call the C routine: Deoptimization::unpack_frames (this function
  //     lays out values on the interpreter frame which was just created)
  //   - deallocate the dummy unpack_frame
  //   - return to the interpreter entry point
  //
  //  Refer to the following methods for more information:
  //   - Deoptimization::uncommon_trap
  //   - Deoptimization::unpack_frames

  // the unloaded class index is in O0 (first parameter to this blob)

  // push a dummy "unpack_frame"
  // and call Deoptimization::uncommon_trap to pack the compiled frame into
  // vframe array and return the UnrollBlock information
  __ save_frame(0);
  __ set_last_Java_frame(SP, noreg);
  __ mov(I0, O2klass_index);
  __ mov(Deoptimization::Unpack_uncommon_trap, O3); // exec mode
  __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap), G2_thread, O2klass_index, O3);
  __ reset_last_Java_frame();
  __ mov(O0, O2UnrollBlock->after_save());
  __ restore();

  // deallocate the deoptimized frame taking care to preserve the return values
  __ mov(O2UnrollBlock, O2UnrollBlock->after_save());
  __ restore();

#ifdef ASSERT
  { Label L;
    __ ld(O2UnrollBlock, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes(), O1);
    __ cmp_and_br_short(O1, Deoptimization::Unpack_uncommon_trap, Assembler::equal, Assembler::pt, L);
    __ stop("SharedRuntime::generate_uncommon_trap_blob: expected Unpack_uncommon_trap");
    __ bind(L);
  }
#endif

  // Allocate new interpreter frame(s) and possible c2i adapter frame

  make_new_frames(masm, false);

  // push a dummy "unpack_frame" taking care of float return values and
  // call Deoptimization::unpack_frames to have the unpacker layout
  // information in the interpreter frames just created and then return
  // to the interpreter entry point
  __ save_frame(0);
  __ set_last_Java_frame(SP, noreg);
  __ mov(Deoptimization::Unpack_uncommon_trap, O3); // indicate it is the uncommon trap case
  __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames), G2_thread, O3);
  __ reset_last_Java_frame();
  __ ret();
  __ delayed()->restore();

  masm->flush();
  _uncommon_trap_blob = UncommonTrapBlob::create(&buffer, NULL, __ total_frame_size_in_bytes(0)/wordSize);
}

#endif // COMPILER2

//------------------------------generate_handler_blob-------------------
//
// Generate a special Compile2Runtime blob that saves all registers, and sets
// up an OopMap.
//
// This blob is jumped to (via a breakpoint and the signal handler) from a
// safepoint in compiled code.  On entry to this blob, O7 contains the
// address in the original nmethod at which we should resume normal execution.
// Thus, this blob looks like a subroutine which must preserve lots of
// registers and return normally.  Note that O7 is never register-allocated,
// so it is guaranteed to be free here.
//

// The hardest part of what this blob must do is to save the 64-bit %o
// registers in the 32-bit build.  A simple 'save' turns the %o's into %i's and
// an interrupt will chop off their heads.  Making space in the caller's frame
// first will let us save the 64-bit %o's before save'ing, but we cannot hand
// the adjusted FP off to the GC stack-crawler: this will modify the caller's
// SP and mess up HIS OopMaps.  So we first adjust the caller's SP, then save
// the 64-bit %o's, then do a save, then fixup the caller's SP (our FP).
// Tricky, tricky, tricky...
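
// Schematically, for the 32-bit build (illustrative only -- the authoritative
// layout lives in RegisterSaver::save_live_registers):
//
//   sub   %sp, extra, %sp        ! make room in the *caller's* frame
//   stx   %o0-%o5, [%sp + ...]   ! store the 64-bit %o's while still whole
//   save  %sp, -frame, %sp       ! now the %o's are the (clipped) %i's
//   add   %fp, extra, <tmp>      ! present the caller's original SP to the
//                                ! stack-crawler via the recorded OopMaps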

SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_type) {
  assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before");

  // allocate space for the code
  ResourceMark rm;
  // setup code generation tools
  // Measured 8/7/03 at 896 in 32bit debug build (no VerifyThread)
  // Measured 8/7/03 at 1080 in 32bit debug build (VerifyThread)
  CodeBuffer buffer("handler_blob", 1600, 512);
  MacroAssembler* masm                = new MacroAssembler(&buffer);
  int             frame_size_words;
  OopMapSet *oop_maps = new OopMapSet();
  OopMap* map = NULL;

  int start = __ offset();

  bool cause_return = (poll_type == POLL_AT_RETURN);
  // If this causes a return before the processing, then do a "restore"
  if (cause_return) {
    __ restore();
  } else {
    // Make it look like we were called via the poll
    // so that the frame constructor always sees a valid return address
    __ ld_ptr(G2_thread, in_bytes(JavaThread::saved_exception_pc_offset()), O7);
    __ sub(O7, frame::pc_return_offset, O7);
  }

  map = RegisterSaver::save_live_registers(masm, 0, &frame_size_words);

  // setup last_Java_sp (blows G4)
  __ set_last_Java_frame(SP, noreg);

  // call into the runtime to handle the safepoint poll
  // Do not use call_VM_leaf, because we need to make a GC map at this call site.
  __ mov(G2_thread, O0);
  __ save_thread(L7_thread_cache);
  __ call(call_ptr);
  __ delayed()->nop();

  // Set an oopmap for the call site.
  // We need this not only for callee-saved registers, but also for volatile
  // registers that the compiler might be keeping live across a safepoint.

  oop_maps->add_gc_map( __ offset() - start, map);

  __ restore_thread(L7_thread_cache);
  // clear last_Java_sp
  __ reset_last_Java_frame();

  // Check for exceptions
  Label pending;

  __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O1);
  __ br_notnull_short(O1, Assembler::pn, pending);

  RegisterSaver::restore_live_registers(masm);

  // We are back to the original state on entry and ready to go.

  __ retl();
  __ delayed()->nop();

  // Pending exception after the safepoint

  __ bind(pending);

  RegisterSaver::restore_live_registers(masm);

  // We are back to the original state on entry.

  // Tail-call forward_exception_entry, with the issuing PC in O7,
  // so it looks like the original nmethod called forward_exception_entry.
  __ set((intptr_t)StubRoutines::forward_exception_entry(), O0);
  __ JMP(O0, 0);
  __ delayed()->nop();

  // -------------
  // make sure all code is generated
  masm->flush();

  // return exception blob
  return SafepointBlob::create(&buffer, oop_maps, frame_size_words);
}

//
// generate_resolve_blob - call resolution (static/virtual/opt-virtual/ic-miss)
//
// Generate a stub that calls into the vm to find out the proper destination
// of a Java call. All the argument registers are live at this point
// but since this is generic code we don't know what they are and the caller
// must do any gc of the args.
//
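// In outline (matching the code below): save every argument register, call
// 'destination' to resolve the call site, plant the returned entry point in
// G3's save slot and the returned Method* in G5's, restore everything, and
// jump through G3 -- so the resolved callee sees the original arguments
// exactly as if it had been called directly.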
RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const char* name) {
  assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before");

  // allocate space for the code
  ResourceMark rm;
  // setup code generation tools
  // Measured 8/7/03 at 896 in 32bit debug build (no VerifyThread)
  // Measured 8/7/03 at 1080 in 32bit debug build (VerifyThread)
  CodeBuffer buffer(name, 1600, 512);
  MacroAssembler* masm                = new MacroAssembler(&buffer);
  int             frame_size_words;
  OopMapSet *oop_maps = new OopMapSet();
  OopMap* map = NULL;

  int start = __ offset();

  map = RegisterSaver::save_live_registers(masm, 0, &frame_size_words);

  int frame_complete = __ offset();

  // setup last_Java_sp (blows G4)
  __ set_last_Java_frame(SP, noreg);

  // call into the runtime to resolve the proper destination of the call
  // Do not use call_VM_leaf, because we need to make a GC map at this call site.
  __ mov(G2_thread, O0);
  __ save_thread(L7_thread_cache);
  __ call(destination, relocInfo::runtime_call_type);
  __ delayed()->nop();

  // O0 contains the address we are going to jump to, assuming no exception was installed

  // Set an oopmap for the call site.
  // We need this not only for callee-saved registers, but also for volatile
  // registers that the compiler might be keeping live across a safepoint.

  oop_maps->add_gc_map( __ offset() - start, map);

  __ restore_thread(L7_thread_cache);
  // clear last_Java_sp
  __ reset_last_Java_frame();

  // Check for exceptions
  Label pending;

  __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O1);
  __ br_notnull_short(O1, Assembler::pn, pending);

  // get the returned Method*

  __ get_vm_result_2(G5_method);
  __ stx(G5_method, SP, RegisterSaver::G5_offset()+STACK_BIAS);

  // O0 is where we want to jump; overwrite G3's save slot (G3 is saved and scratch)

  __ stx(O0, SP, RegisterSaver::G3_offset()+STACK_BIAS);

  RegisterSaver::restore_live_registers(masm);

  // We are back to the original state on entry and ready to go.

  __ JMP(G3, 0);
  __ delayed()->nop();

  // Pending exception after the safepoint

  __ bind(pending);

  RegisterSaver::restore_live_registers(masm);

  // We are back to the original state on entry.

  // Tail-call forward_exception_entry, with the issuing PC in O7,
  // so it looks like the original nmethod called forward_exception_entry.
  __ set((intptr_t)StubRoutines::forward_exception_entry(), O0);
  __ JMP(O0, 0);
  __ delayed()->nop();

  // -------------
  // make sure all code is generated
  masm->flush();

  // return the blob
  // frame_size_words or bytes??
  return RuntimeStub::new_runtime_stub(name, &buffer, frame_complete, frame_size_words, oop_maps, true);
}