// templateInterpreterGenerator_sparc.cpp revision 13249:a2753984d2c1
/*
 * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/templateInterpreterGenerator.hpp"
#include "interpreter/templateTable.hpp"
#include "oops/arrayOop.hpp"
#include "oops/methodData.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/arguments.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/timer.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"
#include "utilities/macros.hpp"

// Size of interpreter code.  Increase if too small.  The interpreter will
// fail with a guarantee ("not enough space for interpreter generation")
// if too small.
// Run with -XX:+PrintInterpreter to get the VM to print out the size.
// Max size with JVMTI.
// The sethi() instruction generates lots more instructions when shell
// stack limit is unlimited, so that's why this is much bigger.
int TemplateInterpreter::InterpreterCodeSize = 260 * K;

// Generation of Interpreter
//
// The TemplateInterpreterGenerator generates the interpreter into Interpreter::_code.


#define __ _masm->


//----------------------------------------------------------------------------------------------------

// LP64 passes floating point arguments in F1, F3, F5, etc. instead of
// O0, O1, O2 etc.
// Doubles are passed in D0, D2, D4
// We store the signature of the first 16 arguments in the first argument
// slot because it will be overwritten prior to calling the native
// function, with the pointer to the JNIEnv.
// If LP64 there can be up to 16 floating point arguments in registers
// or 6 integer registers.
address TemplateInterpreterGenerator::generate_slow_signature_handler() {

  enum {
    non_float  = 0,
    float_sig  = 1,
    double_sig = 2,
    sig_mask   = 3
  };
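
  // An illustrative decoding sketch (not normative): the packed signature
  // word holds one two-bit tag per argument, low bits first, with argument 0
  // (this very slot) occupying the lowest pair. For a hypothetical signature
  // (IFJD)V the word would decode, after the initial two-bit shift that
  // skips Arg 0, as: I -> non_float, F -> float_sig, J -> non_float,
  // D -> double_sig. The loop below masks two bits with sig_mask and shifts
  // right by two for each argument processed.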

  address entry = __ pc();
  Argument argv(0, true);

  // We are in the jni transition frame. Save the last_java_frame corresponding to the
  // outer interpreter frame
  //
  __ set_last_Java_frame(FP, noreg);
  // make sure the interpreter frame we've pushed has a valid return pc
  __ mov(O7, I7);
  __ mov(Lmethod, G3_scratch);
  __ mov(Llocals, G4_scratch);
  __ save_frame(0);
  __ mov(G2_thread, L7_thread_cache);
  __ add(argv.address_in_frame(), O3);
  __ mov(G2_thread, O0);
  __ mov(G3_scratch, O1);
  __ call(CAST_FROM_FN_PTR(address, InterpreterRuntime::slow_signature_handler), relocInfo::runtime_call_type);
  __ delayed()->mov(G4_scratch, O2);
  __ mov(L7_thread_cache, G2_thread);
  __ reset_last_Java_frame();


  // load the register arguments (the C code packed them as varargs)
  Address Sig = argv.address_in_frame();        // Argument 0 holds the signature
  __ ld_ptr( Sig, G3_scratch );                   // Get register argument signature word into G3_scratch
  __ mov( G3_scratch, G4_scratch);
  __ srl( G4_scratch, 2, G4_scratch);             // Skip Arg 0
  Label done;
  for (Argument ldarg = argv.successor(); ldarg.is_float_register(); ldarg = ldarg.successor()) {
    Label NonFloatArg;
    Label LoadFloatArg;
    Label LoadDoubleArg;
    Label NextArg;
    Address a = ldarg.address_in_frame();
    __ andcc(G4_scratch, sig_mask, G3_scratch);
    __ br(Assembler::zero, false, Assembler::pt, NonFloatArg);
    __ delayed()->nop();

    __ cmp(G3_scratch, float_sig );
    __ br(Assembler::equal, false, Assembler::pt, LoadFloatArg);
    __ delayed()->nop();

    __ cmp(G3_scratch, double_sig );
    __ br(Assembler::equal, false, Assembler::pt, LoadDoubleArg);
    __ delayed()->nop();

    __ bind(NonFloatArg);
    // There are only 6 integer register arguments!
    if ( ldarg.is_register() )
      __ ld_ptr(ldarg.address_in_frame(), ldarg.as_register());
    else {
    // Optimization, see if there are any more args and get out prior to checking
    // all 16 float registers.  My guess is that this is rare.
    // If is_register is false, then we are done with the first six integer args.
      __ br_null_short(G4_scratch, Assembler::pt, done);
    }
    __ ba(NextArg);
    __ delayed()->srl( G4_scratch, 2, G4_scratch );

    __ bind(LoadFloatArg);
    __ ldf( FloatRegisterImpl::S, a, ldarg.as_float_register(), 4);
    __ ba(NextArg);
    __ delayed()->srl( G4_scratch, 2, G4_scratch );

    __ bind(LoadDoubleArg);
    __ ldf( FloatRegisterImpl::D, a, ldarg.as_double_register() );
    __ ba(NextArg);
    __ delayed()->srl( G4_scratch, 2, G4_scratch );

    __ bind(NextArg);
  }

  __ bind(done);
  __ ret();
  __ delayed()->restore(O0, 0, Lscratch);  // caller's Lscratch gets the result handler

  return entry;
}

void TemplateInterpreterGenerator::generate_counter_overflow(Label& Lcontinue) {

  // Generate code to initiate compilation on the counter overflow.

  // InterpreterRuntime::frequency_counter_overflow takes two arguments,
  // the first indicates if the counter overflow occurs at a backwards branch (NULL bcp)
  // and the second is only used when the first is true.  We pass zero for both.
  // The call returns the address of the verified entry point for the method or NULL
  // if the compilation did not complete (either went background or bailed out).
  __ set((int)false, O2);
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), O2, O2, true);
  // returns verified_entry_point or NULL
  // we ignore it in any case
  __ ba_short(Lcontinue);
}


// End of helpers

// Various method entries

// Abstract method entry
// Attempt to execute abstract method. Throw exception
//
address TemplateInterpreterGenerator::generate_abstract_entry(void) {
  address entry = __ pc();
  // abstract method entry
  // throw exception
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
  // the call_VM checks for exception, so we should never return here.
  __ should_not_reach_here();
  return entry;
}

void TemplateInterpreterGenerator::save_native_result(void) {
  // result potentially in O0/O1: save it across calls
  const Address& l_tmp = InterpreterMacroAssembler::l_tmp;

  // result potentially in F0/F1: save it across calls
  const Address& d_tmp = InterpreterMacroAssembler::d_tmp;

  // save and restore any potential method result value around the unlocking operation
  __ stf(FloatRegisterImpl::D, F0, d_tmp);
  __ stx(O0, l_tmp);
}

void TemplateInterpreterGenerator::restore_native_result(void) {
  const Address& l_tmp = InterpreterMacroAssembler::l_tmp;
  const Address& d_tmp = InterpreterMacroAssembler::d_tmp;

  // Restore any method result value
  __ ldf(FloatRegisterImpl::D, d_tmp, F0);
  __ ldx(l_tmp, O0);
}

address TemplateInterpreterGenerator::generate_exception_handler_common(const char* name, const char* message, bool pass_oop) {
  assert(!pass_oop || message == NULL, "either oop or message but not both");
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an exception happened
  __ empty_expression_stack();
  // load exception object
  __ set((intptr_t)name, G3_scratch);
  if (pass_oop) {
    __ call_VM(Oexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_klass_exception), G3_scratch, Otos_i);
  } else {
    __ set((intptr_t)message, G4_scratch);
    __ call_VM(Oexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception), G3_scratch, G4_scratch);
  }
  // throw exception
  assert(Interpreter::throw_exception_entry() != NULL, "generate it first");
  AddressLiteral thrower(Interpreter::throw_exception_entry());
  __ jump_to(thrower, G3_scratch);
  __ delayed()->nop();
  return entry;
}

address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an exception
  // happened
  __ empty_expression_stack();
  // load exception object
  __ call_VM(Oexception,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::throw_ClassCastException),
             Otos_i);
  __ should_not_reach_here();
  return entry;
}


address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler(const char* name) {
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an exception happened
  __ empty_expression_stack();
  // convention: expect aberrant index in register G3_scratch, then shuffle the
  // index to G4_scratch for the VM call
  __ mov(G3_scratch, G4_scratch);
  __ set((intptr_t)name, G3_scratch);
  __ call_VM(Oexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ArrayIndexOutOfBoundsException), G3_scratch, G4_scratch);
  __ should_not_reach_here();
  return entry;
}


address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an exception happened
  __ empty_expression_stack();
  __ call_VM(Oexception, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_StackOverflowError));
  __ should_not_reach_here();
  return entry;
}


address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
  address entry = __ pc();

  if (state == atos) {
    __ profile_return_type(O0, G3_scratch, G1_scratch);
  }

  // The callee returns with the stack possibly adjusted by an adapter transition.
  // We remove that possible adjustment here.
  // All interpreter local registers are untouched. Any result is passed back
  // in the O0/O1 or float registers. Before continuing, the arguments must be
  // popped from the java expression stack; i.e., Lesp must be adjusted.

  __ mov(Llast_SP, SP);   // Remove any adapter added stack space.

  const Register cache = G3_scratch;
  const Register index  = G1_scratch;
  __ get_cache_and_index_at_bcp(cache, index, 1, index_size);

  const Register flags = cache;
  __ ld_ptr(cache, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset(), flags);
  const Register parameter_size = flags;
  __ and3(flags, ConstantPoolCacheEntry::parameter_size_mask, parameter_size);  // argument size in words
  __ sll(parameter_size, Interpreter::logStackElementSize, parameter_size);     // each argument size in bytes
  __ add(Lesp, parameter_size, Lesp);                                           // pop arguments
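  // (Illustrative: for a callee taking three one-slot arguments,
  // parameter_size decodes to 3 words; the shift by logStackElementSize
  // converts that to the byte count added to Lesp, popping the arguments.)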

  __ check_and_handle_popframe(Gtemp);
  __ check_and_handle_earlyret(Gtemp);

  __ dispatch_next(state, step);

  return entry;
}


address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, int step) {
  address entry = __ pc();
  __ get_constant_pool_cache(LcpoolCache); // load LcpoolCache
#if INCLUDE_JVMCI
  // Check if we need to take lock at entry of synchronized method.  This can
  // only occur on method entry so emit it only for vtos with step 0.
  if (UseJVMCICompiler && state == vtos && step == 0) {
    Label L;
    Address pending_monitor_enter_addr(G2_thread, JavaThread::pending_monitorenter_offset());
    __ ldbool(pending_monitor_enter_addr, Gtemp);  // Load if pending monitor enter
    __ cmp_and_br_short(Gtemp, G0, Assembler::equal, Assembler::pn, L);
    // Clear flag.
    __ stbool(G0, pending_monitor_enter_addr);
    // Take lock.
    lock_method();
    __ bind(L);
  } else {
#ifdef ASSERT
    if (UseJVMCICompiler) {
      Label L;
      Address pending_monitor_enter_addr(G2_thread, JavaThread::pending_monitorenter_offset());
      __ ldbool(pending_monitor_enter_addr, Gtemp);  // Load if pending monitor enter
      __ cmp_and_br_short(Gtemp, G0, Assembler::equal, Assembler::pn, L);
      __ stop("unexpected pending monitor in deopt entry");
      __ bind(L);
    }
#endif
  }
#endif
  { Label L;
    Address exception_addr(G2_thread, Thread::pending_exception_offset());
    __ ld_ptr(exception_addr, Gtemp);  // Load pending exception.
    __ br_null_short(Gtemp, Assembler::pt, L);
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }
  __ dispatch_next(state, step);
  return entry;
}

// A result handler converts/unboxes a native call result into
// a java interpreter/compiler result. The current frame is an
// interpreter frame. The activation frame unwind code must be
// consistent with that of TemplateTable::_return(...). In the
// case of native methods, the caller's SP was not modified.
address TemplateInterpreterGenerator::generate_result_handler_for(BasicType type) {
  address entry = __ pc();
  Register Itos_i  = Otos_i ->after_save();
  Register Itos_l  = Otos_l ->after_save();
  Register Itos_l1 = Otos_l1->after_save();
  Register Itos_l2 = Otos_l2->after_save();
  switch (type) {
    case T_BOOLEAN: __ subcc(G0, O0, G0); __ addc(G0, 0, Itos_i); break; // !0 => true; 0 => false
    case T_CHAR   : __ sll(O0, 16, O0); __ srl(O0, 16, Itos_i);   break; // cannot use and3, 0xFFFF too big as immediate value!
    case T_BYTE   : __ sll(O0, 24, O0); __ sra(O0, 24, Itos_i);   break;
    case T_SHORT  : __ sll(O0, 16, O0); __ sra(O0, 16, Itos_i);   break;
    case T_LONG   :
    case T_INT    : __ mov(O0, Itos_i);                         break;
    case T_VOID   : /* nothing to do */                         break;
    case T_FLOAT  : assert(F0 == Ftos_f, "fix this code" );     break;
    case T_DOUBLE : assert(F0 == Ftos_d, "fix this code" );     break;
    case T_OBJECT :
      __ ld_ptr(FP, (frame::interpreter_frame_oop_temp_offset*wordSize) + STACK_BIAS, Itos_i);
      __ verify_oop(Itos_i);
      break;
    default       : ShouldNotReachHere();
  }
  __ ret();                           // return from interpreter activation
  __ delayed()->restore(I5_savedSP, G0, SP);  // remove interpreter frame
  NOT_PRODUCT(__ emit_int32(0);)       // marker for disassembly
  return entry;
}

address TemplateInterpreterGenerator::generate_safept_entry_for(TosState state, address runtime_entry) {
  address entry = __ pc();
  __ push(state);
  __ call_VM(noreg, runtime_entry);
  __ dispatch_via(vtos, Interpreter::normal_table(vtos));
  return entry;
}


//
// Helpers for commoning out cases in the various type of method entries.
//

// increment invocation count & check for overflow
//
// Note: checking for negative value instead of overflow
//       so we have a 'sticky' overflow test
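//       ('sticky' here: the comparison emitted below is greaterEqualUnsigned
//       against the limit rather than a test for exact equality, so an
//       increment that races past the precise threshold still takes the
//       overflow path)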
//
// Lmethod: method
// ??: invocation counter
//
void TemplateInterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue) {
  // Note: In tiered we increment either counters in MethodCounters* or in
  // MDO depending if we're profiling or not.
  const Register G3_method_counters = G3_scratch;
  Label done;

  if (TieredCompilation) {
    const int increment = InvocationCounter::count_increment;
    Label no_mdo;
    if (ProfileInterpreter) {
      // If no method data exists, go to profile_continue.
      __ ld_ptr(Lmethod, Method::method_data_offset(), G4_scratch);
      __ br_null_short(G4_scratch, Assembler::pn, no_mdo);
      // Increment counter
      Address mdo_invocation_counter(G4_scratch,
                                     in_bytes(MethodData::invocation_counter_offset()) +
                                     in_bytes(InvocationCounter::counter_offset()));
      Address mask(G4_scratch, in_bytes(MethodData::invoke_mask_offset()));
      __ increment_mask_and_jump(mdo_invocation_counter, increment, mask,
                                 G3_scratch, Lscratch,
                                 Assembler::zero, overflow);
      __ ba_short(done);
    }

    // Increment counter in MethodCounters*
    __ bind(no_mdo);
    Address invocation_counter(G3_method_counters,
            in_bytes(MethodCounters::invocation_counter_offset()) +
            in_bytes(InvocationCounter::counter_offset()));
    __ get_method_counters(Lmethod, G3_method_counters, done);
    Address mask(G3_method_counters, in_bytes(MethodCounters::invoke_mask_offset()));
    __ increment_mask_and_jump(invocation_counter, increment, mask,
                               G4_scratch, Lscratch,
                               Assembler::zero, overflow);
    __ bind(done);
  } else { // not TieredCompilation
    // Update standard invocation counters
    __ get_method_counters(Lmethod, G3_method_counters, done);
    __ increment_invocation_counter(G3_method_counters, O0, G4_scratch);
    if (ProfileInterpreter) {
      Address interpreter_invocation_counter(G3_method_counters,
            in_bytes(MethodCounters::interpreter_invocation_counter_offset()));
      __ ld(interpreter_invocation_counter, G4_scratch);
      __ inc(G4_scratch);
      __ st(G4_scratch, interpreter_invocation_counter);
    }

    if (ProfileInterpreter && profile_method != NULL) {
      // Test to see if we should create a method data oop
      Address profile_limit(G3_method_counters, in_bytes(MethodCounters::interpreter_profile_limit_offset()));
      __ ld(profile_limit, G1_scratch);
      __ cmp_and_br_short(O0, G1_scratch, Assembler::lessUnsigned, Assembler::pn, *profile_method_continue);

      // if no method data exists, go to profile_method
      __ test_method_data_pointer(*profile_method);
    }

    Address invocation_limit(G3_method_counters, in_bytes(MethodCounters::interpreter_invocation_limit_offset()));
    __ ld(invocation_limit, G3_scratch);
    __ cmp(O0, G3_scratch);
    __ br(Assembler::greaterEqualUnsigned, false, Assembler::pn, *overflow); // Far distance
    __ delayed()->nop();
    __ bind(done);
  }
}

// Allocate monitor and lock method (asm interpreter)
// Lmethod - Method*
//
void TemplateInterpreterGenerator::lock_method() {
  __ ld(Lmethod, in_bytes(Method::access_flags_offset()), O0);  // Load access flags.

#ifdef ASSERT
  { Label ok;
    __ btst(JVM_ACC_SYNCHRONIZED, O0);
    __ br( Assembler::notZero, false, Assembler::pt, ok);
    __ delayed()->nop();
    __ stop("method doesn't need synchronization");
    __ bind(ok);
  }
#endif // ASSERT

  // get synchronization object to O0
  { Label done;
    __ btst(JVM_ACC_STATIC, O0);
    __ br( Assembler::zero, true, Assembler::pt, done);
    __ delayed()->ld_ptr(Llocals, Interpreter::local_offset_in_bytes(0), O0); // get receiver for not-static case

    // lock the mirror, not the Klass*
    __ load_mirror(O0, Lmethod);

#ifdef ASSERT
    __ tst(O0);
    __ breakpoint_trap(Assembler::zero, Assembler::ptr_cc);
#endif // ASSERT

    __ bind(done);
  }

  __ add_monitor_to_stack(true, noreg, noreg);  // allocate monitor elem
  __ st_ptr( O0, Lmonitors, BasicObjectLock::obj_offset_in_bytes());   // store object
  // __ untested("lock_object from method entry");
  __ lock_object(Lmonitors, O0);
}

// See if we've got enough room on the stack for locals plus overhead below
// JavaThread::stack_overflow_limit(). If not, throw a StackOverflowError
// without going through the signal handler, i.e., reserved and yellow zones
// will not be made usable. The shadow zone must suffice to handle the
// overflow.
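//
// A sketch of the logic generated below (not a literal restatement):
//   if (frame_size_in_bytes <= page_size)                     ok
//   else if (SP > stack_overflow_limit + frame_size_in_bytes) ok
//   else          jump to the shared StackOverflowError stub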
void TemplateInterpreterGenerator::generate_stack_overflow_check(Register Rframe_size,
                                                                 Register Rscratch) {
  const int page_size = os::vm_page_size();
  Label after_frame_check;

  assert_different_registers(Rframe_size, Rscratch);

  __ set(page_size, Rscratch);
  __ cmp_and_br_short(Rframe_size, Rscratch, Assembler::lessEqual, Assembler::pt, after_frame_check);

  // Get the stack overflow limit, and in debug, verify it is non-zero.
  __ ld_ptr(G2_thread, JavaThread::stack_overflow_limit_offset(), Rscratch);
#ifdef ASSERT
  Label limit_ok;
  __ br_notnull_short(Rscratch, Assembler::pn, limit_ok);
  __ stop("stack overflow limit is zero in generate_stack_overflow_check");
  __ bind(limit_ok);
#endif

  // Add in the size of the frame (which is the same as subtracting it from the
  // SP, which would take another register).
  __ add(Rscratch, Rframe_size, Rscratch);

  // The frame is greater than one page in size, so check against
  // the bottom of the stack.
  __ cmp_and_brx_short(SP, Rscratch, Assembler::greaterUnsigned, Assembler::pt, after_frame_check);

  // The stack will overflow, throw an exception.

  // Note that SP is restored to sender's sp (in the delay slot). This
  // is necessary if the sender's frame is an extended compiled frame
  // (see gen_c2i_adapter()) and safer anyway in case of JSR292
  // adaptations.

  // Note also that the restored frame is not necessarily interpreted.
  // Use the shared runtime version of the StackOverflowError.
  assert(StubRoutines::throw_StackOverflowError_entry() != NULL, "stub not yet generated");
  AddressLiteral stub(StubRoutines::throw_StackOverflowError_entry());
  __ jump_to(stub, Rscratch);
  __ delayed()->mov(O5_savedSP, SP);

  // If you get to here, then there is enough stack space.
  __ bind(after_frame_check);
}


//
// Generate a fixed interpreter frame. This is identical setup for interpreted
// methods and for native methods hence the shared code.


//----------------------------------------------------------------------------------------------------
// Stack frame layout
//
// When control flow reaches any of the entry types for the interpreter
// the following holds ->
//
// C2 Calling Conventions:
//
// The entry code below assumes that the following registers are set
// when coming in:
//    G5_method: holds the Method* of the method to call
//    Lesp:    points to the TOS of the callers expression stack
//             after having pushed all the parameters
//
// The entry code does the following to setup an interpreter frame
//   pop parameters from the callers stack by adjusting Lesp
//   set O0 to Lesp
//   compute X = (max_locals - num_parameters)
//   bump SP up by X to accommodate the extra locals
//   compute X = max_expression_stack
//               + vm_local_words
//               + 16 words of register save area
//   save frame doing a save sp, -X, sp growing towards lower addresses
//   set Lbcp, Lmethod, LcpoolCache
//   set Llocals to i0
//   set Lmonitors to FP - rounded_vm_local_words
//   set Lesp to Lmonitors - 4
//
//  The frame has now been setup to do the rest of the entry code

// Try this optimization:  Most method entries could live in a
// "one size fits all" stack frame without all the dynamic size
// calculations.  It might be profitable to do all this calculation
// statically and approximately for "small enough" methods.

//-----------------------------------------------------------------------------------------------

// C1 Calling conventions
//
// Upon method entry, the following registers are setup:
//
// g2 G2_thread: current thread
// g5 G5_method: method to activate
// g4 Gargs  : pointer to last argument
//
//
// Stack:
//
// +---------------+ <--- sp
// |               |
// : reg save area :
// |               |
// +---------------+ <--- sp + 0x40
// |               |
// : extra 7 slots :      note: these slots are not really needed for the interpreter (fix later)
// |               |
// +---------------+ <--- sp + 0x5c
// |               |
// :     free      :
// |               |
// +---------------+ <--- Gargs
// |               |
// :   arguments   :
// |               |
// +---------------+
// |               |
//
//
//
// AFTER FRAME HAS BEEN SETUP for method interpretation the stack looks like:
//
// +---------------+ <--- sp
// |               |
// : reg save area :
// |               |
// +---------------+ <--- sp + 0x40
// |               |
// : extra 7 slots :      note: these slots are not really needed for the interpreter (fix later)
// |               |
// +---------------+ <--- sp + 0x5c
// |               |
// :               :
// |               | <--- Lesp
// +---------------+ <--- Lmonitors (fp - 0x18)
// |   VM locals   |
// +---------------+ <--- fp
// |               |
// : reg save area :
// |               |
// +---------------+ <--- fp + 0x40
// |               |
// : extra 7 slots :      note: these slots are not really needed for the interpreter (fix later)
// |               |
// +---------------+ <--- fp + 0x5c
// |               |
// :     free      :
// |               |
// +---------------+
// |               |
// : nonarg locals :
// |               |
// +---------------+
// |               |
// :   arguments   :
// |               | <--- Llocals
// +---------------+ <--- Gargs
// |               |

void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
  //
  //
  // The entry code sets up a new interpreter frame in 4 steps:
  //
  // 1) Increase caller's SP for the extra local space needed:
  //    (check for overflow)
  //    Efficient implementation of xload/xstore bytecodes requires
  //    that arguments and non-argument locals are in a contiguously
  //    addressable memory block => non-argument locals must be
  //    allocated in the caller's frame.
  //
  // 2) Create a new stack frame and register window:
  //    The new stack frame must provide space for the standard
  //    register save area, the maximum java expression stack size,
  //    the monitor slots (0 slots initially), and some frame local
  //    scratch locations.
  //
  // 3) The following interpreter activation registers must be setup:
  //    Lesp       : expression stack pointer
  //    Lbcp       : bytecode pointer
  //    Lmethod    : method
  //    Llocals    : locals pointer
  //    Lmonitors  : monitor pointer
  //    LcpoolCache: constant pool cache
  //
  // 4) Initialize the non-argument locals if necessary:
  //    Non-argument locals may need to be initialized to NULL
  //    for GC to work. If the oop-map information is accurate
  //    (in the absence of the JSR problem), no initialization
  //    is necessary.
  //
  // (gri - 2/25/2000)


  int rounded_vm_local_words = align_up((int)frame::interpreter_frame_vm_local_words, WordsPerLong );

  const int extra_space =
    rounded_vm_local_words +                   // frame local scratch space
    Method::extra_stack_entries() +            // extra stack for jsr 292
    frame::memory_parameter_word_sp_offset +   // register save area
    (native_call ? frame::interpreter_frame_extra_outgoing_argument_words : 0);

  const Register Glocals_size = G3;
  const Register RconstMethod = Glocals_size;
  const Register Otmp1 = O3;
  const Register Otmp2 = O4;
  // Lscratch can't be used as a temporary because the call_stub uses
  // it to assert that the stack frame was setup correctly.
  const Address constMethod       (G5_method, Method::const_offset());
  const Address size_of_parameters(RconstMethod, ConstMethod::size_of_parameters_offset());

  __ ld_ptr( constMethod, RconstMethod );
  __ lduh( size_of_parameters, Glocals_size);

  // Gargs points to first local + BytesPerWord
  // Set the saved SP after the register window save
  //
  assert_different_registers(Gargs, Glocals_size, Gframe_size, O5_savedSP);
  __ sll(Glocals_size, Interpreter::logStackElementSize, Otmp1);
  __ add(Gargs, Otmp1, Gargs);

  if (native_call) {
    __ calc_mem_param_words( Glocals_size, Gframe_size );
    __ add( Gframe_size,  extra_space, Gframe_size);
    __ round_to( Gframe_size, WordsPerLong );
    __ sll( Gframe_size, LogBytesPerWord, Gframe_size );

    // Native calls don't need the stack size check since they have no
    // expression stack and the arguments are already on the stack and
    // we only add a handful of words to the stack.
  } else {

    //
    // Compute number of locals in method apart from incoming parameters
    //
    const Address size_of_locals(Otmp1, ConstMethod::size_of_locals_offset());
    __ ld_ptr(constMethod, Otmp1);
    __ lduh(size_of_locals, Otmp1);
    __ sub(Otmp1, Glocals_size, Glocals_size);
    __ round_to(Glocals_size, WordsPerLong);
    __ sll(Glocals_size, Interpreter::logStackElementSize, Glocals_size);

    // See if the frame is greater than one page in size. If so,
    // then we need to verify there is enough stack space remaining.
    // Frame_size = (max_stack + extra_space) * BytesPerWord;
    __ ld_ptr(constMethod, Gframe_size);
    __ lduh(Gframe_size, in_bytes(ConstMethod::max_stack_offset()), Gframe_size);
    __ add(Gframe_size, extra_space, Gframe_size);
    __ round_to(Gframe_size, WordsPerLong);
    __ sll(Gframe_size, Interpreter::logStackElementSize, Gframe_size);

    // Add in java locals size for stack overflow check only
    __ add(Gframe_size, Glocals_size, Gframe_size);

    const Register Otmp2 = O4;
    assert_different_registers(Otmp1, Otmp2, O5_savedSP);
    generate_stack_overflow_check(Gframe_size, Otmp1);

    __ sub(Gframe_size, Glocals_size, Gframe_size);

    //
    // bump SP to accommodate the extra locals
    //
    __ sub(SP, Glocals_size, SP);
  }

  //
  // now set up a stack frame with the size computed above
  //
  __ neg( Gframe_size );
  __ save( SP, Gframe_size, SP );

  //
  // now set up all the local cache registers
  //
  // NOTE: At this point, Lbyte_code/Lscratch has been modified. Note
  // that all present references to Lbyte_code initialize the register
  // immediately before use
  if (native_call) {
    __ mov(G0, Lbcp);
  } else {
    __ ld_ptr(G5_method, Method::const_offset(), Lbcp);
    __ add(Lbcp, in_bytes(ConstMethod::codes_offset()), Lbcp);
  }
  __ mov( G5_method, Lmethod);                 // set Lmethod
  // Get mirror and store it in the frame as GC root for this Method*
  Register mirror = LcpoolCache;
  __ load_mirror(mirror, Lmethod);
  __ st_ptr(mirror, FP, (frame::interpreter_frame_mirror_offset * wordSize) + STACK_BIAS);
  __ get_constant_pool_cache(LcpoolCache);     // set LcpoolCache
  __ sub(FP, rounded_vm_local_words * BytesPerWord, Lmonitors ); // set Lmonitors
  __ add(Lmonitors, STACK_BIAS, Lmonitors);    // Account for 64 bit stack bias
  __ sub(Lmonitors, BytesPerWord, Lesp);       // set Lesp

  // setup interpreter activation registers
  __ sub(Gargs, BytesPerWord, Llocals);        // set Llocals

  if (ProfileInterpreter) {
    __ set_method_data_pointer();
  }

}

// Method entry for java.lang.ref.Reference.get.
address TemplateInterpreterGenerator::generate_Reference_get_entry(void) {
#if INCLUDE_ALL_GCS
  // Code: _aload_0, _getfield, _areturn
  // parameter size = 1
  //
  // The code that gets generated by this routine is split into 2 parts:
  //    1. The "intrinsified" code for G1 (or any SATB based GC),
  //    2. The slow path - which is an expansion of the regular method entry.
  //
  // Notes:-
  // * In the G1 code we do not check whether we need to block for
  //   a safepoint. If G1 is enabled then we must execute the specialized
  //   code for Reference.get (except when the Reference object is null)
  //   so that we can log the value in the referent field with an SATB
  //   update buffer.
  //   If the code for the getfield template is modified so that the
  //   G1 pre-barrier code is executed when the current method is
  //   Reference.get() then going through the normal method entry
  //   will be fine.
  // * The G1 code can, however, check the receiver object (the instance
  //   of java.lang.Reference) and jump to the slow path if null. If the
  //   Reference object is null then we obviously cannot fetch the referent
  //   and so we don't need to call the G1 pre-barrier. Thus we can use the
  //   regular method entry code to generate the NPE.
  //
  // This code is based on generate_accessor_entry.

  address entry = __ pc();

  const int referent_offset = java_lang_ref_Reference::referent_offset;
  guarantee(referent_offset > 0, "referent offset not initialized");

  if (UseG1GC) {
    Label slow_path;

    // In the G1 code we don't check if we need to reach a safepoint. We
    // continue and the thread will safepoint at the next bytecode dispatch.

    // Check if local 0 != NULL
    // If the receiver is null then it is OK to jump to the slow path.
    __ ld_ptr(Gargs, G0, Otos_i ); // get local 0
    // check if local 0 == NULL and go to the slow path
    __ cmp_and_brx_short(Otos_i, 0, Assembler::equal, Assembler::pn, slow_path);


    // Load the value of the referent field.
    if (Assembler::is_simm13(referent_offset)) {
      __ load_heap_oop(Otos_i, referent_offset, Otos_i);
    } else {
      __ set(referent_offset, G3_scratch);
      __ load_heap_oop(Otos_i, G3_scratch, Otos_i);
    }
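    // (is_simm13: SPARC load/store displacements are signed 13-bit
    // immediates, so an offset outside that range must first be
    // materialized in a scratch register, as in the else branch above.)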

    // Generate the G1 pre-barrier code to log the value of
    // the referent field in an SATB buffer. Note with
    // these parameters the pre-barrier does not generate
    // the load of the previous value

    __ g1_write_barrier_pre(noreg /* obj */, noreg /* index */, 0 /* offset */,
                            Otos_i /* pre_val */,
                            G3_scratch /* tmp */,
                            true /* preserve_o_regs */);

    // _areturn
    __ retl();                      // return from leaf routine
    __ delayed()->mov(O5_savedSP, SP);

    // Generate regular method entry
    __ bind(slow_path);
    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals));
    return entry;
  }
#endif // INCLUDE_ALL_GCS

  // If G1 is not enabled then attempt to go through the accessor entry point
  // Reference.get is an accessor
  return NULL;
}

/**
 * Method entry for static native methods:
 *   int java.util.zip.CRC32.update(int crc, int b)
 */
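
// The byte-at-a-time kernel used below (update_byte_crc32) computes,
// conceptually, the standard table-driven step on the bit-inverted crc:
//   crc = crc_table[(crc ^ b) & 0xff] ^ (crc >> 8)
// matching java.util.zip.CRC32.update(int, int) semantics.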
address TemplateInterpreterGenerator::generate_CRC32_update_entry() {

  if (UseCRC32Intrinsics) {
    address entry = __ pc();

    Label L_slow_path;
    // If we need a safepoint check, generate full interpreter entry.
    ExternalAddress state(SafepointSynchronize::address_of_state());
    __ set(state, O2);
    __ set(SafepointSynchronize::_not_synchronized, O3);
    __ cmp_and_br_short(O2, O3, Assembler::notEqual, Assembler::pt, L_slow_path);

    // Load parameters
    const Register crc   = O0; // initial crc
    const Register val   = O1; // byte to update with
    const Register table = O2; // address of 256-entry lookup table

    __ ldub(Gargs, 3, val);
    __ lduw(Gargs, 8, crc);

    __ set(ExternalAddress(StubRoutines::crc_table_addr()), table);

    __ not1(crc); // ~crc
    __ clruwu(crc);
    __ update_byte_crc32(crc, val, table);
    __ not1(crc); // ~crc

    // result in O0
    __ retl();
    __ delayed()->nop();

    // generate a vanilla native entry as the slow path
    __ bind(L_slow_path);
    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native));
    return entry;
  }
  return NULL;
}

/**
 * Method entry for static native methods:
 *   int java.util.zip.CRC32.updateBytes(int crc, byte[] b, int off, int len)
 *   int java.util.zip.CRC32.updateByteBuffer(int crc, long buf, int off, int len)
 */
address TemplateInterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) {

  if (UseCRC32Intrinsics) {
    address entry = __ pc();

    Label L_slow_path;
    // If we need a safepoint check, generate full interpreter entry.
    ExternalAddress state(SafepointSynchronize::address_of_state());
    __ set(state, O2);
    __ set(SafepointSynchronize::_not_synchronized, O3);
    __ cmp_and_br_short(O2, O3, Assembler::notEqual, Assembler::pt, L_slow_path);

    // Load parameters from the stack
    const Register crc    = O0; // initial crc
    const Register buf    = O1; // source java byte array address
    const Register len    = O2; // len
    const Register offset = O3; // offset

    // Arguments are reversed on java expression stack
    // Calculate address of start element
    if (kind == Interpreter::java_util_zip_CRC32_updateByteBuffer) {
      __ lduw(Gargs, 0,  len);
      __ lduw(Gargs, 8,  offset);
      __ ldx( Gargs, 16, buf);
      __ lduw(Gargs, 32, crc);
      __ add(buf, offset, buf);
    } else {
      __ lduw(Gargs, 0,  len);
      __ lduw(Gargs, 8,  offset);
      __ ldx( Gargs, 16, buf);
      __ lduw(Gargs, 24, crc);
      __ add(buf, arrayOopDesc::base_offset_in_bytes(T_BYTE), buf); // account for the header size
      __ add(buf, offset, buf);
    }

    // Call the crc32 kernel
    __ MacroAssembler::save_thread(L7_thread_cache);
    __ kernel_crc32(crc, buf, len, O3);
    __ MacroAssembler::restore_thread(L7_thread_cache);

    // result in O0
    __ retl();
    __ delayed()->nop();

    // generate a vanilla native entry as the slow path
    __ bind(L_slow_path);
    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native));
    return entry;
  }
  return NULL;
}

/**
 * Method entry for intrinsic-candidate (non-native) methods:
 *   int java.util.zip.CRC32C.updateBytes(int crc, byte[] b, int off, int end)
 *   int java.util.zip.CRC32C.updateDirectByteBuffer(int crc, long buf, int off, int end)
 * Unlike CRC32, CRC32C does not have any methods marked as native
 * CRC32C also uses an "end" variable instead of the length variable CRC32 uses
 */
address TemplateInterpreterGenerator::generate_CRC32C_updateBytes_entry(AbstractInterpreter::MethodKind kind) {

  if (UseCRC32CIntrinsics) {
    address entry = __ pc();

    // Load parameters from the stack
    const Register crc    = O0; // initial crc
    const Register buf    = O1; // source java byte array address
    const Register offset = O2; // offset
    const Register end    = O3; // index of last element to process
    const Register len    = O2; // len argument to the kernel
    const Register table  = O3; // crc32c lookup table address

    // Arguments are reversed on java expression stack
    // Calculate address of start element
    if (kind == Interpreter::java_util_zip_CRC32C_updateDirectByteBuffer) {
      __ lduw(Gargs, 0,  end);
      __ lduw(Gargs, 8,  offset);
      __ ldx( Gargs, 16, buf);
      __ lduw(Gargs, 32, crc);
      __ add(buf, offset, buf);
      __ sub(end, offset, len);
    } else {
      __ lduw(Gargs, 0,  end);
      __ lduw(Gargs, 8,  offset);
      __ ldx( Gargs, 16, buf);
      __ lduw(Gargs, 24, crc);
      __ add(buf, arrayOopDesc::base_offset_in_bytes(T_BYTE), buf); // account for the header size
      __ add(buf, offset, buf);
      __ sub(end, offset, len);
    }

    // Call the crc32c kernel
    __ MacroAssembler::save_thread(L7_thread_cache);
    __ kernel_crc32c(crc, buf, len, table);
    __ MacroAssembler::restore_thread(L7_thread_cache);

    // result in O0
    __ retl();
    __ delayed()->nop();

    return entry;
  }
  return NULL;
}

/* Math routines only partially supported.
 *
 *   Providing support for fma (float/double) only.
 */
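
// Note on the operand loads below (assuming each expression-stack slot is one
// stack word): arguments are reversed on the java expression stack, so for
// fmaF the last argument c sits at Gargs + 0, b at + 8 and a at + 16, while
// fmaD's two-slot doubles sit at + 0, + 16 and + 32.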
address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKind kind)
{
  if (!InlineIntrinsics) return NULL; // Generate a vanilla entry

  address entry = __ pc();

  switch (kind) {
    case Interpreter::java_lang_math_fmaF:
      if (UseFMA) {
        // float .fma(float a, float b, float c)
        const FloatRegister ra = F1;
        const FloatRegister rb = F2;
        const FloatRegister rc = F3;
        const FloatRegister rd = F0; // Result.

        __ ldf(FloatRegisterImpl::S, Gargs,  0, rc);
        __ ldf(FloatRegisterImpl::S, Gargs,  8, rb);
        __ ldf(FloatRegisterImpl::S, Gargs, 16, ra);

        __ fmadd(FloatRegisterImpl::S, ra, rb, rc, rd);
        __ retl();  // Result in F0 (rd).
        __ delayed()->mov(O5_savedSP, SP);

        return entry;
      }
      break;
    case Interpreter::java_lang_math_fmaD:
      if (UseFMA) {
        // double .fma(double a, double b, double c)
        const FloatRegister ra = F2; // D1
        const FloatRegister rb = F4; // D2
        const FloatRegister rc = F6; // D3
        const FloatRegister rd = F0; // D0 Result.

        __ ldf(FloatRegisterImpl::D, Gargs,  0, rc);
        __ ldf(FloatRegisterImpl::D, Gargs, 16, rb);
        __ ldf(FloatRegisterImpl::D, Gargs, 32, ra);

        __ fmadd(FloatRegisterImpl::D, ra, rb, rc, rd);
        __ retl();  // Result in D0 (rd).
        __ delayed()->mov(O5_savedSP, SP);

        return entry;
      }
      break;
    default:
      break;
  }
  return NULL;
}

// TODO: rather than touching all pages, check against stack_overflow_limit and bang yellow page to
// generate exception
void TemplateInterpreterGenerator::bang_stack_shadow_pages(bool native_call) {
  // Quick & dirty stack overflow checking: bang the stack & handle trap.
  // Note that we do the banging after the frame is setup, since the exception
  // handling code expects to find a valid interpreter frame on the stack.
  // Doing the banging earlier fails if the caller frame is not an interpreter
  // frame.
  // (Also, the exception throwing code expects to unlock any synchronized
  // method receiver, so do the banging after locking the receiver.)

  // Bang each page in the shadow zone. We can't assume it's been done for
  // an interpreter frame with greater than a page of locals, so each page
  // needs to be checked.  Only true for non-native.
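
  // (Illustrative: with a 4K page and a 16K shadow zone, n_shadow_pages
  // is 4; a non-native entry bangs offsets 1*4K .. 4*4K below SP, while a
  // native entry starts at the last page, its frame holding no large
  // locals area.)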
  if (UseStackBanging) {
    const int page_size = os::vm_page_size();
    const int n_shadow_pages = ((int)JavaThread::stack_shadow_zone_size()) / page_size;
    const int start_page = native_call ? n_shadow_pages : 1;
    for (int pages = start_page; pages <= n_shadow_pages; pages++) {
      __ bang_stack_with_offset(pages*page_size);
    }
  }
}

//
// Interpreter stub for calling a native method. (asm interpreter)
// This sets up a somewhat different looking stack for calling the native method
// than the typical interpreter frame setup.
//

address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
  address entry = __ pc();

  // the following temporary registers are used during frame creation
  const Register Gtmp1 = G3_scratch ;
  const Register Gtmp2 = G1_scratch;
  bool inc_counter  = UseCompiler || CountCompiledCalls || LogTouchedMethods;

  // make sure registers are different!
  assert_different_registers(G2_thread, G5_method, Gargs, Gtmp1, Gtmp2);

  const Address Laccess_flags(Lmethod, Method::access_flags_offset());

  const Register Glocals_size = G3;
  assert_different_registers(Glocals_size, G4_scratch, Gframe_size);

  // make sure method is native & not abstract
  // rethink these assertions - they can be simplified and shared (gri 2/25/2000)
#ifdef ASSERT
  __ ld(G5_method, Method::access_flags_offset(), Gtmp1);
  { Label L;
    __ btst(JVM_ACC_NATIVE, Gtmp1);
    __ br(Assembler::notZero, false, Assembler::pt, L);
    __ delayed()->nop();
    __ stop("tried to execute non-native method as native");
    __ bind(L);
  }
  { Label L;
    __ btst(JVM_ACC_ABSTRACT, Gtmp1);
    __ br(Assembler::zero, false, Assembler::pt, L);
    __ delayed()->nop();
    __ stop("tried to execute abstract method as non-abstract");
    __ bind(L);
  }
#endif // ASSERT

  // generate the code to allocate the interpreter stack frame
  generate_fixed_frame(true);

  //
  // No locals to initialize for native method
  //

  // this slot will be set later, we initialize it to null here just in
  // case we get a GC before the actual value is stored later
  __ st_ptr(G0, FP, (frame::interpreter_frame_oop_temp_offset * wordSize) + STACK_BIAS);

  const Address do_not_unlock_if_synchronized(G2_thread,
    JavaThread::do_not_unlock_if_synchronized_offset());
  // Since at this point in the method invocation the exception handler
  // would try to exit the monitor of a synchronized method which has not
  // been entered yet, we set the thread-local variable
  // _do_not_unlock_if_synchronized to true. If any exception is thrown by
  // the runtime, the exception handling code (i.e.,
  // unlock_if_synchronized_method) will check this thread-local flag.
  // The flag forces an unwind in the topmost interpreter frame without
  // performing an unlock while doing so.

  __ movbool(true, G3_scratch);
  __ stbool(G3_scratch, do_not_unlock_if_synchronized);

  // increment invocation counter and check for overflow
  //
  // Note: checking for negative value instead of overflow
  //       so we have a 'sticky' overflow test (may be of
  //       importance as soon as we have true MT/MP)
  Label invocation_counter_overflow;
  Label Lcontinue;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow, NULL, NULL);
  }
  __ bind(Lcontinue);

  bang_stack_shadow_pages(true);

  // reset the _do_not_unlock_if_synchronized flag
  __ stbool(G0, do_not_unlock_if_synchronized);

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so method is not locked if overflows.

  if (synchronized) {
    lock_method();
  } else {
#ifdef ASSERT
    { Label ok;
      __ ld(Laccess_flags, O0);
      __ btst(JVM_ACC_SYNCHRONIZED, O0);
      __ br( Assembler::zero, false, Assembler::pt, ok);
      __ delayed()->nop();
      __ stop("method needs synchronization");
      __ bind(ok);
    }
#endif // ASSERT
  }


  // start execution
  __ verify_thread();

  // JVMTI support
  __ notify_method_entry();

  // native call

  // (note that O0 is never an oop--at most it is a handle)
  // It is important not to smash any handles created by this call,
  // until any oop handle in O0 is dereferenced.

  // (note that the space for outgoing params is preallocated)

  // get signature handler
  { Label L;
    Address signature_handler(Lmethod, Method::signature_handler_offset());
    __ ld_ptr(signature_handler, G3_scratch);
    __ br_notnull_short(G3_scratch, Assembler::pt, L);
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), Lmethod);
    __ ld_ptr(signature_handler, G3_scratch);
    __ bind(L);
  }

  // Push a new frame so that the args will really be stored in it.
  // Copy a few locals across so the new frame has the variables
  // we need, but these values will be dead at the jni call and
  // therefore not gc volatile like the values in the current
  // frame (Lmethod in particular).

  // Flush the method pointer to the register save area
  __ st_ptr(Lmethod, SP, (Lmethod->sp_offset_in_saved_window() * wordSize) + STACK_BIAS);
  __ mov(Llocals, O1);

  // calculate where the mirror handle body is allocated in the interpreter frame:
  __ add(FP, (frame::interpreter_frame_oop_temp_offset * wordSize) + STACK_BIAS, O2);

  // Calculate current frame size
  __ sub(SP, FP, O3);         // Calculate negative of current frame size
  __ save(SP, O3, SP);        // Allocate an identical sized frame

  // Note I7 has leftover trash. Slow signature handler will fill it in
  // should we get there. Normal jni call will set reasonable last_Java_pc
  // below (and fix I7 so the stack trace doesn't have a meaningless frame
  // in it).

  // Load interpreter frame's Lmethod into same register here

  __ ld_ptr(FP, (Lmethod->sp_offset_in_saved_window() * wordSize) + STACK_BIAS, Lmethod);

  __ mov(I1, Llocals);
  __ mov(I2, Lscratch2);     // save the address of the mirror


  // ONLY Lmethod and Llocals are valid here!

  // call signature handler. It will move the arg properly since Llocals in current frame
  // matches that in outer frame

  __ callr(G3_scratch, 0);
  __ delayed()->nop();

  // Result handler is in Lscratch

  // Reload interpreter frame's Lmethod since slow signature handler may block
  __ ld_ptr(FP, (Lmethod->sp_offset_in_saved_window() * wordSize) + STACK_BIAS, Lmethod);

  { Label not_static;

    __ ld(Laccess_flags, O0);
    __ btst(JVM_ACC_STATIC, O0);
    __ br( Assembler::zero, false, Assembler::pt, not_static);
    // get native function entry point (O0 is a good temp until the very end)
    __ delayed()->ld_ptr(Lmethod, in_bytes(Method::native_function_offset()), O0);
    // for static methods insert the mirror argument
    __ load_mirror(O1, Lmethod);
#ifdef ASSERT
    if (!PrintSignatureHandlers)  // do not dirty the output with this
    { Label L;
      __ br_notnull_short(O1, Assembler::pt, L);
      __ stop("mirror is missing");
      __ bind(L);
    }
#endif // ASSERT
    __ st_ptr(O1, Lscratch2, 0);
    __ mov(Lscratch2, O1);
    __ bind(not_static);
  }

  // At this point, arguments have been copied off of stack into
  // their JNI positions, which are O1..O5 and SP[68..].
  // Oops are boxed in-place on the stack, with handles copied to arguments.
  // The result handler is in Lscratch.  O0 will shortly hold the JNIEnv*.

#ifdef ASSERT
  { Label L;
    __ br_notnull_short(O0, Assembler::pt, L);
    __ stop("native entry point is missing");
    __ bind(L);
  }
#endif // ASSERT

  //
  // setup the frame anchor
  //
  // The scavenge function only needs to know that the PC of this frame is
  // in the interpreter method entry code, it doesn't need to know the exact
  // PC and hence we can use O7 which points to the return address from the
  // previous call in the code stream (signature handler function)
  //
  // The other trick is we set last_Java_sp to FP instead of the usual SP because
  // we have pushed the extra frame in order to protect the volatile register(s)
  // in that frame when we return from the jni call
  //

  __ set_last_Java_frame(FP, O7);
  __ mov(O7, I7);  // make dummy interpreter frame look like one above,
                   // not meaningless information that'll confuse me.

  // flush the windows now. We don't care about the current (protection) frame
  // only the outer frames

  __ flushw();

  // mark windows as flushed
  Address flags(G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::flags_offset());
  __ set(JavaFrameAnchor::flushed, G3_scratch);
  __ st(G3_scratch, flags);

  // Transition from _thread_in_Java to _thread_in_native. We are already safepoint ready.

  Address thread_state(G2_thread, JavaThread::thread_state_offset());
#ifdef ASSERT
  { Label L;
    __ ld(thread_state, G3_scratch);
    __ cmp_and_br_short(G3_scratch, _thread_in_Java, Assembler::equal, Assembler::pt, L);
    __ stop("Wrong thread state in native stub");
    __ bind(L);
  }
#endif // ASSERT
  __ set(_thread_in_native, G3_scratch);
  __ st(G3_scratch, thread_state);

  // Call the jni method, using the delay slot to set the JNIEnv* argument.
  __ save_thread(L7_thread_cache); // save Gthread
  __ callr(O0, 0);
  __ delayed()->
     add(L7_thread_cache, in_bytes(JavaThread::jni_environment_offset()), O0);

  // Back from jni method Lmethod in this frame is DEAD, DEAD, DEAD

  __ restore_thread(L7_thread_cache); // restore G2_thread
  __ reinit_heapbase();

  // must we block?

  // Block, if necessary, before resuming in _thread_in_Java state.
  // In order for GC to work, don't clear the last_Java_sp until after blocking.
  { Label no_block;
    AddressLiteral sync_state(SafepointSynchronize::address_of_state());

    // Switch thread to "native transition" state before reading the synchronization state.
    // This additional state is necessary because reading and testing the synchronization
    // state is not atomic w.r.t. GC, as this scenario demonstrates:
    //     Java thread A, in _thread_in_native state, loads _not_synchronized and is preempted.
    //     VM thread changes sync state to synchronizing and suspends threads for GC.
    //     Thread A is resumed to finish this native method, but doesn't block here since it
1408    //     didn't see any synchronization in progress, and escapes.
1409    __ set(_thread_in_native_trans, G3_scratch);
1410    __ st(G3_scratch, thread_state);
1411    if (os::is_MP()) {
1412      if (UseMembar) {
1413        // Force this write out before the read below
1414        __ membar(Assembler::StoreLoad);
1415      } else {
1416        // Write the serialization page so the VM thread can do a pseudo remote membar.
1417        // We use the current thread pointer to calculate a thread specific
1418        // offset to write to within the page. This minimizes bus traffic
1419        // due to cache line collision.
1420        __ serialize_memory(G2_thread, G1_scratch, G3_scratch);
1421      }
1422    }
1423    __ load_contents(sync_state, G3_scratch);
1424    __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);
1425
1426    Label L;
1427    __ br(Assembler::notEqual, false, Assembler::pn, L);
1428    __ delayed()->ld(G2_thread, JavaThread::suspend_flags_offset(), G3_scratch);
1429    __ cmp_and_br_short(G3_scratch, 0, Assembler::equal, Assembler::pt, no_block);
1430    __ bind(L);
1431
1432    // Block.  Save any potential method result value before the operation and
1433    // use a leaf call to leave the last_Java_frame setup undisturbed.
1434    save_native_result();
1435    __ call_VM_leaf(L7_thread_cache,
1436                    CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans),
1437                    G2_thread);
1438
1439    // Restore any method result value
1440    restore_native_result();
1441    __ bind(no_block);
1442  }
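
  // As a C-level sketch, the safepoint dance above amounts to (control flow
  // simplified; names taken from the calls emitted above):
  //
  //   thread->set_thread_state(_thread_in_native_trans);
  //   <StoreLoad membar, or serialization-page write>
  //   if (SafepointSynchronize::_state != _not_synchronized ||
  //       thread->suspend_flags() != 0) {
  //     // save/restore any native result around the leaf call
  //     JavaThread::check_special_condition_for_native_trans(thread);
  //   }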
1443
1444  // Clear the frame anchor now
1445
1446  __ reset_last_Java_frame();
1447
1448  // Move the result handler address into a global register (Lscratch dies with the window restore)
1449  __ mov(Lscratch, G3_scratch);
1450  // pop the register window; the native result in O0 lands in the outer frame's O0
1451  __ restore(O0, G0, O0);
1452
1453  // Move the result handler back into the (outer frame's) expected register
1454  __ mov(G3_scratch, Lscratch);
1455
1456  // Back in normal (native) interpreter frame. State is thread_in_native_trans
1457  // switch to thread_in_Java.
1458
1459  __ set(_thread_in_Java, G3_scratch);
1460  __ st(G3_scratch, thread_state);
1461
1462  if (CheckJNICalls) {
1463    // clear_pending_jni_exception_check
1464    __ st_ptr(G0, G2_thread, JavaThread::pending_jni_exception_check_fn_offset());
1465  }
1466
1467  // reset handle block
1468  __ ld_ptr(G2_thread, JavaThread::active_handles_offset(), G3_scratch);
1469  __ st(G0, G3_scratch, JNIHandleBlock::top_offset_in_bytes());
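  // In C++ terms this is roughly (sketch; the store above zeroes the top
  // field of the current JNIHandleBlock, logically freeing every handle
  // allocated during the native call):
  //
  //   thread->active_handles()->_top = 0;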
1470
1471  // If we have an oop result, store it where it will be safe from any further
1472  // gc until we return, now that we've released the handle that was protecting it
1473
1474  { Label no_oop, store_result;
1475
1476    __ set((intptr_t)AbstractInterpreter::result_handler(T_OBJECT), G3_scratch);
1477    __ cmp_and_brx_short(G3_scratch, Lscratch, Assembler::notEqual, Assembler::pt, no_oop);
1478    // Unbox oop result, e.g. JNIHandles::resolve value in O0.
1479    __ br_null(O0, false, Assembler::pn, store_result); // Use NULL as-is.
1480    __ delayed()->andcc(O0, JNIHandles::weak_tag_mask, G0); // Test for jweak
1481    __ brx(Assembler::zero, true, Assembler::pt, store_result);
1482    __ delayed()->ld_ptr(O0, 0, O0); // Maybe resolve (untagged) jobject.
1483    // Resolve jweak.
1484    __ ld_ptr(O0, -JNIHandles::weak_tag_value, O0);
1485#if INCLUDE_ALL_GCS
1486    if (UseG1GC) {
1487      __ g1_write_barrier_pre(noreg /* obj */,
1488                              noreg /* index */,
1489                              0 /* offset */,
1490                              O0 /* pre_val */,
1491                              G3_scratch /* tmp */,
1492                              true /* preserve_o_regs */);
1493    }
1494#endif // INCLUDE_ALL_GCS
1495    __ bind(store_result);
1496    // Store it where gc will look for it and result handler expects it.
1497    __ st_ptr(O0, FP, (frame::interpreter_frame_oop_temp_offset*wordSize) + STACK_BIAS);
1498
1499    __ bind(no_oop);
1500
1501  }
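
  // The unboxing above open-codes JNIHandles::resolve.  As a sketch (the tag
  // constants come from JNIHandles; the pointer arithmetic is illustrative):
  //
  //   if (O0 == NULL)                /* keep NULL as-is */;
  //   else if (O0 & weak_tag_mask)   O0 = *(oop*)(O0 - weak_tag_value);  // jweak
  //   else                           O0 = *(oop*)O0;                     // strong
  //
  // For G1 the resolved jweak value must additionally be fed through the
  // SATB pre-barrier, which is what g1_write_barrier_pre does above.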
1502
1503
1504  // handle exceptions (exception handling will handle unlocking!)
1505  { Label L;
1506    Address exception_addr(G2_thread, Thread::pending_exception_offset());
1507    __ ld_ptr(exception_addr, Gtemp);
1508    __ br_null_short(Gtemp, Assembler::pt, L);
1509    // Note: This could be handled more efficiently since we know that the native
1510    //       method doesn't have an exception handler. We could directly return
1511    //       to the exception handler for the caller.
1512    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_pending_exception));
1513    __ should_not_reach_here();
1514    __ bind(L);
1515  }
1516
1517  // JVMTI support (preserves thread register)
1518  __ notify_method_exit(true, ilgl, InterpreterMacroAssembler::NotifyJVMTI);
1519
1520  if (synchronized) {
1521    // save and restore any potential method result value around the unlocking operation
1522    save_native_result();
1523
1524    __ add( __ top_most_monitor(), O1);
1525    __ unlock_object(O1);
1526
1527    restore_native_result();
1528  }
1529
1530  // dispose of return address and remove activation
1531#ifdef ASSERT
1532  { Label ok;
1533    __ cmp_and_brx_short(I5_savedSP, FP, Assembler::greaterEqualUnsigned, Assembler::pt, ok);
1534    __ stop("bad I5_savedSP value");
1535    __ should_not_reach_here();
1536    __ bind(ok);
1537  }
1538#endif
1539  __ jmp(Lscratch, 0);
1540  __ delayed()->nop();
1541
1542  if (inc_counter) {
1543    // handle invocation counter overflow
1544    __ bind(invocation_counter_overflow);
1545    generate_counter_overflow(Lcontinue);
1546  }
1547
1548  return entry;
1549}
1550
1551
1552// Generic method entry to (asm) interpreter
1553address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
1554  address entry = __ pc();
1555
1556  bool inc_counter  = UseCompiler || CountCompiledCalls || LogTouchedMethods;
1557
1558  // the following temporary registers are used during frame creation
1559  const Register Gtmp1 = G3_scratch ;
1560  const Register Gtmp2 = G1_scratch;
1561
1562  // make sure registers are different!
1563  assert_different_registers(G2_thread, G5_method, Gargs, Gtmp1, Gtmp2);
1564
1565  const Address constMethod       (G5_method, Method::const_offset());
1566  // Seems like G5_method is live at the point this is used, so we could make this look consistent
1567  // and use it in the asserts.
1568  const Address access_flags      (Lmethod,   Method::access_flags_offset());
1569
1570  const Register Glocals_size = G3;
1571  assert_different_registers(Glocals_size, G4_scratch, Gframe_size);
1572
1573  // make sure method is not native & not abstract
1574  // rethink these assertions - they can be simplified and shared (gri 2/25/2000)
1575#ifdef ASSERT
1576  __ ld(G5_method, Method::access_flags_offset(), Gtmp1);
1577  { Label L;
1578    __ btst(JVM_ACC_NATIVE, Gtmp1);
1579    __ br(Assembler::zero, false, Assembler::pt, L);
1580    __ delayed()->nop();
1581    __ stop("tried to execute native method as non-native");
1582    __ bind(L);
1583  }
1584  { Label L;
1585    __ btst(JVM_ACC_ABSTRACT, Gtmp1);
1586    __ br(Assembler::zero, false, Assembler::pt, L);
1587    __ delayed()->nop();
1588    __ stop("tried to execute abstract method as non-abstract");
1589    __ bind(L);
1590  }
1591#endif // ASSERT
1592
1593  // generate the code to allocate the interpreter stack frame
1594
1595  generate_fixed_frame(false);
1596
1597  //
1598  // Code to initialize the extra (i.e. non-parm) locals
1599  //
1600  Register init_value = noreg;    // will be G0 if we must clear locals
1601  // The way the code was set up before, zerolocals was always true for vanilla java entries.
1602  // It could only be false for specialized entries like accessor or empty methods, which have
1603  // no extra locals, so the testing was a waste of time and the extra locals were always
1604  // initialized. We removed this extra complication from already over-complicated code.
1605
1606  init_value = G0;
1607  Label clear_loop;
1608
1609  const Register RconstMethod = O1;
1610  const Address size_of_parameters(RconstMethod, ConstMethod::size_of_parameters_offset());
1611  const Address size_of_locals    (RconstMethod, ConstMethod::size_of_locals_offset());
1612
1613  // NOTE: If you change the frame layout, this code will need to
1614  // be updated!
1615  __ ld_ptr( constMethod, RconstMethod );
1616  __ lduh( size_of_locals, O2 );
1617  __ lduh( size_of_parameters, O1 );
1618  __ sll( O2, Interpreter::logStackElementSize, O2);
1619  __ sll( O1, Interpreter::logStackElementSize, O1 );
1620  __ sub( Llocals, O2, O2 );
1621  __ sub( Llocals, O1, O1 );
1622
1623  __ bind( clear_loop );
1624  __ inc( O2, wordSize );
1625
1626  __ cmp( O2, O1 );
1627  __ brx( Assembler::lessEqualUnsigned, true, Assembler::pt, clear_loop );
1628  __ delayed()->st_ptr( init_value, O2, 0 );
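
  // In C terms the loop above clears exactly the non-parameter locals
  // (sketch; Llocals points at local #0 and locals grow toward lower
  // addresses, so the extra locals are the words below the parameters):
  //
  //   for (intptr_t* p = Llocals - (size_of_locals - 1);
  //        p <= Llocals - size_of_parameters; p++) {
  //     *p = 0;  // init_value is G0
  //   }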
1629
1630  const Address do_not_unlock_if_synchronized(G2_thread,
1631    JavaThread::do_not_unlock_if_synchronized_offset());
1632  // Since at this point in the method invocation the exception handler
1633  // would try to exit the monitor of a synchronized method which has not
1634  // been entered yet, we set the thread-local variable
1635  // _do_not_unlock_if_synchronized to true. If any exception is thrown by
1636  // the runtime, exception handling (i.e. unlock_if_synchronized_method) will
1637  // check this thread-local flag.
1638  __ movbool(true, G3_scratch);
1639  __ stbool(G3_scratch, do_not_unlock_if_synchronized);
1640
1641  __ profile_parameters_type(G1_scratch, G3_scratch, G4_scratch, Lscratch);
1642  // increment invocation counter and check for overflow
1643  //
1644  // Note: checking for negative value instead of overflow
1645  //       so we have a 'sticky' overflow test (may be of
1646  //       importance as soon as we have true MT/MP)
1647  Label invocation_counter_overflow;
1648  Label profile_method;
1649  Label profile_method_continue;
1650  Label Lcontinue;
1651  if (inc_counter) {
1652    generate_counter_incr(&invocation_counter_overflow, &profile_method, &profile_method_continue);
1653    if (ProfileInterpreter) {
1654      __ bind(profile_method_continue);
1655    }
1656  }
1657  __ bind(Lcontinue);
1658
1659  bang_stack_shadow_pages(false);
1660
1661  // reset the _do_not_unlock_if_synchronized flag
1662  __ stbool(G0, do_not_unlock_if_synchronized);
1663
1664  // check for synchronized methods
1665  // Must happen AFTER invocation_counter check and stack overflow check,
1666  // so the method is not locked if the counter overflows.
1667
1668  if (synchronized) {
1669    lock_method();
1670  } else {
1671#ifdef ASSERT
1672    { Label ok;
1673      __ ld(access_flags, O0);
1674      __ btst(JVM_ACC_SYNCHRONIZED, O0);
1675      __ br( Assembler::zero, false, Assembler::pt, ok);
1676      __ delayed()->nop();
1677      __ stop("method needs synchronization");
1678      __ bind(ok);
1679    }
1680#endif // ASSERT
1681  }
1682
1683  // start execution
1684
1685  __ verify_thread();
1686
1687  // jvmti support
1688  __ notify_method_entry();
1689
1690  // start executing instructions
1691  __ dispatch_next(vtos);
1692
1693
1694  if (inc_counter) {
1695    if (ProfileInterpreter) {
1696      // We have decided to profile this method in the interpreter
1697      __ bind(profile_method);
1698
1699      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
1700      __ set_method_data_pointer_for_bcp();
1701      __ ba_short(profile_method_continue);
1702    }
1703
1704    // handle invocation counter overflow
1705    __ bind(invocation_counter_overflow);
1706    generate_counter_overflow(Lcontinue);
1707  }
1708
1709  return entry;
1710}
1711
1712//----------------------------------------------------------------------------------------------------
1713// Exceptions
1714void TemplateInterpreterGenerator::generate_throw_exception() {
1715
1716  // Entry point in previous activation (i.e., if the caller was interpreted)
1717  Interpreter::_rethrow_exception_entry = __ pc();
1718  // O0: exception
1719
1720  // entry point for exceptions thrown within interpreter code
1721  Interpreter::_throw_exception_entry = __ pc();
1722  __ verify_thread();
1723  // expression stack is undefined here
1724  // O0: exception, i.e. Oexception
1725  // Lbcp: exception bcp
1726  __ verify_oop(Oexception);
1727
1728
1729  // expression stack must be empty before entering the VM in case of an exception
1730  __ empty_expression_stack();
1731  // find exception handler address and preserve exception oop
1732  // call C routine to find handler and jump to it
1733  __ call_VM(O1, CAST_FROM_FN_PTR(address, InterpreterRuntime::exception_handler_for_exception), Oexception);
1734  __ push_ptr(O1); // push exception for exception handler bytecodes
1735
1736  __ JMP(O0, 0); // jump to exception handler (may be the remove-activation entry!)
1737  __ delayed()->nop();
1738
1739
1740  // if the exception is not handled in the current frame
1741  // the frame is removed and the exception is rethrown
1742  // (i.e. exception continuation is _rethrow_exception)
1743  //
1744  // Note: At this point the bci still refers to the instruction which caused
1745  //       the exception and the expression stack is empty. Thus, for any VM calls
1746  //       at this point, GC will find a legal oop map (with empty expression stack).
1747
1748  // in current activation
1749  // tos: exception
1750  // Lbcp: exception bcp
1751
1752  //
1753  // JVMTI PopFrame support
1754  //
1755
1756  Interpreter::_remove_activation_preserving_args_entry = __ pc();
1757  Address popframe_condition_addr(G2_thread, JavaThread::popframe_condition_offset());
1758  // Set the popframe_processing bit in popframe_condition indicating that we are
1759  // currently handling popframe, so that call_VMs that may happen later do not trigger new
1760  // popframe handling cycles.
1761
1762  __ ld(popframe_condition_addr, G3_scratch);
1763  __ or3(G3_scratch, JavaThread::popframe_processing_bit, G3_scratch);
1764  __ stw(G3_scratch, popframe_condition_addr);
1765
1766  // Empty the expression stack, as in normal exception handling
1767  __ empty_expression_stack();
1768  __ unlock_if_synchronized_method(vtos, /* throw_monitor_exception */ false, /* install_monitor_exception */ false);
1769
1770  {
1771    // Check to see whether we are returning to a deoptimized frame.
1772    // (The PopFrame call ensures that the caller of the popped frame is
1773    // either interpreted or compiled and deoptimizes it if compiled.)
1774    // In this case, we can't call dispatch_next() after the frame is
1775    // popped, but instead must save the incoming arguments and restore
1776    // them after deoptimization has occurred.
1777    //
1778    // Note that we don't compare the return PC against the
1779    // deoptimization blob's unpack entry because of the presence of
1780    // adapter frames in C2.
1781    Label caller_not_deoptimized;
1782    __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, InterpreterRuntime::interpreter_contains), I7);
1783    __ br_notnull_short(O0, Assembler::pt, caller_not_deoptimized);
1784
1785    const Register Gtmp1 = G3_scratch;
1786    const Register Gtmp2 = G1_scratch;
1787    const Register RconstMethod = Gtmp1;
1788    const Address constMethod(Lmethod, Method::const_offset());
1789    const Address size_of_parameters(RconstMethod, ConstMethod::size_of_parameters_offset());
1790
1791    // Compute size of arguments for saving when returning to deoptimized caller
1792    __ ld_ptr(constMethod, RconstMethod);
1793    __ lduh(size_of_parameters, Gtmp1);
1794    __ sll(Gtmp1, Interpreter::logStackElementSize, Gtmp1);
1795    __ sub(Llocals, Gtmp1, Gtmp2);
1796    __ add(Gtmp2, wordSize, Gtmp2);
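    // At this point (sketch): Gtmp1 holds the parameter-block size in bytes
    // and Gtmp2 its lowest address, i.e. Llocals - (nparams - 1) words, since
    // parameters occupy Llocals downward.  The leaf call below is, in effect:
    //
    //   Deoptimization::popframe_preserve_args(thread, size /*Gtmp1*/,
    //                                          start /*Gtmp2*/);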
1797    // Save these arguments
1798    __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::popframe_preserve_args), G2_thread, Gtmp1, Gtmp2);
1799    // Inform deoptimization that it is responsible for restoring these arguments
1800    __ set(JavaThread::popframe_force_deopt_reexecution_bit, Gtmp1);
1801    Address popframe_condition_addr(G2_thread, JavaThread::popframe_condition_offset());
1802    __ st(Gtmp1, popframe_condition_addr);
1803
1804    // Return from the current method
1805    // The caller's SP was adjusted upon method entry to accommodate
1806    // the callee's non-argument locals. Undo that adjustment.
1807    __ ret();
1808    __ delayed()->restore(I5_savedSP, G0, SP);
1809
1810    __ bind(caller_not_deoptimized);
1811  }
1812
1813  // Clear the popframe condition flag
1814  __ stw(G0 /* popframe_inactive */, popframe_condition_addr);
1815
1816  // Get out of the current method (how this is done depends on the particular compiler calling
1817  // convention that the interpreter currently follows)
1818  // The caller's SP was adjusted upon method entry to accommodate
1819  // the callee's non-argument locals. Undo that adjustment.
1820  __ restore(I5_savedSP, G0, SP);
1821  // The method data pointer was incremented already during
1822  // call profiling. We have to restore the mdp for the current bcp.
1823  if (ProfileInterpreter) {
1824    __ set_method_data_pointer_for_bcp();
1825  }
1826
1827#if INCLUDE_JVMTI
1828  { Label L_done;
1829
1830    __ ldub(Address(Lbcp, 0), G1_scratch);  // Load current bytecode
1831    __ cmp_and_br_short(G1_scratch, Bytecodes::_invokestatic, Assembler::notEqual, Assembler::pn, L_done);
1832
1833    // The member name argument must be restored if _invokestatic is re-executed after a PopFrame call.
1834    // Detect such a case in the InterpreterRuntime function and return the member name argument, or NULL.
1835
1836    __ call_VM(G1_scratch, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), I0, Lmethod, Lbcp);
1837
1838    __ br_null(G1_scratch, false, Assembler::pn, L_done);
1839    __ delayed()->nop();
1840
1841    __ st_ptr(G1_scratch, Lesp, wordSize);
1842    __ bind(L_done);
1843  }
1844#endif // INCLUDE_JVMTI
1845
1846  // Resume bytecode interpretation at the current bcp
1847  __ dispatch_next(vtos);
1848  // end of JVMTI PopFrame support
1849
1850  Interpreter::_remove_activation_entry = __ pc();
1851
1852  // preserve exception over this code sequence (remove activation calls the vm, but oopmaps are not correct here)
1853  __ pop_ptr(Oexception);                                  // get exception
1854
1855  // Intel has the following comment:
1856  //// remove the activation (without doing throws on illegalMonitorExceptions)
1857  // They remove the activation without checking for bad monitor state.
1858  // %%% We should make sure this is the right semantics before implementing.
1859
1860  __ set_vm_result(Oexception);
1861  __ unlock_if_synchronized_method(vtos, /* throw_monitor_exception */ false);
1862
1863  __ notify_method_exit(false, vtos, InterpreterMacroAssembler::SkipNotifyJVMTI);
1864
1865  __ get_vm_result(Oexception);
1866  __ verify_oop(Oexception);
1867
1868  const int return_reg_adjustment = frame::pc_return_offset;
1869  Address issuing_pc_addr(I7, return_reg_adjustment);
1870
1871  // We are done with this activation frame; find out where to go next.
1872  // The continuation point will be an exception handler, which expects
1873  // the following registers set up:
1874  //
1875  // Oexception: exception
1876  // Oissuing_pc: the PC of the call (local to the caller) that threw the exception
1877  // Other On: garbage
1878  // In/Ln:  the contents of the caller's register window
1879  //
1880  // We do the required restore at the last possible moment, because we
1881  // need to preserve some state across a runtime call.
1882  // (Remember that the caller activation is unknown--it might not be
1883  // interpreted, so things like Lscratch are useless in the caller.)
1884
1885  // Although the Intel version uses call_C, we can use the more
1886  // compact call_VM.  (The only real difference on SPARC is a
1887  // harmlessly ignored [re]set_last_Java_frame, compared with
1888  // the Intel code which lacks this.)
1889  __ mov(Oexception,      Oexception ->after_save());  // get exception in I0 so it will be on O0 after restore
1890  __ add(issuing_pc_addr, Oissuing_pc->after_save());  // likewise set I1 to a value local to the caller
1891  __ super_call_VM_leaf(L7_thread_cache,
1892                        CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address),
1893                        G2_thread, Oissuing_pc->after_save());
1894
1895  // The caller's SP was adjusted upon method entry to accommodate
1896  // the callee's non-argument locals. Undo that adjustment.
1897  __ JMP(O0, 0);                         // return exception handler in caller
1898  __ delayed()->restore(I5_savedSP, G0, SP);
1899
1900  // (same old exception object is already in Oexception; see above)
1901  // Note that an "issuing PC" is actually the next PC after the call
1902}
1903
1904
1905//
1906// JVMTI ForceEarlyReturn support
1907//
1908
1909address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {
1910  address entry = __ pc();
1911
1912  __ empty_expression_stack();
1913  __ load_earlyret_value(state);
1914
1915  __ ld_ptr(G2_thread, JavaThread::jvmti_thread_state_offset(), G3_scratch);
1916  Address cond_addr(G3_scratch, JvmtiThreadState::earlyret_state_offset());
1917
1918  // Clear the earlyret state
1919  __ stw(G0 /* JvmtiThreadState::earlyret_inactive */, cond_addr);
1920
1921  __ remove_activation(state,
1922                       /* throw_monitor_exception */ false,
1923                       /* install_monitor_exception */ false);
1924
1925  // The caller's SP was adjusted upon method entry to accommodate
1926  // the callee's non-argument locals. Undo that adjustment.
1927  __ ret();                             // return to caller
1928  __ delayed()->restore(I5_savedSP, G0, SP);
1929
1930  return entry;
1931} // end of JVMTI ForceEarlyReturn support
1932
1933
1934//------------------------------------------------------------------------------------------------------------------------
1935// Helper for vtos entry point generation
1936
1937void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t, address& bep, address& cep, address& sep, address& aep, address& iep, address& lep, address& fep, address& dep, address& vep) {
1938  assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
1939  Label L;
1940  aep = __ pc(); __ push_ptr(); __ ba_short(L);
1941  fep = __ pc(); __ push_f();   __ ba_short(L);
1942  dep = __ pc(); __ push_d();   __ ba_short(L);
1943  lep = __ pc(); __ push_l();   __ ba_short(L);
1944  iep = __ pc(); __ push_i();
1945  bep = cep = sep = iep;                        // there aren't any
1946  vep = __ pc(); __ bind(L);                    // fall through
1947  generate_and_dispatch(t);
1948}
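
// In other words (illustrative sketch): each tos-state entry point flushes the
// value cached in the tos register(s) onto the expression stack and falls
// through to the vtos entry, so each vtos template is generated only once:
//
//   aep: push_ptr  --+
//   fep: push_f    --+
//   dep: push_d    --+-->  vep: generate_and_dispatch(t)
//   lep: push_l    --+
//   iep: push_i    --+    (bep/cep/sep alias iep)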
1949
1950// --------------------------------------------------------------------------------
1951
1952// Non-product code
1953#ifndef PRODUCT
1954address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
1955  address entry = __ pc();
1956
1957  __ push(state);
1958  __ mov(O7, Lscratch); // protect return address within interpreter
1959
1960  // Pass a 0 (not used on sparc) and the top of stack to the bytecode tracer
1961  __ mov( Otos_l2, G3_scratch );
1962  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::trace_bytecode), G0, Otos_l1, G3_scratch);
1963  __ mov(Lscratch, O7); // restore return address
1964  __ pop(state);
1965  __ retl();
1966  __ delayed()->nop();
1967
1968  return entry;
1969}
1970
1971
1972// helpers for generate_and_dispatch
1973
1974void TemplateInterpreterGenerator::count_bytecode() {
1975  __ inc_counter(&BytecodeCounter::_counter_value, G3_scratch, G4_scratch);
1976}
1977
1978
1979void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
1980  __ inc_counter(&BytecodeHistogram::_counters[t->bytecode()], G3_scratch, G4_scratch);
1981}
1982
1983
1984void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
1985  AddressLiteral index   (&BytecodePairHistogram::_index);
1986  AddressLiteral counters((address) &BytecodePairHistogram::_counters);
1987
1988  // get index, shift out old bytecode, bring in new bytecode, and store it
1989  // _index = (_index >> log2_number_of_codes) |
1990  //          (bytecode << log2_number_of_codes);
1991
1992  __ load_contents(index, G4_scratch);
1993  __ srl( G4_scratch, BytecodePairHistogram::log2_number_of_codes, G4_scratch );
1994  __ set( ((int)t->bytecode()) << BytecodePairHistogram::log2_number_of_codes,  G3_scratch );
1995  __ or3( G3_scratch,  G4_scratch, G4_scratch );
1996  __ store_contents(G4_scratch, index, G3_scratch);
1997
1998  // bump bucket contents
1999  // _counters[_index] ++;
2000
2001  __ set(counters, G3_scratch);                       // loads into G3_scratch
2002  __ sll( G4_scratch, LogBytesPerWord, G4_scratch );  // Index is word address
2003  __ add (G3_scratch, G4_scratch, G3_scratch);        // Add in index
2004  __ ld (G3_scratch, 0, G4_scratch);
2005  __ inc (G4_scratch);
2006  __ st (G4_scratch, 0, G3_scratch);
2007}
2008
2009
2010void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
2011  // Call a little run-time stub to avoid blow-up for each bytecode.
2012  // The run-time stub saves the right registers, depending on
2013  // the tosca in-state for the given template.
2014  address entry = Interpreter::trace_code(t->tos_in());
2015  guarantee(entry != NULL, "entry must have been generated");
2016  __ call(entry, relocInfo::none);
2017  __ delayed()->nop();
2018}
2019
2020
2021void TemplateInterpreterGenerator::stop_interpreter_at() {
2022  AddressLiteral counter(&BytecodeCounter::_counter_value);
2023  __ load_contents(counter, G3_scratch);
2024  AddressLiteral stop_at(&StopInterpreterAt);
2025  __ load_ptr_contents(stop_at, G4_scratch);
2026  __ cmp(G3_scratch, G4_scratch);
2027  __ breakpoint_trap(Assembler::equal, Assembler::icc);
2028}
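
// Usage note (debug builds): the trap above fires once the global bytecode
// counter reaches the value of the develop flag StopInterpreterAt, e.g.
// (sketch):
//
//   java -XX:StopInterpreterAt=1000000 ...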
2029#endif // not PRODUCT
2030