/*
 * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterGenerator.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/templateTable.hpp"
#include "oops/arrayOop.hpp"
#include "oops/methodData.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/arguments.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/timer.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/debug.hpp"
#include "utilities/macros.hpp"

#define __ _masm->

#ifndef CC_INTERP

// Global Register Names
static const Register rbcp     = LP64_ONLY(r13) NOT_LP64(rsi);
static const Register rlocals  = LP64_ONLY(r14) NOT_LP64(rdi);

const int method_offset = frame::interpreter_frame_method_offset * wordSize;
const int bcp_offset    = frame::interpreter_frame_bcp_offset    * wordSize;
const int locals_offset = frame::interpreter_frame_locals_offset * wordSize;
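// (byte offsets from rbp into the fixed part of an interpreter frame;
//  the slot layout itself is pictured in frame_x86.hpp)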

//-----------------------------------------------------------------------------

address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
  address entry = __ pc();

#ifdef ASSERT
  {
    Label L;
    __ lea(rax, Address(rbp,
                        frame::interpreter_frame_monitor_block_top_offset *
                        wordSize));
    __ cmpptr(rax, rsp); // rax = maximal rsp for current rbp (stack
                         // grows downward)
    __ jcc(Assembler::aboveEqual, L); // check if frame is complete
    __ stop("interpreter frame not set up");
77238405Sjkim    __ bind(L);
7895967Speter  }
7995967Speter#endif // ASSERT
80238405Sjkim  // Restore bcp under the assumption that the current frame is still
8195967Speter  // interpreted
82238405Sjkim  __ restore_bcp();
8395967Speter
84238405Sjkim  // expression stack must be empty before entering the VM if an
85238405Sjkim  // exception happened
86127326Smarkm  __ empty_expression_stack();
87238405Sjkim  // throw exception
88238405Sjkim  __ call_VM(noreg,
89238405Sjkim             CAST_FROM_FN_PTR(address,
90238405Sjkim                              InterpreterRuntime::throw_StackOverflowError));
91238405Sjkim  return entry;
92238405Sjkim}

address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler(
        const char* name) {
  address entry = __ pc();
  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();
  // setup parameters
  // ??? convention: expect aberrant index in register ebx
  Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
  __ lea(rarg, ExternalAddress((address)name));
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::
                              throw_ArrayIndexOutOfBoundsException),
             rarg, rbx);
  return entry;
}

address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
  address entry = __ pc();

  // object is at TOS
  Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
  __ pop(rarg);

  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();

  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::
                              throw_ClassCastException),
             rarg);
  return entry;
}

address TemplateInterpreterGenerator::generate_exception_handler_common(
        const char* name, const char* message, bool pass_oop) {
  assert(!pass_oop || message == NULL, "either oop or message but not both");
  address entry = __ pc();

  Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
  Register rarg2 = NOT_LP64(rbx) LP64_ONLY(c_rarg2);

  if (pass_oop) {
    // object is at TOS
    __ pop(rarg2);
  }
  // expression stack must be empty before entering the VM if an
  // exception happened
  __ empty_expression_stack();
  // setup parameters
  __ lea(rarg, ExternalAddress((address)name));
  if (pass_oop) {
    __ call_VM(rax, CAST_FROM_FN_PTR(address,
                                     InterpreterRuntime::
                                     create_klass_exception),
               rarg, rarg2);
  } else {
    // It's kind of lame that ExternalAddress can't take NULL, because
    // external_word_Relocation will assert.
    if (message != NULL) {
      __ lea(rarg2, ExternalAddress((address)message));
    } else {
      __ movptr(rarg2, NULL_WORD);
    }
    __ call_VM(rax,
               CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception),
               rarg, rarg2);
  }
  // throw exception
  __ jump(ExternalAddress(Interpreter::throw_exception_entry()));
  return entry;
}


address TemplateInterpreterGenerator::generate_continuation_for(TosState state) {
  address entry = __ pc();
  // NULL last_sp until next java call
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
  __ dispatch_next(state);
  return entry;
}


address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, size_t index_size) {
  address entry = __ pc();

#ifndef _LP64
#ifdef COMPILER2
  // The FPU stack is clean if UseSSE >= 2 but must be cleaned in other cases
  if ((state == ftos && UseSSE < 1) || (state == dtos && UseSSE < 2)) {
    for (int i = 1; i < 8; i++) {
        __ ffree(i);
    }
  } else if (UseSSE < 2) {
    __ empty_FPU_stack();
  }
#endif // COMPILER2
  if ((state == ftos && UseSSE < 1) || (state == dtos && UseSSE < 2)) {
    __ MacroAssembler::verify_FPU(1, "generate_return_entry_for compiled");
  } else {
    __ MacroAssembler::verify_FPU(0, "generate_return_entry_for compiled");
  }

  if (state == ftos) {
    __ MacroAssembler::verify_FPU(UseSSE >= 1 ? 0 : 1, "generate_return_entry_for in interpreter");
  } else if (state == dtos) {
    __ MacroAssembler::verify_FPU(UseSSE >= 2 ? 0 : 1, "generate_return_entry_for in interpreter");
  }
#endif // _LP64

  // Restore stack bottom in case i2c adjusted stack
  __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  // and NULL it as marker that esp is now tos until next java call
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);

  __ restore_bcp();
  __ restore_locals();

  if (state == atos) {
    Register mdp = rbx;
    Register tmp = rcx;
    __ profile_return_type(mdp, rax, tmp);
  }

  const Register cache = rbx;
  const Register index = rcx;
  __ get_cache_and_index_at_bcp(cache, index, 1, index_size);

  const Register flags = cache;
  __ movl(flags, Address(cache, index, Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
  __ andl(flags, ConstantPoolCacheEntry::parameter_size_mask);
  __ lea(rsp, Address(rsp, flags, Interpreter::stackElementScale()));
  __ dispatch_next(state, step);

  return entry;
}


address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, int step) {
  address entry = __ pc();

#ifndef _LP64
  if (state == ftos) {
    __ MacroAssembler::verify_FPU(UseSSE >= 1 ? 0 : 1, "generate_deopt_entry_for in interpreter");
  } else if (state == dtos) {
    __ MacroAssembler::verify_FPU(UseSSE >= 2 ? 0 : 1, "generate_deopt_entry_for in interpreter");
  }
#endif // _LP64

  // NULL last_sp until next java call
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
  __ restore_bcp();
  __ restore_locals();
  const Register thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
  NOT_LP64(__ get_thread(thread));
#if INCLUDE_JVMCI
  // Check if we need to take lock at entry of synchronized method.
  if (UseJVMCICompiler) {
    Label L;
    __ cmpb(Address(thread, JavaThread::pending_monitorenter_offset()), 0);
    __ jcc(Assembler::zero, L);
    // Clear flag.
    __ movb(Address(thread, JavaThread::pending_monitorenter_offset()), 0);
    // Satisfy calling convention for lock_method().
    __ get_method(rbx);
    // Take lock.
    lock_method();
    __ bind(L);
  }
#endif
  // handle exceptions
  {
    Label L;
    __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t) NULL_WORD);
    __ jcc(Assembler::zero, L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }
  __ dispatch_next(state, step);
  return entry;
}

address TemplateInterpreterGenerator::generate_result_handler_for(
        BasicType type) {
  address entry = __ pc();
  switch (type) {
  case T_BOOLEAN: __ c2bool(rax);            break;
#ifndef _LP64
  case T_CHAR   : __ andptr(rax, 0xFFFF);    break;
#else
  case T_CHAR   : __ movzwl(rax, rax);       break;
#endif // _LP64
  case T_BYTE   : __ sign_extend_byte(rax);  break;
  case T_SHORT  : __ sign_extend_short(rax); break;
  case T_INT    : /* nothing to do */        break;
  case T_LONG   : /* nothing to do */        break;
  case T_VOID   : /* nothing to do */        break;
#ifndef _LP64
  case T_DOUBLE :
  case T_FLOAT  :
    { const Register t = InterpreterRuntime::SignatureHandlerGenerator::temp();
      __ pop(t);                            // remove return address first
      // Must return a result for interpreter or compiler. In SSE
      // mode, results are returned in xmm0 and the FPU stack must
      // be empty.
      if (type == T_FLOAT && UseSSE >= 1) {
        // Load ST0
        __ fld_d(Address(rsp, 0));
        // Store as float and empty fpu stack
        __ fstp_s(Address(rsp, 0));
        // and reload
        __ movflt(xmm0, Address(rsp, 0));
      } else if (type == T_DOUBLE && UseSSE >= 2) {
        __ movdbl(xmm0, Address(rsp, 0));
      } else {
        // restore ST0
        __ fld_d(Address(rsp, 0));
      }
      // and pop the temp
      __ addptr(rsp, 2 * wordSize);
      __ push(t);                           // restore return address
    }
    break;
#else
  case T_FLOAT  : /* nothing to do */        break;
  case T_DOUBLE : /* nothing to do */        break;
#endif // _LP64

  case T_OBJECT :
    // retrieve result from frame
    __ movptr(rax, Address(rbp, frame::interpreter_frame_oop_temp_offset*wordSize));
    // and verify it
    __ verify_oop(rax);
    break;
  default       : ShouldNotReachHere();
  }
  __ ret(0);                                   // return from result handler
  return entry;
}

address TemplateInterpreterGenerator::generate_safept_entry_for(
        TosState state,
        address runtime_entry) {
  address entry = __ pc();
  __ push(state);
  __ call_VM(noreg, runtime_entry);
  __ dispatch_via(vtos, Interpreter::_normal_table.table_for(vtos));
  return entry;
}



// Helpers for commoning out cases in the various types of method entries.
//
354238405Sjkim
35595967Speter
35695967Speter// increment invocation count & check for overflow
357238405Sjkim//
35895967Speter// Note: checking for negative value instead of overflow
359238405Sjkim//       so we have a 'sticky' overflow test
36095967Speter//
361238405Sjkim// rbx: method
362238405Sjkim// rcx: invocation counter
36395967Speter//
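// (The counter word is only ever incremented, so once its sign bit is set
// it stays set until the counter is explicitly reset; a single signed test
// on entry therefore cannot miss an earlier overflow.)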
void InterpreterGenerator::generate_counter_incr(
        Label* overflow,
        Label* profile_method,
        Label* profile_method_continue) {
  Label done;
  // Note: In tiered we increment either counters in Method* or in MDO depending if we're profiling or not.
  if (TieredCompilation) {
    int increment = InvocationCounter::count_increment;
    Label no_mdo;
    if (ProfileInterpreter) {
      // Are we profiling?
      __ movptr(rax, Address(rbx, Method::method_data_offset()));
      __ testptr(rax, rax);
      __ jccb(Assembler::zero, no_mdo);
      // Increment counter in the MDO
      const Address mdo_invocation_counter(rax, in_bytes(MethodData::invocation_counter_offset()) +
                                                in_bytes(InvocationCounter::counter_offset()));
      const Address mask(rax, in_bytes(MethodData::invoke_mask_offset()));
      __ increment_mask_and_jump(mdo_invocation_counter, increment, mask, rcx, false, Assembler::zero, overflow);
      __ jmp(done);
    }
    __ bind(no_mdo);
    // Increment counter in MethodCounters
    const Address invocation_counter(rax,
                  MethodCounters::invocation_counter_offset() +
                  InvocationCounter::counter_offset());
    __ get_method_counters(rbx, rax, done);
    const Address mask(rax, in_bytes(MethodCounters::invoke_mask_offset()));
    __ increment_mask_and_jump(invocation_counter, increment, mask, rcx,
                               false, Assembler::zero, overflow);
    __ bind(done);
  } else { // not TieredCompilation
    const Address backedge_counter(rax,
                  MethodCounters::backedge_counter_offset() +
                  InvocationCounter::counter_offset());
    const Address invocation_counter(rax,
                  MethodCounters::invocation_counter_offset() +
                  InvocationCounter::counter_offset());

    __ get_method_counters(rbx, rax, done);

    if (ProfileInterpreter) {
      __ incrementl(Address(rax,
              MethodCounters::interpreter_invocation_counter_offset()));
    }
    // Update standard invocation counters
    __ movl(rcx, invocation_counter);
    __ incrementl(rcx, InvocationCounter::count_increment);
    __ movl(invocation_counter, rcx); // save invocation count

    __ movl(rax, backedge_counter);   // load backedge counter
    __ andl(rax, InvocationCounter::count_mask_value); // mask out the status bits

    __ addl(rcx, rax);                // add both counters

    // profile_method is non-NULL only for interpreted methods,
    // so profile_method != NULL implies !native_call

    if (ProfileInterpreter && profile_method != NULL) {
      // Test to see if we should create a method data oop
      __ movptr(rax, Address(rbx, Method::method_counters_offset()));
      __ cmp32(rcx, Address(rax, in_bytes(MethodCounters::interpreter_profile_limit_offset())));
      __ jcc(Assembler::less, *profile_method_continue);

      // if no method data exists, go to profile_method
      __ test_method_data_pointer(rax, *profile_method);
    }

    __ movptr(rax, Address(rbx, Method::method_counters_offset()));
    __ cmp32(rcx, Address(rax, in_bytes(MethodCounters::interpreter_invocation_limit_offset())));
    __ jcc(Assembler::aboveEqual, *overflow);
    __ bind(done);
  }
}

void InterpreterGenerator::generate_counter_overflow(Label* do_continue) {

  // Asm interpreter on entry
  // r14/rdi - locals
  // r13/rsi - bcp
  // rbx - method
  // rdx - cpool --- DOES NOT APPEAR TO BE TRUE
  // rbp - interpreter frame

  // On return (i.e. jump to entry_point) [ back to invocation of interpreter ]
  // Everything as it was on entry
  // rdx is not restored. Doesn't appear to really be set.

  // InterpreterRuntime::frequency_counter_overflow takes two
  // arguments, the first (thread) is passed by call_VM, the second
  // indicates if the counter overflow occurs at a backwards branch
  // (NULL bcp).  We pass zero for it.  The call returns the address
  // of the verified entry point for the method or NULL if the
  // compilation did not complete (either went background or bailed
  // out).
  Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
  __ movl(rarg, 0);
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::frequency_counter_overflow),
             rarg);

  __ movptr(rbx, Address(rbp, method_offset));   // restore Method*
  // Preserve invariant that r13/r14 contain bcp/locals of sender frame
  // and jump to the interpreted entry.
  __ jmp(*do_continue, relocInfo::none);
}

// See if we've got enough room on the stack for locals plus overhead.
// The expression stack grows down incrementally, so the normal guard
// page mechanism will work for that.
//
// NOTE: The additional locals are also always pushed (this wasn't
// obvious in generate_fixed_frame), so the guard should work for them
// too.
//
// Args:
//      rdx: number of additional locals this frame needs (what we must check)
//      rbx: Method*
//
// Kills:
//      rax
void InterpreterGenerator::generate_stack_overflow_check(void) {

  // monitor entry size: see picture of stack in frame_x86.hpp
  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;

  // total overhead size: entry_size + (saved rbp through expr stack
  // bottom).  be sure to change this if you add/subtract anything
  // to/from the overhead area
  const int overhead_size =
    -(frame::interpreter_frame_initial_sp_offset * wordSize) + entry_size;
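  // (interpreter_frame_initial_sp_offset is negative, so negating it yields
  //  the positive distance in bytes from the saved rbp down to the initial
  //  expression stack bottom; one monitor entry is added on top of that)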

  const int page_size = os::vm_page_size();

  Label after_frame_check;

  // see if the frame is greater than one page in size. If so,
  // then we need to verify there is enough stack space remaining
  // for the additional locals.
  __ cmpl(rdx, (page_size - overhead_size) / Interpreter::stackElementSize);
  __ jcc(Assembler::belowEqual, after_frame_check);

  // compute rsp as if this were going to be the last frame on
  // the stack before the red zone

  Label after_frame_check_pop;
  const Register thread = NOT_LP64(rsi) LP64_ONLY(r15_thread);
#ifndef _LP64
  __ push(thread);
  __ get_thread(thread);
#endif

  const Address stack_base(thread, Thread::stack_base_offset());
  const Address stack_size(thread, Thread::stack_size_offset());

  // locals + overhead, in bytes
  __ mov(rax, rdx);
  __ shlptr(rax, Interpreter::logStackElementSize);  // 2 slots per parameter.
  __ addptr(rax, overhead_size);

#ifdef ASSERT
  Label stack_base_okay, stack_size_okay;
  // verify that thread stack base is non-zero
  __ cmpptr(stack_base, (int32_t)NULL_WORD);
  __ jcc(Assembler::notEqual, stack_base_okay);
  __ stop("stack base is zero");
  __ bind(stack_base_okay);
  // verify that thread stack size is non-zero
  __ cmpptr(stack_size, 0);
  __ jcc(Assembler::notEqual, stack_size_okay);
  __ stop("stack size is zero");
  __ bind(stack_size_okay);
#endif

  // Add stack base to locals and subtract stack size
  __ addptr(rax, stack_base);
  __ subptr(rax, stack_size);

  // Use the bigger size for banging.
  const int max_bang_size = (int)MAX2(JavaThread::stack_shadow_zone_size(),
                                      JavaThread::stack_guard_zone_size());

  // add in the red and yellow zone sizes
  __ addptr(rax, max_bang_size);

  // check against the current stack bottom
  __ cmpptr(rsp, rax);

  __ jcc(Assembler::above, after_frame_check_pop);
  NOT_LP64(__ pop(rsi));  // get saved bcp

  // Restore sender's sp as SP. This is necessary if the sender's
  // frame is an extended compiled frame (see gen_c2i_adapter())
  // and safer anyway in case of JSR292 adaptations.

  __ pop(rax); // return address must be moved if SP is changed
  __ mov(rsp, rbcp);
  __ push(rax);

  // Note: the restored frame is not necessarily interpreted.
  // Use the shared runtime version of the StackOverflowError.
  assert(StubRoutines::throw_StackOverflowError_entry() != NULL, "stub not yet generated");
  __ jump(ExternalAddress(StubRoutines::throw_StackOverflowError_entry()));
  // all done with frame size check
  __ bind(after_frame_check_pop);
  NOT_LP64(__ pop(rsi));

  // all done with frame size check
  __ bind(after_frame_check);
}

// Allocate monitor and lock method (asm interpreter)
//
// Args:
//      rbx: Method*
//      r14/rdi: locals
//
// Kills:
//      rax
//      c_rarg0, c_rarg1, c_rarg2, c_rarg3, ...(param regs)
//      rscratch1, rscratch2 (scratch regs)
void TemplateInterpreterGenerator::lock_method() {
  // synchronize method
  const Address access_flags(rbx, Method::access_flags_offset());
  const Address monitor_block_top(
        rbp,
        frame::interpreter_frame_monitor_block_top_offset * wordSize);
  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;

#ifdef ASSERT
  {
    Label L;
    __ movl(rax, access_flags);
    __ testl(rax, JVM_ACC_SYNCHRONIZED);
    __ jcc(Assembler::notZero, L);
    __ stop("method doesn't need synchronization");
    __ bind(L);
  }
#endif // ASSERT

  // get synchronization object
  {
    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
    Label done;
    __ movl(rax, access_flags);
    __ testl(rax, JVM_ACC_STATIC);
    // get receiver (assume this is frequent case)
    __ movptr(rax, Address(rlocals, Interpreter::local_offset_in_bytes(0)));
    __ jcc(Assembler::zero, done);
    __ movptr(rax, Address(rbx, Method::const_offset()));
    __ movptr(rax, Address(rax, ConstMethod::constants_offset()));
    __ movptr(rax, Address(rax,
                           ConstantPool::pool_holder_offset_in_bytes()));
    __ movptr(rax, Address(rax, mirror_offset));

#ifdef ASSERT
    {
      Label L;
      __ testptr(rax, rax);
      __ jcc(Assembler::notZero, L);
      __ stop("synchronization object is NULL");
      __ bind(L);
    }
#endif // ASSERT

    __ bind(done);
  }

  // add space for monitor & lock
  __ subptr(rsp, entry_size); // add space for a monitor entry
  __ movptr(monitor_block_top, rsp);  // set new monitor block top
  // store object
  __ movptr(Address(rsp, BasicObjectLock::obj_offset_in_bytes()), rax);
  const Register lockreg = NOT_LP64(rdx) LP64_ONLY(c_rarg1);
  __ movptr(lockreg, rsp); // object address
  __ lock_object(lockreg);
}

// Generate a fixed interpreter frame. This is identical setup for
// interpreted methods and for native methods hence the shared code.
//
// Args:
//      rax: return address
//      rbx: Method*
//      r14/rdi: pointer to locals
//      r13/rsi: sender sp
//      rdx: cp cache
void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
  // initialize fixed part of activation frame
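  //
  // The stack grows downward; after the pushes below the frame looks like:
  //
  //   [ return address      ]  <-- rax on entry
  //   [ saved rbp           ]  <-- rbp
  //   [ sender sp           ]
  //   [ last_sp (NULL)      ]
  //   [ Method*             ]
  //   [ mdp or 0            ]
  //   [ ConstantPoolCache*  ]
  //   [ locals pointer      ]
  //   [ bcp (0 if native)   ]
  //   [ expr. stack bottom  ]  <-- rsp; the slot points to itself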
  __ push(rax);        // save return address
  __ enter();          // save old & set new rbp
  __ push(rbcp);       // set sender sp
  __ push((int)NULL_WORD); // leave last_sp as null
  __ movptr(rbcp, Address(rbx, Method::const_offset()));      // get ConstMethod*
  __ lea(rbcp, Address(rbcp, ConstMethod::codes_offset())); // get codebase
  __ push(rbx);        // save Method*
  if (ProfileInterpreter) {
    Label method_data_continue;
    __ movptr(rdx, Address(rbx, in_bytes(Method::method_data_offset())));
    __ testptr(rdx, rdx);
    __ jcc(Assembler::zero, method_data_continue);
    __ addptr(rdx, in_bytes(MethodData::data_offset()));
    __ bind(method_data_continue);
    __ push(rdx);      // set the mdp (method data pointer)
  } else {
    __ push(0);
  }

  __ movptr(rdx, Address(rbx, Method::const_offset()));
  __ movptr(rdx, Address(rdx, ConstMethod::constants_offset()));
  __ movptr(rdx, Address(rdx, ConstantPool::cache_offset_in_bytes()));
  __ push(rdx); // set constant pool cache
  __ push(rlocals); // set locals pointer
  if (native_call) {
    __ push(0); // no bcp
  } else {
    __ push(rbcp); // set bcp
  }
  __ push(0); // reserve word for pointer to expression stack bottom
  __ movptr(Address(rsp, 0), rsp); // set expression stack bottom
}

// End of helpers

// Method entry for java.lang.ref.Reference.get.
address InterpreterGenerator::generate_Reference_get_entry(void) {
#if INCLUDE_ALL_GCS
  // Code: _aload_0, _getfield, _areturn
  // parameter size = 1
  //
  // The code that gets generated by this routine is split into 2 parts:
  //    1. The "intrinsified" code for G1 (or any SATB based GC),
  //    2. The slow path - which is an expansion of the regular method entry.
  //
  // Notes:-
  // * In the G1 code we do not check whether we need to block for
  //   a safepoint. If G1 is enabled then we must execute the specialized
  //   code for Reference.get (except when the Reference object is null)
  //   so that we can log the value in the referent field with an SATB
  //   update buffer.
  //   If the code for the getfield template is modified so that the
  //   G1 pre-barrier code is executed when the current method is
  //   Reference.get() then going through the normal method entry
  //   will be fine.
  // * The G1 code can, however, check the receiver object (the instance
  //   of java.lang.Reference) and jump to the slow path if null. If the
  //   Reference object is null then we obviously cannot fetch the referent
  //   and so we don't need to call the G1 pre-barrier. Thus we can use the
  //   regular method entry code to generate the NPE.
  //
  // rbx: Method*

  // r13: senderSP must preserve for slow path, set SP to it on fast path

  address entry = __ pc();

  const int referent_offset = java_lang_ref_Reference::referent_offset;
  guarantee(referent_offset > 0, "referent offset not initialized");

  if (UseG1GC) {
    Label slow_path;
    // rbx: method

    // Check if local 0 != NULL
    // If the receiver is null then it is OK to jump to the slow path.
    __ movptr(rax, Address(rsp, wordSize));

    __ testptr(rax, rax);
    __ jcc(Assembler::zero, slow_path);

    // rax: local 0
    // rbx: method (but can be used as scratch now)
    // rdx: scratch
    // rdi: scratch

    // Preserve the sender sp in case the pre-barrier
    // calls the runtime
    NOT_LP64(__ push(rsi));

    // Generate the G1 pre-barrier code to log the value of
    // the referent field in an SATB buffer.

    // Load the value of the referent field.
    const Address field_address(rax, referent_offset);
    __ load_heap_oop(rax, field_address);

    const Register sender_sp = NOT_LP64(rsi) LP64_ONLY(r13);
    const Register thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
    NOT_LP64(__ get_thread(thread));

    // Generate the G1 pre-barrier code to log the value of
    // the referent field in an SATB buffer.
    __ g1_write_barrier_pre(noreg /* obj */,
                            rax /* pre_val */,
                            thread /* thread */,
                            rbx /* tmp */,
                            true /* tosca_live */,
                            true /* expand_call */);

    // _areturn
    NOT_LP64(__ pop(rsi));      // get sender sp
    __ pop(rdi);                // get return address
    __ mov(rsp, sender_sp);     // set sp to sender sp
    __ jmp(rdi);
    __ ret(0);

    // generate a vanilla interpreter entry as the slow path
    __ bind(slow_path);
    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals));
    return entry;
  }
#endif // INCLUDE_ALL_GCS

  // If G1 is not enabled then attempt to go through the accessor entry point
  // Reference.get is an accessor
  return NULL;
}

// Interpreter stub for calling a native method. (asm interpreter)
// This sets up a somewhat different looking stack for calling the
// native method than the typical interpreter frame setup.
address InterpreterGenerator::generate_native_entry(bool synchronized) {
  // determine code generation flags
  bool inc_counter  = UseCompiler || CountCompiledCalls || LogTouchedMethods;

  // rbx: Method*
  // rbcp: sender sp

  address entry_point = __ pc();

  const Address constMethod       (rbx, Method::const_offset());
  const Address access_flags      (rbx, Method::access_flags_offset());
  const Address size_of_parameters(rcx, ConstMethod::
                                        size_of_parameters_offset());


  // get parameter size (always needed)
  __ movptr(rcx, constMethod);
  __ load_unsigned_short(rcx, size_of_parameters);

  // native calls don't need the stack size check since they have no
  // expression stack and the arguments are already on the stack and
  // we only add a handful of words to the stack

  // rbx: Method*
  // rcx: size of parameters
  // rbcp: sender sp
  __ pop(rax);                                       // get return address

  // for natives the size of locals is zero

  // compute beginning of parameters
  __ lea(rlocals, Address(rsp, rcx, Interpreter::stackElementScale(), -wordSize));

  // add 2 zero-initialized slots for native calls
  // initialize result_handler slot
  __ push((int) NULL_WORD);
  // slot for oop temp
  // (static native method holder mirror/jni oop result)
  __ push((int) NULL_WORD);

  // initialize fixed part of activation frame
  generate_fixed_frame(true);

  // make sure method is native & not abstract
#ifdef ASSERT
  __ movl(rax, access_flags);
  {
    Label L;
    __ testl(rax, JVM_ACC_NATIVE);
    __ jcc(Assembler::notZero, L);
    __ stop("tried to execute non-native method as native");
    __ bind(L);
  }
  {
    Label L;
    __ testl(rax, JVM_ACC_ABSTRACT);
    __ jcc(Assembler::zero, L);
    __ stop("tried to execute abstract method in interpreter");
    __ bind(L);
  }
#endif

  // Since at this point in the method invocation the exception handler
  // would try to exit the monitor of synchronized methods which hasn't
  // been entered yet, we set the thread local variable
  // _do_not_unlock_if_synchronized to true. The remove_activation will
  // check this flag.

  const Register thread1 = NOT_LP64(rax) LP64_ONLY(r15_thread);
  NOT_LP64(__ get_thread(thread1));
  const Address do_not_unlock_if_synchronized(thread1,
        in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
  __ movbool(do_not_unlock_if_synchronized, true);

  // increment invocation count & check for overflow
  Label invocation_counter_overflow;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow, NULL, NULL);
  }

  Label continue_after_compile;
  __ bind(continue_after_compile);
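  // (the invocation_counter_overflow path is bound near the end of this
  //  stub; it calls generate_counter_overflow, which requests compilation
  //  and then jumps back to continue_after_compile)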

  bang_stack_shadow_pages(true);

  // reset the _do_not_unlock_if_synchronized flag
  NOT_LP64(__ get_thread(thread1));
  __ movbool(do_not_unlock_if_synchronized, false);

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so method is not locked if overflows.
  if (synchronized) {
    lock_method();
  } else {
    // no synchronization necessary
#ifdef ASSERT
    {
      Label L;
      __ movl(rax, access_flags);
      __ testl(rax, JVM_ACC_SYNCHRONIZED);
      __ jcc(Assembler::zero, L);
      __ stop("method needs synchronization");
      __ bind(L);
    }
#endif
  }

  // start execution
#ifdef ASSERT
  {
    Label L;
    const Address monitor_block_top(rbp,
                 frame::interpreter_frame_monitor_block_top_offset * wordSize);
    __ movptr(rax, monitor_block_top);
    __ cmpptr(rax, rsp);
    __ jcc(Assembler::equal, L);
    __ stop("broken stack frame setup in interpreter");
    __ bind(L);
  }
#endif

  // jvmti support
  __ notify_method_entry();

  // work registers
  const Register method = rbx;
  const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread);
  const Register t      = NOT_LP64(rcx) LP64_ONLY(r11);

  // allocate space for parameters
  __ get_method(method);
  __ movptr(t, Address(method, Method::const_offset()));
  __ load_unsigned_short(t, Address(t, ConstMethod::size_of_parameters_offset()));

#ifndef _LP64
  __ shlptr(t, Interpreter::logStackElementSize);
  __ addptr(t, 2*wordSize);     // allocate two more slots for JNIEnv and possible mirror
  __ subptr(rsp, t);
  __ andptr(rsp, -(StackAlignmentInBytes)); // gcc needs 16 byte aligned stacks to do XMM intrinsics
#else
  __ shll(t, Interpreter::logStackElementSize);

  __ subptr(rsp, t);
  __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
  __ andptr(rsp, -16); // must be 16 byte boundary (see amd64 ABI)
#endif // _LP64
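  // rsp now points at the base of the outgoing native argument area.  On
  // 32-bit the first two stack slots are reserved for the JNIEnv* and (for
  // static methods) the mirror handle; on 64-bit those travel in
  // c_rarg0/c_rarg1 instead (see below).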

  // get signature handler
  {
    Label L;
    __ movptr(t, Address(method, Method::signature_handler_offset()));
    __ testptr(t, t);
    __ jcc(Assembler::notZero, L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::prepare_native_call),
               method);
    __ get_method(method);
    __ movptr(t, Address(method, Method::signature_handler_offset()));
    __ bind(L);
  }

  // call signature handler
  assert(InterpreterRuntime::SignatureHandlerGenerator::from() == rlocals,
         "adjust this code");
  assert(InterpreterRuntime::SignatureHandlerGenerator::to() == rsp,
         "adjust this code");
  assert(InterpreterRuntime::SignatureHandlerGenerator::temp() == NOT_LP64(t) LP64_ONLY(rscratch1),
         "adjust this code");

  // The generated handlers do not touch RBX (the method oop).
  // However, large signatures cannot be cached and are generated
  // each time here.  The slow-path generator can do a GC on return,
  // so we must reload it after the call.
  __ call(t);
  __ get_method(method);        // slow path can do a GC, reload RBX


  // result handler is in rax
  // set result handler
  __ movptr(Address(rbp,
                    (frame::interpreter_frame_result_handler_offset) * wordSize),
            rax);

  // pass mirror handle if static call
  {
    Label L;
    const int mirror_offset = in_bytes(Klass::java_mirror_offset());
    __ movl(t, Address(method, Method::access_flags_offset()));
    __ testl(t, JVM_ACC_STATIC);
    __ jcc(Assembler::zero, L);
    // get mirror
    __ movptr(t, Address(method, Method::const_offset()));
    __ movptr(t, Address(t, ConstMethod::constants_offset()));
    __ movptr(t, Address(t, ConstantPool::pool_holder_offset_in_bytes()));
    __ movptr(t, Address(t, mirror_offset));
    // copy mirror into activation frame
    __ movptr(Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize),
            t);
    // pass handle to mirror
#ifndef _LP64
    __ lea(t, Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize));
    __ movptr(Address(rsp, wordSize), t);
#else
    __ lea(c_rarg1,
           Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize));
#endif // _LP64
    __ bind(L);
  }
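  // (the JNI calling convention passes the declaring class, not the
  //  receiver, as the second argument of a static native method; it is
  //  passed as a handle into the frame's oop temp slot so the GC can find it)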

  // get native function entry point
  {
    Label L;
    __ movptr(rax, Address(method, Method::native_function_offset()));
    ExternalAddress unsatisfied(SharedRuntime::native_method_throw_unsatisfied_link_error_entry());
    __ cmpptr(rax, unsatisfied.addr());
    __ jcc(Assembler::notEqual, L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::prepare_native_call),
               method);
    __ get_method(method);
    __ movptr(rax, Address(method, Method::native_function_offset()));
    __ bind(L);
  }

  // pass JNIEnv
#ifndef _LP64
   __ get_thread(thread);
   __ lea(t, Address(thread, JavaThread::jni_environment_offset()));
   __ movptr(Address(rsp, 0), t);

   // set_last_Java_frame_before_call
   // It is enough that the pc() points into the right code segment.
   // It does not have to be the correct return pc.
   __ set_last_Java_frame(thread, noreg, rbp, __ pc());
#else
   __ lea(c_rarg0, Address(r15_thread, JavaThread::jni_environment_offset()));

   // It is enough that the pc() points into the right code
   // segment. It does not have to be the correct return pc.
   __ set_last_Java_frame(rsp, rbp, (address) __ pc());
#endif // _LP64

  // change thread state
#ifdef ASSERT
  {
    Label L;
    __ movl(t, Address(thread, JavaThread::thread_state_offset()));
    __ cmpl(t, _thread_in_Java);
    __ jcc(Assembler::equal, L);
    __ stop("Wrong thread state in native stub");
    __ bind(L);
  }
#endif

  // Change state to native

  __ movl(Address(thread, JavaThread::thread_state_offset()),
          _thread_in_native);

  // Call the native method.
  __ call(rax);
  // 32: result potentially in rdx:rax or ST0
  // 64: result potentially in rax or xmm0

  // Verify or restore cpu control state after JNI call
  __ restore_cpu_control_state_after_jni();

  // NOTE: The order of these pushes is known to frame::interpreter_frame_result
  // in order to extract the result of a method call. If the order of these
  // pushes change or anything else is added to the stack then the code in
  // interpreter_frame_result must also change.

#ifndef _LP64
  // save potential result in ST(0) & rdx:rax
  // (if result handler is the T_FLOAT or T_DOUBLE handler, result must be in ST0 -
  // the check is necessary to avoid potential Intel FPU overflow problems by saving/restoring 'empty' FPU registers)
  // It is safe to do this push because state is _thread_in_native and return address will be found
  // via _last_native_pc and not via _last_java_sp

  // NOTE: the order of these push(es) is known to frame::interpreter_frame_result.
  // If the order changes or anything else is added to the stack the code in
  // interpreter_frame_result will have to be changed.

  { Label L;
    Label push_double;
    ExternalAddress float_handler(AbstractInterpreter::result_handler(T_FLOAT));
    ExternalAddress double_handler(AbstractInterpreter::result_handler(T_DOUBLE));
    __ cmpptr(Address(rbp, (frame::interpreter_frame_oop_temp_offset + 1)*wordSize),
              float_handler.addr());
    __ jcc(Assembler::equal, push_double);
    __ cmpptr(Address(rbp, (frame::interpreter_frame_oop_temp_offset + 1)*wordSize),
              double_handler.addr());
    __ jcc(Assembler::notEqual, L);
    __ bind(push_double);
    __ push_d(); // FP values are returned using the FPU, so push FPU contents (even if UseSSE > 0).
    __ bind(L);
  }
#else
  __ push(dtos);
#endif // _LP64

  __ push(ltos);
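  // The (potential) native result is now saved on the expression stack across
  // the upcoming thread-state transition: integral/oop results via ltos
  // (rax, plus rdx on 32-bit), floating-point results via dtos (xmm0) on
  // 64-bit or via the FPU push above on 32-bit.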

  // change thread state
  NOT_LP64(__ get_thread(thread));
  __ movl(Address(thread, JavaThread::thread_state_offset()),
          _thread_in_native_trans);

  if (os::is_MP()) {
    if (UseMembar) {
      // Force this write out before the read below
      __ membar(Assembler::Membar_mask_bits(
           Assembler::LoadLoad | Assembler::LoadStore |
           Assembler::StoreLoad | Assembler::StoreStore));
    } else {
      // Write serialization page so VM thread can do a pseudo remote membar.
      // We use the current thread pointer to calculate a thread specific
      // offset to write to within the page. This minimizes bus traffic
      // due to cache line collision.
      __ serialize_memory(thread, rcx);
    }
  }

#ifndef _LP64
  if (AlwaysRestoreFPU) {
    //  Make sure the control word is correct.
    __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
  }
#endif // _LP64

  // check for safepoint operation in progress and/or pending suspend requests
  {
    Label Continue;
    __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
             SafepointSynchronize::_not_synchronized);

    Label L;
    __ jcc(Assembler::notEqual, L);
    __ cmpl(Address(thread, JavaThread::suspend_flags_offset()), 0);
    __ jcc(Assembler::equal, Continue);
    __ bind(L);

    // Don't use call_VM as it will see a possible pending exception
    // and forward it and never return here preventing us from
    // clearing _last_native_pc down below.  Also can't use
    // call_VM_leaf either as it will check to see if r13 & r14 are
    // preserved and correspond to the bcp/locals pointers. So we do a
    // runtime call by hand.
    //
#ifndef _LP64
    __ push(thread);
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address,
                                            JavaThread::check_special_condition_for_native_trans)));
    __ increment(rsp, wordSize);
    __ get_thread(thread);
#else
    __ mov(c_rarg0, r15_thread);
    __ mov(r12, rsp); // remember sp (can only use r12 if not using call_VM)
    __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
    __ andptr(rsp, -16); // align stack as required by ABI
    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)));
    __ mov(rsp, r12); // restore sp
    __ reinit_heapbase();
#endif // _LP64
    __ bind(Continue);
  }

  // change thread state
  __ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_Java);

  // reset_last_Java_frame
  __ reset_last_Java_frame(thread, true, true);

  // reset handle block
  __ movptr(t, Address(thread, JavaThread::active_handles_offset()));
  __ movl(Address(t, JNIHandleBlock::top_offset_in_bytes()), (int32_t)NULL_WORD);
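  // (zeroing the block's top offset frees every JNI local handle that was
  //  created during the native call)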

  // If the result is an oop, unbox it and store it in the frame where the
  // GC will see it, so the result handler can pick it up

  {
    Label no_oop, store_result;
    __ lea(t, ExternalAddress(AbstractInterpreter::result_handler(T_OBJECT)));
    __ cmpptr(t, Address(rbp, frame::interpreter_frame_result_handler_offset*wordSize));
    __ jcc(Assembler::notEqual, no_oop);
    // retrieve result
    __ pop(ltos);
    __ testptr(rax, rax);
    __ jcc(Assembler::zero, store_result);
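    // a non-NULL oop is returned by JNI as a handle; dereference it to
    // recover the raw oop before storing it in the frame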
1178    __ movptr(rax, Address(rax, 0));
1179    __ bind(store_result);
1180    __ movptr(Address(rbp, frame::interpreter_frame_oop_temp_offset*wordSize), rax);
1181    // keep stack depth as expected by pushing oop which will eventually be discarded
1182    __ push(ltos);
1183    __ bind(no_oop);
1184  }
1185
1186
1187  {
1188    Label no_reguard;
1189    __ cmpl(Address(thread, JavaThread::stack_guard_state_offset()),
1190            JavaThread::stack_guard_yellow_reserved_disabled);
1191    __ jcc(Assembler::notEqual, no_reguard);
1192
1193    __ pusha(); // XXX only save smashed registers
1194#ifndef _LP64
1195    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
1196    __ popa();
1197#else
1198    __ mov(r12, rsp); // remember sp (can only use r12 if not using call_VM)
1199    __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
1200    __ andptr(rsp, -16); // align stack as required by ABI
1201    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
1202    __ mov(rsp, r12); // restore sp
1203    __ popa(); // XXX only restore smashed registers
1204    __ reinit_heapbase();
1205#endif // _LP64
1206
1207    __ bind(no_reguard);
1208  }


  // The method register holds junk from the thread_in_native transition
  // until here.  We also can't call_VM until the bcp has been restored,
  // and we need the bcp for throwing the exception below, so get the
  // method now.
  __ get_method(method);

  // restore to have a legal interpreter frame, i.e., bci == 0 <=> code_base()
  __ movptr(rbcp, Address(method, Method::const_offset()));   // get ConstMethod*
  __ lea(rbcp, Address(rbcp, ConstMethod::codes_offset()));    // get codebase
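  // The two loads above make rbcp point at the first bytecode, which sits
  // right after the ConstMethod header, so bci == bcp - code_base() is 0
  // again and the frame is legal. A standalone sketch of that layout
  // (the 64-byte header size is an assumption, not the real ConstMethod):
#if 0  // standalone sketch, not part of this file
#include <cassert>
#include <cstdint>

int main() {
  const int codes_offset = 64;                       // assumed header size
  uint8_t const_method[64 + 4] = {0};                // header + 4 bytecodes
  uint8_t* code_base = const_method + codes_offset;  // "lea(rbcp, codes_offset)"
  uint8_t* bcp = code_base;                          // the restored bcp
  assert(bcp - code_base == 0);                      // bci == 0 <=> code_base()
  return 0;
}
#endif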

  // handle exceptions (exception handling will handle unlocking!)
  {
    Label L;
    __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t) NULL_WORD);
    __ jcc(Assembler::zero, L);
    // Note: At some point we may want to unify this with the code
    // used in call_VM_base(); i.e., we should use the
    // StubRoutines::forward_exception code. For now this doesn't work
    // here because the rsp is not correctly set at this point.
    __ MacroAssembler::call_VM(noreg,
                               CAST_FROM_FN_PTR(address,
                               InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }

  // do unlocking if necessary
  {
    Label L;
    __ movl(t, Address(method, Method::access_flags_offset()));
    __ testl(t, JVM_ACC_SYNCHRONIZED);
    __ jcc(Assembler::zero, L);
    // the code below should be shared with the interpreter macro
    // assembler implementation
    {
      Label unlock;
      // The BasicObjectLock will be first in the list, since this is a
      // synchronized method. However, we need to check that the object
      // has not been unlocked by an explicit monitorexit bytecode.
      const Address monitor(rbp,
                            (intptr_t)(frame::interpreter_frame_initial_sp_offset *
                                       wordSize - (int)sizeof(BasicObjectLock)));

      const Register regmon = NOT_LP64(rdx) LP64_ONLY(c_rarg1);

      // the monitor is expected in c_rarg1 for the slow unlock path
      __ lea(regmon, monitor); // address of first monitor

      __ movptr(t, Address(regmon, BasicObjectLock::obj_offset_in_bytes()));
      __ testptr(t, t);
      __ jcc(Assembler::notZero, unlock);

      // Entry already unlocked, need to throw an exception
      __ MacroAssembler::call_VM(noreg,
                                 CAST_FROM_FN_PTR(address,
                   InterpreterRuntime::throw_illegal_monitor_state_exception));
      __ should_not_reach_here();

      __ bind(unlock);
      __ unlock_object(regmon);
    }
    __ bind(L);
  }
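  // The `monitor` address above locates the method's single BasicObjectLock:
  // it sits immediately below the frame's initial expression stack position,
  // at rbp + initial_sp_offset * wordSize - sizeof(BasicObjectLock). A
  // standalone sketch of the offset arithmetic; the frame constant and the
  // two-word lock size are assumptions, not the real HotSpot frame layout:
#if 0  // standalone sketch, not part of this file
#include <cassert>
#include <cstdint>

int main() {
  const intptr_t wordSize = 8;                          // 64-bit word
  const intptr_t initial_sp_offset = -9;                // assumed frame constant
  const intptr_t sizeof_BasicObjectLock = 2 * wordSize; // lock word + obj
  intptr_t rbp = 0x7fffffffe400;                        // hypothetical frame pointer
  intptr_t monitor =
      rbp + initial_sp_offset * wordSize - sizeof_BasicObjectLock;
  assert(monitor < rbp);  // the monitor block lies below the fixed frame part
  return 0;
}
#endif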

  // jvmti support
  // Note: This must happen _after_ handling/throwing any exceptions since
  //       the exception handler code notifies the runtime of method exits
  //       too. If this happens before, method entry/exit notifications are
  //       not properly paired (was bug - gri 11/22/99).
  __ notify_method_exit(vtos, InterpreterMacroAssembler::NotifyJVMTI);

  // restore a potential result in edx:eax, then call the result handler to
  // restore a potential result in ST0 and handle the result

  __ pop(ltos);
  LP64_ONLY( __ pop(dtos));

  __ movptr(t, Address(rbp,
                       (frame::interpreter_frame_result_handler_offset) * wordSize));
  __ call(t);

  // remove activation
  __ movptr(t, Address(rbp,
                       frame::interpreter_frame_sender_sp_offset *
                       wordSize)); // get sender sp
  __ leave();                                // remove frame anchor
  __ pop(rdi);                               // get return address
  __ mov(rsp, t);                            // set sp to sender sp
  __ jmp(rdi);

  if (inc_counter) {
    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(&continue_after_compile);
  }

  return entry_point;
}

//
// Generic interpreted method entry to (asm) interpreter
//
address InterpreterGenerator::generate_normal_entry(bool synchronized) {
  // determine code generation flags
  bool inc_counter  = UseCompiler || CountCompiledCalls || LogTouchedMethods;

  // ebx: Method*
  // rbcp: sender sp
  address entry_point = __ pc();

  const Address constMethod(rbx, Method::const_offset());
  const Address access_flags(rbx, Method::access_flags_offset());
  const Address size_of_parameters(rdx,
                                   ConstMethod::size_of_parameters_offset());
  const Address size_of_locals(rdx, ConstMethod::size_of_locals_offset());


  // get parameter size (always needed)
  __ movptr(rdx, constMethod);
  __ load_unsigned_short(rcx, size_of_parameters);

  // rbx: Method*
  // rcx: size of parameters
  // rbcp: sender_sp (could differ from sp+wordSize if we were called via c2i)

  __ load_unsigned_short(rdx, size_of_locals); // get size of locals in words
  __ subl(rdx, rcx); // rdx = no. of additional locals

  // YYY
//   __ incrementl(rdx);
//   __ andl(rdx, -2);

  // see if we've got enough room on the stack for locals plus overhead.
  generate_stack_overflow_check();

  // get return address
  __ pop(rax);

  // compute beginning of parameters
  __ lea(rlocals, Address(rsp, rcx, Interpreter::stackElementScale(), -wordSize));

  // rdx - # of additional locals
  // allocate space for locals
  // explicitly initialize locals
  {
    Label exit, loop;
    __ testl(rdx, rdx);
    __ jcc(Assembler::lessEqual, exit); // do nothing if rdx <= 0
    __ bind(loop);
    __ push((int) NULL_WORD); // initialize local variables
    __ decrementl(rdx); // until everything initialized
    __ jcc(Assembler::greater, loop);
    __ bind(exit);
  }
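  // The loop above pushes one zero word per non-parameter local, so every
  // extra local starts out null and the GC never sees stale stack contents
  // in a local slot. A standalone sketch of the same zeroing, with the
  // grows-down stack modeled as an array and illustrative slot counts:
#if 0  // standalone sketch, not part of this file
#include <cassert>
#include <cstdint>

int main() {
  const int params = 2, locals = 5;
  intptr_t slots[locals];
  slots[0] = 11; slots[1] = 22;              // incoming arguments (locals 0..1)
  for (int i = locals - params; i > 0; --i)  // rdx counts down, as in the loop
    slots[params + (i - 1)] = 0;             // "push((int) NULL_WORD)"
  for (int i = params; i < locals; ++i)
    assert(slots[i] == 0);                   // every additional local is null
  return 0;
}
#endif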

  // initialize fixed part of activation frame
  generate_fixed_frame(false);

  // make sure method is not native & not abstract
#ifdef ASSERT
  __ movl(rax, access_flags);
  {
    Label L;
    __ testl(rax, JVM_ACC_NATIVE);
    __ jcc(Assembler::zero, L);
    __ stop("tried to execute native method as non-native");
    __ bind(L);
  }
  {
    Label L;
    __ testl(rax, JVM_ACC_ABSTRACT);
    __ jcc(Assembler::zero, L);
    __ stop("tried to execute abstract method in interpreter");
    __ bind(L);
  }
#endif

  // Since at this point in the method invocation the exception
  // handler would try to exit the monitor of a synchronized method
  // which has not been entered yet, we set the thread local variable
  // _do_not_unlock_if_synchronized to true. The remove_activation
  // will check this flag.

  const Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread);
  NOT_LP64(__ get_thread(thread));
  const Address do_not_unlock_if_synchronized(thread,
        in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
  __ movbool(do_not_unlock_if_synchronized, true);

  __ profile_parameters_type(rax, rcx, rdx);
  // increment invocation count & check for overflow
  Label invocation_counter_overflow;
  Label profile_method;
  Label profile_method_continue;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow,
                          &profile_method,
                          &profile_method_continue);
    if (ProfileInterpreter) {
      __ bind(profile_method_continue);
    }
  }

  Label continue_after_compile;
  __ bind(continue_after_compile);
  // bang the stack shadow pages now so that any pending stack overflow
  // surfaces here, before the method is locked below
  bang_stack_shadow_pages(false);

  // reset the _do_not_unlock_if_synchronized flag
  NOT_LP64(__ get_thread(thread));
  __ movbool(do_not_unlock_if_synchronized, false);

  // check for synchronized methods
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so the method is not locked if the counter overflows.
  if (synchronized) {
    // Allocate monitor and lock method
    lock_method();
  } else {
    // no synchronization necessary
#ifdef ASSERT
    {
      Label L;
      __ movl(rax, access_flags);
      __ testl(rax, JVM_ACC_SYNCHRONIZED);
      __ jcc(Assembler::zero, L);
      __ stop("method needs synchronization");
      __ bind(L);
    }
#endif
  }

  // start execution
#ifdef ASSERT
  {
    Label L;
    const Address monitor_block_top(rbp,
                frame::interpreter_frame_monitor_block_top_offset * wordSize);
    __ movptr(rax, monitor_block_top);
    __ cmpptr(rax, rsp);
    __ jcc(Assembler::equal, L);
    __ stop("broken stack frame setup in interpreter");
    __ bind(L);
  }
#endif

  // jvmti support
  __ notify_method_entry();

  __ dispatch_next(vtos);

  // invocation counter overflow
  if (inc_counter) {
    if (ProfileInterpreter) {
      // We have decided to profile this method in the interpreter
      __ bind(profile_method);
      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
      __ set_method_data_pointer_for_bcp();
      __ get_method(rbx);
      __ jmp(profile_method_continue);
    }
    // Handle overflow of counter and compile method
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(&continue_after_compile);
  }

  return entry_point;
}

//-----------------------------------------------------------------------------
// Exceptions

void TemplateInterpreterGenerator::generate_throw_exception() {
  // Entry point in previous activation (i.e., if the caller was
  // interpreted)
  Interpreter::_rethrow_exception_entry = __ pc();
  // Restore sp to interpreter_frame_last_sp even though we are going
  // to empty the expression stack for the exception processing.
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
  // rax: exception
  // rdx: return address/pc that threw exception
  __ restore_bcp();    // r13/rsi points to call/send
  __ restore_locals();
  LP64_ONLY(__ reinit_heapbase());  // restore r12 as heapbase.
  // Entry point for exceptions thrown within interpreter code
  Interpreter::_throw_exception_entry = __ pc();
  // expression stack is undefined here
  // rax: exception
  // r13/rsi: exception bcp
  __ verify_oop(rax);
  Register rarg = NOT_LP64(rax) LP64_ONLY(c_rarg1);
  LP64_ONLY(__ mov(c_rarg1, rax));

  // expression stack must be empty before entering the VM in case of
  // an exception
  __ empty_expression_stack();
  // find exception handler address and preserve exception oop
  __ call_VM(rdx,
             CAST_FROM_FN_PTR(address,
                          InterpreterRuntime::exception_handler_for_exception),
             rarg);
  // rax: exception handler entry point
  // rdx: preserved exception oop
  // r13/rsi: bcp for exception handler
  __ push_ptr(rdx); // push exception which is now the only value on the stack
  __ jmp(rax); // jump to exception handler (may be _remove_activation_entry!)

  // If the exception is not handled in the current frame the frame is
  // removed and the exception is rethrown (i.e. exception
  // continuation is _rethrow_exception).
  //
  // Note: At this point the bci is still the bci for the instruction
  // which caused the exception and the expression stack is
  // empty. Thus, for any VM calls at this point, GC will find a legal
  // oop map (with empty expression stack).

  // In current activation
  // tos: exception
  // esi: exception bcp

  //
  // JVMTI PopFrame support
  //

  Interpreter::_remove_activation_preserving_args_entry = __ pc();
  __ empty_expression_stack();
  // Set the popframe_processing bit in pending_popframe_condition
  // indicating that we are currently handling popframe, so that
  // call_VMs that may happen later do not trigger new popframe
  // handling cycles.
  const Register thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
  NOT_LP64(__ get_thread(thread));
  __ movl(rdx, Address(thread, JavaThread::popframe_condition_offset()));
  __ orl(rdx, JavaThread::popframe_processing_bit);
  __ movl(Address(thread, JavaThread::popframe_condition_offset()), rdx);

  {
    // Check to see whether we are returning to a deoptimized frame.
    // (The PopFrame call ensures that the caller of the popped frame is
    // either interpreted or compiled and deoptimizes it if compiled.)
    // In this case, we can't call dispatch_next() after the frame is
    // popped, but instead must save the incoming arguments and restore
    // them after deoptimization has occurred.
    //
    // Note that we don't compare the return PC against the
    // deoptimization blob's unpack entry because of the presence of
    // adapter frames in C2.
    Label caller_not_deoptimized;
    Register rarg = NOT_LP64(rdx) LP64_ONLY(c_rarg1);
    __ movptr(rarg, Address(rbp, frame::return_addr_offset * wordSize));
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
                               InterpreterRuntime::interpreter_contains), rarg);
    __ testl(rax, rax);
    __ jcc(Assembler::notZero, caller_not_deoptimized);

    // Compute size of arguments for saving when returning to
    // deoptimized caller
    __ get_method(rax);
    __ movptr(rax, Address(rax, Method::const_offset()));
    __ load_unsigned_short(rax, Address(rax, in_bytes(ConstMethod::
                                                size_of_parameters_offset())));
    __ shll(rax, Interpreter::logStackElementSize);
    __ restore_locals();
    __ subptr(rlocals, rax);
    __ addptr(rlocals, wordSize);
    // Save these arguments
    NOT_LP64(__ get_thread(thread));
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
                                           Deoptimization::
                                           popframe_preserve_args),
                          thread, rax, rlocals);

    __ remove_activation(vtos, rdx,
                         /* throw_monitor_exception */ false,
                         /* install_monitor_exception */ false,
                         /* notify_jvmdi */ false);

    // Inform deoptimization that it is responsible for restoring
    // these arguments
    NOT_LP64(__ get_thread(thread));
    __ movl(Address(thread, JavaThread::popframe_condition_offset()),
            JavaThread::popframe_force_deopt_reexecution_bit);

    // Continue in deoptimization handler
    __ jmp(rdx);

    __ bind(caller_not_deoptimized);
  }
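  // The argument-size computation above scales the parameter count to bytes
  // (count << logStackElementSize) and then rewinds rlocals so it points at
  // the lowest-addressed argument word: local 0 is the highest-addressed
  // slot, so the block starts at locals - size + wordSize. A standalone
  // sketch with assumed 64-bit constants, not the real HotSpot headers:
#if 0  // standalone sketch, not part of this file
#include <cassert>
#include <cstdint>

int main() {
  const int logStackElementSize = 3;   // 8-byte stack elements (assumed)
  const intptr_t wordSize = 8;
  int param_count = 3;                 // from ConstMethod::size_of_parameters
  intptr_t size = static_cast<intptr_t>(param_count) << logStackElementSize;
  intptr_t rlocals = 0x7fffffffe440;   // hypothetical address of local 0
  intptr_t first_arg = rlocals - size + wordSize;  // "subptr" + "addptr" above
  assert(rlocals - first_arg == (param_count - 1) * wordSize);
  return 0;
}
#endif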

  __ remove_activation(vtos, rdx, /* rdx result (retaddr) is not used */
                       /* throw_monitor_exception */ false,
                       /* install_monitor_exception */ false,
                       /* notify_jvmdi */ false);

  // Finish with popframe handling
  // A previous I2C followed by a deoptimization might have moved the
  // outgoing arguments further up the stack. PopFrame expects the
  // mutations to those outgoing arguments to be preserved and other
  // constraints basically require this frame to look exactly as
  // though it had previously invoked an interpreted activation with
  // no space between the top of the expression stack (current
  // last_sp) and the top of stack. Rather than force deopt to
  // maintain this kind of invariant all the time we call a small
  // fixup routine to move the mutated arguments onto the top of our
  // expression stack if necessary.
#ifndef _LP64
  __ mov(rax, rsp);
  __ movptr(rbx, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ get_thread(thread);
  // PC must point into interpreter here
  __ set_last_Java_frame(thread, noreg, rbp, __ pc());
  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::popframe_move_outgoing_args), thread, rax, rbx);
  __ get_thread(thread);
#else
  __ mov(c_rarg1, rsp);
  __ movptr(c_rarg2, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  // PC must point into interpreter here
  __ set_last_Java_frame(noreg, rbp, __ pc());
  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::popframe_move_outgoing_args), r15_thread, c_rarg1, c_rarg2);
#endif
  __ reset_last_Java_frame(thread, true, true);

  // Restore the last_sp and null it out
  __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);

  __ restore_bcp();
  __ restore_locals();
  // The method data pointer was incremented already during
  // call profiling. We have to restore the mdp for the current bcp.
  if (ProfileInterpreter) {
    __ set_method_data_pointer_for_bcp();
  }

  // Clear the popframe condition flag
  NOT_LP64(__ get_thread(thread));
  __ movl(Address(thread, JavaThread::popframe_condition_offset()),
          JavaThread::popframe_inactive);

#if INCLUDE_JVMTI
  {
    Label L_done;
    const Register local0 = rlocals;

    __ cmpb(Address(rbcp, 0), Bytecodes::_invokestatic);
    __ jcc(Assembler::notEqual, L_done);

    // The member name argument must be restored if _invokestatic is re-executed after a PopFrame call.
    // Detect such a case in the InterpreterRuntime function and return the member name argument, or NULL.

    __ get_method(rdx);
    __ movptr(rax, Address(local0, 0));
    __ call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), rax, rdx, rbcp);

    __ testptr(rax, rax);
    __ jcc(Assembler::zero, L_done);

    __ movptr(Address(local0, 0), rax); // store the member name back into local 0
    __ bind(L_done);
  }
#endif // INCLUDE_JVMTI

  __ dispatch_next(vtos);
  // end of PopFrame support

  Interpreter::_remove_activation_entry = __ pc();

  // preserve exception over this code sequence
  __ pop_ptr(rax);
  NOT_LP64(__ get_thread(thread));
  __ movptr(Address(thread, JavaThread::vm_result_offset()), rax);
  // remove the activation (without doing throws on illegalMonitorExceptions)
  __ remove_activation(vtos, rdx, false, true, false);
  // restore exception
  NOT_LP64(__ get_thread(thread));
  __ get_vm_result(rax, thread);

  // In between activations - previous activation type unknown yet
  // compute continuation point - the continuation point expects the
  // following registers set up:
  //
  // rax: exception
  // rdx: return address/pc that threw exception
  // rsp: expression stack of caller
  // rbp: ebp of caller
  __ push(rax);                                  // save exception
  __ push(rdx);                                  // save return address
  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
                          SharedRuntime::exception_handler_for_return_address),
                        thread, rdx);
  __ mov(rbx, rax);                              // save exception handler
  __ pop(rdx);                                   // restore return address
  __ pop(rax);                                   // restore exception
  // Note that an "issuing PC" is actually the next PC after the call
  __ jmp(rbx);                                   // jump to exception
                                                 // handler of caller
}


//
// JVMTI ForceEarlyReturn support
//
address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state) {
  address entry = __ pc();

  __ restore_bcp();
  __ restore_locals();
  __ empty_expression_stack();
  __ load_earlyret_value(state);  // the 32-bit path returns the value in edx:eax, so don't reuse rdx

  const Register thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
  NOT_LP64(__ get_thread(thread));
  __ movptr(rcx, Address(thread, JavaThread::jvmti_thread_state_offset()));
  Address cond_addr(rcx, JvmtiThreadState::earlyret_state_offset());

  // Clear the earlyret state
  __ movl(cond_addr, JvmtiThreadState::earlyret_inactive);

  __ remove_activation(state, rsi,
                       false, /* throw_monitor_exception */
                       false, /* install_monitor_exception */
                       true); /* notify_jvmdi */
  __ jmp(rsi);

  return entry;
} // end of ForceEarlyReturn support


//-----------------------------------------------------------------------------
// Helper for vtos entry point generation

void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t,
                                                         address& bep,
                                                         address& cep,
                                                         address& sep,
                                                         address& aep,
                                                         address& iep,
                                                         address& lep,
                                                         address& fep,
                                                         address& dep,
                                                         address& vep) {
  assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
  Label L;
  aep = __ pc();  __ push_ptr();   __ jmp(L);
#ifndef _LP64
  fep = __ pc();  __ push(ftos);   __ jmp(L);
  dep = __ pc();  __ push(dtos);   __ jmp(L);
#else
  fep = __ pc();  __ push_f(xmm0); __ jmp(L);
  dep = __ pc();  __ push_d(xmm0); __ jmp(L);
#endif // _LP64
  lep = __ pc();  __ push_l();     __ jmp(L);
  bep = cep = sep =
  iep = __ pc();  __ push_i();
  vep = __ pc();
  __ bind(L);
  generate_and_dispatch(t);
}
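// Each tos-state entry above spills the register-cached top-of-stack value
// onto the expression stack and then falls into one shared vtos continuation
// that dispatches the template. A minimal function-pointer sketch of that
// funnel (plain C++ with hypothetical names, not the HotSpot dispatch code):
#if 0  // standalone sketch, not part of this file
#include <cassert>
#include <vector>

static std::vector<long> expr_stack;  // stand-in for the expression stack
static long tos_cache = 42;           // value cached in a register (e.g. rax)
static bool dispatched = false;

static void vep() { dispatched = true; }                       // common vtos entry
static void iep() { expr_stack.push_back(tos_cache); vep(); }  // "push_i; jmp(L)"
static void aep() { expr_stack.push_back(tos_cache); vep(); }  // "push_ptr; jmp(L)"

int main() {
  void (*entries[2])() = { iep, aep };  // bep/cep/sep alias iep above
  entries[0]();                         // arrive with an int cached on tos
  assert(dispatched && expr_stack.size() == 1);
  return 0;
}
#endif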


//-----------------------------------------------------------------------------
// Generation of individual instructions

// helpers for generate_and_dispatch


InterpreterGenerator::InterpreterGenerator(StubQueue* code)
  : TemplateInterpreterGenerator(code) {
  generate_all(); // down here so it can be "virtual"
}

//-----------------------------------------------------------------------------

// Non-product code
#ifndef PRODUCT

address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
  address entry = __ pc();

#ifndef _LP64
  // prepare expression stack
  __ pop(rcx);          // pop return address so expression stack is 'pure'
  __ push(state);       // save tosca

  // pass tosca registers as arguments & call tracer
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::trace_bytecode), rcx, rax, rdx);
  __ mov(rcx, rax);     // make sure return address is not destroyed by pop(state)
  __ pop(state);        // restore tosca

  // return
  __ jmp(rcx);
#else
  __ push(state);
  __ push(c_rarg0);
  __ push(c_rarg1);
  __ push(c_rarg2);
  __ push(c_rarg3);
  __ mov(c_rarg2, rax);  // Pass itos
#ifdef _WIN64
  __ movflt(xmm3, xmm0); // Pass ftos
#endif
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address, SharedRuntime::trace_bytecode),
             c_rarg1, c_rarg2, c_rarg3);
  __ pop(c_rarg3);
  __ pop(c_rarg2);
  __ pop(c_rarg1);
  __ pop(c_rarg0);
  __ pop(state);
  __ ret(0);                                   // return from result handler
#endif // _LP64

  return entry;
}

void TemplateInterpreterGenerator::count_bytecode() {
  __ incrementl(ExternalAddress((address) &BytecodeCounter::_counter_value));
}

void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
  __ incrementl(ExternalAddress((address) &BytecodeHistogram::_counters[t->bytecode()]));
}

void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
  __ mov32(rbx, ExternalAddress((address) &BytecodePairHistogram::_index));
  __ shrl(rbx, BytecodePairHistogram::log2_number_of_codes);
  __ orl(rbx,
         ((int) t->bytecode()) <<
         BytecodePairHistogram::log2_number_of_codes);
  __ mov32(ExternalAddress((address) &BytecodePairHistogram::_index), rbx);
  __ lea(rscratch1, ExternalAddress((address) BytecodePairHistogram::_counters));
  __ incrementl(Address(rscratch1, rbx, Address::times_4));
}
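// histogram_bytecode_pair keeps a rolling pair index: the shift drops the
// older bytecode out of the low bits and the OR places the current bytecode
// in the high bits, so index = previous | (current << log2_number_of_codes)
// addresses the counter for the (previous, current) pair. A standalone
// sketch of that arithmetic; log2_number_of_codes = 8 is an assumption:
#if 0  // standalone sketch, not part of this file
#include <cassert>

int main() {
  const int log2_number_of_codes = 8;                 // assumed: up to 256 codes
  static int counters[1 << (2 * log2_number_of_codes)];
  int index = 0;
  int trace[3] = { 0x1b /* iload_1 */, 0x1c /* iload_2 */, 0x60 /* iadd */ };
  for (int i = 0; i < 3; ++i) {
    index = (index >> log2_number_of_codes) |         // "shrl": drop old pair half
            (trace[i] << log2_number_of_codes);       // "orl": add current bytecode
    counters[index]++;                                // "incrementl"
  }
  // The (iload_2, iadd) pair was counted exactly once.
  assert(counters[0x1c | (0x60 << log2_number_of_codes)] == 1);
  return 0;
}
#endif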


void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
  // Call a little run-time stub to avoid blow-up for each bytecode.
  // The run-time stub saves the right registers, depending on
  // the tosca in-state for the given template.

  assert(Interpreter::trace_code(t->tos_in()) != NULL,
         "entry must have been generated");
#ifndef _LP64
  __ call(RuntimeAddress(Interpreter::trace_code(t->tos_in())));
#else
  __ mov(r12, rsp); // remember sp (can only use r12 if not using call_VM)
  __ andptr(rsp, -16); // align stack as required by ABI
  __ call(RuntimeAddress(Interpreter::trace_code(t->tos_in())));
  __ mov(rsp, r12); // restore sp
  __ reinit_heapbase();
#endif // _LP64
}


void TemplateInterpreterGenerator::stop_interpreter_at() {
  Label L;
  __ cmp32(ExternalAddress((address) &BytecodeCounter::_counter_value),
           StopInterpreterAt);
  __ jcc(Assembler::notEqual, L);
  __ int3();
  __ bind(L);
}
#endif // !PRODUCT
#endif // ! CC_INTERP