// templateInterpreterGenerator_s390.cpp revision 12993:a8503d22944f
1/*
2 * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
3 * Copyright (c) 2016, 2017, SAP SE. All rights reserved.
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This code is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 only, as
8 * published by the Free Software Foundation.
9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26#include "precompiled.hpp"
27#include "asm/macroAssembler.inline.hpp"
28#include "interpreter/abstractInterpreter.hpp"
29#include "interpreter/bytecodeHistogram.hpp"
30#include "interpreter/interpreter.hpp"
31#include "interpreter/interpreterRuntime.hpp"
32#include "interpreter/interp_masm.hpp"
33#include "interpreter/templateInterpreterGenerator.hpp"
34#include "interpreter/templateTable.hpp"
35#include "oops/arrayOop.hpp"
36#include "oops/oop.inline.hpp"
37#include "prims/jvmtiExport.hpp"
38#include "prims/jvmtiThreadState.hpp"
39#include "runtime/arguments.hpp"
40#include "runtime/deoptimization.hpp"
41#include "runtime/frame.inline.hpp"
42#include "runtime/sharedRuntime.hpp"
43#include "runtime/stubRoutines.hpp"
44#include "runtime/synchronizer.hpp"
45#include "runtime/timer.hpp"
46#include "runtime/vframeArray.hpp"
47#include "utilities/debug.hpp"
48
49
// Size of interpreter code.  Increase if too small.  Interpreter will
// fail with a guarantee ("not enough space for interpreter generation");
// if too small.
// Run with +PrintInterpreter to get the VM to print out the size.
// Max size with JVMTI
int TemplateInterpreter::InterpreterCodeSize = 320*K;

#undef  __
#ifdef PRODUCT
  #define __ _masm->
#else
  #define __ _masm->
// Debug-build alternative: prefix every emitted instruction with a
// file-and-line block comment when Verbose is set.
//  #define __ (Verbose ? (_masm->block_comment(FILE_AND_LINE),_masm):_masm)->
#endif

// Emit a named comment into the generated code (disassembly aid only).
#define BLOCK_COMMENT(str) __ block_comment(str)
// Bind a label and document its position with a block comment.
#define BIND(label)        __ bind(label); BLOCK_COMMENT(#label ":")

// Offset of the interpreter frame's oop temporary slot (relative to Z_fp).
#define oop_tmp_offset     _z_ijava_state_neg(oop_tmp)
69
70//-----------------------------------------------------------------------------
71
address TemplateInterpreterGenerator::generate_slow_signature_handler() {
  //
  // New slow_signature handler that respects the z/Architecture
  // C calling conventions.
  //
  // We get called by the native entry code with our output register
  // area == 8. First we call InterpreterRuntime::get_result_handler
  // to copy the pointer to the signature string temporarily to the
  // first C-argument and to return the result_handler in
  // Z_RET. Since native_entry will copy the jni-pointer to the
  // first C-argument slot later on, it's OK to occupy this slot
  // temporarily. Then we copy the argument list on the java
  // expression stack into native varargs format on the native stack
  // and load arguments into argument registers. Integer arguments in
  // the varargs vector will be sign-extended to 8 bytes.
  //
  // On entry:
  //   Z_ARG1  - intptr_t*       Address of java argument list in memory.
  //   Z_state - cppInterpreter* Address of interpreter state for
  //                               this method
  //   Z_method
  //
  // On exit (just before return instruction):
  //   Z_RET contains the address of the result_handler.
  //   Z_ARG2 is not updated for static methods and contains "this" otherwise.
  //   Z_ARG3-Z_ARG5 contain the first 3 arguments of types other than float and double.
  //   Z_FARG1-Z_FARG4 contain the first 4 arguments of type float or double.

  // Each case in the register-move dispatch tables below occupies
  // exactly 1 << LogSizeOfCase = 8 bytes of instructions (see
  // iarg_caselist / farg_caselist).
  const int LogSizeOfCase = 3;

  const int max_fp_register_arguments   = Argument::n_float_register_parameters;
  const int max_int_register_arguments  = Argument::n_register_parameters - 2;  // First 2 are reserved.

  const Register arg_java       = Z_tmp_2;      // Cursor into the Java argument list (counts down).
  const Register arg_c          = Z_tmp_3;      // Cursor into the C varargs area (counts up).
  const Register signature      = Z_R1_scratch; // Is a string.
  const Register fpcnt          = Z_R0_scratch; // Count of float/double args seen so far.
  const Register argcnt         = Z_tmp_4;      // Count of int-like args seen so far.
  const Register intSlot        = Z_tmp_1;
  const Register sig_end        = Z_tmp_1; // Assumed end of signature (only used in do_object).
  const Register target_sp      = Z_tmp_1;
  const FloatRegister floatSlot = Z_F1;

  // Two spill slots in the ABI register save area, used to preserve the
  // signature pointer and the float-argument count across VM calls and
  // the dispatch-table branches below.
  const int d_signature         = _z_abi(gpr6); // Only spill space, register contents not affected.
  const int d_fpcnt             = _z_abi(gpr7); // Only spill space, register contents not affected.

  unsigned int entry_offset = __ offset();

  BLOCK_COMMENT("slow_signature_handler {");

  // We use target_sp for storing arguments in the C frame.
  __ save_return_pc();
  // Spill Z_R10-Z_R13 (used as temps below); restored at loop_end before return.
  __ z_stmg(Z_R10,Z_R13,-32,Z_SP);
  __ push_frame_abi160(32);

  __ z_lgr(arg_java, Z_ARG1);

  Register   method = Z_ARG2; // Directly load into correct argument register.

  __ get_method(method);
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::get_signature), Z_thread, method);

  // Move signature to callee saved register.
  // Don't directly write to stack. Frame is used by VM call.
  __ z_lgr(Z_tmp_1, Z_RET);

  // Reload method. Register may have been altered by VM call.
  __ get_method(method);

  // Get address of result handler.
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::get_result_handler), Z_thread, method);

  // Save signature address to stack.
  __ z_stg(Z_tmp_1, d_signature, Z_SP);

  // Don't overwrite return value (Z_RET, Z_ARG1) in rest of the method !

  {
    Label   isStatic;

    // Test if static.
    // We can test the bit directly.
    // Path is Z_method->_access_flags._flags.
    // We only support flag bits in the least significant byte (assert !).
    // Therefore add 3 to address that byte within "_flags".
    // Reload method. VM call above may have destroyed register contents
    __ get_method(method);
    __ testbit(method2_(method, access_flags), JVM_ACC_STATIC_BIT);
    method = noreg;  // end of life
    __ z_btrue(isStatic);

    // For non-static functions, pass "this" in Z_ARG2 and copy it to 2nd C-arg slot.
    // Need to box the Java object here, so we use arg_java
    // (address of current Java stack slot) as argument and
    // don't dereference it as in case of ints, floats, etc..
    __ z_lgr(Z_ARG2, arg_java);
    __ add2reg(arg_java, -BytesPerWord);
    __ bind(isStatic);
  }

  // argcnt == 0 corresponds to 3rd C argument.
  //   arg #1 (result handler) and
  //   arg #2 (this, for non-statics), unused else
  // are reserved and pre-filled above.
  // arg_java points to the corresponding Java argument here. It
  // has been decremented by one argument (this) in case of non-static.
  __ clear_reg(argcnt, true, false);  // Don't set CC.
  __ z_lg(target_sp, 0, Z_SP);
  __ add2reg(arg_c, _z_abi(remaining_cargs), target_sp);
  // No floating-point args parsed so far.
  __ clear_mem(Address(Z_SP, d_fpcnt), 8);

  NearLabel   move_intSlot_to_ARG, move_floatSlot_to_FARG;
  NearLabel   loop_start, loop_start_restore, loop_end;
  NearLabel   do_int, do_long, do_float, do_double;
  NearLabel   do_dontreachhere, do_object, do_array, do_boxed;

#ifdef ASSERT
  // Signature needs to point to '(' (== 0x28) at entry.
  __ z_lg(signature, d_signature, Z_SP);
  __ z_cli(0, signature, (int) '(');
  __ z_brne(do_dontreachhere);
#endif

  __ bind(loop_start_restore);
  __ z_lg(signature, d_signature, Z_SP);  // Restore signature ptr, destroyed by move_XX_to_ARG.

  BIND(loop_start);
  // Advance to next argument type token from the signature.
  __ add2reg(signature, 1);

  // Dispatch on the JVMS field-descriptor character.
  // Use CLI, works well on all CPU versions.
    __ z_cli(0, signature, (int) ')');
    __ z_bre(loop_end);                // end of signature
    __ z_cli(0, signature, (int) 'L');
    __ z_bre(do_object);               // object     #9
    __ z_cli(0, signature, (int) 'F');
    __ z_bre(do_float);                // float      #7
    __ z_cli(0, signature, (int) 'J');
    __ z_bre(do_long);                 // long       #6
    __ z_cli(0, signature, (int) 'B');
    __ z_bre(do_int);                  // byte       #1
    __ z_cli(0, signature, (int) 'Z');
    __ z_bre(do_int);                  // boolean    #2
    __ z_cli(0, signature, (int) 'C');
    __ z_bre(do_int);                  // char       #3
    __ z_cli(0, signature, (int) 'S');
    __ z_bre(do_int);                  // short      #4
    __ z_cli(0, signature, (int) 'I');
    __ z_bre(do_int);                  // int        #5
    __ z_cli(0, signature, (int) 'D');
    __ z_bre(do_double);               // double     #8
    __ z_cli(0, signature, (int) '[');
    __ z_bre(do_array);                // array      #10

  __ bind(do_dontreachhere);

  __ unimplemented("ShouldNotReachHere in slow_signature_handler", 120);

  // Array argument
  BIND(do_array);

  {
    Label   start_skip, end_skip;

    __ bind(start_skip);

    // Advance to next type tag from signature.
    __ add2reg(signature, 1);

    // Use CLI, works well on all CPU versions.
    __ z_cli(0, signature, (int) '[');
    __ z_bre(start_skip);               // Skip further brackets.

    __ z_cli(0, signature, (int) '9');
    __ z_brh(end_skip);                 // no optional size

    __ z_cli(0, signature, (int) '0');
    __ z_brnl(start_skip);              // Skip optional size.

    __ bind(end_skip);

    __ z_cli(0, signature, (int) 'L');
    __ z_brne(do_boxed);                // If not array of objects: go directly to do_boxed.
  }

  //  OOP argument
  BIND(do_object);
  // Pass by an object's type name: skip forward to the ';' terminator.
  {
    Label   L;

    __ add2reg(sig_end, 4095, signature);     // Assume object type name is shorter than 4k.
    __ load_const_optimized(Z_R0, (int) ';'); // Type name terminator (must be in Z_R0!).
    __ MacroAssembler::search_string(sig_end, signature);
    __ z_brl(L);
    __ z_illtrap();  // No semicolon found: internal error or object name too long.
    __ bind(L);
    __ z_lgr(signature, sig_end);
    // fallthru to do_boxed
  }

  // Need to box the Java object here, so we use arg_java
  // (address of current Java stack slot) as argument and
  // don't dereference it as in case of ints, floats, etc..

  // UNBOX argument
  // Load reference and check for NULL: a null reference is passed as NULL,
  // a non-null reference is passed as the address of its Java stack slot.
  Label  do_int_Entry4Boxed;
  __ bind(do_boxed);
  {
    __ load_and_test_long(intSlot, Address(arg_java));
    __ z_bre(do_int_Entry4Boxed);
    __ z_lgr(intSlot, arg_java);
    __ z_bru(do_int_Entry4Boxed);
  }

  // INT argument

  // (also for byte, boolean, char, short)
  // Use lgf for load (sign-extend) and stg for store.
  BIND(do_int);
  __ z_lgf(intSlot, 0, arg_java);

  __ bind(do_int_Entry4Boxed);
  __ add2reg(arg_java, -BytesPerWord);
  // If argument fits into argument register, go and handle it, otherwise continue.
  __ compare32_and_branch(argcnt, max_int_register_arguments,
                          Assembler::bcondLow, move_intSlot_to_ARG);
  __ z_stg(intSlot, 0, arg_c);
  __ add2reg(arg_c, BytesPerWord);
  __ z_bru(loop_start);

  // LONG argument

  BIND(do_long);
  __ add2reg(arg_java, -2*BytesPerWord);  // Decrement first to have positive displacement for lg.
  __ z_lg(intSlot, BytesPerWord, arg_java);
  // If argument fits into argument register, go and handle it, otherwise continue.
  __ compare32_and_branch(argcnt, max_int_register_arguments,
                          Assembler::bcondLow, move_intSlot_to_ARG);
  __ z_stg(intSlot, 0, arg_c);
  __ add2reg(arg_c, BytesPerWord);
  __ z_bru(loop_start);

  // FLOAT argument

  BIND(do_float);
  __ z_le(floatSlot, 0, arg_java);
  __ add2reg(arg_java, -BytesPerWord);
  assert(max_fp_register_arguments <= 255, "always true");  // safety net
  __ z_cli(d_fpcnt+7, Z_SP, max_fp_register_arguments);
  __ z_brl(move_floatSlot_to_FARG);
  __ z_ste(floatSlot, 4, arg_c);   // Store into high word of the 8-byte varargs slot.
  __ add2reg(arg_c, BytesPerWord);
  __ z_bru(loop_start);

  // DOUBLE argument

  BIND(do_double);
  __ add2reg(arg_java, -2*BytesPerWord);  // Decrement first to have positive displacement for lg.
  __ z_ld(floatSlot, BytesPerWord, arg_java);
  assert(max_fp_register_arguments <= 255, "always true");  // safety net
  __ z_cli(d_fpcnt+7, Z_SP, max_fp_register_arguments);
  __ z_brl(move_floatSlot_to_FARG);
  __ z_std(floatSlot, 0, arg_c);
  __ add2reg(arg_c, BytesPerWord);
  __ z_bru(loop_start);

  // Method exit, all arguments processed.
  __ bind(loop_end);
  __ pop_frame();
  __ restore_return_pc();
  __ z_lmg(Z_R10,Z_R13,-32,Z_SP);  // Restore the registers spilled on entry.
  __ z_br(Z_R14);

  // Copy int arguments.

  Label  iarg_caselist;   // Distance between each case has to be a power of 2
                          // (= 1 << LogSizeOfCase).
  __ align(16);
  BIND(iarg_caselist);
  __ z_lgr(Z_ARG3, intSlot);    // 4 bytes
  __ z_bru(loop_start_restore); // 4 bytes

  __ z_lgr(Z_ARG4, intSlot);
  __ z_bru(loop_start_restore);

  __ z_lgr(Z_ARG5, intSlot);
  __ z_bru(loop_start_restore);

  __ align(16);
  __ bind(move_intSlot_to_ARG);
  __ z_stg(signature, d_signature, Z_SP);       // Spill since signature == Z_R1_scratch.
  __ z_larl(Z_R1_scratch, iarg_caselist);
  __ z_sllg(Z_R0_scratch, argcnt, LogSizeOfCase);
  __ add2reg(argcnt, 1);
  __ z_agr(Z_R1_scratch, Z_R0_scratch);
  __ z_bcr(Assembler::bcondAlways, Z_R1_scratch);  // Indexed jump into iarg_caselist.

  // Copy float arguments.

  Label  farg_caselist;   // Distance between each case has to be a power of 2
                          // (= 1 << LogSizeOfCase), padded with nop.
  __ align(16);
  BIND(farg_caselist);
  __ z_ldr(Z_FARG1, floatSlot); // 2 bytes
  __ z_bru(loop_start_restore); // 4 bytes
  __ z_nop();                   // 2 bytes

  __ z_ldr(Z_FARG2, floatSlot);
  __ z_bru(loop_start_restore);
  __ z_nop();

  __ z_ldr(Z_FARG3, floatSlot);
  __ z_bru(loop_start_restore);
  __ z_nop();

  __ z_ldr(Z_FARG4, floatSlot);
  __ z_bru(loop_start_restore);
  __ z_nop();

  __ align(16);
  __ bind(move_floatSlot_to_FARG);
  __ z_stg(signature, d_signature, Z_SP);        // Spill since signature == Z_R1_scratch.
  __ z_lg(Z_R0_scratch, d_fpcnt, Z_SP);          // Need old value for indexing.
  __ add2mem_64(Address(Z_SP, d_fpcnt), 1, Z_R1_scratch); // Increment index.
  __ z_larl(Z_R1_scratch, farg_caselist);
  __ z_sllg(Z_R0_scratch, Z_R0_scratch, LogSizeOfCase);
  __ z_agr(Z_R1_scratch, Z_R0_scratch);
  __ z_bcr(Assembler::bcondAlways, Z_R1_scratch);  // Indexed jump into farg_caselist.

  BLOCK_COMMENT("} slow_signature_handler");

  return __ addr_at(entry_offset);
}
409
410address TemplateInterpreterGenerator::generate_result_handler_for (BasicType type) {
411  address entry = __ pc();
412
413  assert(Z_tos == Z_RET, "Result handler: must move result!");
414  assert(Z_ftos == Z_FRET, "Result handler: must move float result!");
415
416  switch (type) {
417    case T_BOOLEAN:
418      __ c2bool(Z_tos);
419      break;
420    case T_CHAR:
421      __ and_imm(Z_tos, 0xffff);
422      break;
423    case T_BYTE:
424      __ z_lbr(Z_tos, Z_tos);
425      break;
426    case T_SHORT:
427      __ z_lhr(Z_tos, Z_tos);
428      break;
429    case T_INT:
430    case T_LONG:
431    case T_VOID:
432    case T_FLOAT:
433    case T_DOUBLE:
434      break;
435    case T_OBJECT:
436      // Retrieve result from frame...
437      __ mem2reg_opt(Z_tos, Address(Z_fp, oop_tmp_offset));
438      // and verify it.
439      __ verify_oop(Z_tos);
440      break;
441    default:
442      ShouldNotReachHere();
443  }
444  __ z_br(Z_R14);      // Return from result handler.
445  return entry;
446}
447
// Abstract method entry.
// Attempt to execute abstract method. Throw exception.
// Returns the address of the generated entry point.
address TemplateInterpreterGenerator::generate_abstract_entry(void) {
  unsigned int entry_offset = __ offset();

  // Caller could be the call_stub or a compiled method (x86 version is wrong!).

  BLOCK_COMMENT("abstract_entry {");

  // Implement call of InterpreterRuntime::throw_AbstractMethodError.
  __ set_top_ijava_frame_at_SP_as_last_Java_frame(Z_SP, Z_R1);
  __ save_return_pc();       // Save Z_R14.
  __ push_frame_abi160(0);   // Without new frame the RT call could overwrite the saved Z_R14.

  __ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError), Z_thread);

  // Tear the frame down again and clear the last-Java-frame marker.
  __ pop_frame();
  __ restore_return_pc();    // Restore Z_R14.
  __ reset_last_Java_frame();

  // Restore caller sp for c2i case.
  __ resize_frame_absolute(Z_R10, Z_R0, true); // Cut the stack back to where the caller started.

  // branch to SharedRuntime::generate_forward_exception() which handles all possible callers,
  // i.e. call stub, compiled method, interpreted method.
  __ load_absolute_address(Z_tmp_1, StubRoutines::forward_exception_entry());
  __ z_br(Z_tmp_1);

  BLOCK_COMMENT("} abstract_entry");

  return __ addr_at(entry_offset);
}
480
// Special entry for java.lang.ref.Reference.get(): loads the referent and
// notifies the G1 pre-barrier, bypassing the regular interpreter entry for
// the fast (non-null receiver) path. Returns NULL if no special entry is
// needed (non-G1 collectors), in which case the regular entry is used.
address TemplateInterpreterGenerator::generate_Reference_get_entry(void) {
#if INCLUDE_ALL_GCS
  if (UseG1GC) {
    // Inputs:
    //  Z_ARG1 - receiver
    //
    // What we do:
    //  - Load the referent field address.
    //  - Load the value in the referent field.
    //  - Pass that value to the pre-barrier.
    //
    // In the case of G1 this will record the value of the
    // referent in an SATB buffer if marking is active.
    // This will cause concurrent marking to mark the referent
    // field as live.

    Register  scratch1 = Z_tmp_2;
    Register  scratch2 = Z_tmp_3;
    Register  pre_val  = Z_RET;   // return value
    // Z_esp is callers operand stack pointer, i.e. it points to the parameters.
    Register  Rargp    = Z_esp;

    Label     slow_path;
    address   entry = __ pc();

    const int referent_offset = java_lang_ref_Reference::referent_offset;
    guarantee(referent_offset > 0, "referent offset not initialized");

    BLOCK_COMMENT("Reference_get {");

    //  If the receiver is null then it is OK to jump to the slow path.
    __ load_and_test_long(pre_val, Address(Rargp, Interpreter::stackElementSize)); // Get receiver.
    __ z_bre(slow_path);

    //  Load the value of the referent field.
    __ load_heap_oop(pre_val, referent_offset, pre_val);

    // Restore caller sp for c2i case.
    __ resize_frame_absolute(Z_R10, Z_R0, true); // Cut the stack back to where the caller started.

    // Generate the G1 pre-barrier code to log the value of
    // the referent field in an SATB buffer.
    // Note:
    //   With these parameters the write_barrier_pre does not
    //   generate instructions to load the previous value.
    __ g1_write_barrier_pre(noreg,      // obj
                            noreg,      // offset
                            pre_val,    // pre_val
                            noreg,      // no new val to preserve
                            scratch1,   // tmp
                            scratch2,   // tmp
                            true);      // pre_val_needed

    __ z_br(Z_R14);

    // Branch to previously generated regular method entry.
    __ bind(slow_path);

    address meth_entry = Interpreter::entry_for_kind(Interpreter::zerolocals);
    __ jump_to_entry(meth_entry, Z_R1);

    BLOCK_COMMENT("} Reference_get");

    return entry;
  }
#endif // INCLUDE_ALL_GCS

  // No specialized entry generated: caller falls back to the regular entry.
  return NULL;
}
550
// Entry that throws StackOverflowError via the interpreter runtime.
// Returns the address of the generated entry point.
address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
  address entry = __ pc();

  DEBUG_ONLY(__ verify_esp(Z_esp, Z_ARG5));

  // Restore bcp under the assumption that the current frame is still
  // interpreted.
  __ restore_bcp();

  // Expression stack must be empty before entering the VM if an
  // exception happened.
  __ empty_expression_stack();
  // Throw exception.
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_StackOverflowError));
  return entry;
}
568
//
// Entry that throws ArrayIndexOutOfBoundsException.
//
// Args:
//   name:   type name of the array, passed to the runtime in Z_ARG2
//   Z_ARG3: aberrant index
//
address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler(const char * name) {
  address entry = __ pc();
  address excp = CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ArrayIndexOutOfBoundsException);

  // Expression stack must be empty before entering the VM if an
  // exception happened.
  __ empty_expression_stack();

  // Setup parameters: array type name in Z_ARG2, failing index in Z_ARG3.
  __ load_absolute_address(Z_ARG2, (address) name);
  __ call_VM(noreg, excp, Z_ARG2, Z_ARG3);
  return entry;
}
587
// Entry that throws ClassCastException for the object on top of the stack.
// Returns the address of the generated entry point.
address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
  address entry = __ pc();

  // Object is at TOS; pop it into the runtime-call argument register.
  __ pop_ptr(Z_ARG2);

  // Expression stack must be empty before entering the VM if an
  // exception happened.
  __ empty_expression_stack();

  __ call_VM(Z_ARG1,
             CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ClassCastException),
             Z_ARG2);

  // The runtime call throws; control must not return here.
  DEBUG_ONLY(__ should_not_reach_here();)

  return entry;
}
606
// Common entry that creates and throws an exception.
//   name:     exception class name (external name), or NULL
//   message:  detail message, or NULL (mutually exclusive with pass_oop)
//   pass_oop: if true, the object in Z_tos is passed to create_klass_exception
// Returns the address of the generated entry point.
address TemplateInterpreterGenerator::generate_exception_handler_common(const char* name, const char* message, bool pass_oop) {
  assert(!pass_oop || message == NULL, "either oop or message but not both");
  address entry = __ pc();

  BLOCK_COMMENT("exception_handler_common {");

  // Expression stack must be empty before entering the VM if an
  // exception happened.
  __ empty_expression_stack();
  // Exception class name (or NULL) goes into Z_ARG2.
  if (name != NULL) {
    __ load_absolute_address(Z_ARG2, (address)name);
  } else {
    __ clear_reg(Z_ARG2, true, false);
  }

  if (pass_oop) {
    __ call_VM(Z_tos,
               CAST_FROM_FN_PTR(address, InterpreterRuntime::create_klass_exception),
               Z_ARG2, Z_tos /*object (see TT::aastore())*/);
  } else {
    // Detail message (or NULL) goes into Z_ARG3.
    if (message != NULL) {
      __ load_absolute_address(Z_ARG3, (address)message);
    } else {
      __ clear_reg(Z_ARG3, true, false);
    }
    __ call_VM(Z_tos,
               CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception),
               Z_ARG2, Z_ARG3);
  }
  // Throw exception.
  __ load_absolute_address(Z_R1_scratch, Interpreter::throw_exception_entry());
  __ z_br(Z_R1_scratch);

  BLOCK_COMMENT("} exception_handler_common");

  return entry;
}
644
// Entry executed when returning from a call back into the interpreter:
// restores interpreter state, pops the callee's arguments off the
// expression stack, and dispatches the next bytecode.
//   state:      tos state at return
//   step:       bcp advance to the next bytecode
//   index_size: size of the constant-pool cache index at the call site
address TemplateInterpreterGenerator::generate_return_entry_for (TosState state, int step, size_t index_size) {
  address entry = __ pc();

  BLOCK_COMMENT("return_entry {");

  // Pop i2c extension or revert top-2-parent-resize done by interpreted callees.
  Register sp_before_i2c_extension = Z_bcp;
  __ z_lg(Z_fp, _z_abi(callers_sp), Z_SP); // Restore frame pointer.
  __ z_lg(sp_before_i2c_extension, Address(Z_fp, _z_ijava_state_neg(top_frame_sp)));
  __ resize_frame_absolute(sp_before_i2c_extension, Z_locals/*tmp*/, true/*load_fp*/);

  // TODO(ZASM): necessary??
  //  // and NULL it as marker that esp is now tos until next java call
  //  __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);

  // Restore bcp, locals, and esp from the interpreter frame.
  __ restore_bcp();
  __ restore_locals();
  __ restore_esp();

  if (state == atos) {
    __ profile_return_type(Z_tmp_1, Z_tos, Z_tmp_2);
  }

  Register cache  = Z_tmp_1;
  Register size   = Z_tmp_1;  // Aliases 'cache'; live ranges do not overlap.
  Register offset = Z_tmp_2;
  const int flags_offset = in_bytes(ConstantPoolCache::base_offset() +
                                    ConstantPoolCacheEntry::flags_offset());
  __ get_cache_and_index_at_bcp(cache, offset, 1, index_size);

  // #args is in rightmost byte of the _flags field.
  __ z_llgc(size, Address(cache, offset, flags_offset+(sizeof(size_t)-1)));
  __ z_sllg(size, size, Interpreter::logStackElementSize); // Each argument size in bytes.
  __ z_agr(Z_esp, size);                                   // Pop arguments.

  __ check_and_handle_popframe(Z_thread);
  __ check_and_handle_earlyret(Z_thread);

  __ dispatch_next(state, step);

  BLOCK_COMMENT("} return_entry");

  return entry;
}
689
// Entry executed when re-entering the interpreter after deoptimization:
// restores interpreter state, forwards any pending exception, and
// dispatches the next bytecode.
address TemplateInterpreterGenerator::generate_deopt_entry_for (TosState state,
                                                               int step) {
  address entry = __ pc();

  BLOCK_COMMENT("deopt_entry {");

  // TODO(ZASM): necessary? NULL last_sp until next java call
  // __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
  __ z_lg(Z_fp, _z_abi(callers_sp), Z_SP); // Restore frame pointer.
  __ restore_bcp();
  __ restore_locals();
  __ restore_esp();

  // Handle exceptions.
  {
    Label L;
    // Skip the throw if no exception is pending on the current thread.
    __ load_and_test_long(Z_R0/*pending_exception*/, thread_(pending_exception));
    __ z_bre(L);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address,
                                InterpreterRuntime::throw_pending_exception));
    __ should_not_reach_here();
    __ bind(L);
  }
  __ dispatch_next(state, step);

  BLOCK_COMMENT("} deopt_entry");

  return entry;
}
720
// Entry that calls a runtime routine (e.g. at a safepoint) and then resumes
// normal bytecode dispatch. The tos value is saved on the expression stack
// across the runtime call; dispatch resumes through the vtos table.
address TemplateInterpreterGenerator::generate_safept_entry_for (TosState state,
                                                                address runtime_entry) {
  address entry = __ pc();
  __ push(state);                    // Preserve tos value across the runtime call.
  __ call_VM(noreg, runtime_entry);
  __ dispatch_via(vtos, Interpreter::_normal_table.table_for (vtos));
  return entry;
}
729
730//
731// Helpers for commoning out cases in the various type of method entries.
732//
733
// Increment invocation count & check for overflow.
//
// Note: checking for negative value instead of overflow
// so we have a 'sticky' overflow test.
//
// Z_ARG2: method (see generate_fixed_frame())
//
// Branch targets:
//   overflow:                taken when the invocation count crosses the limit
//   profile_method:          taken (non-tiered, ProfileInterpreter) when the
//                            profile limit is reached and no MDO exists yet
//   profile_method_continue: taken when below the profile limit
void TemplateInterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue) {
  Label done;
  Register method = Z_ARG2; // Generate_fixed_frame() copies Z_method into Z_ARG2.
  Register m_counters = Z_ARG4;

  BLOCK_COMMENT("counter_incr {");

  // Note: In tiered we increment either counters in method or in MDO depending
  // if we are profiling or not.
  if (TieredCompilation) {
    int increment = InvocationCounter::count_increment;
    if (ProfileInterpreter) {
      NearLabel no_mdo;
      Register mdo = m_counters;
      // Are we profiling?
      __ load_and_test_long(mdo, method2_(method, method_data));
      __ branch_optimized(Assembler::bcondZero, no_mdo);
      // Increment counter in the MDO.
      const Address mdo_invocation_counter(mdo, MethodData::invocation_counter_offset() +
                                           InvocationCounter::counter_offset());
      const Address mask(mdo, MethodData::invoke_mask_offset());
      __ increment_mask_and_jump(mdo_invocation_counter, increment, mask,
                                 Z_R1_scratch, false, Assembler::bcondZero,
                                 overflow);
      __ z_bru(done);
      __ bind(no_mdo);
    }

    // Increment counter in MethodCounters.
    const Address invocation_counter(m_counters,
                                     MethodCounters::invocation_counter_offset() +
                                     InvocationCounter::counter_offset());
    // Get address of MethodCounters object.
    __ get_method_counters(method, m_counters, done);
    const Address mask(m_counters, MethodCounters::invoke_mask_offset());
    __ increment_mask_and_jump(invocation_counter,
                               increment, mask,
                               Z_R1_scratch, false, Assembler::bcondZero,
                               overflow);
  } else {
    Register counter_sum = Z_ARG3; // The result of this piece of code.
    Register tmp         = Z_R1_scratch;
#ifdef ASSERT
    // Verify that Z_ARG2 really holds the current method.
    {
      NearLabel ok;
      __ get_method(tmp);
      __ compare64_and_branch(method, tmp, Assembler::bcondEqual, ok);
      __ z_illtrap(0x66);
      __ bind(ok);
    }
#endif

    // Get address of MethodCounters object.
    __ get_method_counters(method, m_counters, done);
    // Update standard invocation counters.
    __ increment_invocation_counter(m_counters, counter_sum);
    if (ProfileInterpreter) {
      __ add2mem_32(Address(m_counters, MethodCounters::interpreter_invocation_counter_offset()), 1, tmp);
      if (profile_method != NULL) {
        const Address profile_limit(m_counters, MethodCounters::interpreter_profile_limit_offset());
        __ z_cl(counter_sum, profile_limit);
        __ branch_optimized(Assembler::bcondLow, *profile_method_continue);
        // If no method data exists, go to profile_method.
        __ test_method_data_pointer(tmp, *profile_method);
      }
    }

    // Check invocation-counter sum against the compile threshold.
    const Address invocation_limit(m_counters, MethodCounters::interpreter_invocation_limit_offset());
    __ z_cl(counter_sum, invocation_limit);
    __ branch_optimized(Assembler::bcondNotLow, *overflow);
  }

  __ bind(done);

  BLOCK_COMMENT("} counter_incr");
}
817
// Generate the slow path taken on invocation-counter overflow: notify the
// runtime (which may trigger compilation) and continue at do_continue.
void TemplateInterpreterGenerator::generate_counter_overflow(Label& do_continue) {
  // InterpreterRuntime::frequency_counter_overflow takes two
  // arguments, the first (thread) is passed by call_VM, the second
  // indicates if the counter overflow occurs at a backwards branch
  // (NULL bcp). We pass zero for it. The call returns the address
  // of the verified entry point for the method or NULL if the
  // compilation did not complete (either went background or bailed
  // out).
  __ clear_reg(Z_ARG2);  // Zero = not at a backwards branch.
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow),
             Z_ARG2);
  __ z_bru(do_continue);
}
832
833void TemplateInterpreterGenerator::generate_stack_overflow_check(Register frame_size, Register tmp1) {
834  Register tmp2 = Z_R1_scratch;
835  const int page_size = os::vm_page_size();
836  NearLabel after_frame_check;
837
838  BLOCK_COMMENT("counter_overflow {");
839
840  assert_different_registers(frame_size, tmp1);
841
842  // Stack banging is sufficient overflow check if frame_size < page_size.
843  if (Immediate::is_uimm(page_size, 15)) {
844    __ z_chi(frame_size, page_size);
845    __ z_brl(after_frame_check);
846  } else {
847    __ load_const_optimized(tmp1, page_size);
848    __ compareU32_and_branch(frame_size, tmp1, Assembler::bcondLow, after_frame_check);
849  }
850
851  // Get the stack base, and in debug, verify it is non-zero.
852  __ z_lg(tmp1, thread_(stack_base));
853#ifdef ASSERT
854  address reentry = NULL;
855  NearLabel base_not_zero;
856  __ compareU64_and_branch(tmp1, (intptr_t)0L, Assembler::bcondNotEqual, base_not_zero);
857  reentry = __ stop_chain_static(reentry, "stack base is zero in generate_stack_overflow_check");
858  __ bind(base_not_zero);
859#endif
860
861  // Get the stack size, and in debug, verify it is non-zero.
862  assert(sizeof(size_t) == sizeof(intptr_t), "wrong load size");
863  __ z_lg(tmp2, thread_(stack_size));
864#ifdef ASSERT
865  NearLabel size_not_zero;
866  __ compareU64_and_branch(tmp2, (intptr_t)0L, Assembler::bcondNotEqual, size_not_zero);
867  reentry = __ stop_chain_static(reentry, "stack size is zero in generate_stack_overflow_check");
868  __ bind(size_not_zero);
869#endif
870
871  // Compute the beginning of the protected zone minus the requested frame size.
872  __ z_sgr(tmp1, tmp2);
873  __ add2reg(tmp1, JavaThread::stack_guard_zone_size());
874
875  // Add in the size of the frame (which is the same as subtracting it from the
876  // SP, which would take another register.
877  __ z_agr(tmp1, frame_size);
878
879  // The frame is greater than one page in size, so check against
880  // the bottom of the stack.
881  __ compareU64_and_branch(Z_SP, tmp1, Assembler::bcondHigh, after_frame_check);
882
883  // The stack will overflow, throw an exception.
884
885  // Restore SP to sender's sp. This is necessary if the sender's frame is an
886  // extended compiled frame (see gen_c2i_adapter()) and safer anyway in case of
887  // JSR292 adaptations.
888  __ resize_frame_absolute(Z_R10, tmp1, true/*load_fp*/);
889
890  // Note also that the restored frame is not necessarily interpreted.
891  // Use the shared runtime version of the StackOverflowError.
892  assert(StubRoutines::throw_StackOverflowError_entry() != NULL, "stub not yet generated");
893  AddressLiteral stub(StubRoutines::throw_StackOverflowError_entry());
894  __ load_absolute_address(tmp1, StubRoutines::throw_StackOverflowError_entry());
895  __ z_br(tmp1);
896
897  // If you get to here, then there is enough stack space.
898  __ bind(after_frame_check);
899
900  BLOCK_COMMENT("} counter_overflow");
901}
902
903// Allocate monitor and lock method (asm interpreter).
904//
905// Args:
906//   Z_locals: locals
907
void TemplateInterpreterGenerator::lock_method(void) {

  BLOCK_COMMENT("lock_method {");

  // Synchronize method.
  const Register method = Z_tmp_2;
  __ get_method(method);

#ifdef ASSERT
  address reentry = NULL;
  {
    Label L;
    // Guard against callers invoking lock_method() for a method that is
    // not declared synchronized.
    __ testbit(method2_(method, access_flags), JVM_ACC_SYNCHRONIZED_BIT);
    __ z_btrue(L);
    reentry = __ stop_chain_static(reentry, "method doesn't need synchronization");
    __ bind(L);
  }
#endif // ASSERT

  // Get synchronization object.
  // Note: 'object' deliberately aliases 'method' (both are Z_tmp_2).
  // 'method' is not referenced again after the sync object has been
  // determined, so overwriting it here is safe.
  const Register object = Z_tmp_2;

  {
    Label     done;
    Label     static_method;

    __ testbit(method2_(method, access_flags), JVM_ACC_STATIC_BIT);
    __ z_btrue(static_method);

    // non-static method: Load receiver obj from stack.
    __ mem2reg_opt(object, Address(Z_locals, Interpreter::local_offset_in_bytes(0)));
    __ z_bru(done);

    __ bind(static_method);

    // Lock the java mirror.
    __ load_mirror(object, method);
#ifdef ASSERT
    {
      NearLabel L;
      __ compare64_and_branch(object, (intptr_t) 0, Assembler::bcondNotEqual, L);
      reentry = __ stop_chain_static(reentry, "synchronization object is NULL");
      __ bind(L);
    }
#endif // ASSERT

    __ bind(done);
  }

  __ add_monitor_to_stack(true, Z_ARG3, Z_ARG4, Z_ARG5); // Allocate monitor elem.
  // Store object and lock it.
  __ get_monitors(Z_tmp_1); // Z_tmp_1 := top monitor in the frame's monitor area.
  __ reg2mem_opt(object, Address(Z_tmp_1, BasicObjectLock::obj_offset_in_bytes()));
  __ lock_object(Z_tmp_1, object);

  BLOCK_COMMENT("} lock_method");
}
965
966// Generate a fixed interpreter frame. This is identical setup for
967// interpreted methods and for native methods hence the shared code.
968//
969// Registers alive
970//   Z_thread   - JavaThread*
971//   Z_SP       - old stack pointer
972//   Z_method   - callee's method
973//   Z_esp      - parameter list (slot 'above' last param)
974//   Z_R14      - return pc, to be stored in caller's frame
975//   Z_R10      - sender sp, note: Z_tmp_1 is Z_R10!
976//
977// Registers updated
978//   Z_SP       - new stack pointer
979//   Z_esp      - callee's operand stack pointer
980//                points to the slot above the value on top
981//   Z_locals   - used to access locals: locals[i] := *(Z_locals - i*BytesPerWord)
982//   Z_bcp      - the bytecode pointer
983//   Z_fp       - the frame pointer, thereby killing Z_method
984//   Z_ARG2     - copy of Z_method
985//
986void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
987
988  //  stack layout
989  //
990  //   F1 [TOP_IJAVA_FRAME_ABI]              <-- Z_SP, Z_R10 (see note below)
991  //      [F1's operand stack (unused)]
992  //      [F1's outgoing Java arguments]     <-- Z_esp
993  //      [F1's operand stack (non args)]
994  //      [monitors]      (optional)
995  //      [IJAVA_STATE]
996  //
997  //   F2 [PARENT_IJAVA_FRAME_ABI]
998  //      ...
999  //
1000  //  0x000
1001  //
1002  // Note: Z_R10, the sender sp, will be below Z_SP if F1 was extended by a c2i adapter.
1003
1004  //=============================================================================
1005  // Allocate space for locals other than the parameters, the
1006  // interpreter state, monitors, and the expression stack.
1007
1008  const Register local_count     = Z_ARG5;
1009  const Register fp              = Z_tmp_2;
1010
1011  BLOCK_COMMENT("generate_fixed_frame {");
1012
1013  {
1014  // local registers
1015  const Register top_frame_size  = Z_ARG2;
1016  const Register sp_after_resize = Z_ARG3;
1017  const Register max_stack       = Z_ARG4;
1018
1019  // local_count = method->constMethod->max_locals();
1020  __ z_lg(Z_R1_scratch, Address(Z_method, Method::const_offset()));
1021  __ z_llgh(local_count, Address(Z_R1_scratch, ConstMethod::size_of_locals_offset()));
1022
1023  if (native_call) {
1024    // If we're calling a native method, we replace max_stack (which is
1025    // zero) with space for the worst-case signature handler varargs
1026    // vector, which is:
1027    //   max_stack = max(Argument::n_register_parameters, parameter_count+2);
1028    //
1029    // We add two slots to the parameter_count, one for the jni
1030    // environment and one for a possible native mirror. We allocate
1031    // space for at least the number of ABI registers, even though
1032    // InterpreterRuntime::slow_signature_handler won't write more than
1033    // parameter_count+2 words when it creates the varargs vector at the
1034    // top of the stack. The generated slow signature handler will just
1035    // load trash into registers beyond the necessary number. We're
1036    // still going to cut the stack back by the ABI register parameter
1037    // count so as to get SP+16 pointing at the ABI outgoing parameter
1038    // area, so we need to allocate at least that much even though we're
1039    // going to throw it away.
1040    //
1041
1042    __ z_lg(Z_R1_scratch, Address(Z_method, Method::const_offset()));
1043    __ z_llgh(max_stack,  Address(Z_R1_scratch, ConstMethod::size_of_parameters_offset()));
1044    __ add2reg(max_stack, 2);
1045
1046    NearLabel passing_args_on_stack;
1047
1048    // max_stack in bytes
1049    __ z_sllg(max_stack, max_stack, LogBytesPerWord);
1050
1051    int argument_registers_in_bytes = Argument::n_register_parameters << LogBytesPerWord;
1052    __ compare64_and_branch(max_stack, argument_registers_in_bytes, Assembler::bcondNotLow, passing_args_on_stack);
1053
1054    __ load_const_optimized(max_stack, argument_registers_in_bytes);
1055
1056    __ bind(passing_args_on_stack);
1057  } else {
1058    // !native_call
1059    __ z_lg(max_stack, method_(const));
1060
1061    // Calculate number of non-parameter locals (in slots):
1062    __ z_lg(Z_R1_scratch, Address(Z_method, Method::const_offset()));
1063    __ z_sh(local_count, Address(Z_R1_scratch, ConstMethod::size_of_parameters_offset()));
1064
1065    // max_stack = method->max_stack();
1066    __ z_llgh(max_stack, Address(max_stack, ConstMethod::max_stack_offset()));
1067    // max_stack in bytes
1068    __ z_sllg(max_stack, max_stack, LogBytesPerWord);
1069  }
1070
1071  // Resize (i.e. normally shrink) the top frame F1 ...
1072  //   F1      [TOP_IJAVA_FRAME_ABI]          <-- Z_SP, Z_R10
1073  //           F1's operand stack (free)
1074  //           ...
1075  //           F1's operand stack (free)      <-- Z_esp
1076  //           F1's outgoing Java arg m
1077  //           ...
1078  //           F1's outgoing Java arg 0
1079  //           ...
1080  //
1081  //  ... into a parent frame (Z_R10 holds F1's SP before any modification, see also above)
1082  //
1083  //           +......................+
1084  //           :                      :        <-- Z_R10, saved below as F0's z_ijava_state.sender_sp
1085  //           :                      :
1086  //   F1      [PARENT_IJAVA_FRAME_ABI]        <-- Z_SP       \
1087  //           F0's non arg local                             | = delta
1088  //           ...                                            |
1089  //           F0's non arg local              <-- Z_esp      /
1090  //           F1's outgoing Java arg m
1091  //           ...
1092  //           F1's outgoing Java arg 0
1093  //           ...
1094  //
1095  // then push the new top frame F0.
1096  //
1097  //   F0      [TOP_IJAVA_FRAME_ABI]    = frame::z_top_ijava_frame_abi_size \
1098  //           [operand stack]          = max_stack                          | = top_frame_size
1099  //           [IJAVA_STATE]            = frame::z_ijava_state_size         /
1100
1101  // sp_after_resize = Z_esp - delta
1102  //
1103  // delta = PARENT_IJAVA_FRAME_ABI + (locals_count - params_count)
1104
1105  __ add2reg(sp_after_resize, (Interpreter::stackElementSize) - (frame::z_parent_ijava_frame_abi_size), Z_esp);
1106  __ z_sllg(Z_R0_scratch, local_count, LogBytesPerWord); // Params have already been subtracted from local_count.
1107  __ z_slgr(sp_after_resize, Z_R0_scratch);
1108
1109  // top_frame_size = TOP_IJAVA_FRAME_ABI + max_stack + size of interpreter state
1110  __ add2reg(top_frame_size,
1111             frame::z_top_ijava_frame_abi_size +
1112             frame::z_ijava_state_size +
1113             frame::interpreter_frame_monitor_size() * wordSize,
1114             max_stack);
1115
1116  if (!native_call) {
1117    // Stack overflow check.
1118    // Native calls don't need the stack size check since they have no
1119    // expression stack and the arguments are already on the stack and
1120    // we only add a handful of words to the stack.
1121    Register frame_size = max_stack; // Reuse the regiser for max_stack.
1122    __ z_lgr(frame_size, Z_SP);
1123    __ z_sgr(frame_size, sp_after_resize);
1124    __ z_agr(frame_size, top_frame_size);
1125    generate_stack_overflow_check(frame_size, fp/*tmp1*/);
1126  }
1127
1128  DEBUG_ONLY(__ z_cg(Z_R14, _z_abi16(return_pc), Z_SP));
1129  __ asm_assert_eq("killed Z_R14", 0);
1130  __ resize_frame_absolute(sp_after_resize, fp, true);
1131  __ save_return_pc(Z_R14);
1132
1133  // ... and push the new frame F0.
1134  __ push_frame(top_frame_size, fp, true /*copy_sp*/, false);
1135  }
1136
1137  //=============================================================================
1138  // Initialize the new frame F0: initialize interpreter state.
1139
1140  {
1141  // locals
1142  const Register local_addr = Z_ARG4;
1143
1144  BLOCK_COMMENT("generate_fixed_frame: initialize interpreter state {");
1145
1146#ifdef ASSERT
1147  // Set the magic number (using local_addr as tmp register).
1148  __ load_const_optimized(local_addr, frame::z_istate_magic_number);
1149  __ z_stg(local_addr, _z_ijava_state_neg(magic), fp);
1150#endif
1151
1152  // Save sender SP from F1 (i.e. before it was potentially modified by an
1153  // adapter) into F0's interpreter state. We us it as well to revert
1154  // resizing the frame above.
1155  __ z_stg(Z_R10, _z_ijava_state_neg(sender_sp), fp);
1156
1157  // Load cp cache and save it at the and of this block.
1158  __ z_lg(Z_R1_scratch, Address(Z_method,    Method::const_offset()));
1159  __ z_lg(Z_R1_scratch, Address(Z_R1_scratch, ConstMethod::constants_offset()));
1160  __ z_lg(Z_R1_scratch, Address(Z_R1_scratch, ConstantPool::cache_offset_in_bytes()));
1161
1162  // z_ijava_state->method = method;
1163  __ z_stg(Z_method, _z_ijava_state_neg(method), fp);
1164
1165  // Point locals at the first argument. Method's locals are the
1166  // parameters on top of caller's expression stack.
1167  // Tos points past last Java argument.
1168
1169  __ z_lg(Z_locals, Address(Z_method, Method::const_offset()));
1170  __ z_llgh(Z_locals /*parameter_count words*/,
1171            Address(Z_locals, ConstMethod::size_of_parameters_offset()));
1172  __ z_sllg(Z_locals /*parameter_count bytes*/, Z_locals /*parameter_count*/, LogBytesPerWord);
1173  __ z_agr(Z_locals, Z_esp);
1174  // z_ijava_state->locals - i*BytesPerWord points to i-th Java local (i starts at 0)
1175  // z_ijava_state->locals = Z_esp + parameter_count bytes
1176  __ z_stg(Z_locals, _z_ijava_state_neg(locals), fp);
1177
1178  // z_ijava_state->oop_temp = NULL;
1179  __ store_const(Address(fp, oop_tmp_offset), 0);
1180
1181  // Initialize z_ijava_state->mdx.
1182  Register Rmdp = Z_bcp;
1183  // native_call: assert that mdo == NULL
1184  const bool check_for_mdo = !native_call DEBUG_ONLY(|| native_call);
1185  if (ProfileInterpreter && check_for_mdo) {
1186#ifdef FAST_DISPATCH
1187    // FAST_DISPATCH and ProfileInterpreter are mutually exclusive since
1188    // they both use I2.
1189    assert(0, "FAST_DISPATCH and +ProfileInterpreter are mutually exclusive");
1190#endif // FAST_DISPATCH
1191    Label get_continue;
1192
1193    __ load_and_test_long(Rmdp, method_(method_data));
1194    __ z_brz(get_continue);
1195    DEBUG_ONLY(if (native_call) __ stop("native methods don't have a mdo"));
1196    __ add2reg(Rmdp, in_bytes(MethodData::data_offset()));
1197    __ bind(get_continue);
1198  }
1199  __ z_stg(Rmdp, _z_ijava_state_neg(mdx), fp);
1200
1201  // Initialize z_ijava_state->bcp and Z_bcp.
1202  if (native_call) {
1203    __ clear_reg(Z_bcp); // Must initialize. Will get written into frame where GC reads it.
1204  } else {
1205    __ z_lg(Z_bcp, method_(const));
1206    __ add2reg(Z_bcp, in_bytes(ConstMethod::codes_offset()));
1207  }
1208  __ z_stg(Z_bcp, _z_ijava_state_neg(bcp), fp);
1209
1210  // no monitors and empty operand stack
1211  // => z_ijava_state->monitors points to the top slot in IJAVA_STATE.
1212  // => Z_ijava_state->esp points one slot above into the operand stack.
1213  // z_ijava_state->monitors = fp - frame::z_ijava_state_size - Interpreter::stackElementSize;
1214  // z_ijava_state->esp = Z_esp = z_ijava_state->monitors;
1215  __ add2reg(Z_esp, -frame::z_ijava_state_size, fp);
1216  __ z_stg(Z_esp, _z_ijava_state_neg(monitors), fp);
1217  __ add2reg(Z_esp, -Interpreter::stackElementSize);
1218  __ z_stg(Z_esp, _z_ijava_state_neg(esp), fp);
1219
1220  // z_ijava_state->cpoolCache = Z_R1_scratch (see load above);
1221  __ z_stg(Z_R1_scratch, _z_ijava_state_neg(cpoolCache), fp);
1222
1223  // Get mirror and store it in the frame as GC root for this Method*.
1224  __ load_mirror(Z_R1_scratch, Z_method);
1225  __ z_stg(Z_R1_scratch, _z_ijava_state_neg(mirror), fp);
1226
1227  BLOCK_COMMENT("} generate_fixed_frame: initialize interpreter state");
1228
1229  //=============================================================================
1230  if (!native_call) {
1231    // Fill locals with 0x0s.
1232    NearLabel locals_zeroed;
1233    NearLabel doXC;
1234
1235    // Local_count is already num_locals_slots - num_param_slots.
1236    __ compare64_and_branch(local_count, (intptr_t)0L, Assembler::bcondNotHigh, locals_zeroed);
1237
1238    // Advance local_addr to point behind locals (creates positive incr. in loop).
1239    __ z_lg(Z_R1_scratch, Address(Z_method, Method::const_offset()));
1240    __ z_llgh(Z_R0_scratch,
1241              Address(Z_R1_scratch, ConstMethod::size_of_locals_offset()));
1242    if (Z_R0_scratch == Z_R0) {
1243      __ z_aghi(Z_R0_scratch, -1);
1244    } else {
1245      __ add2reg(Z_R0_scratch, -1);
1246    }
1247    __ z_lgr(local_addr/*locals*/, Z_locals);
1248    __ z_sllg(Z_R0_scratch, Z_R0_scratch, LogBytesPerWord);
1249    __ z_sllg(local_count, local_count, LogBytesPerWord); // Local_count are non param locals.
1250    __ z_sgr(local_addr, Z_R0_scratch);
1251
1252    if (VM_Version::has_Prefetch()) {
1253      __ z_pfd(0x02, 0, Z_R0, local_addr);
1254      __ z_pfd(0x02, 256, Z_R0, local_addr);
1255    }
1256
1257    // Can't optimise for Z10 using "compare and branch" (immediate value is too big).
1258    __ z_cghi(local_count, 256);
1259    __ z_brnh(doXC);
1260
1261    // MVCLE: Initialize if quite a lot locals.
1262    //  __ bind(doMVCLE);
1263    __ z_lgr(Z_R0_scratch, local_addr);
1264    __ z_lgr(Z_R1_scratch, local_count);
1265    __ clear_reg(Z_ARG2);        // Src len of MVCLE is zero.
1266
1267    __ MacroAssembler::move_long_ext(Z_R0_scratch, Z_ARG1, 0);
1268    __ z_bru(locals_zeroed);
1269
1270    Label  XC_template;
1271    __ bind(XC_template);
1272    __ z_xc(0, 0, local_addr, 0, local_addr);
1273
1274    __ bind(doXC);
1275    __ z_bctgr(local_count, Z_R0);                  // Get #bytes-1 for EXECUTE.
1276    if (VM_Version::has_ExecuteExtensions()) {
1277      __ z_exrl(local_count, XC_template);          // Execute XC with variable length.
1278    } else {
1279      __ z_larl(Z_R1_scratch, XC_template);
1280      __ z_ex(local_count, 0, Z_R0, Z_R1_scratch);  // Execute XC with variable length.
1281    }
1282
1283    __ bind(locals_zeroed);
1284  }
1285
1286  }
1287  // Finally set the frame pointer, destroying Z_method.
1288  assert(Z_fp == Z_method, "maybe set Z_fp earlier if other register than Z_method");
1289  // Oprofile analysis suggests to keep a copy in a register to be used by
1290  // generate_counter_incr().
1291  __ z_lgr(Z_ARG2, Z_method);
1292  __ z_lgr(Z_fp, fp);
1293
1294  BLOCK_COMMENT("} generate_fixed_frame");
1295}
1296
1297// Various method entries
1298
1299// Math function, frame manager must set up an interpreter state, etc.
address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKind kind) {
  // Special-cased entry for java.lang.Math intrinsics: either emit the
  // s390 instruction inline or call the shared runtime implementation.
  // Returns NULL when neither applies, so the caller falls back to the
  // normal interpreter entry.

  // Decide what to do: Use same platform specific instructions and runtime calls as compilers.
  bool use_instruction = false;
  address runtime_entry = NULL;
  int num_args = 1;
  bool double_precision = true;

  // s390 specific:
  switch (kind) {
    case Interpreter::java_lang_math_sqrt:
    case Interpreter::java_lang_math_abs:  use_instruction = true; break;
    case Interpreter::java_lang_math_fmaF:
    case Interpreter::java_lang_math_fmaD: use_instruction = UseFMA; break;
    default: break; // Fall back to runtime call.
  }

  switch (kind) {
    case Interpreter::java_lang_math_sin  : runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dsin);   break;
    case Interpreter::java_lang_math_cos  : runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dcos);   break;
    case Interpreter::java_lang_math_tan  : runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dtan);   break;
    case Interpreter::java_lang_math_abs  : /* run interpreted */ break;
    case Interpreter::java_lang_math_sqrt : /* runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dsqrt); not available */ break;
    case Interpreter::java_lang_math_log  : runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dlog);   break;
    case Interpreter::java_lang_math_log10: runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dlog10); break;
    case Interpreter::java_lang_math_pow  : runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dpow); num_args = 2; break;
    case Interpreter::java_lang_math_exp  : runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dexp);   break;
    case Interpreter::java_lang_math_fmaF : /* run interpreted */ num_args = 3; double_precision = false; break;
    case Interpreter::java_lang_math_fmaD : /* run interpreted */ num_args = 3; break;
    default: ShouldNotReachHere();
  }

  // Use normal entry if neither instruction nor runtime call is used.
  if (!use_instruction && runtime_entry == NULL) return NULL;

  address entry = __ pc();

  if (use_instruction) {
    switch (kind) {
      case Interpreter::java_lang_math_sqrt:
        // Can use memory operand directly.
        __ z_sqdb(Z_FRET, Interpreter::stackElementSize, Z_esp);
        break;
      case Interpreter::java_lang_math_abs:
        // Load operand from stack.
        __ mem2freg_opt(Z_FRET, Address(Z_esp, Interpreter::stackElementSize));
        __ z_lpdbr(Z_FRET);
        break;
      case Interpreter::java_lang_math_fmaF:
        __ mem2freg_opt(Z_FRET,  Address(Z_esp,     Interpreter::stackElementSize)); // result reg = arg3
        __ mem2freg_opt(Z_FARG2, Address(Z_esp, 3 * Interpreter::stackElementSize)); // arg1
        __ z_maeb(Z_FRET, Z_FARG2, Address(Z_esp, 2 * Interpreter::stackElementSize));
        break;
      case Interpreter::java_lang_math_fmaD:
        __ mem2freg_opt(Z_FRET,  Address(Z_esp,     Interpreter::stackElementSize)); // result reg = arg3
        __ mem2freg_opt(Z_FARG2, Address(Z_esp, 5 * Interpreter::stackElementSize)); // arg1
        __ z_madb(Z_FRET, Z_FARG2, Address(Z_esp, 3 * Interpreter::stackElementSize));
        break;
      default: ShouldNotReachHere();
    }
  } else {
    // Load arguments
    assert(num_args <= 4, "passed in registers");
    if (double_precision) {
      // Doubles occupy two expression-stack slots each; the first arg is the
      // deepest one. FP arg registers are selected with a stride of 2 in the
      // encoding (Z_FARG1->encoding() + 2 * i).
      int offset = (2 * num_args - 1) * Interpreter::stackElementSize;
      for (int i = 0; i < num_args; ++i) {
        __ mem2freg_opt(as_FloatRegister(Z_FARG1->encoding() + 2 * i), Address(Z_esp, offset));
        offset -= 2 * Interpreter::stackElementSize;
      }
    } else {
      // Single-precision args occupy one slot each.
      int offset = num_args * Interpreter::stackElementSize;
      for (int i = 0; i < num_args; ++i) {
        __ mem2freg_opt(as_FloatRegister(Z_FARG1->encoding() + 2 * i), Address(Z_esp, offset));
        offset -= Interpreter::stackElementSize;
      }
    }
    // Call runtime
    __ save_return_pc();       // Save Z_R14.
    __ push_frame_abi160(0);   // Without new frame the RT call could overwrite the saved Z_R14.

    __ call_VM_leaf(runtime_entry);

    __ pop_frame();
    __ restore_return_pc();    // Restore Z_R14.
  }

  // Pop c2i arguments (if any) off when we return.
  __ resize_frame_absolute(Z_R10, Z_R0, true); // Cut the stack back to where the caller started.

  __ z_br(Z_R14);

  return entry;
}
1393
1394// Interpreter stub for calling a native method. (asm interpreter).
1395// This sets up a somewhat different looking stack for calling the
1396// native method than the typical interpreter frame setup.
1397address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
1398  // Determine code generation flags.
1399  bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods;
1400
1401  // Interpreter entry for ordinary Java methods.
1402  //
1403  // Registers alive
1404  //   Z_SP          - stack pointer
1405  //   Z_thread      - JavaThread*
1406  //   Z_method      - callee's method (method to be invoked)
1407  //   Z_esp         - operand (or expression) stack pointer of caller. one slot above last arg.
1408  //   Z_R10         - sender sp (before modifications, e.g. by c2i adapter
1409  //                   and as well by generate_fixed_frame below)
1410  //   Z_R14         - return address to caller (call_stub or c2i_adapter)
1411  //
1412  // Registers updated
1413  //   Z_SP          - stack pointer
1414  //   Z_fp          - callee's framepointer
1415  //   Z_esp         - callee's operand stack pointer
1416  //                   points to the slot above the value on top
1417  //   Z_locals      - used to access locals: locals[i] := *(Z_locals - i*BytesPerWord)
1418  //   Z_tos         - integer result, if any
1419  //   z_ftos        - floating point result, if any
1420  //
1421  // Stack layout at this point:
1422  //
1423  //   F1      [TOP_IJAVA_FRAME_ABI]         <-- Z_SP, Z_R10 (Z_R10 will be below Z_SP if
1424  //                                                          frame was extended by c2i adapter)
1425  //           [outgoing Java arguments]     <-- Z_esp
1426  //           ...
1427  //   PARENT  [PARENT_IJAVA_FRAME_ABI]
1428  //           ...
1429  //
1430
1431  address entry_point = __ pc();
1432
1433  // Make sure registers are different!
1434  assert_different_registers(Z_thread, Z_method, Z_esp);
1435
1436  BLOCK_COMMENT("native_entry {");
1437
1438  // Make sure method is native and not abstract.
1439#ifdef ASSERT
1440  address reentry = NULL;
1441  { Label L;
1442    __ testbit(method_(access_flags), JVM_ACC_NATIVE_BIT);
1443    __ z_btrue(L);
1444    reentry = __ stop_chain_static(reentry, "tried to execute non-native method as native");
1445    __ bind(L);
1446  }
1447  { Label L;
1448    __ testbit(method_(access_flags), JVM_ACC_ABSTRACT_BIT);
1449    __ z_bfalse(L);
1450    reentry = __ stop_chain_static(reentry, "tried to execute abstract method as non-abstract");
1451    __ bind(L);
1452  }
1453#endif // ASSERT
1454
1455#ifdef ASSERT
1456  // Save the return PC into the callers frame for assertion in generate_fixed_frame.
1457  __ save_return_pc(Z_R14);
1458#endif
1459
1460  // Generate the code to allocate the interpreter stack frame.
1461  generate_fixed_frame(true);
1462
1463  const Address do_not_unlock_if_synchronized(Z_thread, JavaThread::do_not_unlock_if_synchronized_offset());
1464  // Since at this point in the method invocation the exception handler
1465  // would try to exit the monitor of synchronized methods which hasn't
1466  // been entered yet, we set the thread local variable
1467  // _do_not_unlock_if_synchronized to true. If any exception was thrown by
1468  // runtime, exception handling i.e. unlock_if_synchronized_method will
1469  // check this thread local flag.
1470  __ z_mvi(do_not_unlock_if_synchronized, true);
1471
1472  // Increment invocation count and check for overflow.
1473  NearLabel invocation_counter_overflow;
1474  if (inc_counter) {
1475    generate_counter_incr(&invocation_counter_overflow, NULL, NULL);
1476  }
1477
1478  Label continue_after_compile;
1479  __ bind(continue_after_compile);
1480
1481  bang_stack_shadow_pages(true);
1482
1483  // Reset the _do_not_unlock_if_synchronized flag.
1484  __ z_mvi(do_not_unlock_if_synchronized, false);
1485
1486  // Check for synchronized methods.
  // This must happen AFTER invocation_counter check and stack overflow check,
1488  // so method is not locked if overflows.
1489  if (synchronized) {
1490    lock_method();
1491  } else {
1492    // No synchronization necessary.
1493#ifdef ASSERT
1494    { Label L;
1495      __ get_method(Z_R1_scratch);
1496      __ testbit(method2_(Z_R1_scratch, access_flags), JVM_ACC_SYNCHRONIZED_BIT);
1497      __ z_bfalse(L);
1498      reentry = __ stop_chain_static(reentry, "method needs synchronization");
1499      __ bind(L);
1500    }
1501#endif // ASSERT
1502  }
1503
1504  // start execution
1505
1506  // jvmti support
1507  __ notify_method_entry();
1508
1509  //=============================================================================
1510  // Get and call the signature handler.
1511  const Register Rmethod                 = Z_tmp_2;
1512  const Register signature_handler_entry = Z_tmp_1;
1513  const Register Rresult_handler         = Z_tmp_3;
1514  Label call_signature_handler;
1515
1516  assert_different_registers(Z_fp, Rmethod, signature_handler_entry, Rresult_handler);
1517  assert(Rresult_handler->is_nonvolatile(), "Rresult_handler must be in a non-volatile register");
1518
1519  // Reload method.
1520  __ get_method(Rmethod);
1521
1522  // Check for signature handler.
1523  __ load_and_test_long(signature_handler_entry, method2_(Rmethod, signature_handler));
1524  __ z_brne(call_signature_handler);
1525
1526  // Method has never been called. Either generate a specialized
1527  // handler or point to the slow one.
1528  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call),
1529             Rmethod);
1530
1531  // Reload method.
1532  __ get_method(Rmethod);
1533
1534  // Reload signature handler, it must have been created/assigned in the meantime.
1535  __ z_lg(signature_handler_entry, method2_(Rmethod, signature_handler));
1536
1537  __ bind(call_signature_handler);
1538
1539  // We have a TOP_IJAVA_FRAME here, which belongs to us.
1540  __ set_top_ijava_frame_at_SP_as_last_Java_frame(Z_SP, Z_R1/*tmp*/);
1541
1542  // Call signature handler and pass locals address in Z_ARG1.
1543  __ z_lgr(Z_ARG1, Z_locals);
1544  __ call_stub(signature_handler_entry);
1545  // Save result handler returned by signature handler.
1546  __ z_lgr(Rresult_handler, Z_RET);
1547
1548  // Reload method (the slow signature handler may block for GC).
1549  __ get_method(Rmethod);
1550
1551  // Pass mirror handle if static call.
1552  {
1553    Label method_is_not_static;
1554    __ testbit(method2_(Rmethod, access_flags), JVM_ACC_STATIC_BIT);
1555    __ z_bfalse(method_is_not_static);
1556    // Get mirror.
1557    __ load_mirror(Z_R1, Rmethod);
1558    // z_ijava_state.oop_temp = pool_holder->klass_part()->java_mirror();
1559    __ z_stg(Z_R1, oop_tmp_offset, Z_fp);
1560    // Pass handle to mirror as 2nd argument to JNI method.
1561    __ add2reg(Z_ARG2, oop_tmp_offset, Z_fp);
1562    __ bind(method_is_not_static);
1563  }
1564
1565  // Pass JNIEnv address as first parameter.
1566  __ add2reg(Z_ARG1, in_bytes(JavaThread::jni_environment_offset()), Z_thread);
1567
1568  // Note: last java frame has been set above already. The pc from there
1569  // is precise enough.
1570
1571  // Get native function entry point before we change the thread state.
1572  __ z_lg(Z_R1/*native_method_entry*/, method2_(Rmethod, native_function));
1573
1574  //=============================================================================
1575  // Transition from _thread_in_Java to _thread_in_native. As soon as
1576  // we make this change the safepoint code needs to be certain that
1577  // the last Java frame we established is good. The pc in that frame
1578  // just need to be near here not an actual return address.
1579#ifdef ASSERT
1580  {
1581    NearLabel L;
1582    __ mem2reg_opt(Z_R14, Address(Z_thread, JavaThread::thread_state_offset()), false /*32 bits*/);
1583    __ compareU32_and_branch(Z_R14, _thread_in_Java, Assembler::bcondEqual, L);
1584    reentry = __ stop_chain_static(reentry, "Wrong thread state in native stub");
1585    __ bind(L);
1586  }
1587#endif
1588
1589  // Memory ordering: Z does not reorder store/load with subsequent load. That's strong enough.
1590  __ set_thread_state(_thread_in_native);
1591
1592  //=============================================================================
1593  // Call the native method. Argument registers must not have been
1594  // overwritten since "__ call_stub(signature_handler);" (except for
1595  // ARG1 and ARG2 for static methods).
1596
1597  __ call_c(Z_R1/*native_method_entry*/);
1598
1599  // NOTE: frame::interpreter_frame_result() depends on these stores.
1600  __ z_stg(Z_RET, _z_ijava_state_neg(lresult), Z_fp);
1601  __ freg2mem_opt(Z_FRET, Address(Z_fp, _z_ijava_state_neg(fresult)));
1602  const Register Rlresult = signature_handler_entry;
1603  assert(Rlresult->is_nonvolatile(), "Rlresult must be in a non-volatile register");
1604  __ z_lgr(Rlresult, Z_RET);
1605
1606  // Z_method may no longer be valid, because of GC.
1607
1608  // Block, if necessary, before resuming in _thread_in_Java state.
1609  // In order for GC to work, don't clear the last_Java_sp until after
1610  // blocking.
1611
1612  //=============================================================================
1613  // Switch thread to "native transition" state before reading the
1614  // synchronization state. This additional state is necessary
1615  // because reading and testing the synchronization state is not
1616  // atomic w.r.t. GC, as this scenario demonstrates: Java thread A,
1617  // in _thread_in_native state, loads _not_synchronized and is
1618  // preempted. VM thread changes sync state to synchronizing and
1619  // suspends threads for GC. Thread A is resumed to finish this
1620  // native method, but doesn't block here since it didn't see any
  // synchronization in progress, and escapes.
1622
1623  __ set_thread_state(_thread_in_native_trans);
1624  if (UseMembar) {
1625    __ z_fence();
1626  } else {
1627    // Write serialization page so VM thread can do a pseudo remote
1628    // membar. We use the current thread pointer to calculate a thread
1629    // specific offset to write to within the page. This minimizes bus
1630    // traffic due to cache line collision.
1631    __ serialize_memory(Z_thread, Z_R1, Z_R0);
1632  }
1633  // Now before we return to java we must look for a current safepoint
1634  // (a new safepoint can not start since we entered native_trans).
1635  // We must check here because a current safepoint could be modifying
1636  // the callers registers right this moment.
1637
1638  // Check for safepoint operation in progress and/or pending suspend requests.
1639  {
1640    Label Continue, do_safepoint;
1641    __ generate_safepoint_check(do_safepoint, Z_R1, true);
1642    // Check for suspend.
1643    __ load_and_test_int(Z_R0/*suspend_flags*/, thread_(suspend_flags));
1644    __ z_bre(Continue); // 0 -> no flag set -> not suspended
1645    __ bind(do_safepoint);
1646    __ z_lgr(Z_ARG1, Z_thread);
1647    __ call_c(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans));
1648    __ bind(Continue);
1649  }
1650
1651  //=============================================================================
1652  // Back in Interpreter Frame.
1653
1654  // We are in thread_in_native_trans here and back in the normal
1655  // interpreter frame. We don't have to do anything special about
1656  // safepoints and we can switch to Java mode anytime we are ready.
1657
1658  // Note: frame::interpreter_frame_result has a dependency on how the
1659  // method result is saved across the call to post_method_exit. For
1660  // native methods it assumes that the non-FPU/non-void result is
1661  // saved in z_ijava_state.lresult and a FPU result in z_ijava_state.fresult. If
1662  // this changes then the interpreter_frame_result implementation
1663  // will need to be updated too.
1664
1665  //=============================================================================
1666  // Back in Java.
1667
1668  // Memory ordering: Z does not reorder store/load with subsequent
1669  // load. That's strong enough.
1670  __ set_thread_state(_thread_in_Java);
1671
1672  __ reset_last_Java_frame();
1673
1674  // We reset the JNI handle block only after unboxing the result; see below.
1675
1676  // The method register is junk from after the thread_in_native transition
1677  // until here. Also can't call_VM until the bcp has been
1678  // restored. Need bcp for throwing exception below so get it now.
1679  __ get_method(Rmethod);
1680
1681  // Restore Z_bcp to have legal interpreter frame,
1682  // i.e., bci == 0 <=> Z_bcp == code_base().
1683  __ z_lg(Z_bcp, Address(Rmethod, Method::const_offset())); // get constMethod
1684  __ add2reg(Z_bcp, in_bytes(ConstMethod::codes_offset())); // get codebase
1685
1686  if (CheckJNICalls) {
1687    // clear_pending_jni_exception_check
1688    __ clear_mem(Address(Z_thread, JavaThread::pending_jni_exception_check_fn_offset()), sizeof(oop));
1689  }
1690
1691  // Check if the native method returns an oop, and if so, move it
1692  // from the jni handle to z_ijava_state.oop_temp. This is
1693  // necessary, because we reset the jni handle block below.
1694  // NOTE: frame::interpreter_frame_result() depends on this, too.
1695  { NearLabel no_oop_result;
1696  __ load_absolute_address(Z_R1, AbstractInterpreter::result_handler(T_OBJECT));
1697  __ compareU64_and_branch(Z_R1, Rresult_handler, Assembler::bcondNotEqual, no_oop_result);
1698  __ resolve_jobject(Rlresult, /* tmp1 */ Rmethod, /* tmp2 */ Z_R1);
1699  __ z_stg(Rlresult, oop_tmp_offset, Z_fp);
1700  __ bind(no_oop_result);
1701  }
1702
1703  // Reset handle block.
1704  __ z_lg(Z_R1/*active_handles*/, thread_(active_handles));
1705  __ clear_mem(Address(Z_R1, JNIHandleBlock::top_offset_in_bytes()), 4);
1706
  // Handle exceptions (exception handling will handle unlocking!).
1708  {
1709    Label L;
1710    __ load_and_test_long(Z_R0/*pending_exception*/, thread_(pending_exception));
1711    __ z_bre(L);
1712    __ MacroAssembler::call_VM(noreg,
1713                               CAST_FROM_FN_PTR(address,
1714                               InterpreterRuntime::throw_pending_exception));
1715    __ should_not_reach_here();
1716    __ bind(L);
1717  }
1718
1719  if (synchronized) {
1720    Register Rfirst_monitor = Z_ARG2;
1721    __ add2reg(Rfirst_monitor, -(frame::z_ijava_state_size + (int)sizeof(BasicObjectLock)), Z_fp);
1722#ifdef ASSERT
1723    NearLabel ok;
1724    __ z_lg(Z_R1, _z_ijava_state_neg(monitors), Z_fp);
1725    __ compareU64_and_branch(Rfirst_monitor, Z_R1, Assembler::bcondEqual, ok);
1726    reentry = __ stop_chain_static(reentry, "native_entry:unlock: inconsistent z_ijava_state.monitors");
1727    __ bind(ok);
1728#endif
1729    __ unlock_object(Rfirst_monitor);
1730  }
1731
1732  // JVMTI support. Result has already been saved above to the frame.
1733  __ notify_method_exit(true/*native_method*/, ilgl, InterpreterMacroAssembler::NotifyJVMTI);
1734
1735  // Move native method result back into proper registers and return.
1736  // C++ interpreter does not use result handler. So do we need to here? TODO(ZASM): check if correct.
1737  { NearLabel no_oop_or_null;
1738  __ mem2freg_opt(Z_FRET, Address(Z_fp, _z_ijava_state_neg(fresult)));
1739  __ load_and_test_long(Z_RET, Address(Z_fp, _z_ijava_state_neg(lresult)));
1740  __ z_bre(no_oop_or_null); // No unboxing if the result is NULL.
1741  __ load_absolute_address(Z_R1, AbstractInterpreter::result_handler(T_OBJECT));
1742  __ compareU64_and_branch(Z_R1, Rresult_handler, Assembler::bcondNotEqual, no_oop_or_null);
1743  __ z_lg(Z_RET, oop_tmp_offset, Z_fp);
1744  __ verify_oop(Z_RET);
1745  __ bind(no_oop_or_null);
1746  }
1747
1748  // Pop the native method's interpreter frame.
1749  __ pop_interpreter_frame(Z_R14 /*return_pc*/, Z_ARG2/*tmp1*/, Z_ARG3/*tmp2*/);
1750
1751  // Return to caller.
1752  __ z_br(Z_R14);
1753
1754  if (inc_counter) {
1755    // Handle overflow of counter and compile method.
1756    __ bind(invocation_counter_overflow);
1757    generate_counter_overflow(continue_after_compile);
1758  }
1759
1760  BLOCK_COMMENT("} native_entry");
1761
1762  return entry_point;
1763}
1764
1765//
1766// Generic interpreted method entry to template interpreter.
1767//
address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
  address entry_point = __ pc();

  // Maintain invocation counters whenever a JIT compiler may use them
  // or counting/touch logging was requested on the command line.
  bool inc_counter = UseCompiler || CountCompiledCalls || LogTouchedMethods;

  // Interpreter entry for ordinary Java methods.
  //
  // Registers alive
  //   Z_SP       - stack pointer
  //   Z_thread   - JavaThread*
  //   Z_method   - callee's method (method to be invoked)
  //   Z_esp      - operand (or expression) stack pointer of caller. one slot above last arg.
  //   Z_R10      - sender sp (before modifications, e.g. by c2i adapter
  //                           and as well by generate_fixed_frame below)
  //   Z_R14      - return address to caller (call_stub or c2i_adapter)
  //
  // Registers updated
  //   Z_SP       - stack pointer
  //   Z_fp       - callee's framepointer
  //   Z_esp      - callee's operand stack pointer
  //                points to the slot above the value on top
  //   Z_locals   - used to access locals: locals[i] := *(Z_locals - i*BytesPerWord)
  //   Z_tos      - integer result, if any
  //   z_ftos     - floating point result, if any
  //
  //
  // stack layout at this point:
  //
  //   F1      [TOP_IJAVA_FRAME_ABI]         <-- Z_SP, Z_R10 (Z_R10 will be below Z_SP if
  //                                                          frame was extended by c2i adapter)
  //           [outgoing Java arguments]     <-- Z_esp
  //           ...
  //   PARENT  [PARENT_IJAVA_FRAME_ABI]
  //           ...
  //
  // stack layout before dispatching the first bytecode:
  //
  //   F0      [TOP_IJAVA_FRAME_ABI]         <-- Z_SP
  //           [operand stack]               <-- Z_esp
  //           monitor (optional, can grow)
  //           [IJAVA_STATE]
  //   F1      [PARENT_IJAVA_FRAME_ABI]      <-- Z_fp (== *Z_SP)
  //           [F0's locals]                 <-- Z_locals
  //           [F1's operand stack]
  //           [F1's monitors] (optional)
  //           [IJAVA_STATE]

  // Make sure registers are different!
  assert_different_registers(Z_thread, Z_method, Z_esp);

  BLOCK_COMMENT("normal_entry {");

  // Make sure method is not native and not abstract.
  // Rethink these assertions - they can be simplified and shared.
#ifdef ASSERT
  address reentry = NULL;
  { Label L;
    __ testbit(method_(access_flags), JVM_ACC_NATIVE_BIT);
    __ z_bfalse(L);
    reentry = __ stop_chain_static(reentry, "tried to execute native method as non-native");
    __ bind(L);
  }
  { Label L;
    __ testbit(method_(access_flags), JVM_ACC_ABSTRACT_BIT);
    __ z_bfalse(L);
    reentry = __ stop_chain_static(reentry, "tried to execute abstract method as non-abstract");
    __ bind(L);
  }
#endif // ASSERT

#ifdef ASSERT
  // Save the return PC into the callers frame for assertion in generate_fixed_frame.
  __ save_return_pc(Z_R14);
#endif

  // Generate the code to allocate the interpreter stack frame.
  generate_fixed_frame(false);

  const Address do_not_unlock_if_synchronized(Z_thread, JavaThread::do_not_unlock_if_synchronized_offset());
  // Since at this point in the method invocation the exception handler
  // would try to exit the monitor of synchronized methods which hasn't
  // been entered yet, we set the thread local variable
  // _do_not_unlock_if_synchronized to true. If any exception was thrown by
  // runtime, exception handling i.e. unlock_if_synchronized_method will
  // check this thread local flag.
  __ z_mvi(do_not_unlock_if_synchronized, true);

  // Collect argument profiling data, if the interpreter profiles. Clobbers the
  // three passed registers as temps.
  __ profile_parameters_type(Z_tmp_2, Z_ARG3, Z_ARG4);

  // Increment invocation counter and check for overflow.
  //
  // Note: checking for negative value instead of overflow so we have a 'sticky'
  // overflow test (may be of importance as soon as we have true MT/MP).
  NearLabel invocation_counter_overflow;
  NearLabel profile_method;
  NearLabel profile_method_continue;
  NearLabel Lcontinue;
  if (inc_counter) {
    generate_counter_incr(&invocation_counter_overflow, &profile_method, &profile_method_continue);
    if (ProfileInterpreter) {
      __ bind(profile_method_continue);
    }
  }
  __ bind(Lcontinue);

  // Check for stack overflow by touching the shadow pages (non-native variant:
  // every shadow page is banged).
  bang_stack_shadow_pages(false);

  // Reset the _do_not_unlock_if_synchronized flag.
  __ z_mvi(do_not_unlock_if_synchronized, false);

  // Check for synchronized methods.
  // Must happen AFTER invocation_counter check and stack overflow check,
  // so the method is not locked if the counter overflows.
  if (synchronized) {
    // Allocate monitor and lock method.
    lock_method();
  } else {
#ifdef ASSERT
    { Label L;
      __ get_method(Z_R1_scratch);
      __ testbit(method2_(Z_R1_scratch, access_flags), JVM_ACC_SYNCHRONIZED_BIT);
      __ z_bfalse(L);
      reentry = __ stop_chain_static(reentry, "method needs synchronization");
      __ bind(L);
    }
#endif // ASSERT
  }

  // start execution

#ifdef ASSERT
  __ verify_esp(Z_esp, Z_R1_scratch);

  __ verify_thread();
#endif

  // jvmti support
  __ notify_method_entry();

  // Start executing instructions.
  __ dispatch_next(vtos);
  // Dispatch_next does not return.
  DEBUG_ONLY(__ should_not_reach_here());

  // Invocation counter overflow.
  // Out-of-line slow paths, reached only via the labels bound below.
  if (inc_counter) {
    if (ProfileInterpreter) {
      // We have decided to profile this method in the interpreter.
      __ bind(profile_method);

      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
      __ set_method_data_pointer_for_bcp();
      __ z_bru(profile_method_continue);
    }

    // Handle invocation counter overflow.
    __ bind(invocation_counter_overflow);
    generate_counter_overflow(Lcontinue);
  }

  BLOCK_COMMENT("} normal_entry");

  return entry_point;
}
1932
1933// Method entry for static native methods:
1934//   int java.util.zip.CRC32.update(int crc, int b)
address TemplateInterpreterGenerator::generate_CRC32_update_entry() {

  // Emit the accelerated entry only if the CRC32 intrinsics are enabled;
  // returning NULL makes the caller fall back to the default entry.
  if (UseCRC32Intrinsics) {
    uint64_t entry_off = __ offset();
    Label    slow_path;

    // If we need a safepoint check, generate full interpreter entry.
    __ generate_safepoint_check(slow_path, Z_R1, false);

    BLOCK_COMMENT("CRC32_update {");

    // We don't generate a local frame and don't align the stack because
    // we do not even call stub code (we generate the code inline)
    // and there is no safepoint on this path.

    // Load java parameters.
    // Z_esp is callers operand stack pointer, i.e. it points to the parameters.
    const Register argP    = Z_esp;
    const Register crc     = Z_ARG1;  // crc value
    const Register data    = Z_ARG2;  // address of java byte value (kernel_crc32 needs address)
    const Register dataLen = Z_ARG3;  // source data len (1 byte). Not used because calling the single-byte emitter.
    const Register table   = Z_ARG4;  // address of crc32 table

    // Arguments are reversed on java expression stack.
    __ z_la(data, 3+1*wordSize, argP);  // byte value (stack address).
                                        // Being passed as an int, the single byte is at offset +3.
    __ z_llgf(crc, 2 * wordSize, argP); // Current crc state, zero extend to 64 bit to have a clean register.

    StubRoutines::zarch::generate_load_crc_table_addr(_masm, table);
    __ kernel_crc32_singleByte(crc, data, dataLen, table, Z_R1, true);

    // Restore caller sp for c2i case.
    __ resize_frame_absolute(Z_R10, Z_R0, true); // Cut the stack back to where the caller started.

    // Result is already in Z_ARG1 (== Z_RET); return to caller.
    __ z_br(Z_R14);

    BLOCK_COMMENT("} CRC32_update");

    // Use a previously generated vanilla native entry as the slow path.
    BIND(slow_path);
    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native), Z_R1);
    return __ addr_at(entry_off);
  }

  return NULL;
}
1981
1982
1983// Method entry for static native methods:
1984//   int java.util.zip.CRC32.updateBytes(int crc, byte[] b, int off, int len)
1985//   int java.util.zip.CRC32.updateByteBuffer(int crc, long buf, int off, int len)
address TemplateInterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) {

  // Emit the accelerated entry only if the CRC32 intrinsics are enabled;
  // returning NULL makes the caller fall back to the default entry.
  if (UseCRC32Intrinsics) {
    uint64_t entry_off = __ offset();
    Label    slow_path;

    // If we need a safepoint check, generate full interpreter entry.
    __ generate_safepoint_check(slow_path, Z_R1, false);

    // We don't generate a local frame and don't align the stack because
    // we call stub code and there is no safepoint on this path.

    // Load parameters.
    // Z_esp is callers operand stack pointer, i.e. it points to the parameters.
    const Register argP    = Z_esp;
    const Register crc     = Z_ARG1;  // crc value
    const Register data    = Z_ARG2;  // address of java byte array
    const Register dataLen = Z_ARG3;  // source data len
    const Register table   = Z_ARG4;  // address of crc32 table
    const Register t0      = Z_R10;   // work reg for kernel* emitters
    const Register t1      = Z_R11;   // work reg for kernel* emitters
    const Register t2      = Z_R12;   // work reg for kernel* emitters
    const Register t3      = Z_R13;   // work reg for kernel* emitters

    // Arguments are reversed on java expression stack.
    // Calculate address of start element.
    if (kind == Interpreter::java_util_zip_CRC32_updateByteBuffer) { // Used for "updateByteBuffer direct".
      // crc     @ (SP + 5W) (32bit)
      // buf     @ (SP + 3W) (64bit ptr to long array)
      // off     @ (SP + 2W) (32bit)
      // dataLen @ (SP + 1W) (32bit)
      // data = buf + off
      BLOCK_COMMENT("CRC32_updateByteBuffer {");
      __ z_llgf(crc,    5*wordSize, argP);  // current crc state
      __ z_lg(data,     3*wordSize, argP);  // start of byte buffer
      __ z_agf(data,    2*wordSize, argP);  // Add byte buffer offset.
      __ z_lgf(dataLen, 1*wordSize, argP);  // #bytes to process
    } else {                                                         // Used for "updateBytes update".
      // crc     @ (SP + 4W) (32bit)
      // buf     @ (SP + 3W) (64bit ptr to byte array)
      // off     @ (SP + 2W) (32bit)
      // dataLen @ (SP + 1W) (32bit)
      // data = buf + off + base_offset
      BLOCK_COMMENT("CRC32_updateBytes {");
      __ z_llgf(crc,    4*wordSize, argP);  // current crc state
      __ z_lg(data,     3*wordSize, argP);  // start of byte buffer
      __ z_agf(data,    2*wordSize, argP);  // Add byte buffer offset.
      __ z_lgf(dataLen, 1*wordSize, argP);  // #bytes to process
      __ z_aghi(data, arrayOopDesc::base_offset_in_bytes(T_BYTE));   // Skip array header.
    }

    StubRoutines::zarch::generate_load_crc_table_addr(_masm, table);

    __ resize_frame(-(6*8), Z_R0, true); // Resize frame to provide add'l space to spill the 4 work registers t0..t3.
    __ z_stmg(t0, t3, 1*8, Z_SP);        // Spill regs 10..13 to make them available as work registers.
    __ kernel_crc32_1word(crc, data, dataLen, table, t0, t1, t2, t3, true);
    __ z_lmg(t0, t3, 1*8, Z_SP);         // Restore regs 10..13 from the stack.

    // Restore caller sp for c2i case.
    __ resize_frame_absolute(Z_R10, Z_R0, true); // Cut the stack back to where the caller started.

    // Result is already in Z_ARG1 (== Z_RET); return to caller.
    __ z_br(Z_R14);

    BLOCK_COMMENT("} CRC32_update{Bytes|ByteBuffer}");

    // Use a previously generated vanilla native entry as the slow path.
    BIND(slow_path);
    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native), Z_R1);
    return __ addr_at(entry_off);
  }

  return NULL;
}
2059
2060
2061// Method entry for static native methods:
2062//   int java.util.zip.CRC32C.updateBytes(int crc, byte[] b, int off, int len)
2063//   int java.util.zip.CRC32C.updateDirectByteBuffer(int crc, long buf, int off, int len)
address TemplateInterpreterGenerator::generate_CRC32C_updateBytes_entry(AbstractInterpreter::MethodKind kind) {

  // Emit the accelerated entry only if the CRC32C intrinsics are enabled;
  // returning NULL makes the caller fall back to the default entry.
  // NOTE(review): unlike the CRC32 entries above, no safepoint check and no
  // native slow path are generated here — confirm this is intentional.
  if (UseCRC32CIntrinsics) {
    uint64_t entry_off = __ offset();

    // We don't generate a local frame and don't align the stack because
    // we call stub code and there is no safepoint on this path.

    // Load parameters.
    // Z_esp is callers operand stack pointer, i.e. it points to the parameters.
    const Register argP    = Z_esp;
    const Register crc     = Z_ARG1;  // crc value
    const Register data    = Z_ARG2;  // address of java byte array
    const Register dataLen = Z_ARG3;  // source data len
    const Register table   = Z_ARG4;  // address of crc32 table
    const Register t0      = Z_R10;   // work reg for kernel* emitters
    const Register t1      = Z_R11;   // work reg for kernel* emitters
    const Register t2      = Z_R12;   // work reg for kernel* emitters
    const Register t3      = Z_R13;   // work reg for kernel* emitters

    // Arguments are reversed on java expression stack.
    // Calculate address of start element.
    if (kind == Interpreter::java_util_zip_CRC32C_updateDirectByteBuffer) { // Used for "updateByteBuffer direct".
      // crc     @ (SP + 5W) (32bit)
      // buf     @ (SP + 3W) (64bit ptr to long array)
      // off     @ (SP + 2W) (32bit)
      // dataLen @ (SP + 1W) (32bit)
      // data = buf + off
      BLOCK_COMMENT("CRC32C_updateDirectByteBuffer {");
      __ z_llgf(crc,    5*wordSize, argP);  // current crc state
      __ z_lg(data,     3*wordSize, argP);  // start of byte buffer
      __ z_agf(data,    2*wordSize, argP);  // Add byte buffer offset.
      __ z_lgf(dataLen, 1*wordSize, argP);  // #bytes to process
    } else {                                                                // Used for "updateBytes update".
      // crc     @ (SP + 4W) (32bit)
      // buf     @ (SP + 3W) (64bit ptr to byte array)
      // off     @ (SP + 2W) (32bit)
      // dataLen @ (SP + 1W) (32bit)
      // data = buf + off + base_offset
      BLOCK_COMMENT("CRC32C_updateBytes {");
      __ z_llgf(crc,    4*wordSize, argP);  // current crc state
      __ z_lg(data,     3*wordSize, argP);  // start of byte buffer
      __ z_agf(data,    2*wordSize, argP);  // Add byte buffer offset.
      __ z_lgf(dataLen, 1*wordSize, argP);  // #bytes to process
      __ z_aghi(data, arrayOopDesc::base_offset_in_bytes(T_BYTE));          // Skip array header.
    }

    StubRoutines::zarch::generate_load_crc32c_table_addr(_masm, table);

    __ resize_frame(-(6*8), Z_R0, true); // Resize frame to provide add'l space to spill the 4 work registers t0..t3.
    __ z_stmg(t0, t3, 1*8, Z_SP);        // Spill regs 10..13 to make them available as work registers.
    __ kernel_crc32_1word(crc, data, dataLen, table, t0, t1, t2, t3, false); // false: CRC32C polynomial/table variant.
    __ z_lmg(t0, t3, 1*8, Z_SP);         // Restore regs 10..13 from the stack.

    // Restore caller sp for c2i case.
    __ resize_frame_absolute(Z_R10, Z_R0, true); // Cut the stack back to where the caller started.

    // Result is already in Z_ARG1 (== Z_RET); return to caller.
    __ z_br(Z_R14);

    BLOCK_COMMENT("} CRC32C_update{Bytes|DirectByteBuffer}");
    return __ addr_at(entry_off);
  }

  return NULL;
}
2129
2130void TemplateInterpreterGenerator::bang_stack_shadow_pages(bool native_call) {
2131  // Quick & dirty stack overflow checking: bang the stack & handle trap.
2132  // Note that we do the banging after the frame is setup, since the exception
2133  // handling code expects to find a valid interpreter frame on the stack.
2134  // Doing the banging earlier fails if the caller frame is not an interpreter
2135  // frame.
2136  // (Also, the exception throwing code expects to unlock any synchronized
2137  // method receiver, so do the banging after locking the receiver.)
2138
2139  // Bang each page in the shadow zone. We can't assume it's been done for
2140  // an interpreter frame with greater than a page of locals, so each page
2141  // needs to be checked. Only true for non-native. For native, we only bang the last page.
2142  if (UseStackBanging) {
2143    const int page_size      = os::vm_page_size();
2144    const int n_shadow_pages = (int)(JavaThread::stack_shadow_zone_size()/page_size);
2145    const int start_page_num = native_call ? n_shadow_pages : 1;
2146    for (int pages = start_page_num; pages <= n_shadow_pages; pages++) {
2147      __ bang_stack_with_offset(pages*page_size);
2148    }
2149  }
2150}
2151
2152//-----------------------------------------------------------------------------
2153// Exceptions
2154
2155void TemplateInterpreterGenerator::generate_throw_exception() {
2156
2157  BLOCK_COMMENT("throw_exception {");
2158
2159  // Entry point in previous activation (i.e., if the caller was interpreted).
2160  Interpreter::_rethrow_exception_entry = __ pc();
2161  __ z_lg(Z_fp, _z_abi(callers_sp), Z_SP); // Frame accessors use Z_fp.
2162  // Z_ARG1 (==Z_tos): exception
2163  // Z_ARG2          : Return address/pc that threw exception.
2164  __ restore_bcp();    // R13 points to call/send.
2165  __ restore_locals();
2166
2167  // Fallthrough, no need to restore Z_esp.
2168
2169  // Entry point for exceptions thrown within interpreter code.
2170  Interpreter::_throw_exception_entry = __ pc();
2171  // Expression stack is undefined here.
2172  // Z_ARG1 (==Z_tos): exception
2173  // Z_bcp: exception bcp
2174  __ verify_oop(Z_ARG1);
2175  __ z_lgr(Z_ARG2, Z_ARG1);
2176
2177  // Expression stack must be empty before entering the VM in case of
2178  // an exception.
2179  __ empty_expression_stack();
2180  // Find exception handler address and preserve exception oop.
2181  const Register Rpreserved_exc_oop = Z_tmp_1;
2182  __ call_VM(Rpreserved_exc_oop,
2183             CAST_FROM_FN_PTR(address, InterpreterRuntime::exception_handler_for_exception),
2184             Z_ARG2);
2185  // Z_RET: exception handler entry point
2186  // Z_bcp: bcp for exception handler
2187  __ push_ptr(Rpreserved_exc_oop); // Push exception which is now the only value on the stack.
2188  __ z_br(Z_RET); // Jump to exception handler (may be _remove_activation_entry!).
2189
2190  // If the exception is not handled in the current frame the frame is
2191  // removed and the exception is rethrown (i.e. exception
2192  // continuation is _rethrow_exception).
2193  //
2194  // Note: At this point the bci is still the bci for the instruction
2195  // which caused the exception and the expression stack is
2196  // empty. Thus, for any VM calls at this point, GC will find a legal
2197  // oop map (with empty expression stack).
2198
2199  //
2200  // JVMTI PopFrame support
2201  //
2202
2203  Interpreter::_remove_activation_preserving_args_entry = __ pc();
2204  __ z_lg(Z_fp, _z_parent_ijava_frame_abi(callers_sp), Z_SP);
2205  __ empty_expression_stack();
2206  // Set the popframe_processing bit in pending_popframe_condition
2207  // indicating that we are currently handling popframe, so that
2208  // call_VMs that may happen later do not trigger new popframe
2209  // handling cycles.
2210  __ load_sized_value(Z_tmp_1, Address(Z_thread, JavaThread::popframe_condition_offset()), 4, false /*signed*/);
2211  __ z_oill(Z_tmp_1, JavaThread::popframe_processing_bit);
2212  __ z_sty(Z_tmp_1, thread_(popframe_condition));
2213
2214  {
2215    // Check to see whether we are returning to a deoptimized frame.
2216    // (The PopFrame call ensures that the caller of the popped frame is
2217    // either interpreted or compiled and deoptimizes it if compiled.)
2218    // In this case, we can't call dispatch_next() after the frame is
2219    // popped, but instead must save the incoming arguments and restore
2220    // them after deoptimization has occurred.
2221    //
2222    // Note that we don't compare the return PC against the
2223    // deoptimization blob's unpack entry because of the presence of
2224    // adapter frames in C2.
2225    NearLabel caller_not_deoptimized;
2226    __ z_lg(Z_ARG1, _z_parent_ijava_frame_abi(return_pc), Z_fp);
2227    __ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::interpreter_contains), Z_ARG1);
2228    __ compareU64_and_branch(Z_RET, (intptr_t)0, Assembler::bcondNotEqual, caller_not_deoptimized);
2229
2230    // Compute size of arguments for saving when returning to
2231    // deoptimized caller.
2232    __ get_method(Z_ARG2);
2233    __ z_lg(Z_ARG2, Address(Z_ARG2, Method::const_offset()));
2234    __ z_llgh(Z_ARG2, Address(Z_ARG2, ConstMethod::size_of_parameters_offset()));
2235    __ z_sllg(Z_ARG2, Z_ARG2, Interpreter::logStackElementSize); // slots 2 bytes
2236    __ restore_locals();
2237    // Compute address of args to be saved.
2238    __ z_lgr(Z_ARG3, Z_locals);
2239    __ z_slgr(Z_ARG3, Z_ARG2);
2240    __ add2reg(Z_ARG3, wordSize);
2241    // Save these arguments.
2242    __ call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::popframe_preserve_args),
2243                    Z_thread, Z_ARG2, Z_ARG3);
2244
2245    __ remove_activation(vtos, Z_R14,
2246                         /* throw_monitor_exception */ false,
2247                         /* install_monitor_exception */ false,
2248                         /* notify_jvmdi */ false);
2249
2250    // Inform deoptimization that it is responsible for restoring
2251    // these arguments.
2252    __ store_const(thread_(popframe_condition),
2253                   JavaThread::popframe_force_deopt_reexecution_bit,
2254                   Z_tmp_1, false);
2255
2256    // Continue in deoptimization handler.
2257    __ z_br(Z_R14);
2258
2259    __ bind(caller_not_deoptimized);
2260  }
2261
2262  // Clear the popframe condition flag.
2263  __ clear_mem(thread_(popframe_condition), sizeof(int));
2264
2265  __ remove_activation(vtos,
2266                       noreg,  // Retaddr is not used.
2267                       false,  // throw_monitor_exception
2268                       false,  // install_monitor_exception
2269                       false); // notify_jvmdi
2270  __ z_lg(Z_fp, _z_abi(callers_sp), Z_SP); // Restore frame pointer.
2271  __ restore_bcp();
2272  __ restore_locals();
2273  __ restore_esp();
2274  // The method data pointer was incremented already during
2275  // call profiling. We have to restore the mdp for the current bcp.
2276  if (ProfileInterpreter) {
2277    __ set_method_data_pointer_for_bcp();
2278  }
2279#if INCLUDE_JVMTI
2280  {
2281    Label L_done;
2282
2283    __ z_cli(0, Z_bcp, Bytecodes::_invokestatic);
2284    __ z_brc(Assembler::bcondNotEqual, L_done);
2285
2286    // The member name argument must be restored if _invokestatic is
2287    // re-executed after a PopFrame call.  Detect such a case in the
2288    // InterpreterRuntime function and return the member name
2289    // argument, or NULL.
2290    __ z_lg(Z_ARG2, Address(Z_locals));
2291    __ get_method(Z_ARG3);
2292    __ call_VM(Z_tmp_1,
2293               CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null),
2294               Z_ARG2, Z_ARG3, Z_bcp);
2295
2296    __ z_ltgr(Z_tmp_1, Z_tmp_1);
2297    __ z_brc(Assembler::bcondEqual, L_done);
2298
2299    __ z_stg(Z_tmp_1, Address(Z_esp, wordSize));
2300    __ bind(L_done);
2301  }
2302#endif // INCLUDE_JVMTI
2303  __ dispatch_next(vtos);
2304  // End of PopFrame support.
2305  Interpreter::_remove_activation_entry = __ pc();
2306
2307  // In between activations - previous activation type unknown yet
2308  // compute continuation point - the continuation point expects the
2309  // following registers set up:
2310  //
2311  // Z_ARG1 (==Z_tos): exception
2312  // Z_ARG2          : return address/pc that threw exception
2313
2314  Register return_pc = Z_tmp_1;
2315  Register handler   = Z_tmp_2;
2316   assert(return_pc->is_nonvolatile(), "use non-volatile reg. to preserve exception pc");
2317   assert(handler->is_nonvolatile(),   "use non-volatile reg. to handler pc");
2318  __ asm_assert_ijava_state_magic(return_pc/*tmp*/); // The top frame should be an interpreter frame.
2319  __ z_lg(return_pc, _z_parent_ijava_frame_abi(return_pc), Z_fp);
2320
2321  // Moved removing the activation after VM call, because the new top
2322  // frame does not necessarily have the z_abi_160 required for a VM
2323  // call (e.g. if it is compiled).
2324
2325  __ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
2326                                         SharedRuntime::exception_handler_for_return_address),
2327                        Z_thread, return_pc);
2328  __ z_lgr(handler, Z_RET); // Save exception handler.
2329
2330  // Preserve exception over this code sequence.
2331  __ pop_ptr(Z_ARG1);
2332  __ set_vm_result(Z_ARG1);
2333  // Remove the activation (without doing throws on illegalMonitorExceptions).
2334  __ remove_activation(vtos, noreg/*ret.pc already loaded*/, false/*throw exc*/, true/*install exc*/, false/*notify jvmti*/);
2335  __ z_lg(Z_fp, _z_abi(callers_sp), Z_SP); // Restore frame pointer.
2336
2337  __ get_vm_result(Z_ARG1);     // Restore exception.
2338  __ verify_oop(Z_ARG1);
2339  __ z_lgr(Z_ARG2, return_pc);  // Restore return address.
2340
2341#ifdef ASSERT
2342  // The return_pc in the new top frame is dead... at least that's my
2343  // current understanding. To assert this I overwrite it.
2344  // Note: for compiled frames the handler is the deopt blob
2345  // which writes Z_ARG2 into the return_pc slot.
2346  __ load_const_optimized(return_pc, 0xb00b1);
2347  __ z_stg(return_pc, _z_parent_ijava_frame_abi(return_pc), Z_SP);
2348#endif
2349
2350  // Z_ARG1 (==Z_tos): exception
2351  // Z_ARG2          : return address/pc that threw exception
2352
2353  // Note that an "issuing PC" is actually the next PC after the call.
2354  __ z_br(handler);         // Jump to exception handler of caller.
2355
2356  BLOCK_COMMENT("} throw_exception");
2357}
2358
2359//
2360// JVMTI ForceEarlyReturn support
2361//
// JVMTI ForceEarlyReturn: resume execution in the caller as if the current
// method had returned, delivering the early-return value the JVMTI agent
// deposited in the thread's JvmtiThreadState. One entry per TosState.
address TemplateInterpreterGenerator::generate_earlyret_entry_for (TosState state) {
  address entry = __ pc();

  BLOCK_COMMENT("earlyret_entry {");

  // Re-establish the interpreter state of the top (interpreted) frame.
  __ z_lg(Z_fp, _z_parent_ijava_frame_abi(callers_sp), Z_SP);
  __ restore_bcp();
  __ restore_locals();
  __ restore_esp();
  // Discard any pending operands; the early-return value replaces them.
  __ empty_expression_stack();
  // Fetch the agent-provided value into the tos register(s) for 'state'.
  __ load_earlyret_value(state);

  // Mark the early-return request as consumed in the JvmtiThreadState.
  Register RjvmtiState = Z_tmp_1;
  __ z_lg(RjvmtiState, thread_(jvmti_thread_state));
  __ store_const(Address(RjvmtiState, JvmtiThreadState::earlyret_state_offset()),
                 JvmtiThreadState::earlyret_inactive, 4, 4, Z_R0_scratch);

  // Pop the activation. Z_tmp_1 receives the return address; this clobbers
  // RjvmtiState, which is no longer needed at this point.
  __ remove_activation(state,
                       Z_tmp_1, // retaddr
                       false,   // throw_monitor_exception
                       false,   // install_monitor_exception
                       true);   // notify_jvmdi
  __ z_br(Z_tmp_1);            // Return to the caller.

  BLOCK_COMMENT("} earlyret_entry");

  return entry;
}
2390
2391//-----------------------------------------------------------------------------
2392// Helper for vtos entry point generation.
2393
// Emit the per-tos-state entry points for a template whose code expects vtos
// (nothing cached in registers): each typed entry pushes the value cached in
// the tos register(s) onto the expression stack, then falls into the common
// vtos path which generates and dispatches the template.
void TemplateInterpreterGenerator::set_vtos_entry_points(Template* t,
                                                         address& bep, // byte
                                                         address& cep, // char
                                                         address& sep, // short
                                                         address& aep, // object
                                                         address& iep, // int
                                                         address& lep, // long
                                                         address& fep, // float
                                                         address& dep, // double
                                                         address& vep) { // void
  assert(t->is_valid() && t->tos_in() == vtos, "illegal template");
  Label L;
  // Typed entries: spill the cached tos value, then branch to the common path.
  aep = __ pc(); __ push_ptr(); __ z_bru(L);
  fep = __ pc(); __ push_f();   __ z_bru(L);
  dep = __ pc(); __ push_d();   __ z_bru(L);
  lep = __ pc(); __ push_l();   __ z_bru(L);
  // byte/char/short share the int entry: all are cached as an int.
  bep = cep = sep =
  iep = __ pc(); __ push_i();
  vep = __ pc(); // vtos entry: nothing cached, nothing to push.
  __ bind(L);
  generate_and_dispatch(t);
}
2416
2417//-----------------------------------------------------------------------------
2418
2419#ifndef PRODUCT
// Generate one trace stub per TosState. The stub calls
// InterpreterRuntime::trace_bytecode to print the bytecode about to be
// executed, once the bytecode counter has reached TraceBytecodesAt.
// Non-product builds only.
address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
  address entry = __ pc();
  NearLabel counter_below_trace_threshold;

  if (TraceBytecodesAt > 0) {
    // Skip runtime call, if the trace threshold is not yet reached.
    __ load_absolute_address(Z_tmp_1, (address)&BytecodeCounter::_counter_value);
    __ load_absolute_address(Z_tmp_2, (address)&TraceBytecodesAt);
    __ load_sized_value(Z_tmp_1, Address(Z_tmp_1), 4, false /*signed*/);
    __ load_sized_value(Z_tmp_2, Address(Z_tmp_2), 8, false /*signed*/);
    __ compareU64_and_branch(Z_tmp_1, Z_tmp_2, Assembler::bcondLow, counter_below_trace_threshold);
  }

  // Second interesting expression stack slot: longs and doubles occupy
  // two slots, so their second word is one slot further down.
  int offset2 = state == ltos || state == dtos ? 2 : 1;

  __ push(state);
  // Preserved return pointer is in Z_R14.
  // InterpreterRuntime::trace_bytecode() preserved and returns the value passed as second argument.
  __ z_lgr(Z_ARG2, Z_R14);
  __ z_lg(Z_ARG3, Address(Z_esp, Interpreter::expr_offset_in_bytes(0)));
  if (WizardMode) {
    __ z_lgr(Z_ARG4, Z_esp); // Trace Z_esp in WizardMode.
  } else {
    __ z_lg(Z_ARG4, Address(Z_esp, Interpreter::expr_offset_in_bytes(offset2)));
  }
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::trace_bytecode), Z_ARG2, Z_ARG3, Z_ARG4);
  __ z_lgr(Z_R14, Z_RET); // Restore return address (see above).
  __ pop(state);

  __ bind(counter_below_trace_threshold);
  __ z_br(Z_R14); // return

  return entry;
}
2454
// Increment the global bytecode counter (BytecodeCounter::_counter_value).
// Make feasible for old CPUs: add2mem_32 presumably avoids newer
// add-to-storage instructions not available on all models — TODO confirm.
void TemplateInterpreterGenerator::count_bytecode() {
  __ load_absolute_address(Z_R1_scratch, (address) &BytecodeCounter::_counter_value);
  __ add2mem_32(Address(Z_R1_scratch), 1, Z_R0_scratch);
}
2460
// Increment the histogram counter for this template's bytecode
// (BytecodeHistogram::_counters[t->bytecode()]).
void TemplateInterpreterGenerator::histogram_bytecode(Template * t) {
  __ load_absolute_address(Z_R1_scratch, (address)&BytecodeHistogram::_counters[ t->bytecode() ]);
  __ add2mem_32(Address(Z_R1_scratch), 1, Z_tmp_1);
}
2465
// Increment the counter for the (previous, current) bytecode pair.
// BytecodePairHistogram::_index carries the rolling pair index: after the
// update below, the low log2_number_of_codes bits hold the previous bytecode
// and the bits above them hold the current one.
void TemplateInterpreterGenerator::histogram_bytecode_pair(Template * t) {
  Address  index_addr(Z_tmp_1, (intptr_t) 0);
  Register index = Z_tmp_2;

  // Load previous index.
  __ load_absolute_address(Z_tmp_1, (address) &BytecodePairHistogram::_index);
  __ mem2reg_opt(index, index_addr, false);

  // Shift out the older bytecode, merge in the current one (pre-shifted into
  // the upper bit positions), and store the result as the new previous index.
  __ z_srl(index, BytecodePairHistogram::log2_number_of_codes);
  __ load_const_optimized(Z_R0_scratch,
                          (int)t->bytecode() << BytecodePairHistogram::log2_number_of_codes);
  __ z_or(index, Z_R0_scratch);
  __ reg2mem_opt(index, index_addr, false);

  // Load counter array's address.
  __ z_lgfr(index, index);   // Sign extend for addressing.
  __ z_sllg(index, index, LogBytesPerInt);  // index2bytes
  __ load_absolute_address(Z_R1_scratch,
                           (address) &BytecodePairHistogram::_counters);
  // Add index and increment counter.
  __ z_agr(Z_R1_scratch, index);
  __ add2mem_32(Address(Z_R1_scratch), 1, Z_tmp_1);
}
2490
// Emit a call to the pre-generated trace stub matching this template's
// tos-in state (see generate_trace_code above).
void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
  // Call a little run-time stub to avoid blow-up for each bytecode.
  // The runtime stub saves the right registers, depending on
  // the tosca in-state for the given template.
  address entry = Interpreter::trace_code(t->tos_in());
  guarantee(entry != NULL, "entry must have been generated");
  __ call_stub(entry);
}
2499
// Emit a check that calls breakpoint() once the global bytecode counter
// reaches StopInterpreterAt. Lets a debugger stop at a reproducible point
// in the bytecode stream.
void TemplateInterpreterGenerator::stop_interpreter_at() {
  NearLabel L;

  // Skip the breakpoint while the counter is still below the threshold.
  __ load_absolute_address(Z_tmp_1, (address)&BytecodeCounter::_counter_value);
  __ load_absolute_address(Z_tmp_2, (address)&StopInterpreterAt);
  __ load_sized_value(Z_tmp_1, Address(Z_tmp_1), 4, false /*signed*/);
  __ load_sized_value(Z_tmp_2, Address(Z_tmp_2), 8, false /*signed*/);
  __ compareU64_and_branch(Z_tmp_1, Z_tmp_2, Assembler::bcondLow, L);
  // Preserve tos/bytecode/ftos across the VM call in non-volatile registers.
  assert(Z_tmp_1->is_nonvolatile(), "must be nonvolatile to preserve Z_tos");
  assert(Z_F8->is_nonvolatile(), "must be nonvolatile to preserve Z_ftos");
  __ z_lgr(Z_tmp_1, Z_tos);      // Save tos.
  __ z_lgr(Z_tmp_2, Z_bytecode); // Save Z_bytecode.
  __ z_ldr(Z_F8, Z_ftos);        // Save ftos.
  // Use -XX:StopInterpreterAt=<num> to set the limit
  // and break at breakpoint().
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, breakpoint), false);
  __ z_lgr(Z_tos, Z_tmp_1);      // Restore tos.
  __ z_lgr(Z_bytecode, Z_tmp_2); // Restore Z_bytecode.
  __ z_ldr(Z_ftos, Z_F8);        // Restore ftos.
  __ bind(L);
}
2521
2522#endif // !PRODUCT
2523