templateTable_ppc_64.cpp revision 9898:2794bc7859f5
/*
 * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2013, 2015 SAP AG. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/templateInterpreter.hpp"
#include "interpreter/templateTable.hpp"
#include "memory/universe.inline.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "utilities/macros.hpp"

#undef __
#define __ _masm->

// ============================================================================
// Misc helpers

// Do an oop store like *(base + index) = val OR *(base + offset) = val
// (only one of the two variants can be used at a time).
// Index can be noreg.
// Kills:
//   Rbase, Rtmp
static void do_oop_store(InterpreterMacroAssembler* _masm,
                         Register           Rbase,
                         RegisterOrConstant offset,
                         Register           Rval,         // Noreg means always null.
                         Register           Rtmp1,
                         Register           Rtmp2,
                         Register           Rtmp3,
                         BarrierSet::Name   barrier,
                         bool               precise,
                         bool               check_null) {
  assert_different_registers(Rtmp1, Rtmp2, Rtmp3, Rval, Rbase);

  switch (barrier) {
#if INCLUDE_ALL_GCS
    case BarrierSet::G1SATBCTLogging:
      {
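        // G1 needs both barriers: the pre-barrier records the old value for
        // SATB marking, the post-barrier dirties the card for the new store.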
        // Load and record the previous value.
        __ g1_write_barrier_pre(Rbase, offset,
                                Rtmp3, /* holder of pre_val ? */
                                Rtmp1, Rtmp2, false /* frame */);

        Label Lnull, Ldone;
        if (Rval != noreg) {
          if (check_null) {
            __ cmpdi(CCR0, Rval, 0);
            __ beq(CCR0, Lnull);
          }
          __ store_heap_oop_not_null(Rval, offset, Rbase, /*Rval must stay uncompressed.*/ Rtmp1);
          // Mark the card.
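          // Precise card marking dirties the card of the element address, so
          // the offset must be added; imprecise marking would only need the
          // card of the object start (Rbase).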
          if (!(offset.is_constant() && offset.as_constant() == 0) && precise) {
            __ add(Rbase, offset, Rbase);
          }
          __ g1_write_barrier_post(Rbase, Rval, Rtmp1, Rtmp2, Rtmp3, /*filtered (fast path)*/ &Ldone);
          if (check_null) { __ b(Ldone); }
        }

        if (Rval == noreg || check_null) { // Store null oop.
          Register Rnull = Rval;
          __ bind(Lnull);
          if (Rval == noreg) {
            Rnull = Rtmp1;
            __ li(Rnull, 0);
          }
          if (UseCompressedOops) {
            __ stw(Rnull, offset, Rbase);
          } else {
            __ std(Rnull, offset, Rbase);
          }
        }
        __ bind(Ldone);
      }
      break;
#endif // INCLUDE_ALL_GCS
    case BarrierSet::CardTableForRS:
    case BarrierSet::CardTableExtension:
      {
        Label Lnull, Ldone;
        if (Rval != noreg) {
          if (check_null) {
            __ cmpdi(CCR0, Rval, 0);
            __ beq(CCR0, Lnull);
          }
          __ store_heap_oop_not_null(Rval, offset, Rbase, /* Rval should stay uncompressed. */ Rtmp1);
          // Mark the card.
          if (!(offset.is_constant() && offset.as_constant() == 0) && precise) {
            __ add(Rbase, offset, Rbase);
          }
          __ card_write_barrier_post(Rbase, Rval, Rtmp1);
          if (check_null) {
            __ b(Ldone);
          }
        }

        if (Rval == noreg || check_null) { // Store null oop.
          Register Rnull = Rval;
          __ bind(Lnull);
          if (Rval == noreg) {
            Rnull = Rtmp1;
            __ li(Rnull, 0);
          }
          if (UseCompressedOops) {
            __ stw(Rnull, offset, Rbase);
          } else {
            __ std(Rnull, offset, Rbase);
          }
        }
        __ bind(Ldone);
      }
      break;
    case BarrierSet::ModRef:
      ShouldNotReachHere();
      break;
    default:
      ShouldNotReachHere();
  }
}

// ============================================================================
// Platform-dependent initialization

void TemplateTable::pd_initialize() {
  // No ppc64 specific initialization.
}

Address TemplateTable::at_bcp(int offset) {
  // Not used on ppc.
  ShouldNotReachHere();
  return Address();
}

// Patches the current bytecode (the one bcp points to)
// in the bytecode stream with a new one.
void TemplateTable::patch_bytecode(Bytecodes::Code new_bc, Register Rnew_bc, Register Rtemp, bool load_bc_into_bc_reg /*=true*/, int byte_no) {
  // With sharing on, may need to test method flag.
  if (!RewriteBytecodes) return;
  Label L_patch_done;

  switch (new_bc) {
    case Bytecodes::_fast_aputfield:
    case Bytecodes::_fast_bputfield:
    case Bytecodes::_fast_cputfield:
    case Bytecodes::_fast_dputfield:
    case Bytecodes::_fast_fputfield:
    case Bytecodes::_fast_iputfield:
    case Bytecodes::_fast_lputfield:
    case Bytecodes::_fast_sputfield:
    {
      // We skip bytecode quickening for putfield instructions when
      // the put_code written to the constant pool cache is zero.
      // This is required so that every execution of this instruction
      // calls out to InterpreterRuntime::resolve_get_put to do
      // additional, required work.
      assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
      assert(load_bc_into_bc_reg, "we use bc_reg as temp");
      __ get_cache_and_index_at_bcp(Rtemp /* dst = cache */, 1);
      // ((*(cache+indices))>>((1+byte_no)*8))&0xFF:
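      // The bytecode lives in byte (1 + byte_no) of the indices word: that is
      // byte offset 1 + byte_no on little-endian, and 7 - (1 + byte_no)
      // within the 8-byte field on big-endian.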
#if defined(VM_LITTLE_ENDIAN)
      __ lbz(Rnew_bc, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + 1 + byte_no, Rtemp);
#else
      __ lbz(Rnew_bc, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + 7 - (1 + byte_no), Rtemp);
#endif
      __ cmpwi(CCR0, Rnew_bc, 0);
      __ li(Rnew_bc, (unsigned int)(unsigned char)new_bc);
      __ beq(CCR0, L_patch_done);
      // __ isync(); // acquire not needed
      break;
    }

    default:
      assert(byte_no == -1, "sanity");
      if (load_bc_into_bc_reg) {
        __ li(Rnew_bc, (unsigned int)(unsigned char)new_bc);
      }
  }

  if (JvmtiExport::can_post_breakpoint()) {
    Label L_fast_patch;
    __ lbz(Rtemp, 0, R14_bcp);
    __ cmpwi(CCR0, Rtemp, (unsigned int)(unsigned char)Bytecodes::_breakpoint);
    __ bne(CCR0, L_fast_patch);
    // Perform the quickening, slowly, in the bowels of the breakpoint table.
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), R19_method, R14_bcp, Rnew_bc);
    __ b(L_patch_done);
    __ bind(L_fast_patch);
  }

  // Patch bytecode.
  __ stb(Rnew_bc, 0, R14_bcp);

  __ bind(L_patch_done);
}

// ============================================================================
// Individual instructions

void TemplateTable::nop() {
  transition(vtos, vtos);
  // Nothing to do.
}

void TemplateTable::shouldnotreachhere() {
  transition(vtos, vtos);
  __ stop("shouldnotreachhere bytecode");
}

void TemplateTable::aconst_null() {
  transition(vtos, atos);
  __ li(R17_tos, 0);
}

void TemplateTable::iconst(int value) {
  transition(vtos, itos);
  assert(value >= -1 && value <= 5, "");
  __ li(R17_tos, value);
}

void TemplateTable::lconst(int value) {
  transition(vtos, ltos);
  assert(value >= -1 && value <= 5, "");
  __ li(R17_tos, value);
}

void TemplateTable::fconst(int value) {
  transition(vtos, ftos);
  static float zero = 0.0;
  static float one  = 1.0;
  static float two  = 2.0;
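  // Note: load_const_optimized, called with its final flag set, materializes
  // only the upper part of the constant's address and returns the remaining
  // low 16-bit displacement, which the following lfs applies directly.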
  switch (value) {
    default: ShouldNotReachHere();
    case 0: {
      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&zero, R0, true);
      __ lfs(F15_ftos, simm16_offset, R11_scratch1);
      break;
    }
    case 1: {
      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&one, R0, true);
      __ lfs(F15_ftos, simm16_offset, R11_scratch1);
      break;
    }
    case 2: {
      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&two, R0, true);
      __ lfs(F15_ftos, simm16_offset, R11_scratch1);
      break;
    }
  }
}

void TemplateTable::dconst(int value) {
  transition(vtos, dtos);
  static double zero = 0.0;
  static double one  = 1.0;
  switch (value) {
    case 0: {
      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&zero, R0, true);
      __ lfd(F15_ftos, simm16_offset, R11_scratch1);
      break;
    }
    case 1: {
      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&one, R0, true);
      __ lfd(F15_ftos, simm16_offset, R11_scratch1);
      break;
    }
    default: ShouldNotReachHere();
  }
}

void TemplateTable::bipush() {
  transition(vtos, itos);
  __ lbz(R17_tos, 1, R14_bcp);
  __ extsb(R17_tos, R17_tos);
}

void TemplateTable::sipush() {
  transition(vtos, itos);
  __ get_2_byte_integer_at_bcp(1, R17_tos, InterpreterMacroAssembler::Signed);
}

void TemplateTable::ldc(bool wide) {
  Register Rscratch1 = R11_scratch1,
           Rscratch2 = R12_scratch2,
           Rcpool    = R3_ARG1;

  transition(vtos, vtos);
  Label notInt, notClass, exit;

  __ get_cpool_and_tags(Rcpool, Rscratch2); // Set Rscratch2 = &tags.
  if (wide) { // Read index.
    __ get_2_byte_integer_at_bcp(1, Rscratch1, InterpreterMacroAssembler::Unsigned);
  } else {
    __ lbz(Rscratch1, 1, R14_bcp);
  }

  const int base_offset = ConstantPool::header_size() * wordSize;
  const int tags_offset = Array<u1>::base_offset_in_bytes();

  // Get type from tags.
  __ addi(Rscratch2, Rscratch2, tags_offset);
  __ lbzx(Rscratch2, Rscratch2, Rscratch1);

  __ cmpwi(CCR0, Rscratch2, JVM_CONSTANT_UnresolvedClass); // Unresolved class?
  __ cmpwi(CCR1, Rscratch2, JVM_CONSTANT_UnresolvedClassInError); // Unresolved class in error state?
  __ cror(CCR0, Assembler::equal, CCR1, Assembler::equal);

  // Resolved class - need to call vm to get java mirror of the class.
  __ cmpwi(CCR1, Rscratch2, JVM_CONSTANT_Class);
  __ crnor(CCR0, Assembler::equal, CCR1, Assembler::equal); // Neither resolved class nor unresolved case from above?
  __ beq(CCR0, notClass);

  __ li(R4, wide ? 1 : 0);
  call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), R4);
  __ push(atos);
  __ b(exit);

  __ align(32, 12);
  __ bind(notClass);
  __ addi(Rcpool, Rcpool, base_offset);
  __ sldi(Rscratch1, Rscratch1, LogBytesPerWord);
  __ cmpdi(CCR0, Rscratch2, JVM_CONSTANT_Integer);
  __ bne(CCR0, notInt);
  __ lwax(R17_tos, Rcpool, Rscratch1);
  __ push(itos);
  __ b(exit);

  __ align(32, 12);
  __ bind(notInt);
#ifdef ASSERT
  // String and Object are rewritten to fast_aldc
  __ cmpdi(CCR0, Rscratch2, JVM_CONSTANT_Float);
  __ asm_assert_eq("unexpected type", 0x8765);
#endif
  __ lfsx(F15_ftos, Rcpool, Rscratch1);
  __ push(ftos);

  __ align(32, 12);
  __ bind(exit);
}

// Fast path for caching oop constants.
void TemplateTable::fast_aldc(bool wide) {
  transition(vtos, atos);

  int index_size = wide ? sizeof(u2) : sizeof(u1);
  const Register Rscratch = R11_scratch1;
  Label is_null;

  // We are resolved if the resolved reference cache entry contains a
  // non-null object (CallSite, etc.)
  __ get_cache_index_at_bcp(Rscratch, 1, index_size);  // Load index.
  __ load_resolved_reference_at_index(R17_tos, Rscratch, &is_null);
  __ verify_oop(R17_tos);
  __ dispatch_epilog(atos, Bytecodes::length_for(bytecode()));

  __ bind(is_null);
  __ load_const_optimized(R3_ARG1, (int)bytecode());

  address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);

  // First time invocation - must resolve first.
  __ call_VM(R17_tos, entry, R3_ARG1);
  __ verify_oop(R17_tos);
}

void TemplateTable::ldc2_w() {
  transition(vtos, vtos);
  Label Llong, Lexit;

  Register Rindex = R11_scratch1,
           Rcpool = R12_scratch2,
           Rtag   = R3_ARG1;
  __ get_cpool_and_tags(Rcpool, Rtag);
  __ get_2_byte_integer_at_bcp(1, Rindex, InterpreterMacroAssembler::Unsigned);

  const int base_offset = ConstantPool::header_size() * wordSize;
  const int tags_offset = Array<u1>::base_offset_in_bytes();
  // Get type from tags.
  __ addi(Rcpool, Rcpool, base_offset);
  __ addi(Rtag, Rtag, tags_offset);

  __ lbzx(Rtag, Rtag, Rindex);

  __ sldi(Rindex, Rindex, LogBytesPerWord);
  __ cmpdi(CCR0, Rtag, JVM_CONSTANT_Double);
  __ bne(CCR0, Llong);
  // A double can be placed at word-aligned locations in the constant pool.
  // Check out Conversions.java for an example.
  // Also ConstantPool::header_size() is 20, which makes it very difficult
  // to double-align double on the constant pool. SG, 11/7/97
  __ lfdx(F15_ftos, Rcpool, Rindex);
  __ push(dtos);
  __ b(Lexit);

  __ bind(Llong);
  __ ldx(R17_tos, Rcpool, Rindex);
  __ push(ltos);

  __ bind(Lexit);
}

// Get the locals index located in the bytecode stream at bcp + offset.
void TemplateTable::locals_index(Register Rdst, int offset) {
  __ lbz(Rdst, offset, R14_bcp);
}

void TemplateTable::iload() {
  iload_internal();
}

void TemplateTable::nofast_iload() {
  iload_internal(may_not_rewrite);
}

void TemplateTable::iload_internal(RewriteControl rc) {
  transition(vtos, itos);

  // Get the local value into tos
  const Register Rindex = R22_tmp2;
  locals_index(Rindex);

  // Rewrite iload,iload  pair into fast_iload2
  //         iload,caload pair into fast_icaload
  if (RewriteFrequentPairs && rc == may_rewrite) {
    Label Lrewrite, Ldone;
    Register Rnext_byte  = R3_ARG1,
             Rrewrite_to = R6_ARG4,
             Rscratch    = R11_scratch1;

    // get next byte
    __ lbz(Rnext_byte, Bytecodes::length_for(Bytecodes::_iload), R14_bcp);
    // If the next bytecode is _iload, wait to rewrite: we only want to
    // rewrite the last two iloads in a run. If the next bytecode is
    // _fast_iload, it was followed by neither an iload nor a caload, so
    // the current iload completes an iload pair and is rewritten to
    // _fast_iload2.
    __ cmpwi(CCR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_iload);
    __ beq(CCR0, Ldone);

    __ cmpwi(CCR1, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_iload);
    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_iload2);
    __ beq(CCR1, Lrewrite);

    __ cmpwi(CCR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_caload);
    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_icaload);
    __ beq(CCR0, Lrewrite);

    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_iload);

    __ bind(Lrewrite);
    patch_bytecode(Bytecodes::_iload, Rrewrite_to, Rscratch, false);
    __ bind(Ldone);
  }

  __ load_local_int(R17_tos, Rindex, Rindex);
}

// Load 2 integers in a row without dispatching
void TemplateTable::fast_iload2() {
  transition(vtos, itos);

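  // The first iload's local index is at bcp + 1; the second one's index
  // follows at bcp + length(_iload) + 1. The first value is pushed, the
  // second stays in the TOS cache.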
  __ lbz(R3_ARG1, 1, R14_bcp);
  __ lbz(R17_tos, Bytecodes::length_for(Bytecodes::_iload) + 1, R14_bcp);

  __ load_local_int(R3_ARG1, R11_scratch1, R3_ARG1);
  __ load_local_int(R17_tos, R12_scratch2, R17_tos);
  __ push_i(R3_ARG1);
}

void TemplateTable::fast_iload() {
  transition(vtos, itos);
  // Get the local value into tos

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ load_local_int(R17_tos, Rindex, Rindex);
}

// Load a local variable of type long from the locals area to the TOS cache register.
// The local index resides in the bytecode stream.
void TemplateTable::lload() {
  transition(vtos, ltos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ load_local_long(R17_tos, Rindex, Rindex);
}

void TemplateTable::fload() {
  transition(vtos, ftos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ load_local_float(F15_ftos, Rindex, Rindex);
}

void TemplateTable::dload() {
  transition(vtos, dtos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ load_local_double(F15_ftos, Rindex, Rindex);
}

void TemplateTable::aload() {
  transition(vtos, atos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ load_local_ptr(R17_tos, Rindex, Rindex);
}

void TemplateTable::locals_index_wide(Register Rdst) {
  // Offset is 2, not 1, because R14_bcp points to the wide prefix code.
  __ get_2_byte_integer_at_bcp(2, Rdst, InterpreterMacroAssembler::Unsigned);
}

void TemplateTable::wide_iload() {
  // Get the local value into tos.

  const Register Rindex = R11_scratch1;
  locals_index_wide(Rindex);
  __ load_local_int(R17_tos, Rindex, Rindex);
}

void TemplateTable::wide_lload() {
  transition(vtos, ltos);

  const Register Rindex = R11_scratch1;
  locals_index_wide(Rindex);
  __ load_local_long(R17_tos, Rindex, Rindex);
}

void TemplateTable::wide_fload() {
  transition(vtos, ftos);

  const Register Rindex = R11_scratch1;
  locals_index_wide(Rindex);
  __ load_local_float(F15_ftos, Rindex, Rindex);
}

void TemplateTable::wide_dload() {
  transition(vtos, dtos);

  const Register Rindex = R11_scratch1;
  locals_index_wide(Rindex);
  __ load_local_double(F15_ftos, Rindex, Rindex);
}

void TemplateTable::wide_aload() {
  transition(vtos, atos);

  const Register Rindex = R11_scratch1;
  locals_index_wide(Rindex);
  __ load_local_ptr(R17_tos, Rindex, Rindex);
}

void TemplateTable::iaload() {
  transition(itos, itos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3;
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerInt, Rtemp, Rload_addr);
  __ lwa(R17_tos, arrayOopDesc::base_offset_in_bytes(T_INT), Rload_addr);
}

void TemplateTable::laload() {
  transition(itos, ltos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3;
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerLong, Rtemp, Rload_addr);
  __ ld(R17_tos, arrayOopDesc::base_offset_in_bytes(T_LONG), Rload_addr);
}

void TemplateTable::faload() {
  transition(itos, ftos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3;
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerInt, Rtemp, Rload_addr);
  __ lfs(F15_ftos, arrayOopDesc::base_offset_in_bytes(T_FLOAT), Rload_addr);
}

void TemplateTable::daload() {
  transition(itos, dtos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3;
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerLong, Rtemp, Rload_addr);
  __ lfd(F15_ftos, arrayOopDesc::base_offset_in_bytes(T_DOUBLE), Rload_addr);
}

void TemplateTable::aaload() {
  transition(itos, atos);

  // tos: index
  // result tos: element
  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3;
  __ index_check(Rarray, R17_tos /* index */, UseCompressedOops ? 2 : LogBytesPerWord, Rtemp, Rload_addr);
  __ load_heap_oop(R17_tos, arrayOopDesc::base_offset_in_bytes(T_OBJECT), Rload_addr);
  __ verify_oop(R17_tos);
  //__ dcbt(R17_tos); // prefetch
}

void TemplateTable::baload() {
  transition(itos, itos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3;
  __ index_check(Rarray, R17_tos /* index */, 0, Rtemp, Rload_addr);
  __ lbz(R17_tos, arrayOopDesc::base_offset_in_bytes(T_BYTE), Rload_addr);
  __ extsb(R17_tos, R17_tos);
}

void TemplateTable::caload() {
  transition(itos, itos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3;
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerShort, Rtemp, Rload_addr);
  __ lhz(R17_tos, arrayOopDesc::base_offset_in_bytes(T_CHAR), Rload_addr);
}

// Iload followed by caload frequent pair.
void TemplateTable::fast_icaload() {
  transition(vtos, itos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R11_scratch1;

  locals_index(R17_tos);
  __ load_local_int(R17_tos, Rtemp, R17_tos);
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerShort, Rtemp, Rload_addr);
  __ lhz(R17_tos, arrayOopDesc::base_offset_in_bytes(T_CHAR), Rload_addr);
}

void TemplateTable::saload() {
  transition(itos, itos);

  const Register Rload_addr = R11_scratch1,
                 Rarray     = R12_scratch2,
                 Rtemp      = R3_ARG1;
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerShort, Rtemp, Rload_addr);
  __ lha(R17_tos, arrayOopDesc::base_offset_in_bytes(T_SHORT), Rload_addr);
}

void TemplateTable::iload(int n) {
  transition(vtos, itos);

  __ lwz(R17_tos, Interpreter::local_offset_in_bytes(n), R18_locals);
}

void TemplateTable::lload(int n) {
  transition(vtos, ltos);

  __ ld(R17_tos, Interpreter::local_offset_in_bytes(n + 1), R18_locals);
}

void TemplateTable::fload(int n) {
  transition(vtos, ftos);

  __ lfs(F15_ftos, Interpreter::local_offset_in_bytes(n), R18_locals);
}

void TemplateTable::dload(int n) {
  transition(vtos, dtos);

  __ lfd(F15_ftos, Interpreter::local_offset_in_bytes(n + 1), R18_locals);
}

void TemplateTable::aload(int n) {
  transition(vtos, atos);

  __ ld(R17_tos, Interpreter::local_offset_in_bytes(n), R18_locals);
}

void TemplateTable::aload_0() {
  aload_0_internal();
}

void TemplateTable::nofast_aload_0() {
  aload_0_internal(may_not_rewrite);
}

void TemplateTable::aload_0_internal(RewriteControl rc) {
  transition(vtos, atos);
  // According to bytecode histograms, the pairs:
  //
  // _aload_0, _fast_igetfield
  // _aload_0, _fast_agetfield
  // _aload_0, _fast_fgetfield
  //
  // occur frequently. If RewriteFrequentPairs is set, the (slow)
  // _aload_0 bytecode checks if the next bytecode is either
  // _fast_igetfield, _fast_agetfield or _fast_fgetfield and then
  // rewrites the current bytecode into a pair bytecode; otherwise it
  // rewrites the current bytecode into _fast_aload_0 that doesn't do
  // the pair check anymore.
  //
  // Note: If the next bytecode is _getfield, the rewrite must be
  //       delayed, otherwise we may miss an opportunity for a pair.
  //
  // Also rewrite frequent pairs
  //   aload_0, aload_1
  //   aload_0, iload_1
  // These bytecodes with a small amount of code are most profitable
  // to rewrite.

  if (RewriteFrequentPairs && rc == may_rewrite) {

    Label Lrewrite, Ldont_rewrite;
    Register Rnext_byte  = R3_ARG1,
             Rrewrite_to = R6_ARG4,
             Rscratch    = R11_scratch1;

    // Get next byte.
    __ lbz(Rnext_byte, Bytecodes::length_for(Bytecodes::_aload_0), R14_bcp);

    // If _getfield, wait to rewrite. We only want to rewrite the last two bytecodes in a pair.
    __ cmpwi(CCR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_getfield);
    __ beq(CCR0, Ldont_rewrite);

    __ cmpwi(CCR1, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_igetfield);
    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_iaccess_0);
    __ beq(CCR1, Lrewrite);

    __ cmpwi(CCR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_agetfield);
    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_aaccess_0);
    __ beq(CCR0, Lrewrite);

    __ cmpwi(CCR1, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_fgetfield);
    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_faccess_0);
    __ beq(CCR1, Lrewrite);

    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_aload_0);

    __ bind(Lrewrite);
    patch_bytecode(Bytecodes::_aload_0, Rrewrite_to, Rscratch, false);
    __ bind(Ldont_rewrite);
  }

  // Do actual aload_0 (must do this after patch_bytecode which might call VM and GC might change oop).
  aload(0);
}

void TemplateTable::istore() {
  transition(itos, vtos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ store_local_int(R17_tos, Rindex);
}

void TemplateTable::lstore() {
  transition(ltos, vtos);
  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ store_local_long(R17_tos, Rindex);
}

void TemplateTable::fstore() {
  transition(ftos, vtos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ store_local_float(F15_ftos, Rindex);
}

void TemplateTable::dstore() {
  transition(dtos, vtos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ store_local_double(F15_ftos, Rindex);
}

void TemplateTable::astore() {
  transition(vtos, vtos);

  const Register Rindex = R11_scratch1;
  __ pop_ptr();
  __ verify_oop_or_return_address(R17_tos, Rindex);
  locals_index(Rindex);
  __ store_local_ptr(R17_tos, Rindex);
}

void TemplateTable::wide_istore() {
  transition(vtos, vtos);

  const Register Rindex = R11_scratch1;
  __ pop_i();
  locals_index_wide(Rindex);
  __ store_local_int(R17_tos, Rindex);
}

void TemplateTable::wide_lstore() {
  transition(vtos, vtos);

  const Register Rindex = R11_scratch1;
  __ pop_l();
  locals_index_wide(Rindex);
  __ store_local_long(R17_tos, Rindex);
}

void TemplateTable::wide_fstore() {
  transition(vtos, vtos);

  const Register Rindex = R11_scratch1;
  __ pop_f();
  locals_index_wide(Rindex);
  __ store_local_float(F15_ftos, Rindex);
}

void TemplateTable::wide_dstore() {
  transition(vtos, vtos);

  const Register Rindex = R11_scratch1;
  __ pop_d();
  locals_index_wide(Rindex);
  __ store_local_double(F15_ftos, Rindex);
}

void TemplateTable::wide_astore() {
  transition(vtos, vtos);

  const Register Rindex = R11_scratch1;
  __ pop_ptr();
  __ verify_oop_or_return_address(R17_tos, Rindex);
  locals_index_wide(Rindex);
  __ store_local_ptr(R17_tos, Rindex);
}

void TemplateTable::iastore() {
  transition(itos, vtos);

  const Register Rindex      = R3_ARG1,
                 Rstore_addr = R4_ARG2,
                 Rarray      = R5_ARG3,
                 Rtemp       = R6_ARG4;
  __ pop_i(Rindex);
  __ index_check(Rarray, Rindex, LogBytesPerInt, Rtemp, Rstore_addr);
  __ stw(R17_tos, arrayOopDesc::base_offset_in_bytes(T_INT), Rstore_addr);
}

void TemplateTable::lastore() {
  transition(ltos, vtos);

  const Register Rindex      = R3_ARG1,
                 Rstore_addr = R4_ARG2,
                 Rarray      = R5_ARG3,
                 Rtemp       = R6_ARG4;
  __ pop_i(Rindex);
  __ index_check(Rarray, Rindex, LogBytesPerLong, Rtemp, Rstore_addr);
  __ std(R17_tos, arrayOopDesc::base_offset_in_bytes(T_LONG), Rstore_addr);
}

void TemplateTable::fastore() {
  transition(ftos, vtos);

  const Register Rindex      = R3_ARG1,
                 Rstore_addr = R4_ARG2,
                 Rarray      = R5_ARG3,
                 Rtemp       = R6_ARG4;
  __ pop_i(Rindex);
  __ index_check(Rarray, Rindex, LogBytesPerInt, Rtemp, Rstore_addr);
  __ stfs(F15_ftos, arrayOopDesc::base_offset_in_bytes(T_FLOAT), Rstore_addr);
}

void TemplateTable::dastore() {
  transition(dtos, vtos);

  const Register Rindex      = R3_ARG1,
                 Rstore_addr = R4_ARG2,
                 Rarray      = R5_ARG3,
                 Rtemp       = R6_ARG4;
  __ pop_i(Rindex);
  __ index_check(Rarray, Rindex, LogBytesPerLong, Rtemp, Rstore_addr);
  __ stfd(F15_ftos, arrayOopDesc::base_offset_in_bytes(T_DOUBLE), Rstore_addr);
}

// Pop 3 values from the stack (value, index, array) and store the value
// into the array, performing the array-store type check first.
void TemplateTable::aastore() {
  transition(vtos, vtos);

  Label Lstore_ok, Lis_null, Ldone;
  const Register Rindex    = R3_ARG1,
                 Rarray    = R4_ARG2,
                 Rscratch  = R11_scratch1,
                 Rscratch2 = R12_scratch2,
                 Rarray_klass = R5_ARG3,
                 Rarray_element_klass = Rarray_klass,
                 Rvalue_klass = R6_ARG4,
                 Rstore_addr = R31;    // Use register which survives VM call.

  __ ld(R17_tos, Interpreter::expr_offset_in_bytes(0), R15_esp); // Get value to store.
  __ lwz(Rindex, Interpreter::expr_offset_in_bytes(1), R15_esp); // Get index.
  __ ld(Rarray, Interpreter::expr_offset_in_bytes(2), R15_esp);  // Get array.

  __ verify_oop(R17_tos);
  __ index_check_without_pop(Rarray, Rindex, UseCompressedOops ? 2 : LogBytesPerWord, Rscratch, Rstore_addr);
  // Rindex is dead!
  Register Rscratch3 = Rindex;

  // Do array store check - check for NULL value first.
  __ cmpdi(CCR0, R17_tos, 0);
  __ beq(CCR0, Lis_null);

  __ load_klass(Rarray_klass, Rarray);
  __ load_klass(Rvalue_klass, R17_tos);

  // Do fast instanceof cache test.
  __ ld(Rarray_element_klass, in_bytes(ObjArrayKlass::element_klass_offset()), Rarray_klass);

  // Generate a fast subtype check. Branch to store_ok if no failure. Throw if failure.
  __ gen_subtype_check(Rvalue_klass /*subklass*/, Rarray_element_klass /*superklass*/, Rscratch, Rscratch2, Rscratch3, Lstore_ok);

  // Fell through: subtype check failed => throw an exception.
  __ load_dispatch_table(R11_scratch1, (address*)Interpreter::_throw_ArrayStoreException_entry);
  __ mtctr(R11_scratch1);
  __ bctr();

  __ bind(Lis_null);
  do_oop_store(_masm, Rstore_addr, arrayOopDesc::base_offset_in_bytes(T_OBJECT), noreg /* 0 */,
               Rscratch, Rscratch2, Rscratch3, _bs->kind(), true /* precise */, false /* check_null */);
  __ profile_null_seen(Rscratch, Rscratch2);
  __ b(Ldone);

  // Store is OK.
  __ bind(Lstore_ok);
  do_oop_store(_masm, Rstore_addr, arrayOopDesc::base_offset_in_bytes(T_OBJECT), R17_tos /* value */,
               Rscratch, Rscratch2, Rscratch3, _bs->kind(), true /* precise */, false /* check_null */);

  __ bind(Ldone);
  // Adjust sp (pops array, index and value).
  __ addi(R15_esp, R15_esp, 3 * Interpreter::stackElementSize);
}

void TemplateTable::bastore() {
  transition(itos, vtos);

  const Register Rindex   = R11_scratch1,
                 Rarray   = R12_scratch2,
                 Rscratch = R3_ARG1;
  __ pop_i(Rindex);
  // tos: val
  // Rarray: array ptr (popped by index_check)
  __ index_check(Rarray, Rindex, 0, Rscratch, Rarray);
  __ stb(R17_tos, arrayOopDesc::base_offset_in_bytes(T_BYTE), Rarray);
}

void TemplateTable::castore() {
  transition(itos, vtos);

  const Register Rindex   = R11_scratch1,
                 Rarray   = R12_scratch2,
                 Rscratch = R3_ARG1;
  __ pop_i(Rindex);
  // tos: val
  // Rarray: array ptr (popped by index_check)
  __ index_check(Rarray, Rindex, LogBytesPerShort, Rscratch, Rarray);
  __ sth(R17_tos, arrayOopDesc::base_offset_in_bytes(T_CHAR), Rarray);
}

void TemplateTable::sastore() {
  castore();
}

void TemplateTable::istore(int n) {
  transition(itos, vtos);
  __ stw(R17_tos, Interpreter::local_offset_in_bytes(n), R18_locals);
}

void TemplateTable::lstore(int n) {
  transition(ltos, vtos);
  __ std(R17_tos, Interpreter::local_offset_in_bytes(n + 1), R18_locals);
}

void TemplateTable::fstore(int n) {
  transition(ftos, vtos);
  __ stfs(F15_ftos, Interpreter::local_offset_in_bytes(n), R18_locals);
}

void TemplateTable::dstore(int n) {
  transition(dtos, vtos);
  __ stfd(F15_ftos, Interpreter::local_offset_in_bytes(n + 1), R18_locals);
}

void TemplateTable::astore(int n) {
  transition(vtos, vtos);

  __ pop_ptr();
  __ verify_oop_or_return_address(R17_tos, R11_scratch1);
  __ std(R17_tos, Interpreter::local_offset_in_bytes(n), R18_locals);
}

void TemplateTable::pop() {
  transition(vtos, vtos);

  __ addi(R15_esp, R15_esp, Interpreter::stackElementSize);
}

void TemplateTable::pop2() {
  transition(vtos, vtos);

  __ addi(R15_esp, R15_esp, Interpreter::stackElementSize * 2);
}

void TemplateTable::dup() {
  transition(vtos, vtos);

  __ ld(R11_scratch1, Interpreter::stackElementSize, R15_esp);
  __ push_ptr(R11_scratch1);
}

void TemplateTable::dup_x1() {
  transition(vtos, vtos);

  Register Ra = R11_scratch1,
           Rb = R12_scratch2;
  // stack: ..., a, b
  __ ld(Rb, Interpreter::stackElementSize,     R15_esp);
  __ ld(Ra, Interpreter::stackElementSize * 2, R15_esp);
  __ std(Rb, Interpreter::stackElementSize * 2, R15_esp);
  __ std(Ra, Interpreter::stackElementSize,     R15_esp);
  __ push_ptr(Rb);
  // stack: ..., b, a, b
}

void TemplateTable::dup_x2() {
  transition(vtos, vtos);

  Register Ra = R11_scratch1,
           Rb = R12_scratch2,
           Rc = R3_ARG1;

  // stack: ..., a, b, c
  __ ld(Rc, Interpreter::stackElementSize,     R15_esp);  // load c
  __ ld(Ra, Interpreter::stackElementSize * 3, R15_esp);  // load a
  __ std(Rc, Interpreter::stackElementSize * 3, R15_esp); // store c in a
  __ ld(Rb, Interpreter::stackElementSize * 2, R15_esp);  // load b
  // stack: ..., c, b, c
  __ std(Ra, Interpreter::stackElementSize * 2, R15_esp); // store a in b
  // stack: ..., c, a, c
  __ std(Rb, Interpreter::stackElementSize,     R15_esp); // store b in c
  __ push_ptr(Rc);                                        // push c
  // stack: ..., c, a, b, c
}

void TemplateTable::dup2() {
  transition(vtos, vtos);

  Register Ra = R11_scratch1,
           Rb = R12_scratch2;
  // stack: ..., a, b
  __ ld(Rb, Interpreter::stackElementSize,     R15_esp);
  __ ld(Ra, Interpreter::stackElementSize * 2, R15_esp);
  __ push_2ptrs(Ra, Rb);
  // stack: ..., a, b, a, b
}

void TemplateTable::dup2_x1() {
  transition(vtos, vtos);

  Register Ra = R11_scratch1,
           Rb = R12_scratch2,
           Rc = R3_ARG1;
  // stack: ..., a, b, c
  __ ld(Rc, Interpreter::stackElementSize,     R15_esp);
  __ ld(Rb, Interpreter::stackElementSize * 2, R15_esp);
  __ std(Rc, Interpreter::stackElementSize * 2, R15_esp);
  __ ld(Ra, Interpreter::stackElementSize * 3, R15_esp);
  __ std(Ra, Interpreter::stackElementSize,     R15_esp);
  __ std(Rb, Interpreter::stackElementSize * 3, R15_esp);
  // stack: ..., b, c, a
  __ push_2ptrs(Rb, Rc);
  // stack: ..., b, c, a, b, c
}

void TemplateTable::dup2_x2() {
  transition(vtos, vtos);

  Register Ra = R11_scratch1,
           Rb = R12_scratch2,
           Rc = R3_ARG1,
           Rd = R4_ARG2;
  // stack: ..., a, b, c, d
  __ ld(Rb, Interpreter::stackElementSize * 3, R15_esp);
  __ ld(Rd, Interpreter::stackElementSize,     R15_esp);
  __ std(Rb, Interpreter::stackElementSize,     R15_esp);  // store b in d
  __ std(Rd, Interpreter::stackElementSize * 3, R15_esp);  // store d in b
  __ ld(Ra, Interpreter::stackElementSize * 4, R15_esp);
  __ ld(Rc, Interpreter::stackElementSize * 2, R15_esp);
  __ std(Ra, Interpreter::stackElementSize * 2, R15_esp);  // store a in c
  __ std(Rc, Interpreter::stackElementSize * 4, R15_esp);  // store c in a
  // stack: ..., c, d, a, b
  __ push_2ptrs(Rc, Rd);
  // stack: ..., c, d, a, b, c, d
}

void TemplateTable::swap() {
  transition(vtos, vtos);
  // stack: ..., a, b

  Register Ra = R11_scratch1,
           Rb = R12_scratch2;
  // stack: ..., a, b
  __ ld(Rb, Interpreter::stackElementSize,     R15_esp);
  __ ld(Ra, Interpreter::stackElementSize * 2, R15_esp);
  __ std(Rb, Interpreter::stackElementSize * 2, R15_esp);
  __ std(Ra, Interpreter::stackElementSize,     R15_esp);
  // stack: ..., b, a
}

void TemplateTable::iop2(Operation op) {
  transition(itos, itos);

  Register Rscratch = R11_scratch1;

  __ pop_i(Rscratch);
  // For the shifts: tos  = number of bits to shift,
  //                 Rscratch = value to shift.
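  // Note: the rldicl before each shift masks the count to its low 5 bits,
  // as the JLS requires for 32-bit shifts (e.g. x << 32 == x).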
  switch (op) {
    case  add:   __ add(R17_tos, Rscratch, R17_tos); break;
    case  sub:   __ sub(R17_tos, Rscratch, R17_tos); break;
    case  mul:   __ mullw(R17_tos, Rscratch, R17_tos); break;
    case  _and:  __ andr(R17_tos, Rscratch, R17_tos); break;
    case  _or:   __ orr(R17_tos, Rscratch, R17_tos); break;
    case  _xor:  __ xorr(R17_tos, Rscratch, R17_tos); break;
    case  shl:   __ rldicl(R17_tos, R17_tos, 0, 64-5); __ slw(R17_tos, Rscratch, R17_tos); break;
    case  shr:   __ rldicl(R17_tos, R17_tos, 0, 64-5); __ sraw(R17_tos, Rscratch, R17_tos); break;
    case  ushr:  __ rldicl(R17_tos, R17_tos, 0, 64-5); __ srw(R17_tos, Rscratch, R17_tos); break;
    default:     ShouldNotReachHere();
  }
}

void TemplateTable::lop2(Operation op) {
  transition(ltos, ltos);

  Register Rscratch = R11_scratch1;
  __ pop_l(Rscratch);
  switch (op) {
    case  add:   __ add(R17_tos, Rscratch, R17_tos); break;
    case  sub:   __ sub(R17_tos, Rscratch, R17_tos); break;
    case  _and:  __ andr(R17_tos, Rscratch, R17_tos); break;
    case  _or:   __ orr(R17_tos, Rscratch, R17_tos); break;
    case  _xor:  __ xorr(R17_tos, Rscratch, R17_tos); break;
    default:     ShouldNotReachHere();
  }
}

void TemplateTable::idiv() {
  transition(itos, itos);

  Label Lnormal, Lexception, Ldone;
  Register Rdividend = R11_scratch1; // Used by irem.

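  // Check for divisor -1, 0 or 1 with one unsigned compare: divisor + 1 maps
  // them to 0, 1, 2. This routes 0 to the exception path and +/-1 to a
  // multiply, which also keeps the minint/-1 case away from divw.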
  __ addi(R0, R17_tos, 1);
  __ cmplwi(CCR0, R0, 2);
  __ bgt(CCR0, Lnormal); // divisor <-1 or >1

  __ cmpwi(CCR1, R17_tos, 0);
  __ beq(CCR1, Lexception); // divisor == 0

  __ pop_i(Rdividend);
  __ mullw(R17_tos, Rdividend, R17_tos); // div by +/-1
  __ b(Ldone);

  __ bind(Lexception);
  __ load_dispatch_table(R11_scratch1, (address*)Interpreter::_throw_ArithmeticException_entry);
  __ mtctr(R11_scratch1);
  __ bctr();

  __ align(32, 12);
  __ bind(Lnormal);
  __ pop_i(Rdividend);
  __ divw(R17_tos, Rdividend, R17_tos); // minint/-1 can't reach here; divisor +/-1 was handled above.
  __ bind(Ldone);
}

void TemplateTable::irem() {
  transition(itos, itos);

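  // rem = dividend - (dividend / divisor) * divisor. idiv leaves the
  // dividend in R11_scratch1; the divisor is preserved in R12_scratch2.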
  __ mr(R12_scratch2, R17_tos);
  idiv();
  __ mullw(R17_tos, R17_tos, R12_scratch2);
  __ subf(R17_tos, R17_tos, R11_scratch1); // Dividend set by idiv.
}

void TemplateTable::lmul() {
  transition(ltos, ltos);

  __ pop_l(R11_scratch1);
  __ mulld(R17_tos, R11_scratch1, R17_tos);
}

void TemplateTable::ldiv() {
  transition(ltos, ltos);

  Label Lnormal, Lexception, Ldone;
  Register Rdividend = R11_scratch1; // Used by lrem.

  __ addi(R0, R17_tos, 1);
  __ cmpldi(CCR0, R0, 2);
  __ bgt(CCR0, Lnormal); // divisor <-1 or >1

  __ cmpdi(CCR1, R17_tos, 0);
  __ beq(CCR1, Lexception); // divisor == 0

  __ pop_l(Rdividend);
  __ mulld(R17_tos, Rdividend, R17_tos); // div by +/-1
  __ b(Ldone);

  __ bind(Lexception);
  __ load_dispatch_table(R11_scratch1, (address*)Interpreter::_throw_ArithmeticException_entry);
  __ mtctr(R11_scratch1);
  __ bctr();

  __ align(32, 12);
  __ bind(Lnormal);
  __ pop_l(Rdividend);
  __ divd(R17_tos, Rdividend, R17_tos); // minlong/-1 can't reach here; divisor +/-1 was handled above.
  __ bind(Ldone);
}

void TemplateTable::lrem() {
  transition(ltos, ltos);

  __ mr(R12_scratch2, R17_tos);
  ldiv();
  __ mulld(R17_tos, R17_tos, R12_scratch2);
  __ subf(R17_tos, R17_tos, R11_scratch1); // Dividend set by ldiv.
}

void TemplateTable::lshl() {
  transition(itos, ltos);

  __ rldicl(R17_tos, R17_tos, 0, 64-6); // Extract least significant bits.
  __ pop_l(R11_scratch1);
  __ sld(R17_tos, R11_scratch1, R17_tos);
}

void TemplateTable::lshr() {
  transition(itos, ltos);

  __ rldicl(R17_tos, R17_tos, 0, 64-6); // Extract least significant bits.
  __ pop_l(R11_scratch1);
  __ srad(R17_tos, R11_scratch1, R17_tos);
}

void TemplateTable::lushr() {
  transition(itos, ltos);

  __ rldicl(R17_tos, R17_tos, 0, 64-6); // Extract least significant bits.
  __ pop_l(R11_scratch1);
  __ srd(R17_tos, R11_scratch1, R17_tos);
}

void TemplateTable::fop2(Operation op) {
  transition(ftos, ftos);

  switch (op) {
    case add: __ pop_f(F0_SCRATCH); __ fadds(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case sub: __ pop_f(F0_SCRATCH); __ fsubs(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case mul: __ pop_f(F0_SCRATCH); __ fmuls(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case div: __ pop_f(F0_SCRATCH); __ fdivs(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case rem:
      __ pop_f(F1_ARG1);
      __ fmr(F2_ARG2, F15_ftos);
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem));
      __ fmr(F15_ftos, F1_RET);
      break;

    default: ShouldNotReachHere();
  }
}

void TemplateTable::dop2(Operation op) {
  transition(dtos, dtos);

  switch (op) {
    case add: __ pop_d(F0_SCRATCH); __ fadd(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case sub: __ pop_d(F0_SCRATCH); __ fsub(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case mul: __ pop_d(F0_SCRATCH); __ fmul(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case div: __ pop_d(F0_SCRATCH); __ fdiv(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case rem:
      __ pop_d(F1_ARG1);
      __ fmr(F2_ARG2, F15_ftos);
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem));
      __ fmr(F15_ftos, F1_RET);
      break;

    default: ShouldNotReachHere();
  }
}

// Negate the value in the TOS cache.
void TemplateTable::ineg() {
  transition(itos, itos);

  __ neg(R17_tos, R17_tos);
}

// Negate the value in the TOS cache.
void TemplateTable::lneg() {
  transition(ltos, ltos);

  __ neg(R17_tos, R17_tos);
}

void TemplateTable::fneg() {
  transition(ftos, ftos);

  __ fneg(F15_ftos, F15_ftos);
}

void TemplateTable::dneg() {
  transition(dtos, dtos);

  __ fneg(F15_ftos, F15_ftos);
}

// Increments a local variable in place.
void TemplateTable::iinc() {
  transition(vtos, vtos);

  const Register Rindex     = R11_scratch1,
                 Rincrement = R0,
                 Rvalue     = R12_scratch2;

  locals_index(Rindex);              // Load locals index from bytecode stream.
  __ lbz(Rincrement, 2, R14_bcp);    // Load increment from the bytecode stream.
  __ extsb(Rincrement, Rincrement);

  __ load_local_int(Rvalue, Rindex, Rindex); // Puts address of local into Rindex.

  __ add(Rvalue, Rincrement, Rvalue);
  __ stw(Rvalue, 0, Rindex);
}

void TemplateTable::wide_iinc() {
  transition(vtos, vtos);

  Register Rindex       = R11_scratch1,
           Rlocals_addr = Rindex,
           Rincr        = R12_scratch2;
  locals_index_wide(Rindex);
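  // The 16-bit increment is at bcp + 4: wide prefix (1) + iinc opcode (1) +
  // 2-byte local index (2).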
  __ get_2_byte_integer_at_bcp(4, Rincr, InterpreterMacroAssembler::Signed);
  __ load_local_int(R17_tos, Rlocals_addr, Rindex);
  __ add(R17_tos, Rincr, R17_tos);
  __ stw(R17_tos, 0, Rlocals_addr);
}

void TemplateTable::convert() {
  // %%%%% Factor this first part across platforms
#ifdef ASSERT
  TosState tos_in  = ilgl;
  TosState tos_out = ilgl;
  switch (bytecode()) {
    case Bytecodes::_i2l: // fall through
    case Bytecodes::_i2f: // fall through
    case Bytecodes::_i2d: // fall through
    case Bytecodes::_i2b: // fall through
    case Bytecodes::_i2c: // fall through
    case Bytecodes::_i2s: tos_in = itos; break;
    case Bytecodes::_l2i: // fall through
    case Bytecodes::_l2f: // fall through
    case Bytecodes::_l2d: tos_in = ltos; break;
    case Bytecodes::_f2i: // fall through
    case Bytecodes::_f2l: // fall through
    case Bytecodes::_f2d: tos_in = ftos; break;
    case Bytecodes::_d2i: // fall through
    case Bytecodes::_d2l: // fall through
    case Bytecodes::_d2f: tos_in = dtos; break;
    default             : ShouldNotReachHere();
  }
  switch (bytecode()) {
    case Bytecodes::_l2i: // fall through
    case Bytecodes::_f2i: // fall through
    case Bytecodes::_d2i: // fall through
    case Bytecodes::_i2b: // fall through
    case Bytecodes::_i2c: // fall through
    case Bytecodes::_i2s: tos_out = itos; break;
    case Bytecodes::_i2l: // fall through
    case Bytecodes::_f2l: // fall through
    case Bytecodes::_d2l: tos_out = ltos; break;
    case Bytecodes::_i2f: // fall through
    case Bytecodes::_l2f: // fall through
    case Bytecodes::_d2f: tos_out = ftos; break;
    case Bytecodes::_i2d: // fall through
    case Bytecodes::_l2d: // fall through
    case Bytecodes::_f2d: tos_out = dtos; break;
    default             : ShouldNotReachHere();
  }
  transition(tos_in, tos_out);
#endif

  // Conversion
  Label done;
  switch (bytecode()) {
    case Bytecodes::_i2l:
      __ extsw(R17_tos, R17_tos);
      break;

    case Bytecodes::_l2i:
      // Nothing to do, we'll continue to work with the lower bits.
      break;

    case Bytecodes::_i2b:
      __ extsb(R17_tos, R17_tos);
      break;

    case Bytecodes::_i2c:
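      // Zero-extend the low 16 bits; Java char is unsigned.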
      __ rldicl(R17_tos, R17_tos, 0, 64-2*8);
      break;

    case Bytecodes::_i2s:
      __ extsh(R17_tos, R17_tos);
      break;

    case Bytecodes::_i2d:
      __ extsw(R17_tos, R17_tos);
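      // Fall through: after sign extension, i2d shares the l2d code.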
    case Bytecodes::_l2d:
      __ push_l_pop_d();
      __ fcfid(F15_ftos, F15_ftos);
      break;

    case Bytecodes::_i2f:
      __ extsw(R17_tos, R17_tos);
      __ push_l_pop_d();
      if (VM_Version::has_fcfids()) { // fcfids is >= Power7 only
        // Comment: alternatively, load with sign extend could be done by lfiwax.
        __ fcfids(F15_ftos, F15_ftos);
      } else {
        __ fcfid(F15_ftos, F15_ftos);
        __ frsp(F15_ftos, F15_ftos);
      }
      break;

    case Bytecodes::_l2f:
      if (VM_Version::has_fcfids()) { // fcfids is >= Power7 only
        __ push_l_pop_d();
        __ fcfids(F15_ftos, F15_ftos);
      } else {
        // Avoid rounding problem when result should be 0x3f800001: need fixup code before fcfid+frsp.
        __ mr(R3_ARG1, R17_tos);
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::l2f));
        __ fmr(F15_ftos, F1_RET);
      }
      break;

    case Bytecodes::_f2d:
      // empty
      break;

    case Bytecodes::_d2f:
      __ frsp(F15_ftos, F15_ftos);
      break;

    case Bytecodes::_d2i:
    case Bytecodes::_f2i:
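      // Comparing the value with itself is unordered iff it is NaN; bso then
      // takes the NaN path, which yields 0 as the JVM spec requires.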
      __ fcmpu(CCR0, F15_ftos, F15_ftos);
      __ li(R17_tos, 0); // 0 in case of NAN
      __ bso(CCR0, done);
      __ fctiwz(F15_ftos, F15_ftos);
      __ push_d_pop_l();
      break;

    case Bytecodes::_d2l:
    case Bytecodes::_f2l:
      __ fcmpu(CCR0, F15_ftos, F15_ftos);
      __ li(R17_tos, 0); // 0 in case of NAN
      __ bso(CCR0, done);
      __ fctidz(F15_ftos, F15_ftos);
      __ push_d_pop_l();
      break;

    default: ShouldNotReachHere();
  }
  __ bind(done);
}

// Long compare
void TemplateTable::lcmp() {
  transition(ltos, itos);

  const Register Rscratch = R11_scratch1;
  __ pop_l(Rscratch); // first operand, deeper in stack

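  // Trick: mfcr copies CR into a GPR, so CR0's LT and GT bits become the two
  // most significant bits of the low word. srawi smears LT into -1 or 0,
  // srwi isolates GT as 1 or 0, and the or combines them into -1, 0 or 1.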
  __ cmpd(CCR0, Rscratch, R17_tos); // compare
  __ mfcr(R17_tos); // set bits 32..33 as follows: <: 0b10, =: 0b00, >: 0b01
  __ srwi(Rscratch, R17_tos, 30);
  __ srawi(R17_tos, R17_tos, 31);
  __ orr(R17_tos, Rscratch, R17_tos); // set result as follows: <: -1, =: 0, >: 1
}

// fcmpl/fcmpg and dcmpl/dcmpg bytecodes
// unordered_result == -1 => fcmpl or dcmpl
// unordered_result ==  1 => fcmpg or dcmpg
void TemplateTable::float_cmp(bool is_float, int unordered_result) {
  const FloatRegister Rfirst  = F0_SCRATCH,
                      Rsecond = F15_ftos;
  const Register Rscratch = R11_scratch1;

  if (is_float) {
    __ pop_f(Rfirst);
  } else {
    __ pop_d(Rfirst);
  }

  Label Lunordered, Ldone;
  __ fcmpu(CCR0, Rfirst, Rsecond); // compare
  if (unordered_result) {
    __ bso(CCR0, Lunordered);
  }
  __ mfcr(R17_tos); // set bits 32..33 as follows: <: 0b10, =: 0b00, >: 0b01
  __ srwi(Rscratch, R17_tos, 30);
  __ srawi(R17_tos, R17_tos, 31);
  __ orr(R17_tos, Rscratch, R17_tos); // set result as follows: <: -1, =: 0, >: 1
  if (unordered_result) {
    __ b(Ldone);
    __ bind(Lunordered);
    __ load_const_optimized(R17_tos, unordered_result);
  }
  __ bind(Ldone);
}

// Branch_conditional which takes TemplateTable::Condition.
void TemplateTable::branch_conditional(ConditionRegister crx, TemplateTable::Condition cc, Label& L, bool invert) {
  bool positive = false;
  Assembler::Condition cond = Assembler::equal;
  switch (cc) {
    case TemplateTable::equal:         positive = true ; cond = Assembler::equal  ; break;
    case TemplateTable::not_equal:     positive = false; cond = Assembler::equal  ; break;
    case TemplateTable::less:          positive = true ; cond = Assembler::less   ; break;
    case TemplateTable::less_equal:    positive = false; cond = Assembler::greater; break;
    case TemplateTable::greater:       positive = true ; cond = Assembler::greater; break;
    case TemplateTable::greater_equal: positive = false; cond = Assembler::less   ; break;
    default: ShouldNotReachHere();
  }
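  // Conditions without their own CR bit (e.g. less_equal) test the
  // complementary bit with positive == false; together with 'invert' this
  // selects between branch-if-bit-set and branch-if-bit-clear.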
  int bo = (positive != invert) ? Assembler::bcondCRbiIs1 : Assembler::bcondCRbiIs0;
  int bi = Assembler::bi0(crx, cond);
  __ bc(bo, bi, L);
}

void TemplateTable::branch(bool is_jsr, bool is_wide) {

  // Note: on SPARC, we use InterpreterMacroAssembler::if_cmp also.
  __ verify_thread();

  const Register Rscratch1    = R11_scratch1,
                 Rscratch2    = R12_scratch2,
                 Rscratch3    = R3_ARG1,
                 R4_counters  = R4_ARG2,
                 bumped_count = R31,
                 Rdisp        = R22_tmp2;

  __ profile_taken_branch(Rscratch1, bumped_count);

  // Get (wide) offset.
  if (is_wide) {
    __ get_4_byte_integer_at_bcp(1, Rdisp, InterpreterMacroAssembler::Signed);
  } else {
    __ get_2_byte_integer_at_bcp(1, Rdisp, InterpreterMacroAssembler::Signed);
  }

  // --------------------------------------------------------------------------
  // Handle all the JSR stuff here, then exit.
  // It's much shorter and cleaner than intermingling with the
  // non-JSR normal-branch stuff occurring below.
  if (is_jsr) {
    // Compute return address as bci in R17_tos.
1611    __ ld(Rscratch1, in_bytes(Method::const_offset()), R19_method);
1612    __ addi(Rscratch2, R14_bcp, -in_bytes(ConstMethod::codes_offset()) + (is_wide ? 5 : 3));
1613    __ subf(R17_tos, Rscratch1, Rscratch2);
1614
1615    // Bump bcp to target of JSR.
1616    __ add(R14_bcp, Rdisp, R14_bcp);
1617    // Push returnAddress for "ret" on stack.
1618    __ push_ptr(R17_tos);
1619    // And away we go!
1620    __ dispatch_next(vtos);
1621    return;
1622  }
1623
1624  // --------------------------------------------------------------------------
1625  // Normal (non-jsr) branch handling
1626
1627  // Bump bytecode pointer by displacement (take the branch).
1628  __ add(R14_bcp, Rdisp, R14_bcp); // Add to bc addr.
1629
1630  const bool increment_invocation_counter_for_backward_branches = UseCompiler && UseLoopCounter;
1631  if (increment_invocation_counter_for_backward_branches) {
1632    Label Lforward;
1633    __ dispatch_prolog(vtos);
1634
1635    // Check branch direction.
1636    __ cmpdi(CCR0, Rdisp, 0);
1637    __ bgt(CCR0, Lforward);
1638
1639    __ get_method_counters(R19_method, R4_counters, Lforward);
1640
1641    if (TieredCompilation) {
1642      Label Lno_mdo, Loverflow;
1643      const int increment = InvocationCounter::count_increment;
1644      if (ProfileInterpreter) {
1645        Register Rmdo = Rscratch1;
1646
1647        // If no method data exists, go to profile_continue.
1648        __ ld(Rmdo, in_bytes(Method::method_data_offset()), R19_method);
1649        __ cmpdi(CCR0, Rmdo, 0);
1650        __ beq(CCR0, Lno_mdo);
1651
1652        // Increment backedge counter in the MDO.
1653        const int mdo_bc_offs = in_bytes(MethodData::backedge_counter_offset()) + in_bytes(InvocationCounter::counter_offset());
1654        __ lwz(Rscratch2, mdo_bc_offs, Rmdo);
1655        __ lwz(Rscratch3, in_bytes(MethodData::backedge_mask_offset()), Rmdo);
1656        __ addi(Rscratch2, Rscratch2, increment);
1657        __ stw(Rscratch2, mdo_bc_offs, Rmdo);
1658        __ and_(Rscratch3, Rscratch2, Rscratch3);
1659        __ bne(CCR0, Lforward);
1660        __ b(Loverflow);
1661      }
1662
1663      // If there's no MDO, increment counter in method.
1664      const int mo_bc_offs = in_bytes(MethodCounters::backedge_counter_offset()) + in_bytes(InvocationCounter::counter_offset());
1665      __ bind(Lno_mdo);
1666      __ lwz(Rscratch2, mo_bc_offs, R4_counters);
1667      __ lwz(Rscratch3, in_bytes(MethodCounters::backedge_mask_offset()), R4_counters);
1668      __ addi(Rscratch2, Rscratch2, increment);
1669      __ stw(Rscratch2, mo_bc_offs, R4_counters);
1670      __ and_(Rscratch3, Rscratch2, Rscratch3);
1671      __ bne(CCR0, Lforward);
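      // A zero and-result means the counter just crossed an overflow
      // notification threshold encoded in the mask; a nonzero result means
      // no overflow yet, so we keep interpreting via Lforward.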
1672
1673      __ bind(Loverflow);
1674
      // Notification point for the loop; pass the bcp of the branch bytecode.
      __ subf(R4_ARG2, Rdisp, R14_bcp); // Recover the bcp of the branch bytecode.
1677      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), R4_ARG2, true);
1678
1679      // Was an OSR adapter generated?
1680      __ cmpdi(CCR0, R3_RET, 0);
1681      __ beq(CCR0, Lforward);
1682
1683      // Has the nmethod been invalidated already?
1684      __ lbz(R0, nmethod::state_offset(), R3_RET);
1685      __ cmpwi(CCR0, R0, nmethod::in_use);
1686      __ bne(CCR0, Lforward);
1687
      // Migrate the interpreter frame off the stack.
      // We can use all registers because we will not return to the interpreter from this point.
1690
1691      // Save nmethod.
1692      const Register osr_nmethod = R31;
1693      __ mr(osr_nmethod, R3_RET);
1694      __ set_top_ijava_frame_at_SP_as_last_Java_frame(R1_SP, R11_scratch1);
1695      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin), R16_thread);
1696      __ reset_last_Java_frame();
1697      // OSR buffer is in ARG1.
1698
1699      // Remove the interpreter frame.
1700      __ merge_frames(/*top_frame_sp*/ R21_sender_SP, /*return_pc*/ R0, R11_scratch1, R12_scratch2);
1701
1702      // Jump to the osr code.
1703      __ ld(R11_scratch1, nmethod::osr_entry_point_offset(), osr_nmethod);
1704      __ mtlr(R0);
1705      __ mtctr(R11_scratch1);
1706      __ bctr();
1707
1708    } else {
1709
1710      const Register invoke_ctr = Rscratch1;
      // Update the backedge counter separately from the invocation counter.
1712      __ increment_backedge_counter(R4_counters, invoke_ctr, Rscratch2, Rscratch3);
1713
1714      if (ProfileInterpreter) {
1715        __ test_invocation_counter_for_mdp(invoke_ctr, R4_counters, Rscratch2, Lforward);
1716        if (UseOnStackReplacement) {
1717          __ test_backedge_count_for_osr(bumped_count, R4_counters, R14_bcp, Rdisp, Rscratch2);
1718        }
1719      } else {
1720        if (UseOnStackReplacement) {
1721          __ test_backedge_count_for_osr(invoke_ctr, R4_counters, R14_bcp, Rdisp, Rscratch2);
1722        }
1723      }
1724    }
1725
1726    __ bind(Lforward);
1727    __ dispatch_epilog(vtos);
1728
1729  } else {
1730    __ dispatch_next(vtos);
1731  }
1732}
1733
1734// Helper function for if_cmp* methods below.
1735// Factored out common compare and branch code.
1736void TemplateTable::if_cmp_common(Register Rfirst, Register Rsecond, Register Rscratch1, Register Rscratch2, Condition cc, bool is_jint, bool cmp0) {
1737  Label Lnot_taken;
  // Note: The condition code we get is the condition under which the
  // bytecode branch is taken, i.e. under which we *fall through* to the
  // branch code below. Hence we invert the CC for the jump to Lnot_taken.
1740
1741  if (is_jint) {
1742    if (cmp0) {
1743      __ cmpwi(CCR0, Rfirst, 0);
1744    } else {
1745      __ cmpw(CCR0, Rfirst, Rsecond);
1746    }
1747  } else {
1748    if (cmp0) {
1749      __ cmpdi(CCR0, Rfirst, 0);
1750    } else {
1751      __ cmpd(CCR0, Rfirst, Rsecond);
1752    }
1753  }
1754  branch_conditional(CCR0, cc, Lnot_taken, /*invert*/ true);
1755
  // Condition holds => take the bytecode branch.
1757  branch(false, false);
1758
  // Condition does not hold => continue with the next bytecode.
1760  __ align(32, 12);
1761  __ bind(Lnot_taken);
1762  __ profile_not_taken_branch(Rscratch1, Rscratch2);
1763}
1764
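// Illustrative sketch (assuming the shared template table passes the taken
// condition, e.g. cc == equal for ifeq): the code emitted for if_0cmp is
// roughly
//   cmpwi CCR0, R17_tos, 0
//   bne   CCR0, Lnot_taken   // inverted condition
//   <code taking the bytecode branch>
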
// Compare the integer value with zero; take the bytecode branch if CC holds, continue otherwise.
1766void TemplateTable::if_0cmp(Condition cc) {
1767  transition(itos, vtos);
1768
1769  if_cmp_common(R17_tos, noreg, R11_scratch1, R12_scratch2, cc, true, true);
1770}
1771
// Compare two integer values; take the bytecode branch if CC holds, continue otherwise.
1773//
1774// Interface:
1775//  - Rfirst: First operand  (older stack value)
1776//  - tos:    Second operand (younger stack value)
1777void TemplateTable::if_icmp(Condition cc) {
1778  transition(itos, vtos);
1779
1780  const Register Rfirst  = R0,
1781                 Rsecond = R17_tos;
1782
1783  __ pop_i(Rfirst);
1784  if_cmp_common(Rfirst, Rsecond, R11_scratch1, R12_scratch2, cc, true, false);
1785}
1786
1787void TemplateTable::if_nullcmp(Condition cc) {
1788  transition(atos, vtos);
1789
1790  if_cmp_common(R17_tos, noreg, R11_scratch1, R12_scratch2, cc, false, true);
1791}
1792
1793void TemplateTable::if_acmp(Condition cc) {
1794  transition(atos, vtos);
1795
1796  const Register Rfirst  = R0,
1797                 Rsecond = R17_tos;
1798
1799  __ pop_ptr(Rfirst);
1800  if_cmp_common(Rfirst, Rsecond, R11_scratch1, R12_scratch2, cc, false, false);
1801}
1802
1803void TemplateTable::ret() {
1804  locals_index(R11_scratch1);
1805  __ load_local_ptr(R17_tos, R11_scratch1, R11_scratch1);
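  // R17_tos now holds the bci that jsr/astore saved in the local as a
  // returnAddress.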
1806
1807  __ profile_ret(vtos, R17_tos, R11_scratch1, R12_scratch2);
1808
1809  __ ld(R11_scratch1, in_bytes(Method::const_offset()), R19_method);
1810  __ add(R11_scratch1, R17_tos, R11_scratch1);
1811  __ addi(R14_bcp, R11_scratch1, in_bytes(ConstMethod::codes_offset()));
1812  __ dispatch_next(vtos);
1813}
1814
1815void TemplateTable::wide_ret() {
1816  transition(vtos, vtos);
1817
1818  const Register Rindex = R3_ARG1,
1819                 Rscratch1 = R11_scratch1,
1820                 Rscratch2 = R12_scratch2;
1821
1822  locals_index_wide(Rindex);
1823  __ load_local_ptr(R17_tos, R17_tos, Rindex);
1824  __ profile_ret(vtos, R17_tos, Rscratch1, R12_scratch2);
1825  // Tos now contains the bci, compute the bcp from that.
1826  __ ld(Rscratch1, in_bytes(Method::const_offset()), R19_method);
1827  __ addi(Rscratch2, R17_tos, in_bytes(ConstMethod::codes_offset()));
1828  __ add(R14_bcp, Rscratch1, Rscratch2);
1829  __ dispatch_next(vtos);
1830}
1831
1832void TemplateTable::tableswitch() {
1833  transition(itos, vtos);
1834
1835  Label Ldispatch, Ldefault_case;
1836  Register Rlow_byte         = R3_ARG1,
1837           Rindex            = Rlow_byte,
1838           Rhigh_byte        = R4_ARG2,
1839           Rdef_offset_addr  = R5_ARG3, // is going to contain address of default offset
1840           Rscratch1         = R11_scratch1,
1841           Rscratch2         = R12_scratch2,
1842           Roffset           = R6_ARG4;
1843
1844  // Align bcp.
1845  __ addi(Rdef_offset_addr, R14_bcp, BytesPerInt);
1846  __ clrrdi(Rdef_offset_addr, Rdef_offset_addr, log2_long((jlong)BytesPerInt));
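  // E.g. with BytesPerInt == 4, addi + clrrdi rounds bcp + 1 up to the next
  // 4-byte boundary, skipping the 0-3 padding bytes that follow the opcode.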
1847
1848  // Load lo & hi.
1849  __ get_u4(Rlow_byte, Rdef_offset_addr, BytesPerInt, InterpreterMacroAssembler::Unsigned);
  __ get_u4(Rhigh_byte, Rdef_offset_addr, 2 * BytesPerInt, InterpreterMacroAssembler::Unsigned);
1851
1852  // Check for default case (=index outside [low,high]).
1853  __ cmpw(CCR0, R17_tos, Rlow_byte);
1854  __ cmpw(CCR1, R17_tos, Rhigh_byte);
1855  __ blt(CCR0, Ldefault_case);
1856  __ bgt(CCR1, Ldefault_case);
1857
1858  // Lookup dispatch offset.
1859  __ sub(Rindex, R17_tos, Rlow_byte);
1860  __ extsw(Rindex, Rindex);
1861  __ profile_switch_case(Rindex, Rhigh_byte /* scratch */, Rscratch1, Rscratch2);
1862  __ sldi(Rindex, Rindex, LogBytesPerInt);
1863  __ addi(Rindex, Rindex, 3 * BytesPerInt);
1864#if defined(VM_LITTLE_ENDIAN)
1865  __ lwbrx(Roffset, Rdef_offset_addr, Rindex);
1866  __ extsw(Roffset, Roffset);
1867#else
1868  __ lwax(Roffset, Rdef_offset_addr, Rindex);
1869#endif
1870  __ b(Ldispatch);
1871
1872  __ bind(Ldefault_case);
1873  __ profile_switch_default(Rhigh_byte, Rscratch1);
1874  __ get_u4(Roffset, Rdef_offset_addr, 0, InterpreterMacroAssembler::Signed);
1875
1876  __ bind(Ldispatch);
1877
1878  __ add(R14_bcp, Roffset, R14_bcp);
1879  __ dispatch_next(vtos);
1880}
1881
1882void TemplateTable::lookupswitch() {
1883  transition(itos, itos);
1884  __ stop("lookupswitch bytecode should have been rewritten");
1885}
1886
// Lookup switch using linear search through cases.
1888// Bytecode stream format:
1889// Bytecode (1) | 4-byte padding | default offset (4) | count (4) | value/offset pair1 (8) | value/offset pair2 (8) | ...
1890// Note: Everything is big-endian format here.
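// Illustrative example (not taken from a real class file): for
//   lookupswitch { 1 -> L1, 10 -> L2, default -> Ld }
// the aligned stream holds: off(Ld) | npairs=2 | 1 | off(L1) | 10 | off(L2).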
1891void TemplateTable::fast_linearswitch() {
1892  transition(itos, vtos);
1893
1894  Label Lloop_entry, Lsearch_loop, Lcontinue_execution, Ldefault_case;
1895  Register Rcount           = R3_ARG1,
1896           Rcurrent_pair    = R4_ARG2,
1897           Rdef_offset_addr = R5_ARG3, // Is going to contain address of default offset.
1898           Roffset          = R31,     // Might need to survive C call.
1899           Rvalue           = R12_scratch2,
1900           Rscratch         = R11_scratch1,
1901           Rcmp_value       = R17_tos;
1902
1903  // Align bcp.
1904  __ addi(Rdef_offset_addr, R14_bcp, BytesPerInt);
1905  __ clrrdi(Rdef_offset_addr, Rdef_offset_addr, log2_long((jlong)BytesPerInt));
1906
  // Set up loop counter and limit.
1908  __ get_u4(Rcount, Rdef_offset_addr, BytesPerInt, InterpreterMacroAssembler::Unsigned);
1909  __ addi(Rcurrent_pair, Rdef_offset_addr, 2 * BytesPerInt); // Rcurrent_pair now points to first pair.
1910
1911  __ mtctr(Rcount);
1912  __ cmpwi(CCR0, Rcount, 0);
1913  __ bne(CCR0, Lloop_entry);
1914
1915  // Default case
1916  __ bind(Ldefault_case);
1917  __ get_u4(Roffset, Rdef_offset_addr, 0, InterpreterMacroAssembler::Signed);
1918  if (ProfileInterpreter) {
1919    __ profile_switch_default(Rdef_offset_addr, Rcount/* scratch */);
1920  }
1921  __ b(Lcontinue_execution);
1922
1923  // Next iteration
1924  __ bind(Lsearch_loop);
1925  __ bdz(Ldefault_case);
1926  __ addi(Rcurrent_pair, Rcurrent_pair, 2 * BytesPerInt);
1927  __ bind(Lloop_entry);
1928  __ get_u4(Rvalue, Rcurrent_pair, 0, InterpreterMacroAssembler::Unsigned);
1929  __ cmpw(CCR0, Rvalue, Rcmp_value);
1930  __ bne(CCR0, Lsearch_loop);
1931
1932  // Found, load offset.
1933  __ get_u4(Roffset, Rcurrent_pair, BytesPerInt, InterpreterMacroAssembler::Signed);
1934  // Calculate case index and profile
1935  __ mfctr(Rcurrent_pair);
1936  if (ProfileInterpreter) {
1937    __ sub(Rcurrent_pair, Rcount, Rcurrent_pair);
1938    __ profile_switch_case(Rcurrent_pair, Rcount /*scratch*/, Rdef_offset_addr/*scratch*/, Rscratch);
1939  }
1940
1941  __ bind(Lcontinue_execution);
1942  __ add(R14_bcp, Roffset, R14_bcp);
1943  __ dispatch_next(vtos);
1944}
1945
// Lookup switch using binary search (value/offset pairs are ordered).
1947// Bytecode stream format:
1948// Bytecode (1) | 4-byte padding | default offset (4) | count (4) | value/offset pair1 (8) | value/offset pair2 (8) | ...
// Note: Everything is in big-endian format here. So on little-endian machines, we have to byte-reverse the offsets, the count, and the compared values.
1950void TemplateTable::fast_binaryswitch() {
1951
1952  transition(itos, vtos);
1953  // Implementation using the following core algorithm: (copied from Intel)
1954  //
1955  // int binary_search(int key, LookupswitchPair* array, int n) {
1956  //   // Binary search according to "Methodik des Programmierens" by
1957  //   // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
1958  //   int i = 0;
1959  //   int j = n;
1960  //   while (i+1 < j) {
1961  //     // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
1962  //     // with      Q: for all i: 0 <= i < n: key < a[i]
  //     // where a stands for the array and assuming that the (nonexistent)
1964  //     // element a[n] is infinitely big.
1965  //     int h = (i + j) >> 1;
1966  //     // i < h < j
1967  //     if (key < array[h].fast_match()) {
1968  //       j = h;
1969  //     } else {
1970  //       i = h;
1971  //     }
1972  //   }
1973  //   // R: a[i] <= key < a[i+1] or Q
1974  //   // (i.e., if key is within array, i is the correct index)
1975  //   return i;
1976  // }
1977
1978  // register allocation
1979  const Register Rkey     = R17_tos;          // already set (tosca)
1980  const Register Rarray   = R3_ARG1;
1981  const Register Ri       = R4_ARG2;
1982  const Register Rj       = R5_ARG3;
1983  const Register Rh       = R6_ARG4;
1984  const Register Rscratch = R11_scratch1;
1985
1986  const int log_entry_size = 3;
1987  const int entry_size = 1 << log_entry_size;
1988
1989  Label found;
1990
  // Find array start.
1992  __ addi(Rarray, R14_bcp, 3 * BytesPerInt);
1993  __ clrrdi(Rarray, Rarray, log2_long((jlong)BytesPerInt));
1994
1995  // initialize i & j
  __ li(Ri, 0);
1997  __ get_u4(Rj, Rarray, -BytesPerInt, InterpreterMacroAssembler::Unsigned);
1998
1999  // and start.
2000  Label entry;
2001  __ b(entry);
2002
2003  // binary search loop
2004  { Label loop;
2005    __ bind(loop);
2006    // int h = (i + j) >> 1;
2007    __ srdi(Rh, Rh, 1);
2008    // if (key < array[h].fast_match()) {
2009    //   j = h;
2010    // } else {
2011    //   i = h;
2012    // }
2013    __ sldi(Rscratch, Rh, log_entry_size);
2014#if defined(VM_LITTLE_ENDIAN)
2015    __ lwbrx(Rscratch, Rscratch, Rarray);
2016#else
2017    __ lwzx(Rscratch, Rscratch, Rarray);
2018#endif
2019
2020    // if (key < current value)
2021    //   Rh = Rj
2022    // else
2023    //   Rh = Ri
2024    Label Lgreater;
2025    __ cmpw(CCR0, Rkey, Rscratch);
2026    __ bge(CCR0, Lgreater);
2027    __ mr(Rj, Rh);
2028    __ b(entry);
2029    __ bind(Lgreater);
2030    __ mr(Ri, Rh);
2031
2032    // while (i+1 < j)
2033    __ bind(entry);
2034    __ addi(Rscratch, Ri, 1);
2035    __ cmpw(CCR0, Rscratch, Rj);
    __ add(Rh, Ri, Rj); // Precompute i + j; the loop head shifts it right by 1.
2037
2038    __ blt(CCR0, loop);
2039  }
2040
2041  // End of binary search, result index is i (must check again!).
2042  Label default_case;
2043  Label continue_execution;
2044  if (ProfileInterpreter) {
    __ mr(Rh, Ri);              // Save index i in Rh for profiling.
2046  }
2047  // Ri = value offset
2048  __ sldi(Ri, Ri, log_entry_size);
2049  __ add(Ri, Ri, Rarray);
2050  __ get_u4(Rscratch, Ri, 0, InterpreterMacroAssembler::Unsigned);
2051
  // Ri now addresses the selected pair: value at 0, offset at BytesPerInt.
  __ cmpw(CCR0, Rkey, Rscratch);
  __ beq(CCR0, found); // Label declared above; taken when the key matches.
2056  // entry not found -> j = default offset
2057  __ get_u4(Rj, Rarray, -2 * BytesPerInt, InterpreterMacroAssembler::Unsigned);
2058  __ b(default_case);
2059
  __ bind(found);
2061  // entry found -> j = offset
2062  __ profile_switch_case(Rh, Rj, Rscratch, Rkey);
2063  __ get_u4(Rj, Ri, BytesPerInt, InterpreterMacroAssembler::Unsigned);
2064
2065  if (ProfileInterpreter) {
2066    __ b(continue_execution);
2067  }
2068
2069  __ bind(default_case); // fall through (if not profiling)
2070  __ profile_switch_default(Ri, Rscratch);
2071
2072  __ bind(continue_execution);
2073
2074  __ extsw(Rj, Rj);
2075  __ add(R14_bcp, Rj, R14_bcp);
2076  __ dispatch_next(vtos);
2077}
2078
2079void TemplateTable::_return(TosState state) {
2080  transition(state, state);
2081  assert(_desc->calls_vm(),
2082         "inconsistent calls_vm information"); // call in remove_activation
2083
2084  if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
2085
2086    Register Rscratch     = R11_scratch1,
2087             Rklass       = R12_scratch2,
2088             Rklass_flags = Rklass;
2089    Label Lskip_register_finalizer;
2090
2091    // Check if the method has the FINALIZER flag set and call into the VM to finalize in this case.
2092    assert(state == vtos, "only valid state");
2093    __ ld(R17_tos, 0, R18_locals);
2094
2095    // Load klass of this obj.
2096    __ load_klass(Rklass, R17_tos);
2097    __ lwz(Rklass_flags, in_bytes(Klass::access_flags_offset()), Rklass);
2098    __ testbitdi(CCR0, R0, Rklass_flags, exact_log2(JVM_ACC_HAS_FINALIZER));
2099    __ bfalse(CCR0, Lskip_register_finalizer);
2100
2101    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), R17_tos /* obj */);
2102
2103    __ align(32, 12);
2104    __ bind(Lskip_register_finalizer);
2105  }
2106
  // Move the result value into the correct register and remove the memory stack frame.
2108  __ remove_activation(state, /* throw_monitor_exception */ true);
2109  // Restoration of lr done by remove_activation.
2110  switch (state) {
2111    case ltos:
2112    case btos:
2113    case ctos:
2114    case stos:
2115    case atos:
2116    case itos: __ mr(R3_RET, R17_tos); break;
2117    case ftos:
2118    case dtos: __ fmr(F1_RET, F15_ftos); break;
    case vtos: // This might be a constructor. Final fields (and volatile fields on PPC64) need
               // to become visible before the reference to the object is stored anywhere.
2121               __ membar(Assembler::StoreStore); break;
2122    default  : ShouldNotReachHere();
2123  }
2124  __ blr();
2125}
2126
2127// ============================================================================
2128// Constant pool cache access
2129//
2130// Memory ordering:
2131//
// As in the C++ interpreter, we load the fields
//   - _indices
//   - _f12_oop
// with acquire semantics, because they are queried to decide whether the
// cache entry is already resolved. We don't want later loads to float above
// this check.
2137// See also comments in ConstantPoolCacheEntry::bytecode_1(),
2138// ConstantPoolCacheEntry::bytecode_2() and ConstantPoolCacheEntry::f1();
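//
// Acquire idiom used below (sketch): load; compare; branch; isync. The
// branch depending on the loaded value, followed by isync, keeps subsequent
// loads from being satisfied before the load of the resolved-state byte.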
2139
2140// Call into the VM if call site is not yet resolved
2141//
2142// Input regs:
2143//   - None, all passed regs are outputs.
2144//
2145// Returns:
//   - Rcache: Address of the constant pool cache entry containing the
//             resolved result.
2148//
2149// Kills:
2150//   - Rscratch
2151void TemplateTable::resolve_cache_and_index(int byte_no, Register Rcache, Register Rscratch, size_t index_size) {
2152
2153  __ get_cache_and_index_at_bcp(Rcache, 1, index_size);
2154  Label Lresolved, Ldone;
2155
2156  Bytecodes::Code code = bytecode();
2157  switch (code) {
2158  case Bytecodes::_nofast_getfield: code = Bytecodes::_getfield; break;
2159  case Bytecodes::_nofast_putfield: code = Bytecodes::_putfield; break;
  default: break;
  }
2161
2162  assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
  // We are resolved if the indices field contains the current bytecode.
2164#if defined(VM_LITTLE_ENDIAN)
2165  __ lbz(Rscratch, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + byte_no + 1, Rcache);
2166#else
2167  __ lbz(Rscratch, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + 7 - (byte_no + 1), Rcache);
2168#endif
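  // _indices is one 64-bit word: the cp index sits in bytes 0-1 (counting
  // from the LSB), bytecode_1 in byte 2 and bytecode_2 in byte 3, hence the
  // byte offset byte_no + 1 on little-endian; big-endian addresses byte i
  // (from the LSB) at offset 7 - i within the 8-byte word.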
2169  // Acquire by cmp-br-isync (see below).
2170  __ cmpdi(CCR0, Rscratch, (int)code);
2171  __ beq(CCR0, Lresolved);
2172
2173  address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_from_cache);
2174  __ li(R4_ARG2, code);
2175  __ call_VM(noreg, entry, R4_ARG2, true);
2176
2177  // Update registers with resolved info.
2178  __ get_cache_and_index_at_bcp(Rcache, 1, index_size);
2179  __ b(Ldone);
2180
2181  __ bind(Lresolved);
2182  __ isync(); // Order load wrt. succeeding loads.
2183  __ bind(Ldone);
2184}
2185
// Load the constant pool cache entry for field accesses into registers.
// The Rcache and Rindex registers must be set before the call.
2188// Input:
2189//   - Rcache, Rindex
2190// Output:
2191//   - Robj, Roffset, Rflags
2192void TemplateTable::load_field_cp_cache_entry(Register Robj,
2193                                              Register Rcache,
2194                                              Register Rindex /* unused on PPC64 */,
2195                                              Register Roffset,
2196                                              Register Rflags,
2197                                              bool is_static = false) {
2198  assert_different_registers(Rcache, Rflags, Roffset);
2199  // assert(Rindex == noreg, "parameter not used on PPC64");
2200
2201  ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2202  __ ld(Rflags, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::flags_offset()), Rcache);
2203  __ ld(Roffset, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::f2_offset()), Rcache);
2204  if (is_static) {
2205    __ ld(Robj, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::f1_offset()), Rcache);
2206    __ ld(Robj, in_bytes(Klass::java_mirror_offset()), Robj);
2207    // Acquire not needed here. Following access has an address dependency on this value.
2208  }
2209}
2210
// Load the constant pool cache entry for invokes into registers.
// Resolve if necessary.
2213
2214// Input Registers:
2215//   - None, bcp is used, though
2216//
2217// Return registers:
2218//   - Rmethod       (f1 field or f2 if invokevirtual)
2219//   - Ritable_index (f2 field)
2220//   - Rflags        (flags field)
2221//
2222// Kills:
2223//   - R21
2224//
2225void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
2226                                               Register Rmethod,
2227                                               Register Ritable_index,
2228                                               Register Rflags,
2229                                               bool is_invokevirtual,
2230                                               bool is_invokevfinal,
2231                                               bool is_invokedynamic) {
2232
2233  ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2234  // Determine constant pool cache field offsets.
2235  assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant");
2236  const int method_offset = in_bytes(cp_base_offset + (is_invokevirtual ? ConstantPoolCacheEntry::f2_offset() : ConstantPoolCacheEntry::f1_offset()));
2237  const int flags_offset  = in_bytes(cp_base_offset + ConstantPoolCacheEntry::flags_offset());
2238  // Access constant pool cache fields.
2239  const int index_offset  = in_bytes(cp_base_offset + ConstantPoolCacheEntry::f2_offset());
2240
2241  Register Rcache = R21_tmp1; // Note: same register as R21_sender_SP.
2242
2243  if (is_invokevfinal) {
2244    assert(Ritable_index == noreg, "register not used");
2245    // Already resolved.
2246    __ get_cache_and_index_at_bcp(Rcache, 1);
2247  } else {
2248    resolve_cache_and_index(byte_no, Rcache, R0, is_invokedynamic ? sizeof(u4) : sizeof(u2));
2249  }
2250
2251  __ ld(Rmethod, method_offset, Rcache);
2252  __ ld(Rflags, flags_offset, Rcache);
2253
2254  if (Ritable_index != noreg) {
2255    __ ld(Ritable_index, index_offset, Rcache);
2256  }
2257}
2258
2259// ============================================================================
2260// Field access
2261
// Volatile variables demand their effects be made known to all CPUs
2263// in order. Store buffers on most chips allow reads & writes to
2264// reorder; the JMM's ReadAfterWrite.java test fails in -Xint mode
2265// without some kind of memory barrier (i.e., it's not sufficient that
2266// the interpreter does not reorder volatile references, the hardware
2267// also must not reorder them).
2268//
2269// According to the new Java Memory Model (JMM):
// (1) All volatiles are serialized with respect to each other. ALSO reads &
//     writes act as acquire & release, so:
2272// (2) A read cannot let unrelated NON-volatile memory refs that
2273//     happen after the read float up to before the read. It's OK for
2274//     non-volatile memory refs that happen before the volatile read to
2275//     float down below it.
// (3) Similarly, a volatile write cannot let unrelated NON-volatile
2277//     memory refs that happen BEFORE the write float down to after the
2278//     write. It's OK for non-volatile memory refs that happen after the
2279//     volatile write to float up before it.
2280//
2281// We only put in barriers around volatile refs (they are expensive),
2282// not _between_ memory refs (that would require us to track the
2283// flavor of the previous memory refs). Requirements (2) and (3)
2284// require some barriers before volatile stores and after volatile
2285// loads. These nearly cover requirement (1) but miss the
2286// volatile-store-volatile-load case.  This final case is placed after
2287// volatile-stores although it could just as well go before
2288// volatile-loads.
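//
// If support_IRIW_for_not_multiple_copy_atomic_cpu is set, the code below
// places a full sync in front of volatile loads and omits the trailing sync
// after volatile stores; if it is clear, volatile stores keep the trailing
// sync and volatile loads rely on the acquire sequence alone.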
2289
// The cache and index registers are expected to be set before the call.
2291// Correct values of the cache and index registers are preserved.
2292// Kills:
2293//   Rcache (if has_tos)
2294//   Rscratch
2295void TemplateTable::jvmti_post_field_access(Register Rcache, Register Rscratch, bool is_static, bool has_tos) {
2296
2297  assert_different_registers(Rcache, Rscratch);
2298
2299  if (JvmtiExport::can_post_field_access()) {
2300    ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2301    Label Lno_field_access_post;
2302
    // Check if posting of field-access events is enabled.
2304    int offs = __ load_const_optimized(Rscratch, JvmtiExport::get_field_access_count_addr(), R0, true);
2305    __ lwz(Rscratch, offs, Rscratch);
2306
2307    __ cmpwi(CCR0, Rscratch, 0);
2308    __ beq(CCR0, Lno_field_access_post);
2309
2310    // Post access enabled - do it!
2311    __ addi(Rcache, Rcache, in_bytes(cp_base_offset));
2312    if (is_static) {
2313      __ li(R17_tos, 0);
2314    } else {
2315      if (has_tos) {
        // The fast bytecode versions have the obj ptr in a register.
        // Thus, save the object pointer before call_VM() clobbers it,
        // and put the object on the tos where the GC wants it.
2319        __ push_ptr(R17_tos);
2320      } else {
2321        // Load top of stack (do not pop the value off the stack).
2322        __ ld(R17_tos, Interpreter::expr_offset_in_bytes(0), R15_esp);
2323      }
2324      __ verify_oop(R17_tos);
2325    }
2326    // tos:   object pointer or NULL if static
2327    // cache: cache entry pointer
2328    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access), R17_tos, Rcache);
2329    if (!is_static && has_tos) {
2330      // Restore object pointer.
2331      __ pop_ptr(R17_tos);
2332      __ verify_oop(R17_tos);
2333    } else {
2334      // Cache is still needed to get class or obj.
2335      __ get_cache_and_index_at_bcp(Rcache, 1);
2336    }
2337
2338    __ align(32, 12);
2339    __ bind(Lno_field_access_post);
2340  }
2341}
2342
// Kills R11_scratch1.
2344void TemplateTable::pop_and_check_object(Register Roop) {
2345  Register Rtmp = R11_scratch1;
2346
2347  assert_different_registers(Rtmp, Roop);
2348  __ pop_ptr(Roop);
2349  // For field access must check obj.
2350  __ null_check_throw(Roop, -1, Rtmp);
2351  __ verify_oop(Roop);
2352}
2353
// PPC64: implement volatile loads as fence-load-acquire.
2355void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
2356  transition(vtos, vtos);
2357
2358  Label Lacquire, Lisync;
2359
2360  const Register Rcache        = R3_ARG1,
2361                 Rclass_or_obj = R22_tmp2,
2362                 Roffset       = R23_tmp3,
2363                 Rflags        = R31,
2364                 Rbtable       = R5_ARG3,
2365                 Rbc           = R6_ARG4,
2366                 Rscratch      = R12_scratch2;
2367
2368  static address field_branch_table[number_of_states],
2369                 static_branch_table[number_of_states];
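  // The branch tables are filled in exactly once, while the templates are
  // generated; the ASSERT block at the end of this method checks that every
  // tos state received an entry.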
2370
2371  address* branch_table = (is_static || rc == may_not_rewrite) ? static_branch_table : field_branch_table;
2372
2373  // Get field offset.
2374  resolve_cache_and_index(byte_no, Rcache, Rscratch, sizeof(u2));
2375
2376  // JVMTI support
2377  jvmti_post_field_access(Rcache, Rscratch, is_static, false);
2378
2379  // Load after possible GC.
2380  load_field_cp_cache_entry(Rclass_or_obj, Rcache, noreg, Roffset, Rflags, is_static);
2381
2382  // Load pointer to branch table.
2383  __ load_const_optimized(Rbtable, (address)branch_table, Rscratch);
2384
2385  // Get volatile flag.
2386  __ rldicl(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit.
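  // rldicl(dst, src, 64 - shift, 63) rotates the is_volatile bit into bit 0
  // and clears all other bits: dst = (Rflags >> is_volatile_shift) & 1.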
2387  // Note: sync is needed before volatile load on PPC64.
2388
2389  // Check field type.
2390  __ rldicl(Rflags, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);
2391
2392#ifdef ASSERT
2393  Label LFlagInvalid;
2394  __ cmpldi(CCR0, Rflags, number_of_states);
2395  __ bge(CCR0, LFlagInvalid);
2396#endif
2397
2398  // Load from branch table and dispatch (volatile case: one instruction ahead).
2399  __ sldi(Rflags, Rflags, LogBytesPerWord);
2400  __ cmpwi(CCR6, Rscratch, 1); // Volatile?
2401  if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
2402    __ sldi(Rscratch, Rscratch, exact_log2(BytesPerInstWord)); // Volatile ? size of 1 instruction : 0.
2403  }
2404  __ ldx(Rbtable, Rbtable, Rflags);
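  // Each table entry is a non-volatile entry point; the corresponding
  // volatile entry point is the single fence instruction placed directly in
  // front of it. Where a leading fence is needed, the dispatch target
  // becomes branch_table[tos_state] - BytesPerInstWord.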
2405
2406  // Get the obj from stack.
2407  if (!is_static) {
2408    pop_and_check_object(Rclass_or_obj); // Kills R11_scratch1.
2409  } else {
2410    __ verify_oop(Rclass_or_obj);
2411  }
2412
2413  if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
2414    __ subf(Rbtable, Rscratch, Rbtable); // Point to volatile/non-volatile entry point.
2415  }
2416  __ mtctr(Rbtable);
2417  __ bctr();
2418
2419#ifdef ASSERT
2420  __ bind(LFlagInvalid);
2421  __ stop("got invalid flag", 0x654);
2422#endif
2423
2424  if (!is_static && rc == may_not_rewrite) {
2425    // We reuse the code from is_static.  It's jumped to via the table above.
2426    return;
2427  }
2428
2429#ifdef ASSERT
2430  // __ bind(Lvtos);
2431  address pc_before_fence = __ pc();
2432  __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
2433  assert(__ pc() - pc_before_fence == (ptrdiff_t)BytesPerInstWord, "must be single instruction");
2434  assert(branch_table[vtos] == 0, "can't compute twice");
2435  branch_table[vtos] = __ pc(); // non-volatile_entry point
2436  __ stop("vtos unexpected", 0x655);
2437#endif
2438
2439  __ align(32, 28, 28); // Align load.
2440  // __ bind(Ldtos);
2441  __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
2442  assert(branch_table[dtos] == 0, "can't compute twice");
2443  branch_table[dtos] = __ pc(); // non-volatile_entry point
2444  __ lfdx(F15_ftos, Rclass_or_obj, Roffset);
2445  __ push(dtos);
2446  if (!is_static && rc == may_rewrite) {
2447    patch_bytecode(Bytecodes::_fast_dgetfield, Rbc, Rscratch);
2448  }
2449  {
2450    Label acquire_double;
2451    __ beq(CCR6, acquire_double); // Volatile?
2452    __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2453
2454    __ bind(acquire_double);
2455    __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync.
2456    __ beq_predict_taken(CCR0, Lisync);
    __ b(Lisync); // In case of NaN.
2458  }
2459
2460  __ align(32, 28, 28); // Align load.
2461  // __ bind(Lftos);
2462  __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
2463  assert(branch_table[ftos] == 0, "can't compute twice");
2464  branch_table[ftos] = __ pc(); // non-volatile_entry point
2465  __ lfsx(F15_ftos, Rclass_or_obj, Roffset);
2466  __ push(ftos);
2467  if (!is_static && rc == may_rewrite) {
2468    patch_bytecode(Bytecodes::_fast_fgetfield, Rbc, Rscratch);
2469  }
2470  {
2471    Label acquire_float;
2472    __ beq(CCR6, acquire_float); // Volatile?
2473    __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2474
2475    __ bind(acquire_float);
2476    __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync.
2477    __ beq_predict_taken(CCR0, Lisync);
    __ b(Lisync); // In case of NaN.
2479  }
2480
2481  __ align(32, 28, 28); // Align load.
2482  // __ bind(Litos);
2483  __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
2484  assert(branch_table[itos] == 0, "can't compute twice");
2485  branch_table[itos] = __ pc(); // non-volatile_entry point
2486  __ lwax(R17_tos, Rclass_or_obj, Roffset);
2487  __ push(itos);
2488  if (!is_static && rc == may_rewrite) {
2489    patch_bytecode(Bytecodes::_fast_igetfield, Rbc, Rscratch);
2490  }
2491  __ beq(CCR6, Lacquire); // Volatile?
2492  __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2493
2494  __ align(32, 28, 28); // Align load.
2495  // __ bind(Lltos);
2496  __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
2497  assert(branch_table[ltos] == 0, "can't compute twice");
2498  branch_table[ltos] = __ pc(); // non-volatile_entry point
2499  __ ldx(R17_tos, Rclass_or_obj, Roffset);
2500  __ push(ltos);
2501  if (!is_static && rc == may_rewrite) {
2502    patch_bytecode(Bytecodes::_fast_lgetfield, Rbc, Rscratch);
2503  }
2504  __ beq(CCR6, Lacquire); // Volatile?
2505  __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2506
2507  __ align(32, 28, 28); // Align load.
2508  // __ bind(Lbtos);
2509  __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
2510  assert(branch_table[btos] == 0, "can't compute twice");
2511  branch_table[btos] = __ pc(); // non-volatile_entry point
2512  __ lbzx(R17_tos, Rclass_or_obj, Roffset);
2513  __ extsb(R17_tos, R17_tos);
2514  __ push(btos);
2515  if (!is_static && rc == may_rewrite) {
2516    patch_bytecode(Bytecodes::_fast_bgetfield, Rbc, Rscratch);
2517  }
2518  __ beq(CCR6, Lacquire); // Volatile?
2519  __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2520
2521  __ align(32, 28, 28); // Align load.
2522  // __ bind(Lctos);
2523  __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
2524  assert(branch_table[ctos] == 0, "can't compute twice");
2525  branch_table[ctos] = __ pc(); // non-volatile_entry point
2526  __ lhzx(R17_tos, Rclass_or_obj, Roffset);
2527  __ push(ctos);
2528  if (!is_static && rc == may_rewrite) {
2529    patch_bytecode(Bytecodes::_fast_cgetfield, Rbc, Rscratch);
2530  }
2531  __ beq(CCR6, Lacquire); // Volatile?
2532  __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2533
2534  __ align(32, 28, 28); // Align load.
2535  // __ bind(Lstos);
2536  __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
2537  assert(branch_table[stos] == 0, "can't compute twice");
2538  branch_table[stos] = __ pc(); // non-volatile_entry point
2539  __ lhax(R17_tos, Rclass_or_obj, Roffset);
2540  __ push(stos);
2541  if (!is_static && rc == may_rewrite) {
2542    patch_bytecode(Bytecodes::_fast_sgetfield, Rbc, Rscratch);
2543  }
2544  __ beq(CCR6, Lacquire); // Volatile?
2545  __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2546
2547  __ align(32, 28, 28); // Align load.
2548  // __ bind(Latos);
2549  __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
2550  assert(branch_table[atos] == 0, "can't compute twice");
2551  branch_table[atos] = __ pc(); // non-volatile_entry point
2552  __ load_heap_oop(R17_tos, (RegisterOrConstant)Roffset, Rclass_or_obj);
2553  __ verify_oop(R17_tos);
2554  __ push(atos);
2555  //__ dcbt(R17_tos); // prefetch
2556  if (!is_static && rc == may_rewrite) {
2557    patch_bytecode(Bytecodes::_fast_agetfield, Rbc, Rscratch);
2558  }
2559  __ beq(CCR6, Lacquire); // Volatile?
2560  __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2561
2562  __ align(32, 12);
2563  __ bind(Lacquire);
2564  __ twi_0(R17_tos);
2565  __ bind(Lisync);
2566  __ isync(); // acquire
2567
2568#ifdef ASSERT
2569  for (int i = 0; i<number_of_states; ++i) {
2570    assert(branch_table[i], "get initialization");
2571    //tty->print_cr("get: %s_branch_table[%d] = 0x%llx (opcode 0x%llx)",
2572    //              is_static ? "static" : "field", i, branch_table[i], *((unsigned int*)branch_table[i]));
2573  }
2574#endif
2575}
2576
2577void TemplateTable::getfield(int byte_no) {
2578  getfield_or_static(byte_no, false);
2579}
2580
2581void TemplateTable::nofast_getfield(int byte_no) {
2582  getfield_or_static(byte_no, false, may_not_rewrite);
2583}
2584
2585void TemplateTable::getstatic(int byte_no) {
2586  getfield_or_static(byte_no, true);
2587}
2588
// The cache and index registers are expected to be set before the call.
2590// The function may destroy various registers, just not the cache and index registers.
2591void TemplateTable::jvmti_post_field_mod(Register Rcache, Register Rscratch, bool is_static) {
2592
2593  assert_different_registers(Rcache, Rscratch, R6_ARG4);
2594
2595  if (JvmtiExport::can_post_field_modification()) {
2596    Label Lno_field_mod_post;
2597
    // Check if posting of field-modification events is enabled.
2599    int offs = __ load_const_optimized(Rscratch, JvmtiExport::get_field_modification_count_addr(), R0, true);
2600    __ lwz(Rscratch, offs, Rscratch);
2601
2602    __ cmpwi(CCR0, Rscratch, 0);
2603    __ beq(CCR0, Lno_field_mod_post);
2604
2605    // Do the post
2606    ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2607    const Register Robj = Rscratch;
2608
2609    __ addi(Rcache, Rcache, in_bytes(cp_base_offset));
2610    if (is_static) {
2611      // Life is simple. Null out the object pointer.
2612      __ li(Robj, 0);
2613    } else {
2614      // In case of the fast versions, value lives in registers => put it back on tos.
2615      int offs = Interpreter::expr_offset_in_bytes(0);
2616      Register base = R15_esp;
2617      switch(bytecode()) {
2618        case Bytecodes::_fast_aputfield: __ push_ptr(); offs+= Interpreter::stackElementSize; break;
2619        case Bytecodes::_fast_iputfield: // Fall through
2620        case Bytecodes::_fast_bputfield: // Fall through
2621        case Bytecodes::_fast_cputfield: // Fall through
2622        case Bytecodes::_fast_sputfield: __ push_i(); offs+=  Interpreter::stackElementSize; break;
2623        case Bytecodes::_fast_lputfield: __ push_l(); offs+=2*Interpreter::stackElementSize; break;
2624        case Bytecodes::_fast_fputfield: __ push_f(); offs+=  Interpreter::stackElementSize; break;
2625        case Bytecodes::_fast_dputfield: __ push_d(); offs+=2*Interpreter::stackElementSize; break;
2626        default: {
2627          offs = 0;
2628          base = Robj;
2629          const Register Rflags = Robj;
2630          Label is_one_slot;
2631          // Life is harder. The stack holds the value on top, followed by the
2632          // object. We don't know the size of the value, though; it could be
2633          // one or two words depending on its type. As a result, we must find
2634          // the type to determine where the object is.
2635          __ ld(Rflags, in_bytes(ConstantPoolCacheEntry::flags_offset()), Rcache); // Big Endian
2636          __ rldicl(Rflags, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);
2637
2638          __ cmpwi(CCR0, Rflags, ltos);
2639          __ cmpwi(CCR1, Rflags, dtos);
2640          __ addi(base, R15_esp, Interpreter::expr_offset_in_bytes(1));
2641          __ crnor(CCR0, Assembler::equal, CCR1, Assembler::equal);
2642          __ beq(CCR0, is_one_slot);
2643          __ addi(base, R15_esp, Interpreter::expr_offset_in_bytes(2));
2644          __ bind(is_one_slot);
2645          break;
2646        }
2647      }
2648      __ ld(Robj, offs, base);
2649      __ verify_oop(Robj);
2650    }
2651
2652    __ addi(R6_ARG4, R15_esp, Interpreter::expr_offset_in_bytes(0));
2653    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), Robj, Rcache, R6_ARG4);
2654    __ get_cache_and_index_at_bcp(Rcache, 1);
2655
2656    // In case of the fast versions, value lives in registers => put it back on tos.
2657    switch(bytecode()) {
2658      case Bytecodes::_fast_aputfield: __ pop_ptr(); break;
2659      case Bytecodes::_fast_iputfield: // Fall through
2660      case Bytecodes::_fast_bputfield: // Fall through
2661      case Bytecodes::_fast_cputfield: // Fall through
2662      case Bytecodes::_fast_sputfield: __ pop_i(); break;
2663      case Bytecodes::_fast_lputfield: __ pop_l(); break;
2664      case Bytecodes::_fast_fputfield: __ pop_f(); break;
2665      case Bytecodes::_fast_dputfield: __ pop_d(); break;
2666      default: break; // Nothin' to do.
2667    }
2668
2669    __ align(32, 12);
2670    __ bind(Lno_field_mod_post);
2671  }
2672}
2673
2674// PPC64: implement volatile stores as release-store (return bytecode contains an additional release).
2675void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
2676  Label Lvolatile;
2677
2678  const Register Rcache        = R5_ARG3,  // Do not use ARG1/2 (causes trouble in jvmti_post_field_mod).
2679                 Rclass_or_obj = R31,      // Needs to survive C call.
2680                 Roffset       = R22_tmp2, // Needs to survive C call.
2681                 Rflags        = R3_ARG1,
2682                 Rbtable       = R4_ARG2,
2683                 Rscratch      = R11_scratch1,
2684                 Rscratch2     = R12_scratch2,
2685                 Rscratch3     = R6_ARG4,
2686                 Rbc           = Rscratch3;
2687  const ConditionRegister CR_is_vol = CCR2; // Non-volatile condition register (survives runtime call in do_oop_store).
2688
2689  static address field_rw_branch_table[number_of_states],
2690                 field_norw_branch_table[number_of_states],
2691                 static_branch_table[number_of_states];
2692
2693  address* branch_table = is_static ? static_branch_table :
2694    (rc == may_rewrite ? field_rw_branch_table : field_norw_branch_table);
2695
2696  // Stack (grows up):
2697  //  value
2698  //  obj
2699
2700  // Load the field offset.
2701  resolve_cache_and_index(byte_no, Rcache, Rscratch, sizeof(u2));
2702  jvmti_post_field_mod(Rcache, Rscratch, is_static);
2703  load_field_cp_cache_entry(Rclass_or_obj, Rcache, noreg, Roffset, Rflags, is_static);
2704
2705  // Load pointer to branch table.
2706  __ load_const_optimized(Rbtable, (address)branch_table, Rscratch);
2707
2708  // Get volatile flag.
2709  __ rldicl(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit.
2710
2711  // Check the field type.
2712  __ rldicl(Rflags, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);
2713
2714#ifdef ASSERT
2715  Label LFlagInvalid;
2716  __ cmpldi(CCR0, Rflags, number_of_states);
2717  __ bge(CCR0, LFlagInvalid);
2718#endif
2719
2720  // Load from branch table and dispatch (volatile case: one instruction ahead).
2721  __ sldi(Rflags, Rflags, LogBytesPerWord);
2722  if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
2723    __ cmpwi(CR_is_vol, Rscratch, 1);  // Volatile?
2724  }
  __ sldi(Rscratch, Rscratch, exact_log2(BytesPerInstWord)); // Volatile ? size of 1 instruction : 0.
2726  __ ldx(Rbtable, Rbtable, Rflags);
2727
2728  __ subf(Rbtable, Rscratch, Rbtable); // Point to volatile/non-volatile entry point.
2729  __ mtctr(Rbtable);
2730  __ bctr();
2731
2732#ifdef ASSERT
2733  __ bind(LFlagInvalid);
2734  __ stop("got invalid flag", 0x656);
2735
2736  // __ bind(Lvtos);
2737  address pc_before_release = __ pc();
2738  __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
2739  assert(__ pc() - pc_before_release == (ptrdiff_t)BytesPerInstWord, "must be single instruction");
2740  assert(branch_table[vtos] == 0, "can't compute twice");
2741  branch_table[vtos] = __ pc(); // non-volatile_entry point
2742  __ stop("vtos unexpected", 0x657);
2743#endif
2744
2745  __ align(32, 28, 28); // Align pop.
2746  // __ bind(Ldtos);
2747  __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
2748  assert(branch_table[dtos] == 0, "can't compute twice");
2749  branch_table[dtos] = __ pc(); // non-volatile_entry point
2750  __ pop(dtos);
2751  if (!is_static) {
2752    pop_and_check_object(Rclass_or_obj);  // Kills R11_scratch1.
2753  }
2754  __ stfdx(F15_ftos, Rclass_or_obj, Roffset);
2755  if (!is_static && rc == may_rewrite) {
2756    patch_bytecode(Bytecodes::_fast_dputfield, Rbc, Rscratch, true, byte_no);
2757  }
2758  if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
2759    __ beq(CR_is_vol, Lvolatile); // Volatile?
2760  }
2761  __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2762
2763  __ align(32, 28, 28); // Align pop.
2764  // __ bind(Lftos);
2765  __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
2766  assert(branch_table[ftos] == 0, "can't compute twice");
2767  branch_table[ftos] = __ pc(); // non-volatile_entry point
2768  __ pop(ftos);
2769  if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
2770  __ stfsx(F15_ftos, Rclass_or_obj, Roffset);
2771  if (!is_static && rc == may_rewrite) {
2772    patch_bytecode(Bytecodes::_fast_fputfield, Rbc, Rscratch, true, byte_no);
2773  }
2774  if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
2775    __ beq(CR_is_vol, Lvolatile); // Volatile?
2776  }
2777  __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2778
2779  __ align(32, 28, 28); // Align pop.
2780  // __ bind(Litos);
2781  __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
2782  assert(branch_table[itos] == 0, "can't compute twice");
2783  branch_table[itos] = __ pc(); // non-volatile_entry point
2784  __ pop(itos);
2785  if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
2786  __ stwx(R17_tos, Rclass_or_obj, Roffset);
2787  if (!is_static && rc == may_rewrite) {
2788    patch_bytecode(Bytecodes::_fast_iputfield, Rbc, Rscratch, true, byte_no);
2789  }
2790  if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
2791    __ beq(CR_is_vol, Lvolatile); // Volatile?
2792  }
2793  __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2794
2795  __ align(32, 28, 28); // Align pop.
2796  // __ bind(Lltos);
2797  __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
2798  assert(branch_table[ltos] == 0, "can't compute twice");
2799  branch_table[ltos] = __ pc(); // non-volatile_entry point
2800  __ pop(ltos);
2801  if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
2802  __ stdx(R17_tos, Rclass_or_obj, Roffset);
2803  if (!is_static && rc == may_rewrite) {
2804    patch_bytecode(Bytecodes::_fast_lputfield, Rbc, Rscratch, true, byte_no);
2805  }
2806  if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
2807    __ beq(CR_is_vol, Lvolatile); // Volatile?
2808  }
2809  __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2810
2811  __ align(32, 28, 28); // Align pop.
2812  // __ bind(Lbtos);
2813  __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
2814  assert(branch_table[btos] == 0, "can't compute twice");
2815  branch_table[btos] = __ pc(); // non-volatile_entry point
2816  __ pop(btos);
2817  if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
2818  __ stbx(R17_tos, Rclass_or_obj, Roffset);
2819  if (!is_static && rc == may_rewrite) {
2820    patch_bytecode(Bytecodes::_fast_bputfield, Rbc, Rscratch, true, byte_no);
2821  }
2822  if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
2823    __ beq(CR_is_vol, Lvolatile); // Volatile?
2824  }
2825  __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2826
2827  __ align(32, 28, 28); // Align pop.
2828  // __ bind(Lctos);
2829  __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
2830  assert(branch_table[ctos] == 0, "can't compute twice");
2831  branch_table[ctos] = __ pc(); // non-volatile_entry point
2832  __ pop(ctos);
  if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
2834  __ sthx(R17_tos, Rclass_or_obj, Roffset);
2835  if (!is_static && rc == may_rewrite) {
2836    patch_bytecode(Bytecodes::_fast_cputfield, Rbc, Rscratch, true, byte_no);
2837  }
2838  if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
2839    __ beq(CR_is_vol, Lvolatile); // Volatile?
2840  }
2841  __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2842
2843  __ align(32, 28, 28); // Align pop.
2844  // __ bind(Lstos);
2845  __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
2846  assert(branch_table[stos] == 0, "can't compute twice");
2847  branch_table[stos] = __ pc(); // non-volatile_entry point
2848  __ pop(stos);
2849  if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
2850  __ sthx(R17_tos, Rclass_or_obj, Roffset);
2851  if (!is_static && rc == may_rewrite) {
2852    patch_bytecode(Bytecodes::_fast_sputfield, Rbc, Rscratch, true, byte_no);
2853  }
2854  if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
2855    __ beq(CR_is_vol, Lvolatile); // Volatile?
2856  }
2857  __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2858
2859  __ align(32, 28, 28); // Align pop.
2860  // __ bind(Latos);
2861  __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
2862  assert(branch_table[atos] == 0, "can't compute twice");
2863  branch_table[atos] = __ pc(); // non-volatile_entry point
2864  __ pop(atos);
2865  if (!is_static) { pop_and_check_object(Rclass_or_obj); } // kills R11_scratch1
2866  do_oop_store(_masm, Rclass_or_obj, Roffset, R17_tos, Rscratch, Rscratch2, Rscratch3, _bs->kind(), false /* precise */, true /* check null */);
2867  if (!is_static && rc == may_rewrite) {
2868    patch_bytecode(Bytecodes::_fast_aputfield, Rbc, Rscratch, true, byte_no);
2869  }
2870  if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
2871    __ beq(CR_is_vol, Lvolatile); // Volatile?
2872    __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2873
2874    __ align(32, 12);
2875    __ bind(Lvolatile);
2876    __ fence();
2877  }
2878  // fallthru: __ b(Lexit);
2879
2880#ifdef ASSERT
2881  for (int i = 0; i<number_of_states; ++i) {
2882    assert(branch_table[i], "put initialization");
2883    //tty->print_cr("put: %s_branch_table[%d] = 0x%llx (opcode 0x%llx)",
2884    //              is_static ? "static" : "field", i, branch_table[i], *((unsigned int*)branch_table[i]));
2885  }
2886#endif
2887}
2888
2889void TemplateTable::putfield(int byte_no) {
2890  putfield_or_static(byte_no, false);
2891}
2892
2893void TemplateTable::nofast_putfield(int byte_no) {
2894  putfield_or_static(byte_no, false, may_not_rewrite);
2895}
2896
2897void TemplateTable::putstatic(int byte_no) {
2898  putfield_or_static(byte_no, true);
2899}
2900
2901// See SPARC. On PPC64, we have a different jvmti_post_field_mod which does the job.
2902void TemplateTable::jvmti_post_fast_field_mod() {
2903  __ should_not_reach_here();
2904}
2905
2906void TemplateTable::fast_storefield(TosState state) {
2907  transition(state, vtos);
2908
2909  const Register Rcache        = R5_ARG3,  // Do not use ARG1/2 (causes trouble in jvmti_post_field_mod).
2910                 Rclass_or_obj = R31,      // Needs to survive C call.
2911                 Roffset       = R22_tmp2, // Needs to survive C call.
2912                 Rflags        = R3_ARG1,
2913                 Rscratch      = R11_scratch1,
2914                 Rscratch2     = R12_scratch2,
2915                 Rscratch3     = R4_ARG2;
2916  const ConditionRegister CR_is_vol = CCR2; // Non-volatile condition register (survives runtime call in do_oop_store).
2917
2918  // Constant pool already resolved => Load flags and offset of field.
2919  __ get_cache_and_index_at_bcp(Rcache, 1);
2920  jvmti_post_field_mod(Rcache, Rscratch, false /* not static */);
2921  load_field_cp_cache_entry(noreg, Rcache, noreg, Roffset, Rflags, false);
2922
2923  // Get the obj and the final store addr.
2924  pop_and_check_object(Rclass_or_obj); // Kills R11_scratch1.
2925
2926  // Get volatile flag.
2927  __ rldicl_(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit.
2928  if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { __ cmpdi(CR_is_vol, Rscratch, 1); }
2929  {
2930    Label LnotVolatile;
2931    __ beq(CCR0, LnotVolatile);
2932    __ release();
2933    __ align(32, 12);
2934    __ bind(LnotVolatile);
2935  }
2936
2937  // Do the store and fencing.
2938  switch(bytecode()) {
2939    case Bytecodes::_fast_aputfield:
2940      // Store into the field.
2941      do_oop_store(_masm, Rclass_or_obj, Roffset, R17_tos, Rscratch, Rscratch2, Rscratch3, _bs->kind(), false /* precise */, true /* check null */);
2942      break;
2943
2944    case Bytecodes::_fast_iputfield:
2945      __ stwx(R17_tos, Rclass_or_obj, Roffset);
2946      break;
2947
2948    case Bytecodes::_fast_lputfield:
2949      __ stdx(R17_tos, Rclass_or_obj, Roffset);
2950      break;
2951
2952    case Bytecodes::_fast_bputfield:
2953      __ stbx(R17_tos, Rclass_or_obj, Roffset);
2954      break;
2955
2956    case Bytecodes::_fast_cputfield:
2957    case Bytecodes::_fast_sputfield:
2958      __ sthx(R17_tos, Rclass_or_obj, Roffset);
2959      break;
2960
2961    case Bytecodes::_fast_fputfield:
2962      __ stfsx(F15_ftos, Rclass_or_obj, Roffset);
2963      break;
2964
2965    case Bytecodes::_fast_dputfield:
2966      __ stfdx(F15_ftos, Rclass_or_obj, Roffset);
2967      break;
2968
2969    default: ShouldNotReachHere();
2970  }
2971
2972  if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
2973    Label LVolatile;
2974    __ beq(CR_is_vol, LVolatile);
2975    __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2976
2977    __ align(32, 12);
2978    __ bind(LVolatile);
2979    __ fence();
2980  }
2981}
2982
2983void TemplateTable::fast_accessfield(TosState state) {
2984  transition(atos, state);
2985
2986  Label LisVolatile;
2987  ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2988
2989  const Register Rcache        = R3_ARG1,
2990                 Rclass_or_obj = R17_tos,
2991                 Roffset       = R22_tmp2,
2992                 Rflags        = R23_tmp3,
2993                 Rscratch      = R12_scratch2;
2994
2995  // Constant pool already resolved. Get the field offset.
2996  __ get_cache_and_index_at_bcp(Rcache, 1);
2997  load_field_cp_cache_entry(noreg, Rcache, noreg, Roffset, Rflags, false);
2998
2999  // JVMTI support
3000  jvmti_post_field_access(Rcache, Rscratch, false, true);
3001
3002  // Get the load address.
3003  __ null_check_throw(Rclass_or_obj, -1, Rscratch);
3004
3005  // Get volatile flag.
3006  __ rldicl_(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit.
3007  __ bne(CCR0, LisVolatile);
3008
3009  switch(bytecode()) {
3010    case Bytecodes::_fast_agetfield:
3011    {
3012      __ load_heap_oop(R17_tos, (RegisterOrConstant)Roffset, Rclass_or_obj);
3013      __ verify_oop(R17_tos);
3014      __ dispatch_epilog(state, Bytecodes::length_for(bytecode()));
3015
3016      __ bind(LisVolatile);
3017      if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
3018      __ load_heap_oop(R17_tos, (RegisterOrConstant)Roffset, Rclass_or_obj);
3019      __ verify_oop(R17_tos);
3020      __ twi_0(R17_tos);
3021      __ isync();
3022      break;
3023    }
3024    case Bytecodes::_fast_igetfield:
3025    {
3026      __ lwax(R17_tos, Rclass_or_obj, Roffset);
3027      __ dispatch_epilog(state, Bytecodes::length_for(bytecode()));
3028
3029      __ bind(LisVolatile);
3030      if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
3031      __ lwax(R17_tos, Rclass_or_obj, Roffset);
3032      __ twi_0(R17_tos);
3033      __ isync();
3034      break;
3035    }
3036    case Bytecodes::_fast_lgetfield:
3037    {
3038      __ ldx(R17_tos, Rclass_or_obj, Roffset);
3039      __ dispatch_epilog(state, Bytecodes::length_for(bytecode()));
3040
3041      __ bind(LisVolatile);
3042      if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
3043      __ ldx(R17_tos, Rclass_or_obj, Roffset);
3044      __ twi_0(R17_tos);
3045      __ isync();
3046      break;
3047    }
3048    case Bytecodes::_fast_bgetfield:
3049    {
3050      __ lbzx(R17_tos, Rclass_or_obj, Roffset);
3051      __ extsb(R17_tos, R17_tos);
3052      __ dispatch_epilog(state, Bytecodes::length_for(bytecode()));
3053
3054      __ bind(LisVolatile);
3055      if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
3056      __ lbzx(R17_tos, Rclass_or_obj, Roffset);
3057      __ twi_0(R17_tos);
3058      __ extsb(R17_tos, R17_tos);
3059      __ isync();
3060      break;
3061    }
3062    case Bytecodes::_fast_cgetfield:
3063    {
3064      __ lhzx(R17_tos, Rclass_or_obj, Roffset);
3065      __ dispatch_epilog(state, Bytecodes::length_for(bytecode()));
3066
3067      __ bind(LisVolatile);
3068      if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
3069      __ lhzx(R17_tos, Rclass_or_obj, Roffset);
3070      __ twi_0(R17_tos);
3071      __ isync();
3072      break;
3073    }
3074    case Bytecodes::_fast_sgetfield:
3075    {
3076      __ lhax(R17_tos, Rclass_or_obj, Roffset);
3077      __ dispatch_epilog(state, Bytecodes::length_for(bytecode()));
3078
3079      __ bind(LisVolatile);
3080      if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
3081      __ lhax(R17_tos, Rclass_or_obj, Roffset);
3082      __ twi_0(R17_tos);
3083      __ isync();
3084      break;
3085    }
3086    case Bytecodes::_fast_fgetfield:
3087    {
3088      __ lfsx(F15_ftos, Rclass_or_obj, Roffset);
3089      __ dispatch_epilog(state, Bytecodes::length_for(bytecode()));
3090
3091      __ bind(LisVolatile);
3092      Label Ldummy;
3093      if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
3094      __ lfsx(F15_ftos, Rclass_or_obj, Roffset);
3095      __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync.
3096      __ bne_predict_not_taken(CCR0, Ldummy);
3097      __ bind(Ldummy);
3098      __ isync();
3099      break;
3100    }
3101    case Bytecodes::_fast_dgetfield:
3102    {
3103      __ lfdx(F15_ftos, Rclass_or_obj, Roffset);
3104      __ dispatch_epilog(state, Bytecodes::length_for(bytecode()));
3105
3106      __ bind(LisVolatile);
3107      Label Ldummy;
3108      if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
3109      __ lfdx(F15_ftos, Rclass_or_obj, Roffset);
3110      __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync.
3111      __ bne_predict_not_taken(CCR0, Ldummy);
3112      __ bind(Ldummy);
3113      __ isync();
3114      break;
3115    }
3116    default: ShouldNotReachHere();
3117  }
3118}
3119
3120void TemplateTable::fast_xaccess(TosState state) {
3121  transition(vtos, state);
3122
3123  Label LisVolatile;
3124  ByteSize cp_base_offset = ConstantPoolCache::base_offset();
3125  const Register Rcache        = R3_ARG1,
3126                 Rclass_or_obj = R17_tos,
3127                 Roffset       = R22_tmp2,
3128                 Rflags        = R23_tmp3,
3129                 Rscratch      = R12_scratch2;
3130
3131  __ ld(Rclass_or_obj, 0, R18_locals);
3132
3133  // Constant pool already resolved. Get the field offset.
3134  __ get_cache_and_index_at_bcp(Rcache, 2);
3135  load_field_cp_cache_entry(noreg, Rcache, noreg, Roffset, Rflags, false);
3136
3137  // JVMTI support not needed, since we switch back to the single, non-fused bytecodes as soon as a debugger attaches.
3138
3139  // Needed to report exception at the correct bcp.
3140  __ addi(R14_bcp, R14_bcp, 1);
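  // Background (illustrative): fast_xaccess implements the fused pattern
  //   aload_0; getfield #n
  // rewritten to a single _fast_Xaccess_0 bytecode. The increment makes
  // R14_bcp point at the embedded field-access bytecode so exceptions are
  // reported with the correct bcp; it is undone again before dispatch below.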
3141
3142  // Get the load address.
3143  __ null_check_throw(Rclass_or_obj, -1, Rscratch);
3144
3145  // Get volatile flag.
3146  __ rldicl_(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit.
3147  __ bne(CCR0, LisVolatile);
3148
3149  switch(state) {
3150  case atos:
3151    {
3152      __ load_heap_oop(R17_tos, (RegisterOrConstant)Roffset, Rclass_or_obj);
3153      __ verify_oop(R17_tos);
3154      __ dispatch_epilog(state, Bytecodes::length_for(bytecode()) - 1); // Undo bcp increment.
3155
3156      __ bind(LisVolatile);
3157      if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
3158      __ load_heap_oop(R17_tos, (RegisterOrConstant)Roffset, Rclass_or_obj);
3159      __ verify_oop(R17_tos);
3160      __ twi_0(R17_tos);
3161      __ isync();
3162      break;
3163    }
3164  case itos:
3165    {
3166      __ lwax(R17_tos, Rclass_or_obj, Roffset);
3167      __ dispatch_epilog(state, Bytecodes::length_for(bytecode()) - 1); // Undo bcp increment.
3168
3169      __ bind(LisVolatile);
3170      if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
3171      __ lwax(R17_tos, Rclass_or_obj, Roffset);
3172      __ twi_0(R17_tos);
3173      __ isync();
3174      break;
3175    }
3176  case ftos:
3177    {
3178      __ lfsx(F15_ftos, Rclass_or_obj, Roffset);
3179      __ dispatch_epilog(state, Bytecodes::length_for(bytecode()) - 1); // Undo bcp increment.
3180
3181      __ bind(LisVolatile);
3182      Label Ldummy;
3183      if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
3184      __ lfsx(F15_ftos, Rclass_or_obj, Roffset);
3185      __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync.
3186      __ bne_predict_not_taken(CCR0, Ldummy);
3187      __ bind(Ldummy);
3188      __ isync();
3189      break;
3190    }
3191  default: ShouldNotReachHere();
3192  }
3193  __ addi(R14_bcp, R14_bcp, -1);
3194}
3195
3196// ============================================================================
3197// Calls
3198
3199// Common code for invoke
3200//
3201// Input:
3202//   - byte_no
3203//
3204// Output:
3205//   - Rmethod:        The method to invoke next.
3206//   - Rret_addr:      The return address to return to.
3207//   - Rindex:         MethodType (invokehandle) or CallSite obj (invokedynamic)
3208//   - Rrecv:          Cache for "this" pointer, might be noreg if static call.
3209//   - Rflags:         Method flags from const pool cache.
3210//
3211//  Kills:
3212//   - Rscratch
3213//
3214void TemplateTable::prepare_invoke(int byte_no,
3215                                   Register Rmethod,  // linked method (or i-klass)
3216                                   Register Rret_addr,// return address
3217                                   Register Rindex,   // itable index, MethodType, etc.
3218                                   Register Rrecv,    // If caller wants to see it.
3219                                   Register Rflags,   // If caller wants to test it.
3220                                   Register Rscratch
3221                                   ) {
3222  // Determine flags.
3223  const Bytecodes::Code code = bytecode();
3224  const bool is_invokeinterface  = code == Bytecodes::_invokeinterface;
3225  const bool is_invokedynamic    = code == Bytecodes::_invokedynamic;
3226  const bool is_invokehandle     = code == Bytecodes::_invokehandle;
3227  const bool is_invokevirtual    = code == Bytecodes::_invokevirtual;
3228  const bool is_invokespecial    = code == Bytecodes::_invokespecial;
3229  const bool load_receiver       = (Rrecv != noreg);
3230  assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), "");
3231
3232  assert_different_registers(Rmethod, Rindex, Rflags, Rscratch);
3233  assert_different_registers(Rmethod, Rrecv, Rflags, Rscratch);
3234  assert_different_registers(Rret_addr, Rscratch);
3235
3236  load_invoke_cp_cache_entry(byte_no, Rmethod, Rindex, Rflags, is_invokevirtual, false, is_invokedynamic);
3237
3238  // Saving of SP done in call_from_interpreter.
3239
3240  // Maybe push "appendix" to arguments.
3241  if (is_invokedynamic || is_invokehandle) {
3242    Label Ldone;
3243    __ rldicl_(R0, Rflags, 64-ConstantPoolCacheEntry::has_appendix_shift, 63);
3244    __ beq(CCR0, Ldone);
3245    // Push "appendix" (MethodType, CallSite, etc.).
3246    // This must be done before we get the receiver,
3247    // since the parameter_size includes it.
3248    __ load_resolved_reference_at_index(Rscratch, Rindex);
3249    __ verify_oop(Rscratch);
3250    __ push_ptr(Rscratch);
3251    __ bind(Ldone);
3252  }
3253
3254  // Load receiver if needed (after appendix is pushed so parameter size is correct).
3255  if (load_receiver) {
3256    const Register Rparam_count = Rscratch;
3257    __ andi(Rparam_count, Rflags, ConstantPoolCacheEntry::parameter_size_mask);
3258    __ load_receiver(Rparam_count, Rrecv);
3259    __ verify_oop(Rrecv);
3260  }
3261
3262  // Get return address.
3263  {
3264    Register Rtable_addr = Rscratch;
3265    Register Rret_type = Rret_addr;
3266    address table_addr = (address) Interpreter::invoke_return_entry_table_for(code);
3267
3268    // Get return type. It's coded into the upper 4 bits of the lower half of the 64 bit value.
3269    __ rldicl(Rret_type, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);
3270    __ load_dispatch_table(Rtable_addr, (address*)table_addr);
3271    __ sldi(Rret_type, Rret_type, LogBytesPerWord);
3272    // Get return address.
3273    __ ldx(Rret_addr, Rtable_addr, Rret_type);
3274  }
3275}
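
// The return-address lookup in prepare_invoke, as C-like pseudo-code (a sketch;
// names as in ConstantPoolCacheEntry and TemplateInterpreter):
//   tos_state = (flags >> tos_state_shift) & ((1 << tos_state_bits) - 1);
//   ret_addr  = Interpreter::invoke_return_entry_table_for(code)[tos_state];
// The sldi above scales tos_state to a byte offset for the indexed ldx load.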
3276
3277// Helper for virtual calls. Load target out of vtable and jump off!
3278// Kills all passed registers.
3279void TemplateTable::generate_vtable_call(Register Rrecv_klass, Register Rindex, Register Rret, Register Rtemp) {
3280
3281  assert_different_registers(Rrecv_klass, Rtemp, Rret);
3282  const Register Rtarget_method = Rindex;
3283
3284  // Get target method & entry point.
3285  const int base = InstanceKlass::vtable_start_offset() * wordSize;
3286  // Calc vtable addr: scale the vtable index by the vtable entry size (vtableEntry::size() * wordSize = 8 bytes).
3287  __ sldi(Rindex, Rindex, exact_log2(vtableEntry::size() * wordSize));
3288  // Load target.
3289  __ addi(Rrecv_klass, Rrecv_klass, base + vtableEntry::method_offset_in_bytes());
3290  __ ldx(Rtarget_method, Rindex, Rrecv_klass);
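  // In effect (illustrative), the two instructions above compute
  //   Rtarget_method = *(Rrecv_klass + vtable_start_offset * wordSize
  //                      + Rindex * vtableEntry::size() * wordSize
  //                      + vtableEntry::method_offset_in_bytes());
  // i.e. they load the Method* from slot Rindex of the receiver klass' vtable.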
3291  // Argument and return type profiling.
3292  __ profile_arguments_type(Rtarget_method, Rrecv_klass /* scratch1 */, Rtemp /* scratch2 */, true);
3293  __ call_from_interpreter(Rtarget_method, Rret, Rrecv_klass /* scratch1 */, Rtemp /* scratch2 */);
3294}
3295
3296// Virtual or final call. Final calls are rewritten on the fly to run through "fast_invokevfinal" next time.
3297void TemplateTable::invokevirtual(int byte_no) {
3298  transition(vtos, vtos);
3299
3300  Register Rtable_addr = R11_scratch1,
3301           Rret_type = R12_scratch2,
3302           Rret_addr = R5_ARG3,
3303           Rflags = R22_tmp2, // Should survive C call.
3304           Rrecv = R3_ARG1,
3305           Rrecv_klass = Rrecv,
3306           Rvtableindex_or_method = R31, // Should survive C call.
3307           Rnum_params = R4_ARG2,
3308           Rnew_bc = R6_ARG4;
3309
3310  Label LnotFinal;
3311
3312  load_invoke_cp_cache_entry(byte_no, Rvtableindex_or_method, noreg, Rflags, /*virtual*/ true, false, false);
3313
3314  __ testbitdi(CCR0, R0, Rflags, ConstantPoolCacheEntry::is_vfinal_shift);
3315  __ bfalse(CCR0, LnotFinal);
3316
3317  if (RewriteBytecodes && !UseSharedSpaces) {
3318    patch_bytecode(Bytecodes::_fast_invokevfinal, Rnew_bc, R12_scratch2);
3319  }
3320  invokevfinal_helper(Rvtableindex_or_method, Rflags, R11_scratch1, R12_scratch2);
3321
3322  __ align(32, 12);
3323  __ bind(LnotFinal);
3324  // Load "this" pointer (receiver).
3325  __ rldicl(Rnum_params, Rflags, 64, 48);
3326  __ load_receiver(Rnum_params, Rrecv);
3327  __ verify_oop(Rrecv);
3328
3329  // Get return type. It's coded into the upper 4 bits of the lower half of the 64 bit value.
3330  __ rldicl(Rret_type, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);
3331  __ load_dispatch_table(Rtable_addr, Interpreter::invoke_return_entry_table());
3332  __ sldi(Rret_type, Rret_type, LogBytesPerWord);
3333  __ ldx(Rret_addr, Rret_type, Rtable_addr);
3334  __ null_check_throw(Rrecv, oopDesc::klass_offset_in_bytes(), R11_scratch1);
3335  __ load_klass(Rrecv_klass, Rrecv);
3336  __ verify_klass_ptr(Rrecv_klass);
3337  __ profile_virtual_call(Rrecv_klass, R11_scratch1, R12_scratch2, false);
3338
3339  generate_vtable_call(Rrecv_klass, Rvtableindex_or_method, Rret_addr, R11_scratch1);
3340}
3341
3342void TemplateTable::fast_invokevfinal(int byte_no) {
3343  transition(vtos, vtos);
3344
3345  assert(byte_no == f2_byte, "use this argument");
3346  Register Rflags  = R22_tmp2,
3347           Rmethod = R31;
3348  load_invoke_cp_cache_entry(byte_no, Rmethod, noreg, Rflags, /*virtual*/ true, /*is_invokevfinal*/ true, false);
3349  invokevfinal_helper(Rmethod, Rflags, R11_scratch1, R12_scratch2);
3350}
3351
3352void TemplateTable::invokevfinal_helper(Register Rmethod, Register Rflags, Register Rscratch1, Register Rscratch2) {
3353
3354  assert_different_registers(Rmethod, Rflags, Rscratch1, Rscratch2);
3355
3356  // Load receiver from stack slot.
3357  Register Rrecv = Rscratch2;
3358  Register Rnum_params = Rrecv;
3359
3360  __ ld(Rnum_params, in_bytes(Method::const_offset()), Rmethod);
3361  __ lhz(Rnum_params /* number of params */, in_bytes(ConstMethod::size_of_parameters_offset()), Rnum_params);
3362
3363  // Get return address.
3364  Register Rtable_addr = Rscratch1,
3365           Rret_addr   = Rflags,
3366           Rret_type   = Rret_addr;
3367  // Get return type. It's coded into the upper 4 bits of the lower half of the 64 bit value.
3368  __ rldicl(Rret_type, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);
3369  __ load_dispatch_table(Rtable_addr, Interpreter::invoke_return_entry_table());
3370  __ sldi(Rret_type, Rret_type, LogBytesPerWord);
3371  __ ldx(Rret_addr, Rret_type, Rtable_addr);
3372
3373  // Load receiver and receiver NULL check.
3374  __ load_receiver(Rnum_params, Rrecv);
3375  __ null_check_throw(Rrecv, -1, Rscratch1);
3376
3377  __ profile_final_call(Rrecv, Rscratch1);
3378  // Argument and return type profiling.
3379  __ profile_arguments_type(Rmethod, Rscratch1, Rscratch2, true);
3380
3381  // Do the call.
3382  __ call_from_interpreter(Rmethod, Rret_addr, Rscratch1, Rscratch2);
3383}
3384
3385void TemplateTable::invokespecial(int byte_no) {
3386  assert(byte_no == f1_byte, "use this argument");
3387  transition(vtos, vtos);
3388
3389  Register Rtable_addr = R3_ARG1,
3390           Rret_addr   = R4_ARG2,
3391           Rflags      = R5_ARG3,
3392           Rreceiver   = R6_ARG4,
3393           Rmethod     = R31;
3394
3395  prepare_invoke(byte_no, Rmethod, Rret_addr, noreg, Rreceiver, Rflags, R11_scratch1);
3396
3397  // Receiver NULL check.
3398  __ null_check_throw(Rreceiver, -1, R11_scratch1);
3399
3400  __ profile_call(R11_scratch1, R12_scratch2);
3401  // Argument and return type profiling.
3402  __ profile_arguments_type(Rmethod, R11_scratch1, R12_scratch2, false);
3403  __ call_from_interpreter(Rmethod, Rret_addr, R11_scratch1, R12_scratch2);
3404}
3405
3406void TemplateTable::invokestatic(int byte_no) {
3407  assert(byte_no == f1_byte, "use this argument");
3408  transition(vtos, vtos);
3409
3410  Register Rtable_addr = R3_ARG1,
3411           Rret_addr   = R4_ARG2,
3412           Rflags      = R5_ARG3;
3413
3414  prepare_invoke(byte_no, R19_method, Rret_addr, noreg, noreg, Rflags, R11_scratch1);
3415
3416  __ profile_call(R11_scratch1, R12_scratch2);
3417  // Argument and return type profiling.
3418  __ profile_arguments_type(R19_method, R11_scratch1, R12_scratch2, false);
3419  __ call_from_interpreter(R19_method, Rret_addr, R11_scratch1, R12_scratch2);
3420}
3421
3422void TemplateTable::invokeinterface_object_method(Register Rrecv_klass,
3423                                                  Register Rret,
3424                                                  Register Rflags,
3425                                                  Register Rindex,
3426                                                  Register Rtemp1,
3427                                                  Register Rtemp2) {
3428
3429  assert_different_registers(Rindex, Rret, Rrecv_klass, Rflags, Rtemp1, Rtemp2);
3430  Label LnotFinal;
3431
3432  // Check for vfinal.
3433  __ testbitdi(CCR0, R0, Rflags, ConstantPoolCacheEntry::is_vfinal_shift);
3434  __ bfalse(CCR0, LnotFinal);
3435
3436  Register Rscratch = Rflags; // Rflags is dead now.
3437
3438  // Final call case.
3439  __ profile_final_call(Rtemp1, Rscratch);
3440  // Argument and return type profiling.
3441  __ profile_arguments_type(Rindex, Rscratch, Rrecv_klass /* scratch */, true);
3442  // Do the final call - the index (f2) contains the method.
3443  __ call_from_interpreter(Rindex, Rret, Rscratch, Rrecv_klass /* scratch */);
3444
3445  // Non-final call case.
3446  __ bind(LnotFinal);
3447  __ profile_virtual_call(Rrecv_klass, Rtemp1, Rscratch, false);
3448  generate_vtable_call(Rrecv_klass, Rindex, Rret, Rscratch);
3449}
3450
3451void TemplateTable::invokeinterface(int byte_no) {
3452  assert(byte_no == f1_byte, "use this argument");
3453  transition(vtos, vtos);
3454
3455  const Register Rscratch1        = R11_scratch1,
3456                 Rscratch2        = R12_scratch2,
3457                 Rscratch3        = R9_ARG7,
3458                 Rscratch4        = R10_ARG8,
3459                 Rtable_addr      = Rscratch2,
3460                 Rinterface_klass = R5_ARG3,
3461                 Rret_type        = R8_ARG6,
3462                 Rret_addr        = Rret_type,
3463                 Rindex           = R6_ARG4,
3464                 Rreceiver        = R4_ARG2,
3465                 Rrecv_klass      = Rreceiver,
3466                 Rflags           = R7_ARG5;
3467
3468  prepare_invoke(byte_no, Rinterface_klass, Rret_addr, Rindex, Rreceiver, Rflags, Rscratch1);
3469
3470  // Get receiver klass.
3471  __ null_check_throw(Rreceiver, oopDesc::klass_offset_in_bytes(), Rscratch3);
3472  __ load_klass(Rrecv_klass, Rreceiver);
3473
3474  // Check corner case object method.
3475  Label LobjectMethod;
3476
3477  __ testbitdi(CCR0, R0, Rflags, ConstantPoolCacheEntry::is_forced_virtual_shift);
3478  __ btrue(CCR0, LobjectMethod);
3479
3480  // Fallthrough: The normal invokeinterface case.
3481  __ profile_virtual_call(Rrecv_klass, Rscratch1, Rscratch2, false);
3482
3483  // Find entry point to call.
3484  Label Lthrow_icc, Lthrow_ame;
3485  // Result will be returned in Rindex.
3486  __ mr(Rscratch4, Rrecv_klass);
3487  __ mr(Rscratch3, Rindex);
3488  __ lookup_interface_method(Rrecv_klass, Rinterface_klass, Rindex, Rindex, Rscratch1, Rscratch2, Lthrow_icc);
3489
3490  __ cmpdi(CCR0, Rindex, 0);
3491  __ beq(CCR0, Lthrow_ame);
3492  // Found entry. Jump off!
3493  // Argument and return type profiling.
3494  __ profile_arguments_type(Rindex, Rscratch1, Rscratch2, true);
3495  __ call_from_interpreter(Rindex, Rret_addr, Rscratch1, Rscratch2);
3496
3497  // Vtable entry was NULL => Throw abstract method error.
3498  __ bind(Lthrow_ame);
3499  __ mr(Rrecv_klass, Rscratch4);
3500  __ mr(Rindex, Rscratch3);
3501  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
3502
3503  // Interface was not found => Throw incompatible class change error.
3504  __ bind(Lthrow_icc);
3505  __ mr(Rrecv_klass, Rscratch4);
3506  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_IncompatibleClassChangeError));
3507
3508  __ should_not_reach_here();
3509
3510  // Special case of invokeinterface called for virtual method of
3511  // java.lang.Object. See ConstantPoolCacheEntry::set_method() for details:
3512  // The invokeinterface was rewritten to an invokevirtual, hence we have
3513  // to handle this corner case. This code isn't produced by javac, but could
3514  // be produced by another compliant java compiler.
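  // Example (hypothetical bytecode; legal per the JVMS, though javac emits
  // invokevirtual here):
  //   invokeinterface java/lang/Comparable.hashCode:()I 1
  // hashCode() resolves to a method of java.lang.Object, so the cache entry is
  // flagged is_forced_virtual and we dispatch through the vtable below.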
3515  __ bind(LobjectMethod);
3516  invokeinterface_object_method(Rrecv_klass, Rret_addr, Rflags, Rindex, Rscratch1, Rscratch2);
3517}
3518
3519void TemplateTable::invokedynamic(int byte_no) {
3520  transition(vtos, vtos);
3521
3522  const Register Rret_addr = R3_ARG1,
3523                 Rflags    = R4_ARG2,
3524                 Rmethod   = R22_tmp2,
3525                 Rscratch1 = R11_scratch1,
3526                 Rscratch2 = R12_scratch2;
3527
3528  prepare_invoke(byte_no, Rmethod, Rret_addr, Rscratch1, noreg, Rflags, Rscratch2);
3529
3530  // Profile this call.
3531  __ profile_call(Rscratch1, Rscratch2);
3532
3533  // Off we go. With the new method handles, we don't jump to a method handle
3534  // entry any more. Instead, we pushed an "appendix" in prepare_invoke, which
3535  // happens to be the CallSite object the bootstrap method returned. This is
3536  // passed to a "link" method which does the dispatch (most likely it just
3537  // grabs the MethodHandle stored inside the CallSite and does an invokehandle).
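  // Illustrative flow (a sketch, not the exact runtime code path):
  //   Java:       Runnable r = () -> {};     // javac emits invokedynamic
  //   first exec: the bootstrap method runs and returns a CallSite, which is
  //               the "appendix" pushed in prepare_invoke above.
  //   every exec: the linker method in Rmethod is called with the appendix as
  //               trailing argument and dispatches through the CallSite's
  //               target MethodHandle.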
3538  // Argument and return type profiling.
3539  __ profile_arguments_type(Rmethod, Rscratch1, Rscratch2, false);
3540  __ call_from_interpreter(Rmethod, Rret_addr, Rscratch1 /* scratch1 */, Rscratch2 /* scratch2 */);
3541}
3542
3543void TemplateTable::invokehandle(int byte_no) {
3544  transition(vtos, vtos);
3545
3546  const Register Rret_addr = R3_ARG1,
3547                 Rflags    = R4_ARG2,
3548                 Rrecv     = R5_ARG3,
3549                 Rmethod   = R22_tmp2,
3550                 Rscratch1 = R11_scratch1,
3551                 Rscratch2 = R12_scratch2;
3552
3553  prepare_invoke(byte_no, Rmethod, Rret_addr, Rscratch1, Rrecv, Rflags, Rscratch2);
3554  __ verify_method_ptr(Rmethod);
3555  __ null_check_throw(Rrecv, -1, Rscratch2);
3556
3557  __ profile_final_call(Rrecv, Rscratch1);
3558
3559  // Still no call from handle => we call the method handle interpreter here.
3560  // Argument and return type profiling.
3561  __ profile_arguments_type(Rmethod, Rscratch1, Rscratch2, true);
3562  __ call_from_interpreter(Rmethod, Rret_addr, Rscratch1 /* scratch1 */, Rscratch2 /* scratch2 */);
3563}
3564
3565// =============================================================================
3566// Allocation
3567
3568// Puts allocated obj ref onto the expression stack.
3569void TemplateTable::_new() {
3570  transition(vtos, atos);
3571
3572  Label Lslow_case,
3573        Ldone,
3574        Linitialize_header,
3575        Lallocate_shared,
3576        Linitialize_object;  // Including clearing the fields.
3577
3578  const Register RallocatedObject = R17_tos,
3579                 RinstanceKlass   = R9_ARG7,
3580                 Rscratch         = R11_scratch1,
3581                 Roffset          = R8_ARG6,
3582                 Rinstance_size   = Roffset,
3583                 Rcpool           = R4_ARG2,
3584                 Rtags            = R3_ARG1,
3585                 Rindex           = R5_ARG3;
3586
3587  const bool allow_shared_alloc = Universe::heap()->supports_inline_contig_alloc();
3588
3589  // --------------------------------------------------------------------------
3590  // Check if fast case is possible.
3591
3592  // Load pointers to const pool and const pool's tags array.
3593  __ get_cpool_and_tags(Rcpool, Rtags);
3594  // Load index of constant pool entry.
3595  __ get_2_byte_integer_at_bcp(1, Rindex, InterpreterMacroAssembler::Unsigned);
3596
3597  if (UseTLAB) {
3598    // Make sure the class we're about to instantiate has been resolved.
3599    // This is done before loading the InstanceKlass to be consistent with the order
3600    // in which the constant pool is updated (see ConstantPoolCache::klass_at_put).
3601    __ addi(Rtags, Rtags, Array<u1>::base_offset_in_bytes());
3602    __ lbzx(Rtags, Rindex, Rtags);
3603
3604    __ cmpdi(CCR0, Rtags, JVM_CONSTANT_Class);
3605    __ bne(CCR0, Lslow_case);
3606
3607    // Get instanceKlass (load from Rcpool + sizeof(ConstantPool) + Rindex*BytesPerWord).
3608    __ sldi(Roffset, Rindex, LogBytesPerWord);
3609    __ addi(Rscratch, Rcpool, sizeof(ConstantPool));
3610    __ isync(); // Order load of instance Klass wrt. tags.
3611    __ ldx(RinstanceKlass, Roffset, Rscratch);
3612
3613    // Make sure klass is fully initialized and get instance_size.
3614    __ lbz(Rscratch, in_bytes(InstanceKlass::init_state_offset()), RinstanceKlass);
3615    __ lwz(Rinstance_size, in_bytes(Klass::layout_helper_offset()), RinstanceKlass);
3616
3617    __ cmpdi(CCR1, Rscratch, InstanceKlass::fully_initialized);
3618    // Make sure klass has no finalizer and is neither abstract, an interface, nor java/lang/Class.
3619    __ andi_(R0, Rinstance_size, Klass::_lh_instance_slow_path_bit); // slow path bit equals 0?
3620
3621    __ crnand(CCR0, Assembler::equal, CCR1, Assembler::equal); // slow path bit set or not fully initialized?
3622    __ beq(CCR0, Lslow_case);
3623
3624    // --------------------------------------------------------------------------
3625    // Fast case:
3626    // Allocate the instance.
3627    // 1) Try to allocate in the TLAB.
3628    // 2) If that fails and the TLAB is not yet wasteful enough to discard, bump its refill waste limit first.
3629    // 3) In either failure case go to the slow case (which refills the TLAB, allocates in the heap, etc.); see the sketch below.
3630
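    // The fast path below as C-like pseudo-code (a sketch; field names follow
    // JavaThread and ThreadLocalAllocBuffer):
    //   new_top = tlab_top + instance_size;
    //   if (new_top <= tlab_end) { obj = tlab_top; tlab_top = new_top; }
    //   else {
    //     if (tlab_refill_waste_limit < tlab_free)   // TLAB too valuable to discard
    //       tlab_refill_waste_limit += refill_waste_limit_increment();
    //     goto slow_case;                            // refill TLAB or allocate in heap
    //   }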
3631    Register RoldTopValue = RallocatedObject; // Object will be allocated here if it fits.
3632    Register RnewTopValue = R6_ARG4;
3633    Register RendValue    = R7_ARG5;
3634
3635    // Check if we can allocate in the TLAB.
3636    __ ld(RoldTopValue, in_bytes(JavaThread::tlab_top_offset()), R16_thread);
3637    __ ld(RendValue,    in_bytes(JavaThread::tlab_end_offset()), R16_thread);
3638
3639    __ add(RnewTopValue, Rinstance_size, RoldTopValue);
3640
3641    // If there is enough space, we do not CAS and do not clear.
3642    __ cmpld(CCR0, RnewTopValue, RendValue);
3643    __ bgt(CCR0, allow_shared_alloc ? Lallocate_shared : Lslow_case);
3644
3645    __ std(RnewTopValue, in_bytes(JavaThread::tlab_top_offset()), R16_thread);
3646
3647    if (ZeroTLAB) {
3648      // The fields have already been cleared.
3649      __ b(Linitialize_header);
3650    } else {
3651      // Initialize both the header and fields.
3652      __ b(Linitialize_object);
3653    }
3654
3655    // Fall through: TLAB was too small.
3656    if (allow_shared_alloc) {
3657      Register RtlabWasteLimitValue = R10_ARG8;
3658      Register RfreeValue = RnewTopValue;
3659
3660      __ bind(Lallocate_shared);
3661      // Check if tlab should be discarded (refill_waste_limit >= free).
3662      __ ld(RtlabWasteLimitValue, in_bytes(JavaThread::tlab_refill_waste_limit_offset()), R16_thread);
3663      __ subf(RfreeValue, RoldTopValue, RendValue);
3664      __ srdi(RfreeValue, RfreeValue, LogHeapWordSize); // in dwords
3665      __ cmpld(CCR0, RtlabWasteLimitValue, RfreeValue);
3666      __ bge(CCR0, Lslow_case);
3667
3668      // Increment waste limit to prevent getting stuck on this slow path.
3669      __ addi(RtlabWasteLimitValue, RtlabWasteLimitValue, (int)ThreadLocalAllocBuffer::refill_waste_limit_increment());
3670      __ std(RtlabWasteLimitValue, in_bytes(JavaThread::tlab_refill_waste_limit_offset()), R16_thread);
3671    }
3672    // else: No allocation in the shared eden. // fallthru: __ b(Lslow_case);
3673  }
3674  // else: Always go the slow path.
3675
3676  // --------------------------------------------------------------------------
3677  // slow case
3678  __ bind(Lslow_case);
3679  call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), Rcpool, Rindex);
3680
3681  if (UseTLAB) {
3682    __ b(Ldone);
3683    // --------------------------------------------------------------------------
3684    // Init1: Zero out newly allocated memory.
3685
3686    if (!ZeroTLAB || allow_shared_alloc) {
3687      // Clear object fields.
3688      __ bind(Linitialize_object);
3689
3690      // Initialize remaining object fields.
3691      Register Rbase = Rtags;
3692      __ addi(Rinstance_size, Rinstance_size, 7 - (int)sizeof(oopDesc));
3693      __ addi(Rbase, RallocatedObject, sizeof(oopDesc));
3694      __ srdi(Rinstance_size, Rinstance_size, 3);
3695
3696      // Clear out the object, skipping the header. Also takes care of the zero-length case.
3697      __ clear_memory_doubleword(Rbase, Rinstance_size);
3698      // fallthru: __ b(Linitialize_header);
3699    }
3700
3701    // --------------------------------------------------------------------------
3702    // Init2: Initialize the header: mark, klass
3703    __ bind(Linitialize_header);
3704
3705    // Init mark.
3706    if (UseBiasedLocking) {
3707      __ ld(Rscratch, in_bytes(Klass::prototype_header_offset()), RinstanceKlass);
3708    } else {
3709      __ load_const_optimized(Rscratch, markOopDesc::prototype(), R0);
3710    }
3711    __ std(Rscratch, oopDesc::mark_offset_in_bytes(), RallocatedObject);
3712
3713    // Init klass.
3714    __ store_klass_gap(RallocatedObject);
3715    __ store_klass(RallocatedObject, RinstanceKlass, Rscratch); // klass (last for cms)
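    // Header layout being initialized (illustrative, 64-bit):
    //   [mark word ]  prototype mark (biased-locking prototype if enabled)
    //   [klass word]  narrow Klass* plus gap when compressed, else full Klass*
    // Storing the klass last ensures a concurrent collector (CMS) never parses
    // an object whose klass is visible before the rest of it is initialized.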
3716
3717    // Check and trigger dtrace event.
3718    {
3719      SkipIfEqualZero skip_if(_masm, Rscratch, &DTraceAllocProbes);
3720      __ push(atos);
3721      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc));
3722      __ pop(atos);
3723    }
3724  }
3725
3726  // continue
3727  __ bind(Ldone);
3728
3729  // Must prevent reordering of stores for object initialization with stores that publish the new object.
3730  __ membar(Assembler::StoreStore);
3731}
3732
3733void TemplateTable::newarray() {
3734  transition(itos, atos);
3735
3736  __ lbz(R4, 1, R14_bcp);
3737  __ extsw(R5, R17_tos);
3738  call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray), R4, R5 /* size */);
3739
3740  // Must prevent reordering of stores for object initialization with stores that publish the new object.
3741  __ membar(Assembler::StoreStore);
3742}
3743
3744void TemplateTable::anewarray() {
3745  transition(itos, atos);
3746
3747  __ get_constant_pool(R4);
3748  __ get_2_byte_integer_at_bcp(1, R5, InterpreterMacroAssembler::Unsigned);
3749  __ extsw(R6, R17_tos); // size
3750  call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray), R4 /* pool */, R5 /* index */, R6 /* size */);
3751
3752  // Must prevent reordering of stores for object initialization with stores that publish the new object.
3753  __ membar(Assembler::StoreStore);
3754}
3755
3756// Allocate a multi dimensional array
3757void TemplateTable::multianewarray() {
3758  transition(vtos, atos);
3759
3760  Register Rptr = R31; // Needs to survive C call.
3761
3762  // Load ndims and compute ndims * wordSize (the size of the dimension words on the stack) into Rptr.
3763  __ lbz(Rptr, 3, R14_bcp);
3764  __ sldi(Rptr, Rptr, Interpreter::logStackElementSize);
3765  // Esp points past the last dimension, so set R4 to the address of the first dimension.
3766  __ add(R4, Rptr, R15_esp);
3767  call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray), R4 /* first_size_address */);
3768  // Pop all dimensions off the stack.
3769  __ add(R15_esp, Rptr, R15_esp);
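  // Example (illustrative): for "new int[a][b]" javac emits
  //   iload a; iload b; multianewarray #k 2   (k = CP index of class [[I)
  // The dimension count (2) is the unsigned byte at bcp + 3; both dimension
  // words sit on the expression stack and are popped above by bumping esp.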
3770
3771  // Must prevent reordering of stores for object initialization with stores that publish the new object.
3772  __ membar(Assembler::StoreStore);
3773}
3774
3775void TemplateTable::arraylength() {
3776  transition(atos, itos);
3777
3779  __ verify_oop(R17_tos);
3780  __ null_check_throw(R17_tos, arrayOopDesc::length_offset_in_bytes(), R11_scratch1);
3781  __ lwa(R17_tos, arrayOopDesc::length_offset_in_bytes(), R17_tos);
3782}
3783
3784// ============================================================================
3785// Typechecks
3786
3787void TemplateTable::checkcast() {
3788  transition(atos, atos);
3789
3790  Label Ldone, Lis_null, Lquicked, Lresolved;
3791  Register Roffset         = R6_ARG4,
3792           RobjKlass       = R4_ARG2,
3793           RspecifiedKlass = R5_ARG3, // Generate_ClassCastException_verbose_handler will read value from this register.
3794           Rcpool          = R11_scratch1,
3795           Rtags           = R12_scratch2;
3796
3797  // Null does not pass.
3798  __ cmpdi(CCR0, R17_tos, 0);
3799  __ beq(CCR0, Lis_null);
3800
3801  // Get constant pool tag to find out if the bytecode has already been "quickened".
3802  __ get_cpool_and_tags(Rcpool, Rtags);
3803
3804  __ get_2_byte_integer_at_bcp(1, Roffset, InterpreterMacroAssembler::Unsigned);
3805
3806  __ addi(Rtags, Rtags, Array<u1>::base_offset_in_bytes());
3807  __ lbzx(Rtags, Rtags, Roffset);
3808
3809  __ cmpdi(CCR0, Rtags, JVM_CONSTANT_Class);
3810  __ beq(CCR0, Lquicked);
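  // Quickening (background): on the first execution the tag is still
  // JVM_CONSTANT_UnresolvedClass, so we call into the VM below;
  // InterpreterRuntime::quicken_io_cc resolves the class and flips the tag to
  // JVM_CONSTANT_Class, so subsequent executions take the Lquicked fast path.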
3811
3812  // Call into the VM to "quicken" instanceof.
3813  __ push_ptr();  // for GC
3814  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
3815  __ get_vm_result_2(RspecifiedKlass);
3816  __ pop_ptr();   // Restore receiver.
3817  __ b(Lresolved);
3818
3819  // Extract target class from constant pool.
3820  __ bind(Lquicked);
3821  __ sldi(Roffset, Roffset, LogBytesPerWord);
3822  __ addi(Rcpool, Rcpool, sizeof(ConstantPool));
3823  __ isync(); // Order load of specified Klass wrt. tags.
3824  __ ldx(RspecifiedKlass, Rcpool, Roffset);
3825
3826  // Do the checkcast.
3827  __ bind(Lresolved);
3828  // Get value klass in RobjKlass.
3829  __ load_klass(RobjKlass, R17_tos);
3830  // Generate a fast subtype check. Branches to Ldone on success; on failure falls through to throw ClassCastException.
3831  __ gen_subtype_check(RobjKlass, RspecifiedKlass, /*3 temp regs*/ Roffset, Rcpool, Rtags, /*target if subtype*/ Ldone);
3832
3833  // Not a subtype; so must throw exception
3834  // Target class oop is in register R6_ARG4 == RspecifiedKlass by convention.
3835  __ load_dispatch_table(R11_scratch1, (address*)Interpreter::_throw_ClassCastException_entry);
3836  __ mtctr(R11_scratch1);
3837  __ bctr();
3838
3839  // Profile the null case.
3840  __ align(32, 12);
3841  __ bind(Lis_null);
3842  __ profile_null_seen(R11_scratch1, Rtags); // Rtags used as scratch.
3843
3844  __ align(32, 12);
3845  __ bind(Ldone);
3846}
3847
3848// Output:
3849//   - tos == 0: Obj was null or not an instance of class.
3850//   - tos == 1: Obj was an instance of class.
3851void TemplateTable::instanceof() {
3852  transition(atos, itos);
3853
3854  Label Ldone, Lis_null, Lquicked, Lresolved;
3855  Register Roffset         = R6_ARG4,
3856           RobjKlass       = R4_ARG2,
3857           RspecifiedKlass = R5_ARG3,
3858           Rcpool          = R11_scratch1,
3859           Rtags           = R12_scratch2;
3860
3861  // Null does not pass.
3862  __ cmpdi(CCR0, R17_tos, 0);
3863  __ beq(CCR0, Lis_null);
3864
3865  // Get constant pool tag to find out if the bytecode has already been "quickened".
3866  __ get_cpool_and_tags(Rcpool, Rtags);
3867
3868  __ get_2_byte_integer_at_bcp(1, Roffset, InterpreterMacroAssembler::Unsigned);
3869
3870  __ addi(Rtags, Rtags, Array<u1>::base_offset_in_bytes());
3871  __ lbzx(Rtags, Rtags, Roffset);
3872
3873  __ cmpdi(CCR0, Rtags, JVM_CONSTANT_Class);
3874  __ beq(CCR0, Lquicked);
3875
3876  // Call into the VM to "quicken" instanceof.
3877  __ push_ptr();  // for GC
3878  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
3879  __ get_vm_result_2(RspecifiedKlass);
3880  __ pop_ptr();   // Restore receiver.
3881  __ b(Lresolved);
3882
3883  // Extract target class from constant pool.
3884  __ bind(Lquicked);
3885  __ sldi(Roffset, Roffset, LogBytesPerWord);
3886  __ addi(Rcpool, Rcpool, sizeof(ConstantPool));
3887  __ isync(); // Order load of specified Klass wrt. tags.
3888  __ ldx(RspecifiedKlass, Rcpool, Roffset);
3889
3890  // Do the instanceof check.
3891  __ bind(Lresolved);
3892  // Get value klass in RobjKlass.
3893  __ load_klass(RobjKlass, R17_tos);
3894  // Generate a fast subtype check. Branches to Ldone (tos == 1) on success; on failure falls through to set tos = 0.
3895  __ li(R17_tos, 1);
3896  __ gen_subtype_check(RobjKlass, RspecifiedKlass, /*3 temp regs*/ Roffset, Rcpool, Rtags, /*target if subtype*/ Ldone);
3897  __ li(R17_tos, 0);
3898
3899  if (ProfileInterpreter) {
3900    __ b(Ldone);
3901  }
3902
3903  // Profile the null case.
3904  __ align(32, 12);
3905  __ bind(Lis_null);
3906  __ profile_null_seen(Rcpool, Rtags); // Rcpool and Rtags used as scratch.
3907
3908  __ align(32, 12);
3909  __ bind(Ldone);
3910}
3911
3912// =============================================================================
3913// Breakpoints
3914
3915void TemplateTable::_breakpoint() {
3916  transition(vtos, vtos);
3917
3918  // Get the unpatched byte code.
3919  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::get_original_bytecode_at), R19_method, R14_bcp);
3920  __ mr(R31, R3_RET);
3921
3922  // Post the breakpoint event.
3923  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint), R19_method, R14_bcp);
3924
3925  // Complete the execution of original bytecode.
3926  __ dispatch_Lbyte_code(vtos, R31, Interpreter::normal_table(vtos));
3927}
3928
3929// =============================================================================
3930// Exceptions
3931
3932void TemplateTable::athrow() {
3933  transition(atos, vtos);
3934
3935  // Exception oop is in tos
3936  __ verify_oop(R17_tos);
3937
3938  __ null_check_throw(R17_tos, -1, R11_scratch1);
3939
3940  // Throw exception interpreter entry expects exception oop to be in R3.
3941  __ mr(R3_RET, R17_tos);
3942  __ load_dispatch_table(R11_scratch1, (address*)Interpreter::throw_exception_entry());
3943  __ mtctr(R11_scratch1);
3944  __ bctr();
3945}
3946
3947// =============================================================================
3948// Synchronization
3949// Searches the basic object lock list on the stack for a free slot
3950// and uses it to lock the object in tos.
3951//
3952// Recursive locking is supported by exiting the search as soon as the
3953// same object is found in the list. A fresh BasicObjectLock is then
3954// allocated "higher up" in the stack and is therefore the first one
3955// found at the next monitorexit.
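//
// Example (illustrative):
//   synchronized (o) {         // monitorenter: a free slot is found; o locked
//     synchronized (o) { ... } // monitorenter: o already in the list => a new
//   }                          // slot is allocated "higher up" (recursive lock)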
3956void TemplateTable::monitorenter() {
3957  transition(atos, vtos);
3958
3959  __ verify_oop(R17_tos);
3960
3961  Register Rcurrent_monitor  = R11_scratch1,
3962           Rcurrent_obj      = R12_scratch2,
3963           Robj_to_lock      = R17_tos,
3964           Rscratch1         = R3_ARG1,
3965           Rscratch2         = R4_ARG2,
3966           Rscratch3         = R5_ARG3,
3967           Rcurrent_obj_addr = R6_ARG4;
3968
3969  // ------------------------------------------------------------------------------
3970  // Null pointer exception.
3971  __ null_check_throw(Robj_to_lock, -1, R11_scratch1);
3972
3973  // Try to acquire a lock on the object.
3974  // Repeat until succeeded (i.e., until monitorenter returns true).
3975
3976  // ------------------------------------------------------------------------------
3977  // Find a free slot in the monitor block.
3978  Label Lfound, Lexit, Lallocate_new;
3979  ConditionRegister found_free_slot = CCR0,
3980                    found_same_obj  = CCR1,
3981                    reached_limit   = CCR6;
3982  {
3983    Label Lloop, Lentry;
3984    Register Rlimit = Rcurrent_monitor;
3985
3986    // Set up search loop - start with topmost monitor.
3987    __ add(Rcurrent_obj_addr, BasicObjectLock::obj_offset_in_bytes(), R26_monitor);
3988
3989    __ ld(Rlimit, 0, R1_SP);
3990    __ addi(Rlimit, Rlimit, - (frame::ijava_state_size + frame::interpreter_frame_monitor_size_in_bytes() - BasicObjectLock::obj_offset_in_bytes())); // Monitor base
3991
3992    // Check if any slot is present => short cut to allocation if not.
3993    __ cmpld(reached_limit, Rcurrent_obj_addr, Rlimit);
3994    __ bgt(reached_limit, Lallocate_new);
3995
3996    // Pre-load topmost slot.
3997    __ ld(Rcurrent_obj, 0, Rcurrent_obj_addr);
3998    __ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, frame::interpreter_frame_monitor_size() * wordSize);
3999    // The search loop.
4000    __ bind(Lloop);
4001    // Found free slot?
4002    __ cmpdi(found_free_slot, Rcurrent_obj, 0);
4003    // Is this entry for same obj? If so, stop the search and take the found
4004    // free slot or allocate a new one to enable recursive locking.
4005    __ cmpd(found_same_obj, Rcurrent_obj, Robj_to_lock);
4006    __ cmpld(reached_limit, Rcurrent_obj_addr, Rlimit);
4007    __ beq(found_free_slot, Lexit);
4008    __ beq(found_same_obj, Lallocate_new);
4009    __ bgt(reached_limit, Lallocate_new);
4010    // Check if the last allocated BasicObjectLock has been reached.
4011    __ ld(Rcurrent_obj, 0, Rcurrent_obj_addr);
4012    __ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, frame::interpreter_frame_monitor_size() * wordSize);
4013    // Next iteration if unchecked BasicObjectLocks exist on the stack.
4014    __ b(Lloop);
4015  }
4016
4017  // ------------------------------------------------------------------------------
4018  // Check if we found a free slot.
4019  __ bind(Lexit);
4020
4021  __ addi(Rcurrent_monitor, Rcurrent_obj_addr, -(frame::interpreter_frame_monitor_size() * wordSize) - BasicObjectLock::obj_offset_in_bytes());
4022  __ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, - frame::interpreter_frame_monitor_size() * wordSize);
4023  __ b(Lfound);
4024
4025  // We didn't find a free BasicObjectLock => allocate one.
4026  __ align(32, 12);
4027  __ bind(Lallocate_new);
4028  __ add_monitor_to_stack(false, Rscratch1, Rscratch2);
4029  __ mr(Rcurrent_monitor, R26_monitor);
4030  __ addi(Rcurrent_obj_addr, R26_monitor, BasicObjectLock::obj_offset_in_bytes());
4031
4032  // ------------------------------------------------------------------------------
4033  // We now have a slot to lock.
4034  __ bind(Lfound);
4035
4036  // Increment bcp to point to the next bytecode, so exception handling for asynchronous exceptions works correctly.
4037  // The object has already been popped from the stack, so the expression stack looks correct.
4038  __ addi(R14_bcp, R14_bcp, 1);
4039
4040  __ std(Robj_to_lock, 0, Rcurrent_obj_addr);
4041  __ lock_object(Rcurrent_monitor, Robj_to_lock);
4042
4043  // Check if there's enough space on the stack for the monitors after locking.
4044  Label Lskip_stack_check;
4045  // Optimization: If the monitor stack section is smaller than a standard page size (4K), don't run
4046  // the stack check; there should be enough shadow pages to cover it.
4047  __ ld(Rscratch3, 0, R1_SP);
4048  __ sub(Rscratch3, Rscratch3, R26_monitor);
4049  __ cmpdi(CCR0, Rscratch3, 4*K);
4050  __ blt(CCR0, Lskip_stack_check);
4051
4052  DEBUG_ONLY(__ untested("stack overflow check during monitor enter");)
4053  __ li(Rscratch1, 0);
4054  __ generate_stack_overflow_check_with_compare_and_throw(Rscratch1, Rscratch2);
4055
4056  __ align(32, 12);
4057  __ bind(Lskip_stack_check);
4058
4059  // The bcp has already been incremented. Just need to dispatch to next instruction.
4060  __ dispatch_next(vtos);
4061}
4062
4063void TemplateTable::monitorexit() {
4064  transition(atos, vtos);
4065  __ verify_oop(R17_tos);
4066
4067  Register Rcurrent_monitor  = R11_scratch1,
4068           Rcurrent_obj      = R12_scratch2,
4069           Robj_to_lock      = R17_tos,
4070           Rcurrent_obj_addr = R3_ARG1,
4071           Rlimit            = R4_ARG2;
4072  Label Lfound, Lillegal_monitor_state;
4073
4074  // Check corner case: unbalanced monitorEnter / Exit.
4075  __ ld(Rlimit, 0, R1_SP);
4076  __ addi(Rlimit, Rlimit, - (frame::ijava_state_size + frame::interpreter_frame_monitor_size_in_bytes())); // Monitor base
4077
4078  // Null pointer check.
4079  __ null_check_throw(Robj_to_lock, -1, R11_scratch1);
4080
4081  __ cmpld(CCR0, R26_monitor, Rlimit);
4082  __ bgt(CCR0, Lillegal_monitor_state);
4083
4084  // Find the corresponding slot in the monitors stack section.
4085  {
4086    Label Lloop;
4087
4088    // Start with topmost monitor.
4089    __ addi(Rcurrent_obj_addr, R26_monitor, BasicObjectLock::obj_offset_in_bytes());
4090    __ addi(Rlimit, Rlimit, BasicObjectLock::obj_offset_in_bytes());
4091    __ ld(Rcurrent_obj, 0, Rcurrent_obj_addr);
4092    __ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, frame::interpreter_frame_monitor_size() * wordSize);
4093
4094    __ bind(Lloop);
4095    // Is this entry for same obj?
4096    __ cmpd(CCR0, Rcurrent_obj, Robj_to_lock);
4097    __ beq(CCR0, Lfound);
4098
4099    // Check if the last allocated BasicObjectLock has been reached.
4100
4101    __ ld(Rcurrent_obj, 0, Rcurrent_obj_addr);
4102    __ cmpld(CCR0, Rcurrent_obj_addr, Rlimit);
4103    __ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, frame::interpreter_frame_monitor_size() * wordSize);
4104
4105    // Next iteration if unchecked BasicObjectLocks exist on the stack.
4106    __ ble(CCR0, Lloop);
4107  }
4108
4109  // Fell through without finding the BasicObjectLock => throw IllegalMonitorStateException.
4110  __ bind(Lillegal_monitor_state);
4111  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
4112  __ should_not_reach_here();
4113
4114  __ align(32, 12);
4115  __ bind(Lfound);
4116  __ addi(Rcurrent_monitor, Rcurrent_obj_addr,
4117          -(frame::interpreter_frame_monitor_size() * wordSize) - BasicObjectLock::obj_offset_in_bytes());
4118  __ unlock_object(Rcurrent_monitor);
4119}
4120
4121// ============================================================================
4122// Wide bytecodes
4123
4124// Wide instructions. Simply redirects to the wide entry point for that instruction.
4125void TemplateTable::wide() {
4126  transition(vtos, vtos);
4127
4128  const Register Rtable = R11_scratch1,
4129                 Rindex = R12_scratch2,
4130                 Rtmp   = R0;
4131
4132  __ lbz(Rindex, 1, R14_bcp);
4133
4134  __ load_dispatch_table(Rtable, Interpreter::_wentry_point);
4135
4136  __ slwi(Rindex, Rindex, LogBytesPerWord);
4137  __ ldx(Rtmp, Rtable, Rindex);
4138  __ mtctr(Rtmp);
4139  __ bctr();
4140  // Note: the bcp increment step is part of the individual wide bytecode implementations.
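  // Example (illustrative): "wide iload 300" is encoded as
  //   0xC4 0x15 0x01 0x2C   // wide, iload, 16-bit index 300
  // The lbz above fetches 0x15 (iload), and we dispatch through
  // Interpreter::_wentry_point[Bytecodes::_iload], which reads the 16-bit index.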
4141}
4142